// SPDX-License-Identifier: GPL-2.0
/*
 * Generic block device helper functions (discard, write zeroes,
 * zero-fill and secure erase).
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"
static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
{
	unsigned int discard_granularity = bdev_discard_granularity(bdev);
	sector_t granularity_aligned_sector;

	if (bdev_is_partition(bdev))
		sector += bdev->bd_start_sect;

	granularity_aligned_sector =
		round_up(sector, discard_granularity >> SECTOR_SHIFT);

	/*
	 * Make sure subsequent bios start aligned to the discard granularity
	 * if it needs to be split.
	 */
	if (granularity_aligned_sector != sector)
		return granularity_aligned_sector - sector;

	/*
	 * Align the bio size to the discard granularity to make splitting
	 * the bio at discard granularity boundaries easier in the driver if
	 * needed.
	 */
	return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
}
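
/*
 * A worked example of bio_discard_limit(), assuming a hypothetical device
 * with a 1 MiB discard granularity (2048 sectors of 512 B):
 *
 *	- sector = 100 (unaligned): round_up(100, 2048) = 2048, so the
 *	  function returns 1948 sectors, capping the first bio exactly at
 *	  the next granularity boundary.
 *	- sector = 2048 (aligned): the function returns
 *	  round_down(UINT_MAX, 1 MiB) >> 9 sectors, the largest
 *	  granularity-aligned length that still fits in bi_size.
 */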

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
{
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (bdev_read_only(bdev))
		return -EPERM;
	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;

	/* In case the discard granularity isn't set by a buggy device driver */
	if (WARN_ON_ONCE(!bdev_discard_granularity(bdev))) {
		pr_err_ratelimited("%pg: Error: discard_granularity is 0.\n",
				   bdev);
		return -EOPNOTSUPP;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!nr_sects)
		return -EINVAL;

	while (nr_sects) {
		sector_t req_sects =
			min(nr_sects, bio_discard_limit(bdev, sector));

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_DISCARD, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
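
/*
 * A minimal usage sketch for __blkdev_issue_discard(), mirroring what
 * blkdev_issue_discard() below does. "bdev" stands for a hypothetical,
 * already-opened struct block_device; the caller owns submission and
 * completion of the returned bio chain:
 *
 *	struct bio *bio = NULL;
 *	int err;
 *
 *	err = __blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL, &bio);
 *	if (!err && bio) {
 *		err = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 */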

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
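
/*
 * A minimal usage sketch, assuming "bdev" is a hypothetical, already-opened
 * struct block_device: discard the first 1 MiB (2048 sectors of 512 B) from
 * process context and wait for completion:
 *
 *	int err = blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL);
 *
 *	if (err)
 *		pr_warn("%pg: discard failed: %d\n", bdev, err);
 */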

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES,
				   gfp_mask);
		bio->bi_iter.bi_sector = sector;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}
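
/*
 * A worked example of the splitting above, with hypothetical numbers: if
 * the device reports max_write_zeroes_sectors = 8192 (4 MiB) and the
 * caller asks for nr_sects = 20480 (10 MiB), the loop emits three chained
 * REQ_OP_WRITE_ZEROES bios of 8192, 8192 and 4096 sectors.
 */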

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_VECS);
}
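
/*
 * Worked examples, assuming 4 KiB pages (8 sectors per page) and
 * BIO_MAX_VECS == 256 as in current kernels:
 *
 *	__blkdev_sectors_to_bio_pages(1)    -> 1 (rounded up to a full page)
 *	__blkdev_sectors_to_bio_pages(8192) -> 1024, clamped to 256
 */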

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, bdev,
				   __blkdev_sectors_to_bio_pages(nr_sects),
				   REQ_OP_WRITE, gfp_mask);
		bio->bi_iter.bi_sector = sector;

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}
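
/*
 * A worked example of the loop above, assuming 4 KiB pages: zeroing 10 MiB
 * (20480 sectors) packs ZERO_PAGE(0) into bios of at most BIO_MAX_VECS
 * (256) pages, i.e. 1 MiB each, so ten chained REQ_OP_WRITE bios are built
 * before the caller submits them.
 */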

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to issue
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
			biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
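
/*
 * A minimal usage sketch for __blkdev_issue_zeroout(), with "bdev" a
 * hypothetical, already-opened struct block_device. The caller chains the
 * generated bios and submits them itself:
 *
 *	struct bio *bio = NULL;
 *	int err;
 *
 *	err = __blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL, &bio, 0);
 *	if (!err && bio) {
 *		err = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 */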

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device. See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
				gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
				gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
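
/*
 * A minimal usage sketch, assuming "bdev" is a hypothetical, already-opened
 * struct block_device: zero the first 1 MiB, but fail instead of falling
 * back to writing zero pages when the device has no offload:
 *
 *	int err = blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL,
 *				       BLKDEV_ZERO_NOFALLBACK);
 *
 *	if (err == -EOPNOTSUPP)
 *		pr_info("%pg: no WRITE ZEROES offload\n", bdev);
 */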

/**
 * blkdev_issue_secure_erase - queue a secure erase
 * @bdev: blockdev to issue secure erase for
 * @sector: start sector
 * @nr_sects: number of sectors to erase
 * @gfp: memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a secure erase request for the sectors in question and wait
 *    for it to complete.
 */
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp)
{
	sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	unsigned int max_sectors = bdev_max_secure_erase_sectors(bdev);
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret = 0;

	/* make sure that "len << SECTOR_SHIFT" doesn't overflow */
	if (max_sectors > UINT_MAX >> SECTOR_SHIFT)
		max_sectors = UINT_MAX >> SECTOR_SHIFT;
	max_sectors &= ~bs_mask;

	if (max_sectors == 0)
		return -EOPNOTSUPP;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;
	if (bdev_read_only(bdev))
		return -EPERM;

	blk_start_plug(&plug);
	for (;;) {
		unsigned int len = min_t(sector_t, nr_sects, max_sectors);

		bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = len << SECTOR_SHIFT;

		sector += len;
		nr_sects -= len;
		if (!nr_sects) {
			ret = submit_bio_wait(bio);
			bio_put(bio);
			break;
		}
		cond_resched();
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_secure_erase);
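
/*
 * A minimal usage sketch, assuming "bdev" is a hypothetical, already-opened
 * struct block_device that advertises secure erase support:
 *
 *	int err = blkdev_issue_secure_erase(bdev, 0, 2048, GFP_KERNEL);
 *
 *	if (err)
 *		pr_warn("%pg: secure erase failed: %d\n", bdev, err);
 */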