// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic block layer helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"
static sector_t bio_discard_limit(struct block_device *bdev, sector_t sector)
{
        unsigned int discard_granularity = bdev_discard_granularity(bdev);
        sector_t granularity_aligned_sector;

        if (bdev_is_partition(bdev))
                sector += bdev->bd_start_sect;

        granularity_aligned_sector =
                round_up(sector, discard_granularity >> SECTOR_SHIFT);

        /*
         * Make sure subsequent bios start aligned to the discard granularity if
         * it needs to be split.
         */
        if (granularity_aligned_sector != sector)
                return granularity_aligned_sector - sector;

        /*
         * Align the bio size to the discard granularity to make splitting the bio
         * at discard granularity boundaries easier in the driver if needed.
         */
        return round_down(UINT_MAX, discard_granularity) >> SECTOR_SHIFT;
}
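
/*
 * Worked example (a hedged illustration, not part of the interface): with a
 * discard granularity of 1 MiB (2048 sectors) and a request starting at
 * device sector 3000, round_up(3000, 2048) = 4096, so the first bio is
 * capped at 4096 - 3000 = 1096 sectors. Every subsequent bio then starts
 * granularity-aligned and is capped at round_down(UINT_MAX, 1 MiB) bytes
 * (4095 MiB here), converted to sectors.
 */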

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct bio **biop)
{
        struct bio *bio = *biop;
        sector_t bs_mask;

        if (bdev_read_only(bdev))
                return -EPERM;
        if (!bdev_max_discard_sectors(bdev))
                return -EOPNOTSUPP;

        /* In case the discard granularity isn't set by a buggy device driver */
        if (WARN_ON_ONCE(!bdev_discard_granularity(bdev))) {
                pr_err_ratelimited("%pg: Error: discard_granularity is 0.\n",
                                   bdev);
                return -EOPNOTSUPP;
        }

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        if (!nr_sects)
                return -EINVAL;

        while (nr_sects) {
                sector_t req_sects =
                        min(nr_sects, bio_discard_limit(bdev, sector));

                bio = blk_next_bio(bio, bdev, 0, REQ_OP_DISCARD, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio->bi_iter.bi_size = req_sects << 9;
                sector += req_sects;
                nr_sects -= req_sects;

                /*
                 * We can loop for a long time in here, if someone does
                 * full device discards (like mkfs). Be nice and allow
                 * us to schedule out to avoid softlocking if preempt
                 * is disabled.
                 */
                cond_resched();
        }

        *biop = bio;
        return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask)
{
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);
        ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, &bio);
        if (!ret && bio) {
                ret = submit_bio_wait(bio);
                if (ret == -EOPNOTSUPP)
                        ret = 0;
                bio_put(bio);
        }
        blk_finish_plug(&plug);

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
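
/*
 * Example (a minimal sketch; the surrounding context, error handling, and
 * the "bdev" reference held by the caller are assumed): discard an 8 MiB
 * range starting 1 GiB into the device, allowing the call to sleep.
 *
 *	int err = blkdev_issue_discard(bdev, 1ULL << (30 - SECTOR_SHIFT),
 *				       8ULL << (20 - SECTOR_SHIFT),
 *				       GFP_KERNEL);
 *	if (err)
 *		pr_warn("discard failed: %d\n", err);
 */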

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
                sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
                struct bio **biop, unsigned flags)
{
        struct bio *bio = *biop;
        unsigned int max_sectors;

        if (bdev_read_only(bdev))
                return -EPERM;

        /* Ensure that max_sectors doesn't overflow bi_size */
        max_sectors = bdev_write_zeroes_sectors(bdev);

        if (max_sectors == 0)
                return -EOPNOTSUPP;

        while (nr_sects) {
                unsigned int len = min_t(sector_t, nr_sects, max_sectors);

                bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                if (flags & BLKDEV_ZERO_NOUNMAP)
                        bio->bi_opf |= REQ_NOUNMAP;

                bio->bi_iter.bi_size = len << SECTOR_SHIFT;
                nr_sects -= len;
                sector += len;
                cond_resched();
        }

        *biop = bio;
        return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
        sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

        return min(pages, (sector_t)BIO_MAX_VECS);
}
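
/*
 * Worked example (illustrative only, assuming 4 KiB pages, i.e. 8 sectors
 * per page, and BIO_MAX_VECS == 256): nr_sects = 7 rounds up to 1 page;
 * nr_sects = 4096 gives 512 pages, which is clamped to 256.
 */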

static int __blkdev_issue_zero_pages(struct block_device *bdev,
                sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
                struct bio **biop)
{
        struct bio *bio = *biop;
        int bi_size = 0;
        unsigned int sz;

        if (bdev_read_only(bdev))
                return -EPERM;

        while (nr_sects != 0) {
                bio = blk_next_bio(bio, bdev,
                                   __blkdev_sectors_to_bio_pages(nr_sects),
                                   REQ_OP_WRITE, gfp_mask);
                bio->bi_iter.bi_sector = sector;

                while (nr_sects != 0) {
                        sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
                        bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
                        nr_sects -= bi_size >> 9;
                        sector += bi_size >> 9;
                        if (bi_size < sz)
                                break;
                }
                cond_resched();
        }

        *biop = bio;
        return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @flags: controls detailed behavior
 *
 * Description:
 *    Zero-fill a block range, either using hardware offload or by explicitly
 *    writing zeroes to the device.
 *
 *    If a device is using logical block provisioning, the underlying space will
 *    not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *    If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *    -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
                unsigned flags)
{
        int ret;
        sector_t bs_mask;

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
                        biop, flags);
        if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
                return ret;

        return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
                        biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
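
/*
 * Example (a minimal sketch under assumed caller context: a "bdev"
 * reference and two discontiguous sector ranges "r1"/"r2"): the __ variant
 * chains all bios onto one anchor, so several ranges can be zeroed with a
 * single wait, following the same pattern blkdev_issue_discard() uses.
 *
 *	struct bio *bio = NULL;
 *	int err;
 *
 *	err = __blkdev_issue_zeroout(bdev, r1_sector, r1_nr_sects,
 *				     GFP_KERNEL, &bio, 0);
 *	if (!err)
 *		err = __blkdev_issue_zeroout(bdev, r2_sector, r2_nr_sects,
 *					     GFP_KERNEL, &bio, 0);
 *	if (!err && bio) {
 *		err = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 */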

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *    Zero-fill a block range, either using hardware offload or by explicitly
 *    writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *    valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
        int ret = 0;
        sector_t bs_mask;
        struct bio *bio;
        struct blk_plug plug;
        bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

retry:
        bio = NULL;
        blk_start_plug(&plug);
        if (try_write_zeroes) {
                ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
                                                  gfp_mask, &bio, flags);
        } else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
                ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
                                                gfp_mask, &bio);
        } else {
                /* No zeroing offload support */
                ret = -EOPNOTSUPP;
        }
        if (ret == 0 && bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        blk_finish_plug(&plug);
        if (ret && try_write_zeroes) {
                if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
                        try_write_zeroes = false;
                        goto retry;
                }
                if (!bdev_write_zeroes_sectors(bdev)) {
                        /*
                         * Zeroing offload support was indicated, but the
                         * device reported ILLEGAL REQUEST (for some devices
                         * there is no non-destructive way to verify whether
                         * WRITE ZEROES is actually supported).
                         */
                        ret = -EOPNOTSUPP;
                }
        }

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
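
/*
 * Example (illustrative sketch; "bdev", the range, and the surrounding
 * error handling are assumed caller context): require the hardware offload
 * and refuse the page-writing fallback, e.g. when explicit zeroing writes
 * would be too slow to be useful.
 *
 *	int err = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
 *				       BLKDEV_ZERO_NOFALLBACK);
 *
 * An -EOPNOTSUPP return then means no usable WRITE ZEROES offload exists.
 */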

int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp)
{
        sector_t bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        unsigned int max_sectors = bdev_max_secure_erase_sectors(bdev);
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret = 0;

        /* make sure that "len << SECTOR_SHIFT" doesn't overflow */
        if (max_sectors > UINT_MAX >> SECTOR_SHIFT)
                max_sectors = UINT_MAX >> SECTOR_SHIFT;
        max_sectors &= ~bs_mask;

        if (max_sectors == 0)
                return -EOPNOTSUPP;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;
        if (bdev_read_only(bdev))
                return -EPERM;

        blk_start_plug(&plug);
        while (nr_sects) {
                unsigned int len = min_t(sector_t, nr_sects, max_sectors);

                bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp);
                bio->bi_iter.bi_sector = sector;
                bio->bi_iter.bi_size = len << SECTOR_SHIFT;

                sector += len;
                nr_sects -= len;
                cond_resched();
        }
        if (bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        blk_finish_plug(&plug);

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_secure_erase);
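
/*
 * Example (a sketch under assumed caller context, with a hypothetical
 * "bdev" reference): securely erase the first 1 MiB of the device.
 *
 *	int err = blkdev_issue_secure_erase(bdev, 0,
 *					    1ULL << (20 - SECTOR_SHIFT),
 *					    GFP_KERNEL);
 */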