1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk> |
4 | */ |
5 | #include <linux/mm.h> |
6 | #include <linux/swap.h> |
7 | #include <linux/bio.h> |
8 | #include <linux/blkdev.h> |
9 | #include <linux/uio.h> |
10 | #include <linux/iocontext.h> |
11 | #include <linux/slab.h> |
12 | #include <linux/init.h> |
13 | #include <linux/kernel.h> |
14 | #include <linux/export.h> |
15 | #include <linux/mempool.h> |
16 | #include <linux/workqueue.h> |
17 | #include <linux/cgroup.h> |
18 | #include <linux/highmem.h> |
19 | #include <linux/blk-crypto.h> |
20 | #include <linux/xarray.h> |
21 | |
22 | #include <trace/events/block.h> |
23 | #include "blk.h" |
24 | #include "blk-rq-qos.h" |
25 | #include "blk-cgroup.h" |
26 | |
27 | #define ALLOC_CACHE_THRESHOLD 16 |
28 | #define ALLOC_CACHE_MAX 256 |
29 | |
30 | struct bio_alloc_cache { |
31 | struct bio *free_list; |
32 | struct bio *free_list_irq; |
33 | unsigned int nr; |
34 | unsigned int nr_irq; |
35 | }; |
36 | |
37 | static struct biovec_slab { |
38 | int nr_vecs; |
39 | char *name; |
40 | struct kmem_cache *slab; |
41 | } bvec_slabs[] __read_mostly = { |
42 | { .nr_vecs = 16, .name = "biovec-16" }, |
43 | { .nr_vecs = 64, .name = "biovec-64" }, |
44 | { .nr_vecs = 128, .name = "biovec-128" }, |
45 | { .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" }, |
46 | }; |
47 | |
48 | static struct biovec_slab *biovec_slab(unsigned short nr_vecs) |
49 | { |
50 | switch (nr_vecs) { |
51 | /* smaller bios use inline vecs */ |
52 | case 5 ... 16: |
53 | return &bvec_slabs[0]; |
54 | case 17 ... 64: |
55 | return &bvec_slabs[1]; |
56 | case 65 ... 128: |
57 | return &bvec_slabs[2]; |
58 | case 129 ... BIO_MAX_VECS: |
59 | return &bvec_slabs[3]; |
60 | default: |
61 | BUG(); |
62 | return NULL; |
63 | } |
64 | } |
65 | |
66 | /* |
67 | * fs_bio_set is the bio_set containing bio and iovec memory pools used by |
68 | * IO code that does not need private memory pools. |
69 | */ |
70 | struct bio_set fs_bio_set; |
71 | EXPORT_SYMBOL(fs_bio_set); |
72 | |
73 | /* |
74 | * Our slab pool management |
75 | */ |
76 | struct bio_slab { |
77 | struct kmem_cache *slab; |
78 | unsigned int slab_ref; |
79 | unsigned int slab_size; |
80 | char name[8]; |
81 | }; |
82 | static DEFINE_MUTEX(bio_slab_lock); |
83 | static DEFINE_XARRAY(bio_slabs); |
84 | |
85 | static struct bio_slab *create_bio_slab(unsigned int size) |
86 | { |
87 | struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL); |
88 | |
89 | if (!bslab) |
90 | return NULL; |
91 | |
92 | snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size); |
93 | bslab->slab = kmem_cache_create(bslab->name, size, |
94 | ARCH_KMALLOC_MINALIGN, |
95 | SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU, NULL); |
96 | if (!bslab->slab) |
97 | goto fail_alloc_slab; |
98 | |
99 | bslab->slab_ref = 1; |
100 | bslab->slab_size = size; |
101 | |
102 | if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL))) |
103 | return bslab; |
104 | |
105 | kmem_cache_destroy(bslab->slab); |
106 | |
107 | fail_alloc_slab: |
108 | kfree(bslab); |
109 | return NULL; |
110 | } |
111 | |
112 | static inline unsigned int bs_bio_slab_size(struct bio_set *bs) |
113 | { |
114 | return bs->front_pad + sizeof(struct bio) + bs->back_pad; |
115 | } |
116 | |
117 | static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs) |
118 | { |
119 | unsigned int size = bs_bio_slab_size(bs); |
120 | struct bio_slab *bslab; |
121 | |
122 | mutex_lock(&bio_slab_lock); |
123 | bslab = xa_load(&bio_slabs, size); |
124 | if (bslab) |
125 | bslab->slab_ref++; |
126 | else |
127 | bslab = create_bio_slab(size); |
128 | mutex_unlock(&bio_slab_lock); |
129 | |
130 | if (bslab) |
131 | return bslab->slab; |
132 | return NULL; |
133 | } |
134 | |
135 | static void bio_put_slab(struct bio_set *bs) |
136 | { |
137 | struct bio_slab *bslab = NULL; |
138 | unsigned int slab_size = bs_bio_slab_size(bs); |
139 | |
140 | mutex_lock(&bio_slab_lock); |
141 | |
142 | bslab = xa_load(&bio_slabs, slab_size); |
143 | if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n")) |
144 | goto out; |
145 | |
146 | WARN_ON_ONCE(bslab->slab != bs->bio_slab); |
147 | |
148 | WARN_ON(!bslab->slab_ref); |
149 | |
150 | if (--bslab->slab_ref) |
151 | goto out; |
152 | |
153 | xa_erase(&bio_slabs, slab_size); |
154 | |
155 | kmem_cache_destroy(bslab->slab); |
156 | kfree(bslab); |
157 | |
158 | out: |
159 | mutex_unlock(&bio_slab_lock); |
160 | } |
161 | |
162 | void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs) |
163 | { |
164 | BUG_ON(nr_vecs > BIO_MAX_VECS); |
165 | |
166 | if (nr_vecs == BIO_MAX_VECS) |
167 | mempool_free(bv, pool); |
168 | else if (nr_vecs > BIO_INLINE_VECS) |
169 | kmem_cache_free(biovec_slab(nr_vecs)->slab, bv); |
170 | } |
171 | |
172 | /* |
173 | * Make the first allocation restricted and don't dump info on allocation |
174 | * failures, since we'll fall back to the mempool in case of failure. |
175 | */ |
176 | static inline gfp_t bvec_alloc_gfp(gfp_t gfp) |
177 | { |
178 | return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) | |
179 | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN; |
180 | } |
181 | |
182 | struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs, |
183 | gfp_t gfp_mask) |
184 | { |
185 | struct biovec_slab *bvs = biovec_slab(*nr_vecs); |
186 | |
187 | if (WARN_ON_ONCE(!bvs)) |
188 | return NULL; |
189 | |
190 | /* |
191 | * Upgrade the nr_vecs request to take full advantage of the allocation. |
192 | * We also rely on this in the bvec_free path. |
193 | */ |
194 | *nr_vecs = bvs->nr_vecs; |
195 | |
196 | /* |
197 | * Try a slab allocation first for all smaller allocations. If that |
198 | * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool. |
199 | * The mempool is sized to handle up to BIO_MAX_VECS entries. |
200 | */ |
201 | if (*nr_vecs < BIO_MAX_VECS) { |
202 | struct bio_vec *bvl; |
203 | |
204 | bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask)); |
205 | if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM)) |
206 | return bvl; |
207 | *nr_vecs = BIO_MAX_VECS; |
208 | } |
209 | |
210 | return mempool_alloc(pool, gfp_mask); |
211 | } |
212 | |
213 | void bio_uninit(struct bio *bio) |
214 | { |
215 | #ifdef CONFIG_BLK_CGROUP |
216 | if (bio->bi_blkg) { |
217 | blkg_put(bio->bi_blkg); |
218 | bio->bi_blkg = NULL; |
219 | } |
220 | #endif |
221 | if (bio_integrity(bio)) |
222 | bio_integrity_free(bio); |
223 | |
224 | bio_crypt_free_ctx(bio); |
225 | } |
226 | EXPORT_SYMBOL(bio_uninit); |
227 | |
228 | static void bio_free(struct bio *bio) |
229 | { |
230 | struct bio_set *bs = bio->bi_pool; |
231 | void *p = bio; |
232 | |
233 | WARN_ON_ONCE(!bs); |
234 | |
235 | bio_uninit(bio); |
236 | bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs); |
237 | mempool_free(p - bs->front_pad, &bs->bio_pool); |
238 | } |
239 | |
240 | /* |
241 | * Users of this function have their own bio allocation. Subsequently, |
242 | * they must remember to pair any call to bio_init() with bio_uninit() |
243 | * when IO has completed, or when the bio is released. |
244 | */ |
245 | void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table, |
246 | unsigned short max_vecs, blk_opf_t opf) |
247 | { |
248 | bio->bi_next = NULL; |
249 | bio->bi_bdev = bdev; |
250 | bio->bi_opf = opf; |
251 | bio->bi_flags = 0; |
252 | bio->bi_ioprio = 0; |
253 | bio->bi_write_hint = 0; |
254 | bio->bi_status = 0; |
255 | bio->bi_iter.bi_sector = 0; |
256 | bio->bi_iter.bi_size = 0; |
257 | bio->bi_iter.bi_idx = 0; |
258 | bio->bi_iter.bi_bvec_done = 0; |
259 | bio->bi_end_io = NULL; |
260 | bio->bi_private = NULL; |
261 | #ifdef CONFIG_BLK_CGROUP |
262 | bio->bi_blkg = NULL; |
263 | bio->bi_issue.value = 0; |
264 | if (bdev) |
265 | bio_associate_blkg(bio); |
266 | #ifdef CONFIG_BLK_CGROUP_IOCOST |
267 | bio->bi_iocost_cost = 0; |
268 | #endif |
269 | #endif |
270 | #ifdef CONFIG_BLK_INLINE_ENCRYPTION |
271 | bio->bi_crypt_context = NULL; |
272 | #endif |
273 | #ifdef CONFIG_BLK_DEV_INTEGRITY |
274 | bio->bi_integrity = NULL; |
275 | #endif |
276 | bio->bi_vcnt = 0; |
277 | |
278 | atomic_set(&bio->__bi_remaining, 1); |
279 | atomic_set(&bio->__bi_cnt, 1); |
280 | bio->bi_cookie = BLK_QC_T_NONE; |
281 | |
282 | bio->bi_max_vecs = max_vecs; |
283 | bio->bi_io_vec = table; |
284 | bio->bi_pool = NULL; |
285 | } |
286 | EXPORT_SYMBOL(bio_init); |
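/*
 * Example (illustrative sketch; bdev, sector and page are assumed to be
 * provided by the caller): a bio whose memory the caller owns, such as an
 * on-stack bio with one caller-supplied bio_vec, pairs bio_init() with
 * bio_uninit() once the I/O has completed:
 *
 *	struct bio bio;
 *	struct bio_vec bvec;
 *
 *	bio_init(&bio, bdev, &bvec, 1, REQ_OP_READ);
 *	bio.bi_iter.bi_sector = sector;
 *	__bio_add_page(&bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(&bio);
 *	bio_uninit(&bio);
 */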
287 | |
288 | /** |
289 | * bio_reset - reinitialize a bio |
290 | * @bio: bio to reset |
291 | * @bdev: block device to use the bio for |
292 | * @opf: operation and flags for bio |
293 | * |
294 | * Description: |
295 | * After calling bio_reset(), @bio will be in the same state as a freshly |
296 | * allocated bio returned by bio_alloc_bioset() - the only fields that are |
297 | * preserved are the ones that are initialized by bio_alloc_bioset(). See |
298 | * comment in struct bio. |
299 | */ |
300 | void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf) |
301 | { |
302 | bio_uninit(bio); |
303 | memset(bio, 0, BIO_RESET_BYTES); |
304 | atomic_set(&bio->__bi_remaining, 1); |
305 | bio->bi_bdev = bdev; |
306 | if (bio->bi_bdev) |
307 | bio_associate_blkg(bio); |
308 | bio->bi_opf = opf; |
309 | } |
310 | EXPORT_SYMBOL(bio_reset); |
311 | |
312 | static struct bio *__bio_chain_endio(struct bio *bio) |
313 | { |
314 | struct bio *parent = bio->bi_private; |
315 | |
316 | if (bio->bi_status && !parent->bi_status) |
317 | parent->bi_status = bio->bi_status; |
318 | bio_put(bio); |
319 | return parent; |
320 | } |
321 | |
322 | static void bio_chain_endio(struct bio *bio) |
323 | { |
324 | bio_endio(__bio_chain_endio(bio)); |
325 | } |
326 | |
327 | /** |
328 | * bio_chain - chain bio completions |
329 | * @bio: the target bio |
330 | * @parent: the parent bio of @bio |
331 | * |
332 | * The caller won't have a bi_end_io called when @bio completes - instead, |
333 | * @parent's bi_end_io won't be called until both @parent and @bio have |
334 | * completed; the chained bio will also be freed when it completes. |
335 | * |
336 | * The caller must not set bi_private or bi_end_io in @bio. |
337 | */ |
338 | void bio_chain(struct bio *bio, struct bio *parent) |
339 | { |
340 | BUG_ON(bio->bi_private || bio->bi_end_io); |
341 | |
342 | bio->bi_private = parent; |
343 | bio->bi_end_io = bio_chain_endio; |
344 | bio_inc_remaining(parent); |
345 | } |
346 | EXPORT_SYMBOL(bio_chain); |
347 | |
348 | struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev, |
349 | unsigned int nr_pages, blk_opf_t opf, gfp_t gfp) |
350 | { |
351 | struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp); |
352 | |
353 | if (bio) { |
354 | bio_chain(bio, new); |
355 | submit_bio(bio); |
356 | } |
357 | |
358 | return new; |
359 | } |
360 | EXPORT_SYMBOL_GPL(blk_next_bio); |
361 | |
362 | static void bio_alloc_rescue(struct work_struct *work) |
363 | { |
364 | struct bio_set *bs = container_of(work, struct bio_set, rescue_work); |
365 | struct bio *bio; |
366 | |
367 | while (1) { |
368 | spin_lock(&bs->rescue_lock); |
369 | bio = bio_list_pop(&bs->rescue_list); |
370 | spin_unlock(&bs->rescue_lock); |
371 | |
372 | if (!bio) |
373 | break; |
374 | |
375 | submit_bio_noacct(bio); |
376 | } |
377 | } |
378 | |
379 | static void punt_bios_to_rescuer(struct bio_set *bs) |
380 | { |
381 | struct bio_list punt, nopunt; |
382 | struct bio *bio; |
383 | |
384 | if (WARN_ON_ONCE(!bs->rescue_workqueue)) |
385 | return; |
386 | /* |
387 | * In order to guarantee forward progress we must punt only bios that |
388 | * were allocated from this bio_set; otherwise, if there was a bio on |
389 | * there for a stacking driver higher up in the stack, processing it |
390 | * could require allocating bios from this bio_set, and doing that from |
391 | * our own rescuer would be bad. |
392 | * |
393 | * Since bio lists are singly linked, pop them all instead of trying to |
394 | * remove from the middle of the list: |
395 | */ |
396 | |
397 | bio_list_init(&punt); |
398 | bio_list_init(&nopunt); |
399 | |
400 | while ((bio = bio_list_pop(&current->bio_list[0]))) |
401 | bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); |
402 | current->bio_list[0] = nopunt; |
403 | |
404 | bio_list_init(&nopunt); |
405 | while ((bio = bio_list_pop(&current->bio_list[1]))) |
406 | bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); |
407 | current->bio_list[1] = nopunt; |
408 | |
409 | spin_lock(&bs->rescue_lock); |
410 | bio_list_merge(&bs->rescue_list, &punt); |
411 | spin_unlock(&bs->rescue_lock); |
412 | |
413 | queue_work(bs->rescue_workqueue, &bs->rescue_work); |
414 | } |
415 | |
416 | static void bio_alloc_irq_cache_splice(struct bio_alloc_cache *cache) |
417 | { |
418 | unsigned long flags; |
419 | |
420 | /* cache->free_list must be empty */ |
421 | if (WARN_ON_ONCE(cache->free_list)) |
422 | return; |
423 | |
424 | local_irq_save(flags); |
425 | cache->free_list = cache->free_list_irq; |
426 | cache->free_list_irq = NULL; |
427 | cache->nr += cache->nr_irq; |
428 | cache->nr_irq = 0; |
429 | local_irq_restore(flags); |
430 | } |
431 | |
432 | static struct bio *bio_alloc_percpu_cache(struct block_device *bdev, |
433 | unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp, |
434 | struct bio_set *bs) |
435 | { |
436 | struct bio_alloc_cache *cache; |
437 | struct bio *bio; |
438 | |
439 | cache = per_cpu_ptr(bs->cache, get_cpu()); |
440 | if (!cache->free_list) { |
441 | if (READ_ONCE(cache->nr_irq) >= ALLOC_CACHE_THRESHOLD) |
442 | bio_alloc_irq_cache_splice(cache); |
443 | if (!cache->free_list) { |
444 | put_cpu(); |
445 | return NULL; |
446 | } |
447 | } |
448 | bio = cache->free_list; |
449 | cache->free_list = bio->bi_next; |
450 | cache->nr--; |
451 | put_cpu(); |
452 | |
453 | bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs, opf); |
454 | bio->bi_pool = bs; |
455 | return bio; |
456 | } |
457 | |
458 | /** |
459 | * bio_alloc_bioset - allocate a bio for I/O |
460 | * @bdev: block device to allocate the bio for (can be %NULL) |
461 | * @nr_vecs: number of bvecs to pre-allocate |
462 | * @opf: operation and flags for bio |
463 | * @gfp_mask: the GFP_* mask given to the slab allocator |
464 | * @bs: the bio_set to allocate from. |
465 | * |
466 | * Allocate a bio from the mempools in @bs. |
467 | * |
468 | * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to |
469 | * allocate a bio. This is due to the mempool guarantees. To make this work, |
470 | * callers must never allocate more than 1 bio at a time from the general pool. |
471 | * Callers that need to allocate more than 1 bio must always submit the |
472 | * previously allocated bio for IO before attempting to allocate a new one. |
473 | * Failure to do so can cause deadlocks under memory pressure. |
474 | * |
475 | * Note that when running under submit_bio_noacct() (i.e. any block driver), |
476 | * bios are not submitted until after you return - see the code in |
477 | * submit_bio_noacct() that converts recursion into iteration, to prevent |
478 | * stack overflows. |
479 | * |
480 | * This would normally mean allocating multiple bios under submit_bio_noacct() |
481 | * would be susceptible to deadlocks, but we have |
482 | * deadlock avoidance code that resubmits any blocked bios from a rescuer |
483 | * thread. |
484 | * |
485 | * However, we do not guarantee forward progress for allocations from other |
486 | * mempools. Doing multiple allocations from the same mempool under |
487 | * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad |
488 | * for per bio allocations. |
489 | * |
490 | * Returns: Pointer to new bio on success, NULL on failure. |
491 | */ |
492 | struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs, |
493 | blk_opf_t opf, gfp_t gfp_mask, |
494 | struct bio_set *bs) |
495 | { |
496 | gfp_t saved_gfp = gfp_mask; |
497 | struct bio *bio; |
498 | void *p; |
499 | |
500 | /* should not use nobvec bioset for nr_vecs > 0 */ |
501 | if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0)) |
502 | return NULL; |
503 | |
504 | if (opf & REQ_ALLOC_CACHE) { |
505 | if (bs->cache && nr_vecs <= BIO_INLINE_VECS) { |
506 | bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf, |
507 | gfp_mask, bs); |
508 | if (bio) |
509 | return bio; |
510 | /* |
511 | * No cached bio available; the bio returned below is marked with |
512 | * REQ_ALLOC_CACHE to participate in the per-cpu alloc cache. |
513 | */ |
514 | } else { |
515 | opf &= ~REQ_ALLOC_CACHE; |
516 | } |
517 | } |
518 | |
519 | /* |
520 | * submit_bio_noacct() converts recursion to iteration; this means if |
521 | * we're running beneath it, any bios we allocate and submit will not be |
522 | * submitted (and thus freed) until after we return. |
523 | * |
524 | * This exposes us to a potential deadlock if we allocate multiple bios |
525 | * from the same bio_set() while running underneath submit_bio_noacct(). |
526 | * If we were to allocate multiple bios (say a stacking block driver |
527 | * that was splitting bios), we would deadlock if we exhausted the |
528 | * mempool's reserve. |
529 | * |
530 | * We solve this, and guarantee forward progress, with a rescuer |
531 | * workqueue per bio_set. If we go to allocate and there are bios on |
532 | * current->bio_list, we first try the allocation without |
533 | * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be |
534 | * blocking to the rescuer workqueue before we retry with the original |
535 | * gfp_flags. |
536 | */ |
537 | if (current->bio_list && |
538 | (!bio_list_empty(&current->bio_list[0]) || |
539 | !bio_list_empty(&current->bio_list[1])) && |
540 | bs->rescue_workqueue) |
541 | gfp_mask &= ~__GFP_DIRECT_RECLAIM; |
542 | |
543 | p = mempool_alloc(&bs->bio_pool, gfp_mask); |
544 | if (!p && gfp_mask != saved_gfp) { |
545 | punt_bios_to_rescuer(bs); |
546 | gfp_mask = saved_gfp; |
547 | p = mempool_alloc(&bs->bio_pool, gfp_mask); |
548 | } |
549 | if (unlikely(!p)) |
550 | return NULL; |
551 | if (!mempool_is_saturated(&bs->bio_pool)) |
552 | opf &= ~REQ_ALLOC_CACHE; |
553 | |
554 | bio = p + bs->front_pad; |
555 | if (nr_vecs > BIO_INLINE_VECS) { |
556 | struct bio_vec *bvl = NULL; |
557 | |
558 | bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask); |
559 | if (!bvl && gfp_mask != saved_gfp) { |
560 | punt_bios_to_rescuer(bs); |
561 | gfp_mask = saved_gfp; |
562 | bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask); |
563 | } |
564 | if (unlikely(!bvl)) |
565 | goto err_free; |
566 | |
567 | bio_init(bio, bdev, bvl, nr_vecs, opf); |
568 | } else if (nr_vecs) { |
569 | bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf); |
570 | } else { |
571 | bio_init(bio, bdev, NULL, 0, opf); |
572 | } |
573 | |
574 | bio->bi_pool = bs; |
575 | return bio; |
576 | |
577 | err_free: |
578 | mempool_free(p, &bs->bio_pool); |
579 | return NULL; |
580 | } |
581 | EXPORT_SYMBOL(bio_alloc_bioset); |
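/*
 * Example (sketch, assuming a caller-provided bdev, sector and page): a
 * simple synchronous read through the global fs_bio_set.  bio_alloc() is a
 * thin wrapper around bio_alloc_bioset() that passes &fs_bio_set:
 *
 *	bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);
 */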
582 | |
583 | /** |
584 | * bio_kmalloc - kmalloc a bio |
585 | * @nr_vecs: number of bio_vecs to allocate |
586 | * @gfp_mask: the GFP_* mask given to the slab allocator |
587 | * |
588 | * Use kmalloc to allocate a bio (including bvecs). The bio must be initialized |
589 | * using bio_init() before use. To free a bio returned from this function use |
590 | * kfree() after calling bio_uninit(). A bio returned from this function can |
591 | * be reused by calling bio_uninit() before calling bio_init() again. |
592 | * |
593 | * Note that, unlike bio_alloc() or bio_alloc_bioset(), allocations from this |
594 | * function are not backed by a mempool and can fail. Do not use this function |
595 | * for allocations in the file system I/O path. |
596 | * |
597 | * Returns: Pointer to new bio on success, NULL on failure. |
598 | */ |
599 | struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask) |
600 | { |
601 | struct bio *bio; |
602 | |
603 | if (nr_vecs > UIO_MAXIOV) |
604 | return NULL; |
605 | return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask); |
606 | } |
607 | EXPORT_SYMBOL(bio_kmalloc); |
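/*
 * Example lifecycle (sketch): a bio from bio_kmalloc() is neither initialized
 * nor freed by bio_put(), so the caller drives both ends explicitly:
 *
 *	bio = bio_kmalloc(nr_vecs, GFP_KERNEL);
 *	if (!bio)
 *		return -ENOMEM;
 *	bio_init(bio, bdev, bio->bi_inline_vecs, nr_vecs, REQ_OP_READ);
 *	... add pages, submit, wait for completion ...
 *	bio_uninit(bio);
 *	kfree(bio);
 */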
608 | |
609 | void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start) |
610 | { |
611 | struct bio_vec bv; |
612 | struct bvec_iter iter; |
613 | |
614 | __bio_for_each_segment(bv, bio, iter, start) |
615 | memzero_bvec(&bv); |
616 | } |
617 | EXPORT_SYMBOL(zero_fill_bio_iter); |
618 | |
619 | /** |
620 | * bio_truncate - truncate the bio to @new_size |
621 | * @bio: the bio to be truncated |
622 | * @new_size: new size for truncating the bio |
623 | * |
624 | * Description: |
625 | * Truncate the bio to the new size @new_size. If bio_op(bio) is |
626 | * REQ_OP_READ, zero the truncated part. This function should only |
627 | * be used for handling corner cases, such as bio eod. |
628 | */ |
629 | static void bio_truncate(struct bio *bio, unsigned new_size) |
630 | { |
631 | struct bio_vec bv; |
632 | struct bvec_iter iter; |
633 | unsigned int done = 0; |
634 | bool truncated = false; |
635 | |
636 | if (new_size >= bio->bi_iter.bi_size) |
637 | return; |
638 | |
639 | if (bio_op(bio) != REQ_OP_READ) |
640 | goto exit; |
641 | |
642 | bio_for_each_segment(bv, bio, iter) { |
643 | if (done + bv.bv_len > new_size) { |
644 | unsigned offset; |
645 | |
646 | if (!truncated) |
647 | offset = new_size - done; |
648 | else |
649 | offset = 0; |
650 | zero_user(bv.bv_page, bv.bv_offset + offset, |
651 | bv.bv_len - offset); |
652 | truncated = true; |
653 | } |
654 | done += bv.bv_len; |
655 | } |
656 | |
657 | exit: |
658 | /* |
659 | * Don't touch bvec table here and make it really immutable, since |
660 | * fs bio user has to retrieve all pages via bio_for_each_segment_all |
661 | * in its .end_bio() callback. |
662 | * |
663 | * It is enough to truncate bio by updating .bi_size since we can make |
664 | * correct bvec with the updated .bi_size for drivers. |
665 | */ |
666 | bio->bi_iter.bi_size = new_size; |
667 | } |
668 | |
669 | /** |
670 | * guard_bio_eod - truncate a BIO to fit the block device |
671 | * @bio: bio to truncate |
672 | * |
673 | * This allows us to do IO even on the odd last sectors of a device, even if the |
674 | * block size is some multiple of the physical sector size. |
675 | * |
676 | * We'll just truncate the bio to the size of the device, and clear the end of |
677 | * the buffer head manually. Truly out-of-range accesses will turn into actual |
678 | * I/O errors, this only handles the "we need to be able to do I/O at the final |
679 | * sector" case. |
680 | */ |
681 | void guard_bio_eod(struct bio *bio) |
682 | { |
683 | sector_t maxsector = bdev_nr_sectors(bio->bi_bdev); |
684 | |
685 | if (!maxsector) |
686 | return; |
687 | |
688 | /* |
689 | * If the *whole* IO is past the end of the device, |
690 | * let it through, and the IO layer will turn it into |
691 | * an EIO. |
692 | */ |
693 | if (unlikely(bio->bi_iter.bi_sector >= maxsector)) |
694 | return; |
695 | |
696 | maxsector -= bio->bi_iter.bi_sector; |
697 | if (likely((bio->bi_iter.bi_size >> 9) <= maxsector)) |
698 | return; |
699 | |
700 | bio_truncate(bio, maxsector << 9); |
701 | } |
702 | |
703 | static int __bio_alloc_cache_prune(struct bio_alloc_cache *cache, |
704 | unsigned int nr) |
705 | { |
706 | unsigned int i = 0; |
707 | struct bio *bio; |
708 | |
709 | while ((bio = cache->free_list) != NULL) { |
710 | cache->free_list = bio->bi_next; |
711 | cache->nr--; |
712 | bio_free(bio); |
713 | if (++i == nr) |
714 | break; |
715 | } |
716 | return i; |
717 | } |
718 | |
719 | static void bio_alloc_cache_prune(struct bio_alloc_cache *cache, |
720 | unsigned int nr) |
721 | { |
722 | nr -= __bio_alloc_cache_prune(cache, nr); |
723 | if (!READ_ONCE(cache->free_list)) { |
724 | bio_alloc_irq_cache_splice(cache); |
725 | __bio_alloc_cache_prune(cache, nr); |
726 | } |
727 | } |
728 | |
729 | static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node) |
730 | { |
731 | struct bio_set *bs; |
732 | |
733 | bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead); |
734 | if (bs->cache) { |
735 | struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu); |
736 | |
737 | bio_alloc_cache_prune(cache, -1U); |
738 | } |
739 | return 0; |
740 | } |
741 | |
742 | static void bio_alloc_cache_destroy(struct bio_set *bs) |
743 | { |
744 | int cpu; |
745 | |
746 | if (!bs->cache) |
747 | return; |
748 | |
749 | cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead); |
750 | for_each_possible_cpu(cpu) { |
751 | struct bio_alloc_cache *cache; |
752 | |
753 | cache = per_cpu_ptr(bs->cache, cpu); |
754 | bio_alloc_cache_prune(cache, -1U); |
755 | } |
756 | free_percpu(bs->cache); |
757 | bs->cache = NULL; |
758 | } |
759 | |
760 | static inline void bio_put_percpu_cache(struct bio *bio) |
761 | { |
762 | struct bio_alloc_cache *cache; |
763 | |
764 | cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu()); |
765 | if (READ_ONCE(cache->nr_irq) + cache->nr > ALLOC_CACHE_MAX) |
766 | goto out_free; |
767 | |
768 | if (in_task()) { |
769 | bio_uninit(bio); |
770 | bio->bi_next = cache->free_list; |
771 | /* Not necessary but helps not to iopoll already freed bios */ |
772 | bio->bi_bdev = NULL; |
773 | cache->free_list = bio; |
774 | cache->nr++; |
775 | } else if (in_hardirq()) { |
776 | lockdep_assert_irqs_disabled(); |
777 | |
778 | bio_uninit(bio); |
779 | bio->bi_next = cache->free_list_irq; |
780 | cache->free_list_irq = bio; |
781 | cache->nr_irq++; |
782 | } else { |
783 | goto out_free; |
784 | } |
785 | put_cpu(); |
786 | return; |
787 | out_free: |
788 | put_cpu(); |
789 | bio_free(bio); |
790 | } |
791 | |
792 | /** |
793 | * bio_put - release a reference to a bio |
794 | * @bio: bio to release reference to |
795 | * |
796 | * Description: |
797 | * Put a reference to a &struct bio, either one you have gotten with |
798 | * bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it. |
799 | **/ |
800 | void bio_put(struct bio *bio) |
801 | { |
802 | if (unlikely(bio_flagged(bio, BIO_REFFED))) { |
803 | BUG_ON(!atomic_read(&bio->__bi_cnt)); |
804 | if (!atomic_dec_and_test(&bio->__bi_cnt)) |
805 | return; |
806 | } |
807 | if (bio->bi_opf & REQ_ALLOC_CACHE) |
808 | bio_put_percpu_cache(bio); |
809 | else |
810 | bio_free(bio); |
811 | } |
812 | EXPORT_SYMBOL(bio_put); |
813 | |
814 | static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp) |
815 | { |
816 | bio_set_flag(bio, BIO_CLONED); |
817 | bio->bi_ioprio = bio_src->bi_ioprio; |
818 | bio->bi_write_hint = bio_src->bi_write_hint; |
819 | bio->bi_iter = bio_src->bi_iter; |
820 | |
821 | if (bio->bi_bdev) { |
822 | if (bio->bi_bdev == bio_src->bi_bdev && |
823 | bio_flagged(bio_src, BIO_REMAPPED)) |
824 | bio_set_flag(bio, BIO_REMAPPED); |
825 | bio_clone_blkg_association(bio, bio_src); |
826 | } |
827 | |
828 | if (bio_crypt_clone(bio, bio_src, gfp) < 0) |
829 | return -ENOMEM; |
830 | if (bio_integrity(bio_src) && |
831 | bio_integrity_clone(bio, bio_src, gfp) < 0) |
832 | return -ENOMEM; |
833 | return 0; |
834 | } |
835 | |
836 | /** |
837 | * bio_alloc_clone - clone a bio that shares the original bio's biovec |
838 | * @bdev: block_device to clone onto |
839 | * @bio_src: bio to clone from |
840 | * @gfp: allocation priority |
841 | * @bs: bio_set to allocate from |
842 | * |
843 | * Allocate a new bio that is a clone of @bio_src. The caller owns the returned |
844 | * bio, but not the actual data it points to. |
845 | * |
846 | * The caller must ensure that the returned bio is not freed before @bio_src. |
847 | */ |
848 | struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src, |
849 | gfp_t gfp, struct bio_set *bs) |
850 | { |
851 | struct bio *bio; |
852 | |
853 | bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs); |
854 | if (!bio) |
855 | return NULL; |
856 | |
857 | if (__bio_clone(bio, bio_src, gfp) < 0) { |
858 | bio_put(bio); |
859 | return NULL; |
860 | } |
861 | bio->bi_io_vec = bio_src->bi_io_vec; |
862 | |
863 | return bio; |
864 | } |
865 | EXPORT_SYMBOL(bio_alloc_clone); |
866 | |
867 | /** |
868 | * bio_init_clone - clone a bio that shares the original bio's biovec |
869 | * @bdev: block_device to clone onto |
870 | * @bio: bio to clone into |
871 | * @bio_src: bio to clone from |
872 | * @gfp: allocation priority |
873 | * |
874 | * Initialize a new bio in caller provided memory that is a clone of @bio_src. |
875 | * The caller owns the returned bio, but not the actual data it points to. |
876 | * |
877 | * The caller must ensure that @bio_src is not freed before @bio. |
878 | */ |
879 | int bio_init_clone(struct block_device *bdev, struct bio *bio, |
880 | struct bio *bio_src, gfp_t gfp) |
881 | { |
882 | int ret; |
883 | |
884 | bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf); |
885 | ret = __bio_clone(bio, bio_src, gfp); |
886 | if (ret) |
887 | bio_uninit(bio); |
888 | return ret; |
889 | } |
890 | EXPORT_SYMBOL(bio_init_clone); |
891 | |
892 | /** |
893 | * bio_full - check if the bio is full |
894 | * @bio: bio to check |
895 | * @len: length of one segment to be added |
896 | * |
897 | * Return true if @bio is full and one segment with @len bytes can't be |
898 | * added to the bio, otherwise return false |
899 | */ |
900 | static inline bool bio_full(struct bio *bio, unsigned len) |
901 | { |
902 | if (bio->bi_vcnt >= bio->bi_max_vecs) |
903 | return true; |
904 | if (bio->bi_iter.bi_size > UINT_MAX - len) |
905 | return true; |
906 | return false; |
907 | } |
908 | |
909 | static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page, |
910 | unsigned int len, unsigned int off, bool *same_page) |
911 | { |
912 | size_t bv_end = bv->bv_offset + bv->bv_len; |
913 | phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1; |
914 | phys_addr_t page_addr = page_to_phys(page); |
915 | |
916 | if (vec_end_addr + 1 != page_addr + off) |
917 | return false; |
918 | if (xen_domain() && !xen_biovec_phys_mergeable(bv, page)) |
919 | return false; |
920 | if (!zone_device_pages_have_same_pgmap(bv->bv_page, page)) |
921 | return false; |
922 | |
923 | *same_page = ((vec_end_addr & PAGE_MASK) == page_addr); |
924 | if (!*same_page) { |
925 | if (IS_ENABLED(CONFIG_KMSAN)) |
926 | return false; |
927 | if (bv->bv_page + bv_end / PAGE_SIZE != page + off / PAGE_SIZE) |
928 | return false; |
929 | } |
930 | |
931 | bv->bv_len += len; |
932 | return true; |
933 | } |
934 | |
935 | /* |
936 | * Try to merge a page into a segment, while obeying the hardware segment |
937 | * size limit. This is not for normal read/write bios, but for passthrough |
938 | * or Zone Append operations that we can't split. |
939 | */ |
940 | bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv, |
941 | struct page *page, unsigned len, unsigned offset, |
942 | bool *same_page) |
943 | { |
944 | unsigned long mask = queue_segment_boundary(q); |
945 | phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset; |
946 | phys_addr_t addr2 = page_to_phys(page) + offset + len - 1; |
947 | |
948 | if ((addr1 | mask) != (addr2 | mask)) |
949 | return false; |
950 | if (len > queue_max_segment_size(q) - bv->bv_len) |
951 | return false; |
952 | return bvec_try_merge_page(bv, page, len, offset, same_page); |
953 | } |
954 | |
955 | /** |
956 | * bio_add_hw_page - attempt to add a page to a bio with hw constraints |
957 | * @q: the target queue |
958 | * @bio: destination bio |
959 | * @page: page to add |
960 | * @len: vec entry length |
961 | * @offset: vec entry offset |
962 | * @max_sectors: maximum number of sectors that can be added |
963 | * @same_page: return if the segment has been merged inside the same page |
964 | * |
965 | * Add a page to a bio while respecting the hardware max_sectors, max_segment |
966 | * and gap limitations. |
967 | */ |
968 | int bio_add_hw_page(struct request_queue *q, struct bio *bio, |
969 | struct page *page, unsigned int len, unsigned int offset, |
970 | unsigned int max_sectors, bool *same_page) |
971 | { |
972 | unsigned int max_size = max_sectors << SECTOR_SHIFT; |
973 | |
974 | if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) |
975 | return 0; |
976 | |
977 | len = min3(len, max_size, queue_max_segment_size(q)); |
978 | if (len > max_size - bio->bi_iter.bi_size) |
979 | return 0; |
980 | |
981 | if (bio->bi_vcnt > 0) { |
982 | struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; |
983 | |
984 | if (bvec_try_merge_hw_page(q, bv, page, len, offset, |
985 | same_page)) { |
986 | bio->bi_iter.bi_size += len; |
987 | return len; |
988 | } |
989 | |
990 | if (bio->bi_vcnt >= |
991 | min(bio->bi_max_vecs, queue_max_segments(q))) |
992 | return 0; |
993 | |
994 | /* |
995 | * If the queue doesn't support SG gaps and adding this segment |
996 | * would create a gap, disallow it. |
997 | */ |
998 | if (bvec_gap_to_prev(&q->limits, bv, offset)) |
999 | return 0; |
1000 | } |
1001 | |
1002 | bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, offset); |
1003 | bio->bi_vcnt++; |
1004 | bio->bi_iter.bi_size += len; |
1005 | return len; |
1006 | } |
1007 | |
1008 | /** |
1009 | * bio_add_pc_page - attempt to add page to passthrough bio |
1010 | * @q: the target queue |
1011 | * @bio: destination bio |
1012 | * @page: page to add |
1013 | * @len: vec entry length |
1014 | * @offset: vec entry offset |
1015 | * |
1016 | * Attempt to add a page to the bio_vec maplist. This can fail for a |
1017 | * number of reasons, such as the bio being full or target block device |
1018 | * limitations. The target block device must allow bio's up to PAGE_SIZE, |
1019 | * so it is always possible to add a single page to an empty bio. |
1020 | * |
1021 | * This should only be used by passthrough bios. |
1022 | */ |
1023 | int bio_add_pc_page(struct request_queue *q, struct bio *bio, |
1024 | struct page *page, unsigned int len, unsigned int offset) |
1025 | { |
1026 | bool same_page = false; |
1027 | return bio_add_hw_page(q, bio, page, len, offset, |
1028 | queue_max_hw_sectors(q), &same_page); |
1029 | } |
1030 | EXPORT_SYMBOL(bio_add_pc_page); |
1031 | |
1032 | /** |
1033 | * bio_add_zone_append_page - attempt to add page to zone-append bio |
1034 | * @bio: destination bio |
1035 | * @page: page to add |
1036 | * @len: vec entry length |
1037 | * @offset: vec entry offset |
1038 | * |
1039 | * Attempt to add a page to the bio_vec maplist of a bio that will be submitted |
1040 | * for a zone-append request. This can fail for a number of reasons, such as the |
1041 | * bio being full, the target block device not being a zoned block device, or |
1042 | * other limitations of the target block device. The target block device must |
1043 | * allow bio's up to PAGE_SIZE, so it is always possible to add a single page |
1044 | * to an empty bio. |
1045 | * |
1046 | * Returns: number of bytes added to the bio, or 0 in case of a failure. |
1047 | */ |
1048 | int bio_add_zone_append_page(struct bio *bio, struct page *page, |
1049 | unsigned int len, unsigned int offset) |
1050 | { |
1051 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); |
1052 | bool same_page = false; |
1053 | |
1054 | if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND)) |
1055 | return 0; |
1056 | |
1057 | if (WARN_ON_ONCE(!bdev_is_zoned(bio->bi_bdev))) |
1058 | return 0; |
1059 | |
1060 | return bio_add_hw_page(q, bio, page, len, offset, |
1061 | queue_max_zone_append_sectors(q), &same_page); |
1062 | } |
1063 | EXPORT_SYMBOL_GPL(bio_add_zone_append_page); |
1064 | |
1065 | /** |
1066 | * __bio_add_page - add page(s) to a bio in a new segment |
1067 | * @bio: destination bio |
1068 | * @page: start page to add |
1069 | * @len: length of the data to add, may cross pages |
1070 | * @off: offset of the data relative to @page, may cross pages |
1071 | * |
1072 | * Add the data at @page + @off to @bio as a new bvec. The caller must ensure |
1073 | * that @bio has space for another bvec. |
1074 | */ |
1075 | void __bio_add_page(struct bio *bio, struct page *page, |
1076 | unsigned int len, unsigned int off) |
1077 | { |
1078 | WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); |
1079 | WARN_ON_ONCE(bio_full(bio, len)); |
1080 | |
1081 | bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, off); |
1082 | bio->bi_iter.bi_size += len; |
1083 | bio->bi_vcnt++; |
1084 | } |
1085 | EXPORT_SYMBOL_GPL(__bio_add_page); |
1086 | |
1087 | /** |
1088 | * bio_add_page - attempt to add page(s) to bio |
1089 | * @bio: destination bio |
1090 | * @page: start page to add |
1091 | * @len: vec entry length, may cross pages |
1092 | * @offset: vec entry offset relative to @page, may cross pages |
1093 | * |
1094 | * Attempt to add page(s) to the bio_vec maplist. This will only fail |
1095 | * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio. |
1096 | */ |
1097 | int bio_add_page(struct bio *bio, struct page *page, |
1098 | unsigned int len, unsigned int offset) |
1099 | { |
1100 | bool same_page = false; |
1101 | |
1102 | if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) |
1103 | return 0; |
1104 | if (bio->bi_iter.bi_size > UINT_MAX - len) |
1105 | return 0; |
1106 | |
1107 | if (bio->bi_vcnt > 0 && |
1108 | bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1], |
1109 | page, len, offset, &same_page)) { |
1110 | bio->bi_iter.bi_size += len; |
1111 | return len; |
1112 | } |
1113 | |
1114 | if (bio->bi_vcnt >= bio->bi_max_vecs) |
1115 | return 0; |
1116 | __bio_add_page(bio, page, len, offset); |
1117 | return len; |
1118 | } |
1119 | EXPORT_SYMBOL(bio_add_page); |
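/*
 * Example (sketch; "pages" and "nr" are assumed caller state): pages are
 * usually added in a loop, and a short return from bio_add_page() means the
 * bio is full and must be submitted before a new one is started:
 *
 *	for (i = 0; i < nr; i++) {
 *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE) {
 *			submit the current bio, allocate a new one, retry
 *		}
 *	}
 */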
1120 | |
1121 | void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len, |
1122 | size_t off) |
1123 | { |
1124 | WARN_ON_ONCE(len > UINT_MAX); |
1125 | WARN_ON_ONCE(off > UINT_MAX); |
1126 | __bio_add_page(bio, &folio->page, len, off); |
1127 | } |
1128 | |
1129 | /** |
1130 | * bio_add_folio - Attempt to add part of a folio to a bio. |
1131 | * @bio: BIO to add to. |
1132 | * @folio: Folio to add. |
1133 | * @len: How many bytes from the folio to add. |
1134 | * @off: First byte in this folio to add. |
1135 | * |
1136 | * Filesystems that use folios can call this function instead of calling |
1137 | * bio_add_page() for each page in the folio. If @off is bigger than |
1138 | * PAGE_SIZE, this function can create a bio_vec that starts in a page |
1139 | * after the bv_page. BIOs do not support folios that are 4GiB or larger. |
1140 | * |
1141 | * Return: Whether the addition was successful. |
1142 | */ |
1143 | bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len, |
1144 | size_t off) |
1145 | { |
1146 | if (len > UINT_MAX || off > UINT_MAX) |
1147 | return false; |
1148 | return bio_add_page(bio, &folio->page, len, off) > 0; |
1149 | } |
1150 | EXPORT_SYMBOL(bio_add_folio); |
1151 | |
1152 | void __bio_release_pages(struct bio *bio, bool mark_dirty) |
1153 | { |
1154 | struct folio_iter fi; |
1155 | |
1156 | bio_for_each_folio_all(fi, bio) { |
1157 | struct page *page; |
1158 | size_t nr_pages; |
1159 | |
1160 | if (mark_dirty) { |
1161 | folio_lock(fi.folio); |
1162 | folio_mark_dirty(fi.folio); |
1163 | folio_unlock(fi.folio); |
1164 | } |
1165 | page = folio_page(fi.folio, fi.offset / PAGE_SIZE); |
1166 | nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE - |
1167 | fi.offset / PAGE_SIZE + 1; |
1168 | do { |
1169 | bio_release_page(bio, page++); |
1170 | } while (--nr_pages != 0); |
1171 | } |
1172 | } |
1173 | EXPORT_SYMBOL_GPL(__bio_release_pages); |
1174 | |
1175 | void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) |
1176 | { |
1177 | size_t size = iov_iter_count(iter); |
1178 | |
1179 | WARN_ON_ONCE(bio->bi_max_vecs); |
1180 | |
1181 | if (bio_op(bio) == REQ_OP_ZONE_APPEND) { |
1182 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); |
1183 | size_t max_sectors = queue_max_zone_append_sectors(q); |
1184 | |
1185 | size = min(size, max_sectors << SECTOR_SHIFT); |
1186 | } |
1187 | |
1188 | bio->bi_vcnt = iter->nr_segs; |
1189 | bio->bi_io_vec = (struct bio_vec *)iter->bvec; |
1190 | bio->bi_iter.bi_bvec_done = iter->iov_offset; |
1191 | bio->bi_iter.bi_size = size; |
1192 | bio_set_flag(bio, BIO_CLONED); |
1193 | } |
1194 | |
1195 | static int bio_iov_add_page(struct bio *bio, struct page *page, |
1196 | unsigned int len, unsigned int offset) |
1197 | { |
1198 | bool same_page = false; |
1199 | |
1200 | if (WARN_ON_ONCE(bio->bi_iter.bi_size > UINT_MAX - len)) |
1201 | return -EIO; |
1202 | |
1203 | if (bio->bi_vcnt > 0 && |
1204 | bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1], |
1205 | page, len, offset, &same_page)) { |
1206 | bio->bi_iter.bi_size += len; |
1207 | if (same_page) |
1208 | bio_release_page(bio, page); |
1209 | return 0; |
1210 | } |
1211 | __bio_add_page(bio, page, len, offset); |
1212 | return 0; |
1213 | } |
1214 | |
1215 | static int bio_iov_add_zone_append_page(struct bio *bio, struct page *page, |
1216 | unsigned int len, unsigned int offset) |
1217 | { |
1218 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); |
1219 | bool same_page = false; |
1220 | |
1221 | if (bio_add_hw_page(q, bio, page, len, offset, |
1222 | queue_max_zone_append_sectors(q), &same_page) != len) |
1223 | return -EINVAL; |
1224 | if (same_page) |
1225 | bio_release_page(bio, page); |
1226 | return 0; |
1227 | } |
1228 | |
1229 | #define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *)) |
1230 | |
1231 | /** |
1232 | * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio |
1233 | * @bio: bio to add pages to |
1234 | * @iter: iov iterator describing the region to be mapped |
1235 | * |
1236 | * Extracts pages from *iter and appends them to @bio's bvec array. The pages |
1237 | * will have to be cleaned up in the way indicated by the BIO_PAGE_PINNED flag. |
1238 | * For a multi-segment *iter, this function only adds pages from the next |
1239 | * non-empty segment of the iov iterator. |
1240 | */ |
1241 | static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) |
1242 | { |
1243 | iov_iter_extraction_t extraction_flags = 0; |
1244 | unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; |
1245 | unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt; |
1246 | struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; |
1247 | struct page **pages = (struct page **)bv; |
1248 | ssize_t size, left; |
1249 | unsigned len, i = 0; |
1250 | size_t offset; |
1251 | int ret = 0; |
1252 | |
1253 | /* |
1254 | * Move page array up in the allocated memory for the bio vecs as far as |
1255 | * possible so that we can start filling biovecs from the beginning |
1256 | * without overwriting the temporary page array. |
1257 | */ |
1258 | BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2); |
1259 | pages += entries_left * (PAGE_PTRS_PER_BVEC - 1); |
1260 | |
1261 | if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue)) |
1262 | extraction_flags |= ITER_ALLOW_P2PDMA; |
1263 | |
1264 | /* |
1265 | * Each segment in the iov is required to be a block size multiple. |
1266 | * However, we may not be able to get the entire segment if it spans |
1267 | * more pages than bi_max_vecs allows, so we have to ALIGN_DOWN the |
1268 | * result to ensure the bio's total size is correct. The remainder of |
1269 | * the iov data will be picked up in the next bio iteration. |
1270 | */ |
1271 | size = iov_iter_extract_pages(iter, &pages, |
1272 | UINT_MAX - bio->bi_iter.bi_size, |
1273 | nr_pages, extraction_flags, &offset); |
1274 | if (unlikely(size <= 0)) |
1275 | return size ? size : -EFAULT; |
1276 | |
1277 | nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE); |
1278 | |
1279 | if (bio->bi_bdev) { |
1280 | size_t trim = size & (bdev_logical_block_size(bio->bi_bdev) - 1); |
1281 | iov_iter_revert(iter, trim); |
1282 | size -= trim; |
1283 | } |
1284 | |
1285 | if (unlikely(!size)) { |
1286 | ret = -EFAULT; |
1287 | goto out; |
1288 | } |
1289 | |
1290 | for (left = size, i = 0; left > 0; left -= len, i++) { |
1291 | struct page *page = pages[i]; |
1292 | |
1293 | len = min_t(size_t, PAGE_SIZE - offset, left); |
1294 | if (bio_op(bio) == REQ_OP_ZONE_APPEND) { |
1295 | ret = bio_iov_add_zone_append_page(bio, page, len, |
1296 | offset); |
1297 | if (ret) |
1298 | break; |
1299 | } else |
1300 | bio_iov_add_page(bio, page, len, offset); |
1301 | |
1302 | offset = 0; |
1303 | } |
1304 | |
1305 | iov_iter_revert(iter, left); |
1306 | out: |
1307 | while (i < nr_pages) |
1308 | bio_release_page(bio, pages[i++]); |
1309 | |
1310 | return ret; |
1311 | } |
1312 | |
1313 | /** |
1314 | * bio_iov_iter_get_pages - add user or kernel pages to a bio |
1315 | * @bio: bio to add pages to |
1316 | * @iter: iov iterator describing the region to be added |
1317 | * |
1318 | * This takes either an iterator pointing to user memory, or one pointing to |
1319 | * kernel pages (BVEC iterator). If we're adding user pages, we pin them and |
1320 | * map them into the kernel. On IO completion, the caller should put those |
1321 | * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided |
1322 | * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs |
1323 | * to ensure the bvecs and pages stay referenced until the submitted I/O is |
1324 | * completed by a call to ->ki_complete() or returns with an error other than |
1325 | * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF |
1326 | * on IO completion. If it isn't, then pages should be released. |
1327 | * |
1328 | * The function tries, but does not guarantee, to pin as many pages as |
1329 | * fit into the bio, or are requested in @iter, whatever is smaller. If |
1330 | * MM encounters an error pinning the requested pages, it stops. Error |
1331 | * is returned only if 0 pages could be pinned. |
1332 | */ |
1333 | int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) |
1334 | { |
1335 | int ret = 0; |
1336 | |
1337 | if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) |
1338 | return -EIO; |
1339 | |
1340 | if (iov_iter_is_bvec(iter)) { |
1341 | bio_iov_bvec_set(bio, iter); |
1342 | iov_iter_advance(iter, bio->bi_iter.bi_size); |
1343 | return 0; |
1344 | } |
1345 | |
1346 | if (iov_iter_extract_will_pin(iter)) |
1347 | bio_set_flag(bio, BIO_PAGE_PINNED); |
1348 | do { |
1349 | ret = __bio_iov_iter_get_pages(bio, iter); |
1350 | } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0)); |
1351 | |
1352 | return bio->bi_vcnt ? 0 : ret; |
1353 | } |
1354 | EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages); |
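/*
 * Example (sketch of a direct I/O style caller, error handling elided): bios
 * are built and submitted until the iterator is drained; the completion
 * handler is expected to drop the pinned pages, e.g. via bio_release_pages():
 *
 *	while (iov_iter_count(iter)) {
 *		bio = bio_alloc(bdev, BIO_MAX_VECS, opf, GFP_KERNEL);
 *		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
 *		if (bio_iov_iter_get_pages(bio, iter) < 0) {
 *			bio_put(bio);
 *			break;
 *		}
 *		pos += bio->bi_iter.bi_size;
 *		submit_bio(bio);
 *	}
 */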
1355 | |
1356 | static void submit_bio_wait_endio(struct bio *bio) |
1357 | { |
1358 | complete(bio->bi_private); |
1359 | } |
1360 | |
1361 | /** |
1362 | * submit_bio_wait - submit a bio, and wait until it completes |
1363 | * @bio: The &struct bio which describes the I/O |
1364 | * |
1365 | * Simple wrapper around submit_bio(). Returns 0 on success, or the error from |
1366 | * bio_endio() on failure. |
1367 | * |
1368 | * WARNING: Unlike how submit_bio() is usually used, this function does not |
1369 | * consume a reference to the bio. The caller must drop that reference |
1370 | * on their own. |
1371 | */ |
1372 | int submit_bio_wait(struct bio *bio) |
1373 | { |
1374 | DECLARE_COMPLETION_ONSTACK_MAP(done, |
1375 | bio->bi_bdev->bd_disk->lockdep_map); |
1376 | |
1377 | bio->bi_private = &done; |
1378 | bio->bi_end_io = submit_bio_wait_endio; |
1379 | bio->bi_opf |= REQ_SYNC; |
1380 | submit_bio(bio); |
1381 | blk_wait_io(&done); |
1382 | |
1383 | return blk_status_to_errno(bio->bi_status); |
1384 | } |
1385 | EXPORT_SYMBOL(submit_bio_wait); |
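/*
 * Example (sketch): because submit_bio_wait() does not consume the bio
 * reference, the caller still owns the bio afterwards and must drop it:
 *
 *	ret = submit_bio_wait(bio);
 *	if (ret)
 *		pr_err("I/O failed: %d\n", ret);
 *	bio_put(bio);
 */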
1386 | |
1387 | void __bio_advance(struct bio *bio, unsigned bytes) |
1388 | { |
1389 | if (bio_integrity(bio)) |
1390 | bio_integrity_advance(bio, bytes); |
1391 | |
1392 | bio_crypt_advance(bio, bytes); |
1393 | bio_advance_iter(bio, &bio->bi_iter, bytes); |
1394 | } |
1395 | EXPORT_SYMBOL(__bio_advance); |
1396 | |
1397 | void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, |
1398 | struct bio *src, struct bvec_iter *src_iter) |
1399 | { |
1400 | while (src_iter->bi_size && dst_iter->bi_size) { |
1401 | struct bio_vec src_bv = bio_iter_iovec(src, *src_iter); |
1402 | struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter); |
1403 | unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len); |
1404 | void *src_buf = bvec_kmap_local(&src_bv); |
1405 | void *dst_buf = bvec_kmap_local(&dst_bv); |
1406 | |
1407 | memcpy(dst_buf, src_buf, bytes); |
1408 | |
1409 | kunmap_local(dst_buf); |
1410 | kunmap_local(src_buf); |
1411 | |
1412 | bio_advance_iter_single(src, src_iter, bytes); |
1413 | bio_advance_iter_single(dst, dst_iter, bytes); |
1414 | } |
1415 | } |
1416 | EXPORT_SYMBOL(bio_copy_data_iter); |
1417 | |
1418 | /** |
1419 | * bio_copy_data - copy contents of data buffers from one bio to another |
1420 | * @src: source bio |
1421 | * @dst: destination bio |
1422 | * |
1423 | * Stops when it reaches the end of either @src or @dst - that is, copies |
1424 | * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios). |
1425 | */ |
1426 | void bio_copy_data(struct bio *dst, struct bio *src) |
1427 | { |
1428 | struct bvec_iter src_iter = src->bi_iter; |
1429 | struct bvec_iter dst_iter = dst->bi_iter; |
1430 | |
1431 | bio_copy_data_iter(dst, &dst_iter, src, &src_iter); |
1432 | } |
1433 | EXPORT_SYMBOL(bio_copy_data); |
1434 | |
1435 | void bio_free_pages(struct bio *bio) |
1436 | { |
1437 | struct bio_vec *bvec; |
1438 | struct bvec_iter_all iter_all; |
1439 | |
1440 | bio_for_each_segment_all(bvec, bio, iter_all) |
1441 | __free_page(bvec->bv_page); |
1442 | } |
1443 | EXPORT_SYMBOL(bio_free_pages); |
1444 | |
1445 | /* |
1446 | * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions |
1447 | * for performing direct-IO in BIOs. |
1448 | * |
1449 | * The problem is that we cannot run folio_mark_dirty() from interrupt context |
1450 | * because the required locks are not interrupt-safe. So what we can do is to |
1451 | * mark the pages dirty _before_ performing IO. And in interrupt context, |
1452 | * check that the pages are still dirty. If so, fine. If not, redirty them |
1453 | * in process context. |
1454 | * |
1455 | * Note that this code is very hard to test under normal circumstances because |
1456 | * direct-io pins the pages with get_user_pages(). This makes |
1457 | * is_page_cache_freeable return false, and the VM will not clean the pages. |
1458 | * But other code (eg, flusher threads) could clean the pages if they are mapped |
1459 | * pagecache. |
1460 | * |
1461 | * Simply disabling the call to bio_set_pages_dirty() is a good way to test the |
1462 | * deferred bio dirtying paths. |
1463 | */ |
1464 | |
1465 | /* |
1466 | * bio_set_pages_dirty() will mark all the bio's pages as dirty. |
1467 | */ |
1468 | void bio_set_pages_dirty(struct bio *bio) |
1469 | { |
1470 | struct folio_iter fi; |
1471 | |
1472 | bio_for_each_folio_all(fi, bio) { |
1473 | folio_lock(fi.folio); |
1474 | folio_mark_dirty(fi.folio); |
1475 | folio_unlock(fi.folio); |
1476 | } |
1477 | } |
1478 | EXPORT_SYMBOL_GPL(bio_set_pages_dirty); |
1479 | |
1480 | /* |
1481 | * bio_check_pages_dirty() will check that all the BIO's pages are still dirty. |
1482 | * If they are, then fine. If, however, some pages are clean then they must |
1483 | * have been written out during the direct-IO read. So we take another ref on |
1484 | * the BIO and re-dirty the pages in process context. |
1485 | * |
1486 | * It is expected that bio_check_pages_dirty() will wholly own the BIO from |
1487 | * here on. It will unpin each page and will run one bio_put() against the |
1488 | * BIO. |
1489 | */ |
1490 | |
1491 | static void bio_dirty_fn(struct work_struct *work); |
1492 | |
1493 | static DECLARE_WORK(bio_dirty_work, bio_dirty_fn); |
1494 | static DEFINE_SPINLOCK(bio_dirty_lock); |
1495 | static struct bio *bio_dirty_list; |
1496 | |
1497 | /* |
1498 | * This runs in process context |
1499 | */ |
1500 | static void bio_dirty_fn(struct work_struct *work) |
1501 | { |
1502 | struct bio *bio, *next; |
1503 | |
1504 | spin_lock_irq(&bio_dirty_lock); |
1505 | next = bio_dirty_list; |
1506 | bio_dirty_list = NULL; |
1507 | spin_unlock_irq(&bio_dirty_lock); |
1508 | |
1509 | while ((bio = next) != NULL) { |
1510 | next = bio->bi_private; |
1511 | |
1512 | bio_release_pages(bio, true); |
1513 | bio_put(bio); |
1514 | } |
1515 | } |
1516 | |
1517 | void bio_check_pages_dirty(struct bio *bio) |
1518 | { |
1519 | struct folio_iter fi; |
1520 | unsigned long flags; |
1521 | |
1522 | bio_for_each_folio_all(fi, bio) { |
1523 | if (!folio_test_dirty(fi.folio)) |
1524 | goto defer; |
1525 | } |
1526 | |
1527 | bio_release_pages(bio, false); |
1528 | bio_put(bio); |
1529 | return; |
1530 | defer: |
1531 | spin_lock_irqsave(&bio_dirty_lock, flags); |
1532 | bio->bi_private = bio_dirty_list; |
1533 | bio_dirty_list = bio; |
1534 | spin_unlock_irqrestore(&bio_dirty_lock, flags); |
1535 | schedule_work(&bio_dirty_work); |
1536 | } |
1537 | EXPORT_SYMBOL_GPL(bio_check_pages_dirty); |
1538 | |
1539 | static inline bool bio_remaining_done(struct bio *bio) |
1540 | { |
1541 | /* |
1542 | * If we're not chaining, then ->__bi_remaining is always 1 and |
1543 | * we always end io on the first invocation. |
1544 | */ |
1545 | if (!bio_flagged(bio, BIO_CHAIN)) |
1546 | return true; |
1547 | |
1548 | BUG_ON(atomic_read(&bio->__bi_remaining) <= 0); |
1549 | |
1550 | if (atomic_dec_and_test(&bio->__bi_remaining)) { |
1551 | bio_clear_flag(bio, BIO_CHAIN); |
1552 | return true; |
1553 | } |
1554 | |
1555 | return false; |
1556 | } |
1557 | |
1558 | /** |
1559 | * bio_endio - end I/O on a bio |
1560 | * @bio: bio |
1561 | * |
1562 | * Description: |
1563 | * bio_endio() will end I/O on the whole bio. bio_endio() is the preferred |
1564 | * way to end I/O on a bio. No one should call bi_end_io() directly on a |
1565 | * bio unless they own it and thus know that it has an end_io function. |
1566 | * |
1567 | * bio_endio() can be called several times on a bio that has been chained |
1568 | * using bio_chain(). The ->bi_end_io() function will only be called the |
1569 | * last time. |
1570 | **/ |
1571 | void bio_endio(struct bio *bio) |
1572 | { |
1573 | again: |
1574 | if (!bio_remaining_done(bio)) |
1575 | return; |
1576 | if (!bio_integrity_endio(bio)) |
1577 | return; |
1578 | |
1579 | rq_qos_done_bio(bio); |
1580 | |
1581 | if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) { |
1582 | trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio); |
1583 | bio_clear_flag(bio, BIO_TRACE_COMPLETION); |
1584 | } |
1585 | |
1586 | /* |
1587 | * Need to have a real endio function for chained bios, otherwise |
1588 | * various corner cases will break (like stacking block devices that |
1589 | * save/restore bi_end_io) - however, we want to avoid unbounded |
1590 | * recursion and blowing the stack. Tail call optimization would |
1591 | * handle this, but compiling with frame pointers also disables |
1592 | * gcc's sibling call optimization. |
1593 | */ |
1594 | if (bio->bi_end_io == bio_chain_endio) { |
1595 | bio = __bio_chain_endio(bio); |
1596 | goto again; |
1597 | } |
1598 | |
1599 | blk_throtl_bio_endio(bio); |
1600 | /* release cgroup info */ |
1601 | bio_uninit(bio); |
1602 | if (bio->bi_end_io) |
1603 | bio->bi_end_io(bio); |
1604 | } |
1605 | EXPORT_SYMBOL(bio_endio); |
1606 | |
1607 | /** |
1608 | * bio_split - split a bio |
1609 | * @bio: bio to split |
1610 | * @sectors: number of sectors to split from the front of @bio |
1611 | * @gfp: gfp mask |
1612 | * @bs: bio set to allocate from |
1613 | * |
1614 | * Allocates and returns a new bio which represents @sectors from the start of |
1615 | * @bio, and updates @bio to represent the remaining sectors. |
1616 | * |
1617 | * Unless this is a discard request the newly allocated bio will point |
1618 | * to @bio's bi_io_vec. It is the caller's responsibility to ensure that |
1619 | * neither @bio nor @bs are freed before the split bio. |
1620 | */ |
1621 | struct bio *bio_split(struct bio *bio, int sectors, |
1622 | gfp_t gfp, struct bio_set *bs) |
1623 | { |
1624 | struct bio *split; |
1625 | |
1626 | BUG_ON(sectors <= 0); |
1627 | BUG_ON(sectors >= bio_sectors(bio)); |
1628 | |
1629 | /* Zone append commands cannot be split */ |
1630 | if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND)) |
1631 | return NULL; |
1632 | |
1633 | split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs); |
1634 | if (!split) |
1635 | return NULL; |
1636 | |
1637 | split->bi_iter.bi_size = sectors << 9; |
1638 | |
1639 | if (bio_integrity(split)) |
1640 | bio_integrity_trim(split); |
1641 | |
1642 | bio_advance(bio, split->bi_iter.bi_size); |
1643 | |
1644 | if (bio_flagged(bio, BIO_TRACE_COMPLETION)) |
1645 | bio_set_flag(split, BIO_TRACE_COMPLETION); |
1646 | |
1647 | return split; |
1648 | } |
1649 | EXPORT_SYMBOL(bio_split); |
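/*
 * Example (sketch, mirroring a common splitting pattern in the block layer):
 * the split-off front is chained to the shortened remainder, which becomes
 * its parent; the remainder is resubmitted and processing continues with the
 * front part:
 *
 *	split = bio_split(bio, max_sectors, GFP_NOIO, &fs_bio_set);
 *	if (split) {
 *		bio_chain(split, bio);
 *		submit_bio_noacct(bio);
 *		bio = split;
 *	}
 */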
1650 | |
1651 | /** |
1652 | * bio_trim - trim a bio |
1653 | * @bio: bio to trim |
1654 | * @offset: number of sectors to trim from the front of @bio |
1655 | * @size: size we want to trim @bio to, in sectors |
1656 | * |
1657 | * This function is typically used for bios that are cloned and submitted |
1658 | * to the underlying device in parts. |
1659 | */ |
1660 | void bio_trim(struct bio *bio, sector_t offset, sector_t size) |
1661 | { |
1662 | if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS || |
1663 | offset + size > bio_sectors(bio))) |
1664 | return; |
1665 | |
1666 | size <<= 9; |
1667 | if (offset == 0 && size == bio->bi_iter.bi_size) |
1668 | return; |
1669 | |
1670 | bio_advance(bio, offset << 9); |
1671 | bio->bi_iter.bi_size = size; |
1672 | |
1673 | if (bio_integrity(bio)) |
1674 | bio_integrity_trim(bio); |
1675 | } |
1676 | EXPORT_SYMBOL_GPL(bio_trim); |
1677 | |
1678 | /* |
1679 | * create memory pools for biovec's in a bio_set. |
1680 | * use the global biovec slabs created for general use. |
1681 | */ |
1682 | int biovec_init_pool(mempool_t *pool, int pool_entries) |
1683 | { |
1684 | struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1; |
1685 | |
1686 | return mempool_init_slab_pool(pool, pool_entries, bp->slab); |
1687 | } |
1688 | |
1689 | /* |
1690 | * bioset_exit - exit a bioset initialized with bioset_init() |
1691 | * |
1692 | * May be called on a zeroed but uninitialized bioset (i.e. allocated with |
1693 | * kzalloc()). |
1694 | */ |
1695 | void bioset_exit(struct bio_set *bs) |
1696 | { |
1697 | bio_alloc_cache_destroy(bs); |
1698 | if (bs->rescue_workqueue) |
1699 | destroy_workqueue(bs->rescue_workqueue); |
1700 | bs->rescue_workqueue = NULL; |
1701 | |
1702 | mempool_exit(&bs->bio_pool); |
1703 | mempool_exit(&bs->bvec_pool); |
1704 | |
1705 | bioset_integrity_free(bs); |
1706 | if (bs->bio_slab) |
1707 | bio_put_slab(bs); |
1708 | bs->bio_slab = NULL; |
1709 | } |
1710 | EXPORT_SYMBOL(bioset_exit); |
1711 | |
1712 | /** |
1713 | * bioset_init - Initialize a bio_set |
1714 | * @bs: pool to initialize |
1715 | * @pool_size: Number of bio and bio_vecs to cache in the mempool |
1716 | * @front_pad: Number of bytes to allocate in front of the returned bio |
1717 | * @flags: Flags to modify behavior, currently %BIOSET_NEED_BVECS |
1718 | * and %BIOSET_NEED_RESCUER |
1719 | * |
1720 | * Description: |
1721 | * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller |
1722 | * to ask for a number of bytes to be allocated in front of the bio. |
1723 | * Front pad allocation is useful for embedding the bio inside |
1724 | * another structure, to avoid allocating extra data to go with the bio. |
1725 | * Note that the bio must be embedded at the END of that structure always, |
1726 | * or things will break badly. |
1727 | * If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated |
1728 | * for allocating iovecs. This pool is not needed e.g. for bio_init_clone(). |
1729 | * If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used |
1730 | * to dispatch queued requests when the mempool runs out of space. |
1731 | * |
1732 | */ |
1733 | int bioset_init(struct bio_set *bs, |
1734 | unsigned int pool_size, |
1735 | unsigned int front_pad, |
1736 | int flags) |
1737 | { |
1738 | bs->front_pad = front_pad; |
1739 | if (flags & BIOSET_NEED_BVECS) |
1740 | bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec); |
1741 | else |
1742 | bs->back_pad = 0; |
1743 | |
1744 | spin_lock_init(&bs->rescue_lock); |
1745 | bio_list_init(&bs->rescue_list); |
1746 | INIT_WORK(&bs->rescue_work, bio_alloc_rescue); |
1747 | |
1748 | bs->bio_slab = bio_find_or_create_slab(bs); |
1749 | if (!bs->bio_slab) |
1750 | return -ENOMEM; |
1751 | |
1752 | if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab)) |
1753 | goto bad; |
1754 | |
1755 | if ((flags & BIOSET_NEED_BVECS) && |
1756 | biovec_init_pool(&bs->bvec_pool, pool_size)) |
1757 | goto bad; |
1758 | |
1759 | if (flags & BIOSET_NEED_RESCUER) { |
1760 | bs->rescue_workqueue = alloc_workqueue("bioset", |
1761 | WQ_MEM_RECLAIM, 0); |
1762 | if (!bs->rescue_workqueue) |
1763 | goto bad; |
1764 | } |
1765 | if (flags & BIOSET_PERCPU_CACHE) { |
1766 | bs->cache = alloc_percpu(struct bio_alloc_cache); |
1767 | if (!bs->cache) |
1768 | goto bad; |
1769 | cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead); |
1770 | } |
1771 | |
1772 | return 0; |
1773 | bad: |
1774 | bioset_exit(bs); |
1775 | return -ENOMEM; |
1776 | } |
1777 | EXPORT_SYMBOL(bioset_init); |
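/*
 * Example (sketch; "struct my_io" and "my_bio_set" are hypothetical): a
 * driver that embeds a bio at the end of its own per-I/O structure sizes
 * front_pad so that bio_alloc_bioset() returns memory with room for the
 * container in front of the bio:
 *
 *	struct my_io {
 *		void *private;
 *		struct bio bio;		(must be the last member)
 *	};
 *
 *	ret = bioset_init(&my_bio_set, BIO_POOL_SIZE,
 *			  offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
 *	...
 *	bio = bio_alloc_bioset(bdev, nr_vecs, opf, GFP_NOIO, &my_bio_set);
 *	io = container_of(bio, struct my_io, bio);
 */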
1778 | |
1779 | static int __init init_bio(void) |
1780 | { |
1781 | int i; |
1782 | |
1783 | BUILD_BUG_ON(BIO_FLAG_LAST > 8 * sizeof_field(struct bio, bi_flags)); |
1784 | |
1785 | bio_integrity_init(); |
1786 | |
1787 | for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) { |
1788 | struct biovec_slab *bvs = bvec_slabs + i; |
1789 | |
1790 | bvs->slab = kmem_cache_create(bvs->name, |
1791 | bvs->nr_vecs * sizeof(struct bio_vec), 0, |
1792 | SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); |
1793 | } |
1794 | |
1795 | cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL, |
1796 | bio_cpu_dead); |
1797 | |
1798 | if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, |
1799 | BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE)) |
1800 | panic("bio: can't allocate bios\n"); |
1801 | |
1802 | if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE)) |
1803 | panic("bio: can't create integrity pool\n"); |
1804 | |
1805 | return 0; |
1806 | } |
1807 | subsys_initcall(init_bio); |
1808 | |