// SPDX-License-Identifier: GPL-2.0
/*
 * bio-integrity.c - bio data integrity extensions
 *
 * Copyright (C) 2007, 2008, 2009 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 */

#include <linux/blk-integrity.h>
#include <linux/mempool.h>
#include <linux/export.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include "blk.h"

static struct kmem_cache *bip_slab;
static struct workqueue_struct *kintegrityd_wq;

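/*
 * Wait for all integrity verification work queued on kintegrityd to
 * complete.
 */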
void blk_flush_integrity(void)
{
	flush_workqueue(kintegrityd_wq);
}

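/*
 * Return a bio_integrity_payload to the bio_set's mempool if it was
 * allocated from one, or to the slab allocator otherwise.
 */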
static void __bio_integrity_free(struct bio_set *bs,
				 struct bio_integrity_payload *bip)
{
	if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
		if (bip->bip_vec)
			bvec_free(&bs->bvec_integrity_pool, bip->bip_vec,
				  bip->bip_max_vcnt);
		mempool_free(bip, &bs->bio_integrity_pool);
	} else {
		kfree(bip);
	}
}

/**
 * bio_integrity_alloc - Allocate integrity payload and attach it to bio
 * @bio: bio to attach integrity metadata to
 * @gfp_mask: Memory allocation mask
 * @nr_vecs: Number of integrity metadata scatter-gather elements
 *
 * Description: This function prepares a bio for attaching integrity
 * metadata. nr_vecs specifies the maximum number of pages containing
 * integrity metadata that can be attached.
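 *
 * Returns the allocated payload on success, or an ERR_PTR() on failure.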
 */
struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
						  gfp_t gfp_mask,
						  unsigned int nr_vecs)
{
	struct bio_integrity_payload *bip;
	struct bio_set *bs = bio->bi_pool;
	unsigned inline_vecs;

	if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))
		return ERR_PTR(-EOPNOTSUPP);

	if (!bs || !mempool_initialized(&bs->bio_integrity_pool)) {
		bip = kmalloc(struct_size(bip, bip_inline_vecs, nr_vecs), gfp_mask);
		inline_vecs = nr_vecs;
	} else {
		bip = mempool_alloc(&bs->bio_integrity_pool, gfp_mask);
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!bip))
		return ERR_PTR(-ENOMEM);

	memset(bip, 0, sizeof(*bip));

	/* always report as many vecs as asked explicitly, not inline vecs */
	bip->bip_max_vcnt = nr_vecs;
	if (nr_vecs > inline_vecs) {
		bip->bip_vec = bvec_alloc(&bs->bvec_integrity_pool,
					  &bip->bip_max_vcnt, gfp_mask);
		if (!bip->bip_vec)
			goto err;
	} else {
		bip->bip_vec = bip->bip_inline_vecs;
	}

	bip->bip_bio = bio;
	bio->bi_integrity = bip;
	bio->bi_opf |= REQ_INTEGRITY;

	return bip;
err:
	__bio_integrity_free(bs, bip);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(bio_integrity_alloc);
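
/*
 * Illustrative sketch (not taken from a real caller): a subsystem that
 * owns @bio could attach one page of protection information like this,
 * where "buf" and "len" are hypothetical:
 *
 *	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
 *	if (IS_ERR(bip))
 *		return PTR_ERR(bip);
 *	bio_integrity_add_page(bio, virt_to_page(buf), len,
 *			       offset_in_page(buf));
 */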
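
/*
 * Unpin the user pages in @bv and, if @dirty, mark non-compound pages
 * dirty first.
 */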
static void bio_integrity_unpin_bvec(struct bio_vec *bv, int nr_vecs,
				     bool dirty)
{
	int i;

	for (i = 0; i < nr_vecs; i++) {
		if (dirty && !PageCompound(bv[i].bv_page))
			set_page_dirty_lock(bv[i].bv_page);
		unpin_user_page(bv[i].bv_page);
	}
}

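/*
 * Copy integrity data that landed in the kernel bounce buffer
 * (bip_vec[0]) back to the pinned user bvecs saved at bip_vec[1..],
 * then unpin and dirty the user pages.
 */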
static void bio_integrity_uncopy_user(struct bio_integrity_payload *bip)
{
	unsigned short nr_vecs = bip->bip_max_vcnt - 1;
	struct bio_vec *copy = &bip->bip_vec[1];
	size_t bytes = bip->bip_iter.bi_size;
	struct iov_iter iter;
	int ret;

	iov_iter_bvec(&iter, ITER_DEST, copy, nr_vecs, bytes);
	ret = copy_to_iter(bvec_virt(bip->bip_vec), bytes, &iter);
	WARN_ON_ONCE(ret != bytes);

	bio_integrity_unpin_bvec(copy, nr_vecs, true);
}

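/*
 * Tear down user-mapped integrity metadata: in the bounce-buffer case,
 * copy the data back to userspace on reads and free the kernel buffer;
 * otherwise just unpin the user pages, dirtying them on reads.
 */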
static void bio_integrity_unmap_user(struct bio_integrity_payload *bip)
{
	bool dirty = bio_data_dir(bip->bip_bio) == READ;

	if (bip->bip_flags & BIP_COPY_USER) {
		if (dirty)
			bio_integrity_uncopy_user(bip);
		kfree(bvec_virt(bip->bip_vec));
		return;
	}

	bio_integrity_unpin_bvec(bip->bip_vec, bip->bip_max_vcnt, dirty);
}

/**
 * bio_integrity_free - Free bio integrity payload
 * @bio: bio containing bip to be freed
 *
 * Description: Used to free the integrity portion of a bio. Usually
 * called from bio_free().
 */
void bio_integrity_free(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_set *bs = bio->bi_pool;

	if (bip->bip_flags & BIP_BLOCK_INTEGRITY)
		kfree(bvec_virt(bip->bip_vec));
	else if (bip->bip_flags & BIP_INTEGRITY_USER)
		bio_integrity_unmap_user(bip);

	__bio_integrity_free(bs, bip);
	bio->bi_integrity = NULL;
	bio->bi_opf &= ~REQ_INTEGRITY;
}

/**
 * bio_integrity_add_page - Attach integrity metadata
 * @bio: bio to update
 * @page: page containing integrity metadata
 * @len: number of bytes of integrity metadata in page
 * @offset: start offset within page
 *
 * Description: Attach a page containing integrity metadata to bio.
 */
int bio_integrity_add_page(struct bio *bio, struct page *page,
			   unsigned int len, unsigned int offset)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (((bip->bip_iter.bi_size + len) >> SECTOR_SHIFT) >
	    queue_max_hw_sectors(q))
		return 0;

	if (bip->bip_vcnt > 0) {
		struct bio_vec *bv = &bip->bip_vec[bip->bip_vcnt - 1];
		bool same_page = false;

		if (bvec_try_merge_hw_page(q, bv, page, len, offset,
					   &same_page)) {
			bip->bip_iter.bi_size += len;
			return len;
		}

		if (bip->bip_vcnt >=
		    min(bip->bip_max_vcnt, queue_max_integrity_segments(q)))
			return 0;

		/*
		 * If the queue doesn't support SG gaps and adding this segment
		 * would create a gap, disallow it.
		 */
		if (bvec_gap_to_prev(&q->limits, bv, offset))
			return 0;
	}

	bvec_set_page(&bip->bip_vec[bip->bip_vcnt], page, len, offset);
	bip->bip_vcnt++;
	bip->bip_iter.bi_size += len;

	return len;
}
EXPORT_SYMBOL(bio_integrity_add_page);

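/*
 * The user buffer is either misaligned for the device or needs more
 * segments than the queue supports, so bounce the integrity data through
 * a kernel buffer: copy it in from userspace for writes, or save the
 * user bvecs so the data can be copied back on read completion.
 */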
static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec,
				   int nr_vecs, unsigned int len,
				   unsigned int direction, u32 seed)
{
	bool write = direction == ITER_SOURCE;
	struct bio_integrity_payload *bip;
	struct iov_iter iter;
	void *buf;
	int ret;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (write) {
		iov_iter_bvec(&iter, direction, bvec, nr_vecs, len);
		if (!copy_from_iter_full(buf, len, &iter)) {
			ret = -EFAULT;
			goto free_buf;
		}

		bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
	} else {
		memset(buf, 0, len);

		/*
		 * We need to preserve the original bvec and the number of vecs
		 * in it for completion handling
		 */
		bip = bio_integrity_alloc(bio, GFP_KERNEL, nr_vecs + 1);
	}

	if (IS_ERR(bip)) {
		ret = PTR_ERR(bip);
		goto free_buf;
	}

	if (write)
		bio_integrity_unpin_bvec(bvec, nr_vecs, false);
	else
		memcpy(&bip->bip_vec[1], bvec, nr_vecs * sizeof(*bvec));

	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
				     offset_in_page(buf));
	if (ret != len) {
		ret = -ENOMEM;
		goto free_bip;
	}

	bip->bip_flags |= BIP_INTEGRITY_USER | BIP_COPY_USER;
	bip->bip_iter.bi_sector = seed;
	return 0;
free_bip:
	bio_integrity_free(bio);
free_buf:
	kfree(buf);
	return ret;
}

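/*
 * Attach the pinned user pages directly as the integrity payload; no
 * bounce buffer is needed.
 */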
static int bio_integrity_init_user(struct bio *bio, struct bio_vec *bvec,
				   int nr_vecs, unsigned int len, u32 seed)
{
	struct bio_integrity_payload *bip;

	bip = bio_integrity_alloc(bio, GFP_KERNEL, nr_vecs);
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	memcpy(bip->bip_vec, bvec, nr_vecs * sizeof(*bvec));
	bip->bip_flags |= BIP_INTEGRITY_USER;
	bip->bip_iter.bi_sector = seed;
	bip->bip_iter.bi_size = len;
	return 0;
}

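/*
 * Collapse a pinned page array into bvecs, merging physically contiguous
 * pages within the same folio and dropping the extra pin on each merged
 * page.
 */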
static unsigned int bvec_from_pages(struct bio_vec *bvec, struct page **pages,
				    int nr_vecs, ssize_t bytes, ssize_t offset)
{
	unsigned int nr_bvecs = 0;
	int i, j;

	for (i = 0; i < nr_vecs; i = j) {
		size_t size = min_t(size_t, bytes, PAGE_SIZE - offset);
		struct folio *folio = page_folio(pages[i]);

		bytes -= size;
		for (j = i + 1; j < nr_vecs; j++) {
			size_t next = min_t(size_t, PAGE_SIZE, bytes);

			if (page_folio(pages[j]) != folio ||
			    pages[j] != pages[j - 1] + 1)
				break;
			unpin_user_page(pages[j]);
			size += next;
			bytes -= next;
		}

		bvec_set_page(&bvec[nr_bvecs], pages[i], size, offset);
		offset = 0;
		nr_bvecs++;
	}

	return nr_bvecs;
}

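/**
 * bio_integrity_map_user - Map user-provided integrity metadata into a bio
 * @bio: bio to attach the metadata to
 * @ubuf: user buffer containing the integrity metadata
 * @bytes: size of the buffer in bytes
 * @seed: initial sector stored in the payload's iterator
 *
 * Description: Pin the user pages backing @ubuf and attach them to the bio
 * as its integrity payload, falling back to a kernel bounce buffer when the
 * buffer is misaligned or spans too many segments for the queue.
 */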
int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t bytes,
			   u32 seed)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	unsigned int align = q->dma_pad_mask | queue_dma_alignment(q);
	struct page *stack_pages[UIO_FASTIOV], **pages = stack_pages;
	struct bio_vec stack_vec[UIO_FASTIOV], *bvec = stack_vec;
	unsigned int direction, nr_bvecs;
	struct iov_iter iter;
	int ret, nr_vecs;
	size_t offset;
	bool copy;

	if (bio_integrity(bio))
		return -EINVAL;
	if (bytes >> SECTOR_SHIFT > queue_max_hw_sectors(q))
		return -E2BIG;

	if (bio_data_dir(bio) == READ)
		direction = ITER_DEST;
	else
		direction = ITER_SOURCE;

	iov_iter_ubuf(&iter, direction, ubuf, bytes);
	nr_vecs = iov_iter_npages(&iter, BIO_MAX_VECS + 1);
	if (nr_vecs > BIO_MAX_VECS)
		return -E2BIG;
	if (nr_vecs > UIO_FASTIOV) {
		bvec = kcalloc(nr_vecs, sizeof(*bvec), GFP_KERNEL);
		if (!bvec)
			return -ENOMEM;
		pages = NULL;
	}

	copy = !iov_iter_is_aligned(&iter, align, align);
	ret = iov_iter_extract_pages(&iter, &pages, bytes, nr_vecs, 0, &offset);
	if (unlikely(ret < 0))
		goto free_bvec;

	nr_bvecs = bvec_from_pages(bvec, pages, nr_vecs, bytes, offset);
	if (pages != stack_pages)
		kvfree(pages);
	if (nr_bvecs > queue_max_integrity_segments(q))
		copy = true;

	if (copy)
		ret = bio_integrity_copy_user(bio, bvec, nr_bvecs, bytes,
					      direction, seed);
	else
		ret = bio_integrity_init_user(bio, bvec, nr_bvecs, bytes, seed);
	if (ret)
		goto release_pages;
	if (bvec != stack_vec)
		kfree(bvec);

	return 0;

release_pages:
	bio_integrity_unpin_bvec(bvec, nr_bvecs, false);
free_bvec:
	if (bvec != stack_vec)
		kfree(bvec);
	return ret;
}
EXPORT_SYMBOL_GPL(bio_integrity_map_user);
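
/*
 * Illustrative sketch (not taken from a real caller): a passthrough
 * ioctl handler might forward user metadata like this, where "meta_ubuf"
 * and "meta_len" are hypothetical:
 *
 *	ret = bio_integrity_map_user(bio, meta_ubuf, meta_len, seed);
 *	if (ret)
 *		goto out_put_bio;
 */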

/**
 * bio_integrity_process - Process integrity metadata for a bio
 * @bio: bio to generate/verify integrity metadata for
 * @proc_iter: iterator to process
 * @proc_fn: Pointer to the relevant processing function
 */
static blk_status_t bio_integrity_process(struct bio *bio,
		struct bvec_iter *proc_iter, integrity_processing_fn *proc_fn)
{
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	struct blk_integrity_iter iter;
	struct bvec_iter bviter;
	struct bio_vec bv;
	struct bio_integrity_payload *bip = bio_integrity(bio);
	blk_status_t ret = BLK_STS_OK;

	iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
	iter.interval = 1 << bi->interval_exp;
	iter.tuple_size = bi->tuple_size;
	iter.seed = proc_iter->bi_sector;
	iter.prot_buf = bvec_virt(bip->bip_vec);
	iter.pi_offset = bi->pi_offset;

	__bio_for_each_segment(bv, bio, bviter, *proc_iter) {
		void *kaddr = bvec_kmap_local(&bv);

		iter.data_buf = kaddr;
		iter.data_size = bv.bv_len;
		ret = proc_fn(&iter);
		kunmap_local(kaddr);

		if (ret)
			break;
	}
	return ret;
}

/**
 * bio_integrity_prep - Prepare bio for integrity I/O
 * @bio: bio to prepare
 *
 * Description: Checks if the bio already has an integrity payload attached.
 * If it does, the payload has been generated by another kernel subsystem,
 * and we just pass it through. Otherwise allocates integrity payload.
 * The bio must have data direction, target device and start sector set prior
 * to calling. In the WRITE case, integrity metadata will be generated using
 * the block device's integrity function. In the READ case, the buffer
 * will be prepared for DMA and a suitable end_io handler set up.
 */
bool bio_integrity_prep(struct bio *bio)
{
	struct bio_integrity_payload *bip;
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	void *buf;
	unsigned long start, end;
	unsigned int len, nr_pages;
	unsigned int bytes, offset, i;

	if (!bi)
		return true;

	if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE)
		return true;

	if (!bio_sectors(bio))
		return true;

	/* Already protected? */
	if (bio_integrity(bio))
		return true;

	if (bio_data_dir(bio) == READ) {
		if (!bi->profile->verify_fn ||
		    !(bi->flags & BLK_INTEGRITY_VERIFY))
			return true;
	} else {
		if (!bi->profile->generate_fn ||
		    !(bi->flags & BLK_INTEGRITY_GENERATE))
			return true;
	}

	/* Allocate kernel buffer for protection data */
	len = bio_integrity_bytes(bi, bio_sectors(bio));
	buf = kmalloc(len, GFP_NOIO);
	if (unlikely(buf == NULL)) {
		printk(KERN_ERR "could not allocate integrity buffer\n");
		goto err_end_io;
	}

	end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = ((unsigned long) buf) >> PAGE_SHIFT;
	nr_pages = end - start;

	/* Allocate bio integrity payload and integrity vectors */
	bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages);
	if (IS_ERR(bip)) {
		printk(KERN_ERR "could not allocate data integrity bioset\n");
		kfree(buf);
		goto err_end_io;
	}

	bip->bip_flags |= BIP_BLOCK_INTEGRITY;
	bip_set_seed(bip, bio->bi_iter.bi_sector);

	if (bi->flags & BLK_INTEGRITY_IP_CHECKSUM)
		bip->bip_flags |= BIP_IP_CHECKSUM;

	/* Map it */
	offset = offset_in_page(buf);
	for (i = 0; i < nr_pages && len > 0; i++) {
		bytes = PAGE_SIZE - offset;

		if (bytes > len)
			bytes = len;

		if (bio_integrity_add_page(bio, virt_to_page(buf),
					   bytes, offset) < bytes) {
			printk(KERN_ERR "could not attach integrity payload\n");
			goto err_end_io;
		}

		buf += bytes;
		len -= bytes;
		offset = 0;
	}

	/* Auto-generate integrity metadata if this is a write */
	if (bio_data_dir(bio) == WRITE) {
		bio_integrity_process(bio, &bio->bi_iter,
				      bi->profile->generate_fn);
	} else {
		bip->bio_iter = bio->bi_iter;
	}
	return true;

err_end_io:
	bio->bi_status = BLK_STS_RESOURCE;
	bio_endio(bio);
	return false;
}
EXPORT_SYMBOL(bio_integrity_prep);
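
/*
 * Illustrative sketch (not taken from a real caller) of submission-path
 * usage:
 *
 *	if (!bio_integrity_prep(bio))
 *		return;		// bio was already failed and completed
 */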

/**
 * bio_integrity_verify_fn - Integrity I/O completion worker
 * @work: Work struct stored in bio to be verified
 *
 * Description: This workqueue function is called to complete a READ
 * request. The function verifies the transferred integrity metadata
 * and then calls the original bio end_io function.
 */
static void bio_integrity_verify_fn(struct work_struct *work)
{
	struct bio_integrity_payload *bip =
		container_of(work, struct bio_integrity_payload, bip_work);
	struct bio *bio = bip->bip_bio;
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);

	/*
	 * By the time verify is called, the bio's iterator has been
	 * advanced by splits and completion processing, so rewind it
	 * to its original position.
	 */
	bio->bi_status = bio_integrity_process(bio, &bip->bio_iter,
					       bi->profile->verify_fn);
	bio_integrity_free(bio);
	bio_endio(bio);
}

/**
 * __bio_integrity_endio - Integrity I/O completion function
 * @bio: Protected bio
 *
 * Description: Completion for integrity I/O
 *
 * Normally I/O completion is done in interrupt context. However,
 * verifying I/O integrity is a time-consuming task which must be run
 * in process context. This function postpones completion
 * accordingly.
 */
bool __bio_integrity_endio(struct bio *bio)
{
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
	    (bip->bip_flags & BIP_BLOCK_INTEGRITY) && bi->profile->verify_fn) {
		INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
		queue_work(kintegrityd_wq, &bip->bip_work);
		return false;
	}

	bio_integrity_free(bio);
	return true;
}

/**
 * bio_integrity_advance - Advance integrity vector
 * @bio: bio whose integrity vector to update
 * @bytes_done: number of data bytes that have been completed
 *
 * Description: This function calculates how many integrity bytes the
 * number of completed data bytes correspond to and advances the
 * integrity vector accordingly.
 */
void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);

	bip->bip_iter.bi_sector += bio_integrity_intervals(bi, bytes_done >> 9);
	bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
}

/**
 * bio_integrity_trim - Trim integrity vector
 * @bio: bio whose integrity vector to update
 *
 * Description: Used to trim the integrity vector in a cloned bio.
 */
void bio_integrity_trim(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);

	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
}
EXPORT_SYMBOL(bio_integrity_trim);

/**
 * bio_integrity_clone - Callback for cloning bios with integrity metadata
 * @bio: New bio
 * @bio_src: Original bio
 * @gfp_mask: Memory allocation mask
 *
 * Description: Called to allocate a bip when cloning a bio
 */
int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
			gfp_t gfp_mask)
{
	struct bio_integrity_payload *bip_src = bio_integrity(bio_src);
	struct bio_integrity_payload *bip;

	BUG_ON(bip_src == NULL);

	bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	memcpy(bip->bip_vec, bip_src->bip_vec,
	       bip_src->bip_vcnt * sizeof(struct bio_vec));

	bip->bip_vcnt = bip_src->bip_vcnt;
	bip->bip_iter = bip_src->bip_iter;
	bip->bip_flags = bip_src->bip_flags & ~BIP_BLOCK_INTEGRITY;

	return 0;
}

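/*
 * Set up the integrity mempools of a bio_set so that @pool_size payloads
 * and bvecs are reserved.  Returns 0 on success (or if the pools already
 * exist) and -1 on allocation failure.
 */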
int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	if (mempool_initialized(&bs->bio_integrity_pool))
		return 0;

	if (mempool_init_slab_pool(&bs->bio_integrity_pool,
				   pool_size, bip_slab))
		return -1;

	if (biovec_init_pool(&bs->bvec_integrity_pool, pool_size)) {
		mempool_exit(&bs->bio_integrity_pool);
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(bioset_integrity_create);

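/* Release the integrity mempools of a bio_set. */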
void bioset_integrity_free(struct bio_set *bs)
{
	mempool_exit(&bs->bio_integrity_pool);
	mempool_exit(&bs->bvec_integrity_pool);
}

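/*
 * Boot-time setup of the kintegrityd workqueue and the bip slab cache;
 * panics on failure since integrity I/O cannot proceed without them.
 */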
void __init bio_integrity_init(void)
{
	/*
	 * kintegrityd won't block much but may burn a lot of CPU cycles.
	 * Make it highpri CPU intensive wq with max concurrency of 1.
	 */
	kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM |
					 WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
	if (!kintegrityd_wq)
		panic("Failed to create kintegrityd\n");

	bip_slab = kmem_cache_create("bio_integrity_payload",
				     sizeof(struct bio_integrity_payload) +
				     sizeof(struct bio_vec) * BIO_INLINE_VECS,
				     0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
}