1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright 2022 Red Hat, Inc. |
4 | */ |
5 | |
6 | #include <linux/bio.h> |
7 | #include <linux/blk-crypto.h> |
8 | #include <linux/blk-integrity.h> |
9 | |
10 | #include "dm-core.h" |
11 | |
12 | static inline bool dm_bvec_iter_rewind(const struct bio_vec *bv, |
13 | struct bvec_iter *iter, |
14 | unsigned int bytes) |
15 | { |
16 | int idx; |
17 | |
18 | iter->bi_size += bytes; |
19 | if (bytes <= iter->bi_bvec_done) { |
20 | iter->bi_bvec_done -= bytes; |
21 | return true; |
22 | } |
23 | |
24 | bytes -= iter->bi_bvec_done; |
25 | idx = iter->bi_idx - 1; |
26 | |
27 | while (idx >= 0 && bytes && bytes > bv[idx].bv_len) { |
28 | bytes -= bv[idx].bv_len; |
29 | idx--; |
30 | } |
31 | |
32 | if (WARN_ONCE(idx < 0 && bytes, |
33 | "Attempted to rewind iter beyond bvec's boundaries\n" )) { |
34 | iter->bi_size -= bytes; |
35 | iter->bi_bvec_done = 0; |
36 | iter->bi_idx = 0; |
37 | return false; |
38 | } |
39 | |
40 | iter->bi_idx = idx; |
41 | iter->bi_bvec_done = bv[idx].bv_len - bytes; |
42 | return true; |
43 | } |
44 | |
45 | #if defined(CONFIG_BLK_DEV_INTEGRITY) |
46 | |
47 | /** |
48 | * dm_bio_integrity_rewind - Rewind integrity vector |
49 | * @bio: bio whose integrity vector to update |
50 | * @bytes_done: number of data bytes to rewind |
51 | * |
52 | * Description: This function calculates how many integrity bytes the |
53 | * number of completed data bytes correspond to and rewind the |
54 | * integrity vector accordingly. |
55 | */ |
56 | static void dm_bio_integrity_rewind(struct bio *bio, unsigned int bytes_done) |
57 | { |
58 | struct bio_integrity_payload *bip = bio_integrity(bio); |
59 | struct blk_integrity *bi = blk_get_integrity(disk: bio->bi_bdev->bd_disk); |
60 | unsigned int bytes = bio_integrity_bytes(bi, sectors: bytes_done >> 9); |
61 | |
62 | bip->bip_iter.bi_sector -= bio_integrity_intervals(bi, sectors: bytes_done >> 9); |
63 | dm_bvec_iter_rewind(bv: bip->bip_vec, iter: &bip->bip_iter, bytes); |
64 | } |
65 | |
66 | #else /* CONFIG_BLK_DEV_INTEGRITY */ |
67 | |
static inline void dm_bio_integrity_rewind(struct bio *bio,
					   unsigned int bytes_done)
{
	/* No-op stub: integrity support compiled out (CONFIG_BLK_DEV_INTEGRITY=n). */
}
72 | |
73 | #endif |
74 | |
75 | #if defined(CONFIG_BLK_INLINE_ENCRYPTION) |
76 | |
77 | /* Decrements @dun by @dec, treating @dun as a multi-limb integer. */ |
78 | static void dm_bio_crypt_dun_decrement(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], |
79 | unsigned int dec) |
80 | { |
81 | int i; |
82 | |
83 | for (i = 0; dec && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) { |
84 | u64 prev = dun[i]; |
85 | |
86 | dun[i] -= dec; |
87 | if (dun[i] > prev) |
88 | dec = 1; |
89 | else |
90 | dec = 0; |
91 | } |
92 | } |
93 | |
94 | static void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes) |
95 | { |
96 | struct bio_crypt_ctx *bc = bio->bi_crypt_context; |
97 | |
98 | dm_bio_crypt_dun_decrement(dun: bc->bc_dun, |
99 | dec: bytes >> bc->bc_key->data_unit_size_bits); |
100 | } |
101 | |
102 | #else /* CONFIG_BLK_INLINE_ENCRYPTION */ |
103 | |
static inline void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)
{
	/* No-op stub: inline encryption compiled out (CONFIG_BLK_INLINE_ENCRYPTION=n). */
}
107 | |
108 | #endif |
109 | |
110 | static inline void dm_bio_rewind_iter(const struct bio *bio, |
111 | struct bvec_iter *iter, unsigned int bytes) |
112 | { |
113 | iter->bi_sector -= bytes >> 9; |
114 | |
115 | /* No advance means no rewind */ |
116 | if (bio_no_advance_iter(bio)) |
117 | iter->bi_size += bytes; |
118 | else |
119 | dm_bvec_iter_rewind(bv: bio->bi_io_vec, iter, bytes); |
120 | } |
121 | |
122 | /** |
123 | * dm_bio_rewind - update ->bi_iter of @bio by rewinding @bytes. |
124 | * @bio: bio to rewind |
125 | * @bytes: how many bytes to rewind |
126 | * |
127 | * WARNING: |
128 | * Caller must ensure that @bio has a fixed end sector, to allow |
129 | * rewinding from end of bio and restoring its original position. |
130 | * Caller is also responsibile for restoring bio's size. |
131 | */ |
132 | static void dm_bio_rewind(struct bio *bio, unsigned int bytes) |
133 | { |
134 | if (bio_integrity(bio)) |
135 | dm_bio_integrity_rewind(bio, bytes_done: bytes); |
136 | |
137 | if (bio_has_crypt_ctx(bio)) |
138 | dm_bio_crypt_rewind(bio, bytes); |
139 | |
140 | dm_bio_rewind_iter(bio, iter: &bio->bi_iter, bytes); |
141 | } |
142 | |
143 | void dm_io_rewind(struct dm_io *io, struct bio_set *bs) |
144 | { |
145 | struct bio *orig = io->orig_bio; |
146 | struct bio *new_orig = bio_alloc_clone(bdev: orig->bi_bdev, bio_src: orig, |
147 | GFP_NOIO, bs); |
148 | /* |
149 | * dm_bio_rewind can restore to previous position since the |
150 | * end sector is fixed for original bio, but we still need |
151 | * to restore bio's size manually (using io->sectors). |
152 | */ |
153 | dm_bio_rewind(bio: new_orig, bytes: ((io->sector_offset << 9) - |
154 | orig->bi_iter.bi_size)); |
155 | bio_trim(bio: new_orig, offset: 0, size: io->sectors); |
156 | |
157 | bio_chain(new_orig, orig); |
158 | /* |
159 | * __bi_remaining was increased (by dm_split_and_process_bio), |
160 | * so must drop the one added in bio_chain. |
161 | */ |
162 | atomic_dec(v: &orig->__bi_remaining); |
163 | io->orig_bio = new_orig; |
164 | } |
165 | |