1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved. |
4 | * Copyright (C) 2016-2017 Milan Broz |
5 | * Copyright (C) 2016-2017 Mikulas Patocka |
6 | * |
7 | * This file is released under the GPL. |
8 | */ |
9 | |
10 | #include "dm-bio-record.h" |
11 | |
12 | #include <linux/compiler.h> |
13 | #include <linux/module.h> |
14 | #include <linux/device-mapper.h> |
15 | #include <linux/dm-io.h> |
16 | #include <linux/vmalloc.h> |
17 | #include <linux/sort.h> |
18 | #include <linux/rbtree.h> |
19 | #include <linux/delay.h> |
20 | #include <linux/random.h> |
21 | #include <linux/reboot.h> |
22 | #include <crypto/hash.h> |
23 | #include <crypto/skcipher.h> |
24 | #include <linux/async_tx.h> |
25 | #include <linux/dm-bufio.h> |
26 | |
27 | #include "dm-audit.h" |
28 | |
29 | #define DM_MSG_PREFIX "integrity" |
30 | |
31 | #define DEFAULT_INTERLEAVE_SECTORS 32768 |
32 | #define DEFAULT_JOURNAL_SIZE_FACTOR 7 |
33 | #define DEFAULT_SECTORS_PER_BITMAP_BIT 32768 |
34 | #define DEFAULT_BUFFER_SECTORS 128 |
35 | #define DEFAULT_JOURNAL_WATERMARK 50 |
36 | #define DEFAULT_SYNC_MSEC 10000 |
37 | #define DEFAULT_MAX_JOURNAL_SECTORS (IS_ENABLED(CONFIG_64BIT) ? 131072 : 8192) |
38 | #define MIN_LOG2_INTERLEAVE_SECTORS 3 |
39 | #define MAX_LOG2_INTERLEAVE_SECTORS 31 |
40 | #define METADATA_WORKQUEUE_MAX_ACTIVE 16 |
41 | #define RECALC_SECTORS (IS_ENABLED(CONFIG_64BIT) ? 32768 : 2048) |
42 | #define RECALC_WRITE_SUPER 16 |
43 | #define BITMAP_BLOCK_SIZE 4096 /* don't change it */ |
44 | #define BITMAP_FLUSH_INTERVAL (10 * HZ) |
45 | #define DISCARD_FILLER 0xf6 |
46 | #define SALT_SIZE 16 |
47 | |
48 | /* |
49 | * Warning - DEBUG_PRINT prints security-sensitive data to the log, |
50 | * so it should not be enabled in the official kernel |
51 | */ |
52 | //#define DEBUG_PRINT |
53 | //#define INTERNAL_VERIFY |
54 | |
55 | /* |
56 | * On disk structures |
57 | */ |
58 | |
59 | #define SB_MAGIC "integrt" |
60 | #define SB_VERSION_1 1 |
61 | #define SB_VERSION_2 2 |
62 | #define SB_VERSION_3 3 |
63 | #define SB_VERSION_4 4 |
64 | #define SB_VERSION_5 5 |
65 | #define SB_SECTORS 8 |
66 | #define MAX_SECTORS_PER_BLOCK 8 |
67 | |
68 | struct superblock { |
69 | __u8 magic[8]; |
70 | __u8 version; |
71 | __u8 log2_interleave_sectors; |
72 | __le16 integrity_tag_size; |
73 | __le32 journal_sections; |
74 | __le64 provided_data_sectors; /* userspace uses this value */ |
75 | __le32 flags; |
76 | __u8 log2_sectors_per_block; |
77 | __u8 log2_blocks_per_bitmap_bit; |
78 | __u8 pad[2]; |
79 | __le64 recalc_sector; |
80 | __u8 pad2[8]; |
81 | __u8 salt[SALT_SIZE]; |
82 | }; |
83 | |
84 | #define SB_FLAG_HAVE_JOURNAL_MAC 0x1 |
85 | #define SB_FLAG_RECALCULATING 0x2 |
86 | #define SB_FLAG_DIRTY_BITMAP 0x4 |
87 | #define SB_FLAG_FIXED_PADDING 0x8 |
88 | #define SB_FLAG_FIXED_HMAC 0x10 |
89 | |
90 | #define JOURNAL_ENTRY_ROUNDUP 8 |
91 | |
92 | typedef __le64 commit_id_t; |
93 | #define JOURNAL_MAC_PER_SECTOR 8 |
94 | |
95 | struct journal_entry { |
96 | union { |
97 | struct { |
98 | __le32 sector_lo; |
99 | __le32 sector_hi; |
100 | } s; |
101 | __le64 sector; |
102 | } u; |
103 | commit_id_t last_bytes[]; |
104 | /* __u8 tag[0]; */ |
105 | }; |
106 | |
107 | #define journal_entry_tag(ic, je) ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block]) |
108 | |
109 | #if BITS_PER_LONG == 64 |
110 | #define journal_entry_set_sector(je, x) do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0) |
111 | #else |
112 | #define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0) |
113 | #endif |
114 | #define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector) |
115 | #define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1)) |
116 | #define journal_entry_set_unused(je) ((je)->u.s.sector_hi = cpu_to_le32(-1)) |
117 | #define journal_entry_is_inprogress(je) ((je)->u.s.sector_hi == cpu_to_le32(-2)) |
118 | #define journal_entry_set_inprogress(je) ((je)->u.s.sector_hi = cpu_to_le32(-2)) |
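
/*
 * sector_hi doubles as the entry state: -1 means the entry is unused and
 * -2 means it is still being filled in.  journal_entry_set_sector()
 * publishes the sector number with smp_wmb() so that the entry never
 * becomes visible before the stores that precede it (on 32-bit machines,
 * the low half is also ordered before the high half).
 */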
119 | |
120 | #define JOURNAL_BLOCK_SECTORS 8 |
121 | #define JOURNAL_SECTOR_DATA ((1 << SECTOR_SHIFT) - sizeof(commit_id_t)) |
122 | #define JOURNAL_MAC_SIZE (JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS) |
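
/*
 * The MAC of a journal section is JOURNAL_MAC_SIZE bytes and is stored
 * split across the section's JOURNAL_BLOCK_SECTORS entry sectors,
 * JOURNAL_MAC_PER_SECTOR bytes in each (see rw_section_mac()).
 */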
123 | |
124 | struct journal_sector { |
125 | struct_group(sectors, |
126 | __u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR]; |
127 | __u8 mac[JOURNAL_MAC_PER_SECTOR]; |
128 | ); |
129 | commit_id_t commit_id; |
130 | }; |
131 | |
132 | #define MAX_TAG_SIZE (JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK])) |
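
/*
 * A single journal entry (header, last_bytes[] for the largest supported
 * block size and the tag) must fit into the entry area of one journal
 * sector, i.e. JOURNAL_SECTOR_DATA minus the per-sector MAC bytes; this
 * bounds the maximum tag size.
 */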
133 | |
134 | #define METADATA_PADDING_SECTORS 8 |
135 | |
136 | #define N_COMMIT_IDS 4 |
137 | |
138 | static unsigned char prev_commit_seq(unsigned char seq) |
139 | { |
140 | return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS; |
141 | } |
142 | |
143 | static unsigned char next_commit_seq(unsigned char seq) |
144 | { |
145 | return (seq + 1) % N_COMMIT_IDS; |
146 | } |
147 | |
148 | /* |
149 | * In-memory structures |
150 | */ |
151 | |
152 | struct journal_node { |
153 | struct rb_node node; |
154 | sector_t sector; |
155 | }; |
156 | |
157 | struct alg_spec { |
158 | char *alg_string; |
159 | char *key_string; |
160 | __u8 *key; |
161 | unsigned int key_size; |
162 | }; |
163 | |
164 | struct dm_integrity_c { |
165 | struct dm_dev *dev; |
166 | struct dm_dev *meta_dev; |
167 | unsigned int tag_size; |
168 | __s8 log2_tag_size; |
169 | sector_t start; |
170 | mempool_t journal_io_mempool; |
171 | struct dm_io_client *io; |
172 | struct dm_bufio_client *bufio; |
173 | struct workqueue_struct *metadata_wq; |
174 | struct superblock *sb; |
175 | unsigned int journal_pages; |
176 | unsigned int n_bitmap_blocks; |
177 | |
178 | struct page_list *journal; |
179 | struct page_list *journal_io; |
180 | struct page_list *journal_xor; |
181 | struct page_list *recalc_bitmap; |
182 | struct page_list *may_write_bitmap; |
183 | struct bitmap_block_status *bbs; |
184 | unsigned int bitmap_flush_interval; |
185 | int synchronous_mode; |
186 | struct bio_list synchronous_bios; |
187 | struct delayed_work bitmap_flush_work; |
188 | |
189 | struct crypto_skcipher *journal_crypt; |
190 | struct scatterlist **journal_scatterlist; |
191 | struct scatterlist **journal_io_scatterlist; |
192 | struct skcipher_request **sk_requests; |
193 | |
194 | struct crypto_shash *journal_mac; |
195 | |
196 | struct journal_node *journal_tree; |
197 | struct rb_root journal_tree_root; |
198 | |
199 | sector_t provided_data_sectors; |
200 | |
201 | unsigned short journal_entry_size; |
202 | unsigned char journal_entries_per_sector; |
203 | unsigned char journal_section_entries; |
204 | unsigned short journal_section_sectors; |
205 | unsigned int journal_sections; |
206 | unsigned int journal_entries; |
207 | sector_t data_device_sectors; |
208 | sector_t meta_device_sectors; |
209 | unsigned int initial_sectors; |
210 | unsigned int metadata_run; |
211 | __s8 log2_metadata_run; |
212 | __u8 log2_buffer_sectors; |
213 | __u8 sectors_per_block; |
214 | __u8 log2_blocks_per_bitmap_bit; |
215 | |
216 | unsigned char mode; |
217 | |
218 | int failed; |
219 | |
220 | struct crypto_shash *internal_hash; |
221 | |
222 | struct dm_target *ti; |
223 | |
224 | /* these variables are locked with endio_wait.lock */ |
225 | struct rb_root in_progress; |
226 | struct list_head wait_list; |
227 | wait_queue_head_t endio_wait; |
228 | struct workqueue_struct *wait_wq; |
229 | struct workqueue_struct *offload_wq; |
230 | |
231 | unsigned char commit_seq; |
232 | commit_id_t commit_ids[N_COMMIT_IDS]; |
233 | |
234 | unsigned int committed_section; |
235 | unsigned int n_committed_sections; |
236 | |
237 | unsigned int uncommitted_section; |
238 | unsigned int n_uncommitted_sections; |
239 | |
240 | unsigned int free_section; |
241 | unsigned char free_section_entry; |
242 | unsigned int free_sectors; |
243 | |
244 | unsigned int free_sectors_threshold; |
245 | |
246 | struct workqueue_struct *commit_wq; |
247 | struct work_struct commit_work; |
248 | |
249 | struct workqueue_struct *writer_wq; |
250 | struct work_struct writer_work; |
251 | |
252 | struct workqueue_struct *recalc_wq; |
253 | struct work_struct recalc_work; |
254 | |
255 | struct bio_list flush_bio_list; |
256 | |
257 | unsigned long autocommit_jiffies; |
258 | struct timer_list autocommit_timer; |
259 | unsigned int autocommit_msec; |
260 | |
261 | wait_queue_head_t copy_to_journal_wait; |
262 | |
263 | struct completion crypto_backoff; |
264 | |
265 | bool wrote_to_journal; |
266 | bool journal_uptodate; |
267 | bool just_formatted; |
268 | bool recalculate_flag; |
269 | bool reset_recalculate_flag; |
270 | bool discard; |
271 | bool fix_padding; |
272 | bool fix_hmac; |
273 | bool legacy_recalculate; |
274 | |
275 | struct alg_spec internal_hash_alg; |
276 | struct alg_spec journal_crypt_alg; |
277 | struct alg_spec journal_mac_alg; |
278 | |
279 | atomic64_t number_of_mismatches; |
280 | |
281 | mempool_t recheck_pool; |
282 | |
283 | struct notifier_block reboot_notifier; |
284 | }; |
285 | |
286 | struct dm_integrity_range { |
287 | sector_t logical_sector; |
288 | sector_t n_sectors; |
289 | bool waiting; |
290 | union { |
291 | struct rb_node node; |
292 | struct { |
293 | struct task_struct *task; |
294 | struct list_head wait_entry; |
295 | }; |
296 | }; |
297 | }; |
298 | |
299 | struct dm_integrity_io { |
300 | struct work_struct work; |
301 | |
302 | struct dm_integrity_c *ic; |
303 | enum req_op op; |
304 | bool fua; |
305 | |
306 | struct dm_integrity_range range; |
307 | |
308 | sector_t metadata_block; |
309 | unsigned int metadata_offset; |
310 | |
311 | atomic_t in_flight; |
312 | blk_status_t bi_status; |
313 | |
314 | struct completion *completion; |
315 | |
316 | struct dm_bio_details bio_details; |
317 | }; |
318 | |
319 | struct journal_completion { |
320 | struct dm_integrity_c *ic; |
321 | atomic_t in_flight; |
322 | struct completion comp; |
323 | }; |
324 | |
325 | struct journal_io { |
326 | struct dm_integrity_range range; |
327 | struct journal_completion *comp; |
328 | }; |
329 | |
330 | struct bitmap_block_status { |
331 | struct work_struct work; |
332 | struct dm_integrity_c *ic; |
333 | unsigned int idx; |
334 | unsigned long *bitmap; |
335 | struct bio_list bio_queue; |
336 | spinlock_t bio_queue_lock; |
337 | |
338 | }; |
339 | |
340 | static struct kmem_cache *journal_io_cache; |
341 | |
342 | #define JOURNAL_IO_MEMPOOL 32 |
343 | |
344 | #ifdef DEBUG_PRINT |
345 | #define DEBUG_print(x, ...) printk(KERN_DEBUG x, ##__VA_ARGS__) |
346 | #define DEBUG_bytes(bytes, len, msg, ...) printk(KERN_DEBUG msg "%s%*ph\n", ##__VA_ARGS__, \ |
347 | len ? ": " : "", len, bytes) |
348 | #else |
349 | #define DEBUG_print(x, ...) do { } while (0) |
350 | #define DEBUG_bytes(bytes, len, msg, ...) do { } while (0) |
351 | #endif |
352 | |
353 | static void dm_integrity_prepare(struct request *rq) |
354 | { |
355 | } |
356 | |
357 | static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes) |
358 | { |
359 | } |
360 | |
361 | /* |
362 | * DM Integrity profile, protection is performed layer above (dm-crypt) |
363 | */ |
364 | static const struct blk_integrity_profile dm_integrity_profile = { |
	.name			= "DM-DIF-EXT-TAG",
366 | .generate_fn = NULL, |
367 | .verify_fn = NULL, |
368 | .prepare_fn = dm_integrity_prepare, |
369 | .complete_fn = dm_integrity_complete, |
370 | }; |
371 | |
372 | static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map); |
373 | static void integrity_bio_wait(struct work_struct *w); |
374 | static void dm_integrity_dtr(struct dm_target *ti); |
375 | |
376 | static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err) |
377 | { |
378 | if (err == -EILSEQ) |
		atomic64_inc(&ic->number_of_mismatches);
	if (!cmpxchg(&ic->failed, 0, err))
		DMERR("Error on %s: %d", msg, err);
382 | } |
383 | |
384 | static int dm_integrity_failed(struct dm_integrity_c *ic) |
385 | { |
386 | return READ_ONCE(ic->failed); |
387 | } |
388 | |
389 | static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic) |
390 | { |
391 | if (ic->legacy_recalculate) |
392 | return false; |
393 | if (!(ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) ? |
394 | ic->internal_hash_alg.key || ic->journal_mac_alg.key : |
395 | ic->internal_hash_alg.key && !ic->journal_mac_alg.key) |
396 | return true; |
397 | return false; |
398 | } |
399 | |
400 | static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned int i, |
401 | unsigned int j, unsigned char seq) |
402 | { |
403 | /* |
404 | * Xor the number with section and sector, so that if a piece of |
405 | * journal is written at wrong place, it is detected. |
406 | */ |
407 | return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j); |
408 | } |
409 | |
410 | static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector, |
411 | sector_t *area, sector_t *offset) |
412 | { |
413 | if (!ic->meta_dev) { |
414 | __u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors; |
415 | *area = data_sector >> log2_interleave_sectors; |
416 | *offset = (unsigned int)data_sector & ((1U << log2_interleave_sectors) - 1); |
417 | } else { |
418 | *area = 0; |
419 | *offset = data_sector; |
420 | } |
421 | } |
422 | |
423 | #define sector_to_block(ic, n) \ |
424 | do { \ |
425 | BUG_ON((n) & (unsigned int)((ic)->sectors_per_block - 1)); \ |
426 | (n) >>= (ic)->sb->log2_sectors_per_block; \ |
427 | } while (0) |
428 | |
429 | static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area, |
430 | sector_t offset, unsigned int *metadata_offset) |
431 | { |
432 | __u64 ms; |
433 | unsigned int mo; |
434 | |
435 | ms = area << ic->sb->log2_interleave_sectors; |
436 | if (likely(ic->log2_metadata_run >= 0)) |
437 | ms += area << ic->log2_metadata_run; |
438 | else |
439 | ms += area * ic->metadata_run; |
440 | ms >>= ic->log2_buffer_sectors; |
441 | |
442 | sector_to_block(ic, offset); |
443 | |
444 | if (likely(ic->log2_tag_size >= 0)) { |
445 | ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size); |
446 | mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1); |
447 | } else { |
448 | ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors); |
449 | mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1); |
450 | } |
451 | *metadata_offset = mo; |
452 | return ms; |
453 | } |
454 | |
455 | static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset) |
456 | { |
457 | sector_t result; |
458 | |
459 | if (ic->meta_dev) |
460 | return offset; |
461 | |
462 | result = area << ic->sb->log2_interleave_sectors; |
463 | if (likely(ic->log2_metadata_run >= 0)) |
464 | result += (area + 1) << ic->log2_metadata_run; |
465 | else |
466 | result += (area + 1) * ic->metadata_run; |
467 | |
468 | result += (sector_t)ic->initial_sectors + offset; |
469 | result += ic->start; |
470 | |
471 | return result; |
472 | } |
473 | |
474 | static void wraparound_section(struct dm_integrity_c *ic, unsigned int *sec_ptr) |
475 | { |
476 | if (unlikely(*sec_ptr >= ic->journal_sections)) |
477 | *sec_ptr -= ic->journal_sections; |
478 | } |
479 | |
480 | static void sb_set_version(struct dm_integrity_c *ic) |
481 | { |
482 | if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) |
483 | ic->sb->version = SB_VERSION_5; |
484 | else if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) |
485 | ic->sb->version = SB_VERSION_4; |
486 | else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) |
487 | ic->sb->version = SB_VERSION_3; |
488 | else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) |
489 | ic->sb->version = SB_VERSION_2; |
490 | else |
491 | ic->sb->version = SB_VERSION_1; |
492 | } |
493 | |
494 | static int sb_mac(struct dm_integrity_c *ic, bool wr) |
495 | { |
496 | SHASH_DESC_ON_STACK(desc, ic->journal_mac); |
497 | int r; |
	unsigned int mac_size = crypto_shash_digestsize(ic->journal_mac);
	__u8 *sb = (__u8 *)ic->sb;
	__u8 *mac = sb + (1 << SECTOR_SHIFT) - mac_size;

	if (sizeof(struct superblock) + mac_size > 1 << SECTOR_SHIFT) {
		dm_integrity_io_error(ic, "digest is too long", -EINVAL);
504 | return -EINVAL; |
505 | } |
506 | |
507 | desc->tfm = ic->journal_mac; |
508 | |
509 | if (likely(wr)) { |
		r = crypto_shash_digest(desc, sb, mac - sb, mac);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_digest", r);
			return r;
		}
	} else {
		__u8 actual_mac[HASH_MAX_DIGESTSIZE];

		r = crypto_shash_digest(desc, sb, mac - sb, actual_mac);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_digest", r);
			return r;
		}
		if (memcmp(mac, actual_mac, mac_size)) {
			dm_integrity_io_error(ic, "superblock mac", -EILSEQ);
			dm_audit_log_target(DM_MSG_PREFIX, "mac-superblock", ic->ti, 0);
526 | return -EILSEQ; |
527 | } |
528 | } |
529 | |
530 | return 0; |
531 | } |
532 | |
533 | static int sync_rw_sb(struct dm_integrity_c *ic, blk_opf_t opf) |
534 | { |
535 | struct dm_io_request io_req; |
536 | struct dm_io_region io_loc; |
537 | const enum req_op op = opf & REQ_OP_MASK; |
538 | int r; |
539 | |
540 | io_req.bi_opf = opf; |
541 | io_req.mem.type = DM_IO_KMEM; |
542 | io_req.mem.ptr.addr = ic->sb; |
543 | io_req.notify.fn = NULL; |
544 | io_req.client = ic->io; |
545 | io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev; |
546 | io_loc.sector = ic->start; |
547 | io_loc.count = SB_SECTORS; |
548 | |
549 | if (op == REQ_OP_WRITE) { |
550 | sb_set_version(ic); |
551 | if (ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) { |
			r = sb_mac(ic, true);
553 | if (unlikely(r)) |
554 | return r; |
555 | } |
556 | } |
557 | |
	r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
559 | if (unlikely(r)) |
560 | return r; |
561 | |
562 | if (op == REQ_OP_READ) { |
563 | if (ic->mode != 'R' && ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) { |
			r = sb_mac(ic, false);
565 | if (unlikely(r)) |
566 | return r; |
567 | } |
568 | } |
569 | |
570 | return 0; |
571 | } |
572 | |
573 | #define BITMAP_OP_TEST_ALL_SET 0 |
574 | #define BITMAP_OP_TEST_ALL_CLEAR 1 |
575 | #define BITMAP_OP_SET 2 |
576 | #define BITMAP_OP_CLEAR 3 |
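
/*
 * block_bitmap_op() applies one of the four operations above to the bits
 * covering [sector, sector + n_sectors).  One bit covers
 * 2^(log2_sectors_per_block + log2_blocks_per_bitmap_bit) sectors; full
 * words (and, for BITMAP_OP_CLEAR, full pages) are processed at once as
 * a fast path.
 */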
577 | |
578 | static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap, |
579 | sector_t sector, sector_t n_sectors, int mode) |
580 | { |
581 | unsigned long bit, end_bit, this_end_bit, page, end_page; |
582 | unsigned long *data; |
583 | |
584 | if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) { |
585 | DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)" , |
586 | sector, |
587 | n_sectors, |
588 | ic->sb->log2_sectors_per_block, |
589 | ic->log2_blocks_per_bitmap_bit, |
590 | mode); |
591 | BUG(); |
592 | } |
593 | |
594 | if (unlikely(!n_sectors)) |
595 | return true; |
596 | |
597 | bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); |
598 | end_bit = (sector + n_sectors - 1) >> |
599 | (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); |
600 | |
601 | page = bit / (PAGE_SIZE * 8); |
602 | bit %= PAGE_SIZE * 8; |
603 | |
604 | end_page = end_bit / (PAGE_SIZE * 8); |
605 | end_bit %= PAGE_SIZE * 8; |
606 | |
607 | repeat: |
608 | if (page < end_page) |
609 | this_end_bit = PAGE_SIZE * 8 - 1; |
610 | else |
611 | this_end_bit = end_bit; |
612 | |
	data = lowmem_page_address(bitmap[page].page);
614 | |
615 | if (mode == BITMAP_OP_TEST_ALL_SET) { |
616 | while (bit <= this_end_bit) { |
617 | if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) { |
618 | do { |
619 | if (data[bit / BITS_PER_LONG] != -1) |
620 | return false; |
621 | bit += BITS_PER_LONG; |
622 | } while (this_end_bit >= bit + BITS_PER_LONG - 1); |
623 | continue; |
624 | } |
625 | if (!test_bit(bit, data)) |
626 | return false; |
627 | bit++; |
628 | } |
629 | } else if (mode == BITMAP_OP_TEST_ALL_CLEAR) { |
630 | while (bit <= this_end_bit) { |
631 | if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) { |
632 | do { |
633 | if (data[bit / BITS_PER_LONG] != 0) |
634 | return false; |
635 | bit += BITS_PER_LONG; |
636 | } while (this_end_bit >= bit + BITS_PER_LONG - 1); |
637 | continue; |
638 | } |
639 | if (test_bit(bit, data)) |
640 | return false; |
641 | bit++; |
642 | } |
643 | } else if (mode == BITMAP_OP_SET) { |
644 | while (bit <= this_end_bit) { |
645 | if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) { |
646 | do { |
647 | data[bit / BITS_PER_LONG] = -1; |
648 | bit += BITS_PER_LONG; |
649 | } while (this_end_bit >= bit + BITS_PER_LONG - 1); |
650 | continue; |
651 | } |
652 | __set_bit(bit, data); |
653 | bit++; |
654 | } |
655 | } else if (mode == BITMAP_OP_CLEAR) { |
656 | if (!bit && this_end_bit == PAGE_SIZE * 8 - 1) |
			clear_page(data);
658 | else { |
659 | while (bit <= this_end_bit) { |
660 | if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) { |
661 | do { |
662 | data[bit / BITS_PER_LONG] = 0; |
663 | bit += BITS_PER_LONG; |
664 | } while (this_end_bit >= bit + BITS_PER_LONG - 1); |
665 | continue; |
666 | } |
667 | __clear_bit(bit, data); |
668 | bit++; |
669 | } |
670 | } |
671 | } else { |
672 | BUG(); |
673 | } |
674 | |
675 | if (unlikely(page < end_page)) { |
676 | bit = 0; |
677 | page++; |
678 | goto repeat; |
679 | } |
680 | |
681 | return true; |
682 | } |
683 | |
684 | static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src) |
685 | { |
686 | unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE); |
687 | unsigned int i; |
688 | |
689 | for (i = 0; i < n_bitmap_pages; i++) { |
		unsigned long *dst_data = lowmem_page_address(dst[i].page);
		unsigned long *src_data = lowmem_page_address(src[i].page);

		copy_page(dst_data, src_data);
694 | } |
695 | } |
696 | |
697 | static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector) |
698 | { |
699 | unsigned int bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); |
700 | unsigned int bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8); |
701 | |
702 | BUG_ON(bitmap_block >= ic->n_bitmap_blocks); |
703 | return &ic->bbs[bitmap_block]; |
704 | } |
705 | |
706 | static void access_journal_check(struct dm_integrity_c *ic, unsigned int section, unsigned int offset, |
707 | bool e, const char *function) |
708 | { |
709 | #if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY) |
710 | unsigned int limit = e ? ic->journal_section_entries : ic->journal_section_sectors; |
711 | |
712 | if (unlikely(section >= ic->journal_sections) || |
713 | unlikely(offset >= limit)) { |
714 | DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)" , |
715 | function, section, offset, ic->journal_sections, limit); |
716 | BUG(); |
717 | } |
718 | #endif |
719 | } |
720 | |
721 | static void page_list_location(struct dm_integrity_c *ic, unsigned int section, unsigned int offset, |
722 | unsigned int *pl_index, unsigned int *pl_offset) |
723 | { |
724 | unsigned int sector; |
725 | |
	access_journal_check(ic, section, offset, false, "page_list_location");
727 | |
728 | sector = section * ic->journal_section_sectors + offset; |
729 | |
730 | *pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); |
731 | *pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); |
732 | } |
733 | |
734 | static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl, |
735 | unsigned int section, unsigned int offset, unsigned int *n_sectors) |
736 | { |
737 | unsigned int pl_index, pl_offset; |
738 | char *va; |
739 | |
	page_list_location(ic, section, offset, &pl_index, &pl_offset);
741 | |
742 | if (n_sectors) |
743 | *n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT; |
744 | |
	va = lowmem_page_address(pl[pl_index].page);
746 | |
747 | return (struct journal_sector *)(va + pl_offset); |
748 | } |
749 | |
750 | static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset) |
751 | { |
	return access_page_list(ic, ic->journal, section, offset, NULL);
753 | } |
754 | |
755 | static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned int section, unsigned int n) |
756 | { |
757 | unsigned int rel_sector, offset; |
758 | struct journal_sector *js; |
759 | |
	access_journal_check(ic, section, n, true, "access_journal_entry");
761 | |
762 | rel_sector = n % JOURNAL_BLOCK_SECTORS; |
763 | offset = n / JOURNAL_BLOCK_SECTORS; |
764 | |
	js = access_journal(ic, section, rel_sector);
766 | return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size); |
767 | } |
768 | |
769 | static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned int section, unsigned int n) |
770 | { |
771 | n <<= ic->sb->log2_sectors_per_block; |
772 | |
773 | n += JOURNAL_BLOCK_SECTORS; |
774 | |
	access_journal_check(ic, section, n, false, "access_journal_data");

	return access_journal(ic, section, n);
778 | } |
779 | |
780 | static void section_mac(struct dm_integrity_c *ic, unsigned int section, __u8 result[JOURNAL_MAC_SIZE]) |
781 | { |
782 | SHASH_DESC_ON_STACK(desc, ic->journal_mac); |
783 | int r; |
784 | unsigned int j, size; |
785 | |
786 | desc->tfm = ic->journal_mac; |
787 | |
788 | r = crypto_shash_init(desc); |
789 | if (unlikely(r < 0)) { |
790 | dm_integrity_io_error(ic, msg: "crypto_shash_init" , err: r); |
791 | goto err; |
792 | } |
793 | |
794 | if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) { |
795 | __le64 section_le; |
796 | |
797 | r = crypto_shash_update(desc, data: (__u8 *)&ic->sb->salt, SALT_SIZE); |
798 | if (unlikely(r < 0)) { |
799 | dm_integrity_io_error(ic, msg: "crypto_shash_update" , err: r); |
800 | goto err; |
801 | } |
802 | |
803 | section_le = cpu_to_le64(section); |
804 | r = crypto_shash_update(desc, data: (__u8 *)§ion_le, len: sizeof(section_le)); |
805 | if (unlikely(r < 0)) { |
806 | dm_integrity_io_error(ic, msg: "crypto_shash_update" , err: r); |
807 | goto err; |
808 | } |
809 | } |
810 | |
811 | for (j = 0; j < ic->journal_section_entries; j++) { |
		struct journal_entry *je = access_journal_entry(ic, section, j);

		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof(je->u.sector));
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
817 | goto err; |
818 | } |
819 | } |
820 | |
	size = crypto_shash_digestsize(ic->journal_mac);
822 | |
823 | if (likely(size <= JOURNAL_MAC_SIZE)) { |
		r = crypto_shash_final(desc, result);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
827 | goto err; |
828 | } |
829 | memset(result + size, 0, JOURNAL_MAC_SIZE - size); |
830 | } else { |
831 | __u8 digest[HASH_MAX_DIGESTSIZE]; |
832 | |
833 | if (WARN_ON(size > sizeof(digest))) { |
834 | dm_integrity_io_error(ic, msg: "digest_size" , err: -EINVAL); |
835 | goto err; |
836 | } |
		r = crypto_shash_final(desc, digest);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
840 | goto err; |
841 | } |
842 | memcpy(result, digest, JOURNAL_MAC_SIZE); |
843 | } |
844 | |
845 | return; |
846 | err: |
847 | memset(result, 0, JOURNAL_MAC_SIZE); |
848 | } |
849 | |
850 | static void rw_section_mac(struct dm_integrity_c *ic, unsigned int section, bool wr) |
851 | { |
852 | __u8 result[JOURNAL_MAC_SIZE]; |
853 | unsigned int j; |
854 | |
855 | if (!ic->journal_mac) |
856 | return; |
857 | |
858 | section_mac(ic, section, result); |
859 | |
860 | for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) { |
		struct journal_sector *js = access_journal(ic, section, j);
862 | |
863 | if (likely(wr)) |
864 | memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR); |
865 | else { |
866 | if (memcmp(p: &js->mac, q: result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) { |
867 | dm_integrity_io_error(ic, msg: "journal mac" , err: -EILSEQ); |
868 | dm_audit_log_target(DM_MSG_PREFIX, op: "mac-journal" , ti: ic->ti, result: 0); |
869 | } |
870 | } |
871 | } |
872 | } |
873 | |
874 | static void complete_journal_op(void *context) |
875 | { |
876 | struct journal_completion *comp = context; |
877 | |
878 | BUG_ON(!atomic_read(&comp->in_flight)); |
879 | if (likely(atomic_dec_and_test(&comp->in_flight))) |
880 | complete(&comp->comp); |
881 | } |
882 | |
883 | static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section, |
884 | unsigned int n_sections, struct journal_completion *comp) |
885 | { |
886 | struct async_submit_ctl submit; |
887 | size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT; |
888 | unsigned int pl_index, pl_offset, section_index; |
889 | struct page_list *source_pl, *target_pl; |
890 | |
891 | if (likely(encrypt)) { |
892 | source_pl = ic->journal; |
893 | target_pl = ic->journal_io; |
894 | } else { |
895 | source_pl = ic->journal_io; |
896 | target_pl = ic->journal; |
897 | } |
898 | |
	page_list_location(ic, section, 0, &pl_index, &pl_offset);

	atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);
904 | |
905 | section_index = pl_index; |
906 | |
907 | do { |
908 | size_t this_step; |
909 | struct page *src_pages[2]; |
910 | struct page *dst_page; |
911 | |
912 | while (unlikely(pl_index == section_index)) { |
913 | unsigned int dummy; |
914 | |
915 | if (likely(encrypt)) |
				rw_section_mac(ic, section, true);
917 | section++; |
918 | n_sections--; |
919 | if (!n_sections) |
920 | break; |
			page_list_location(ic, section, 0, &section_index, &dummy);
922 | } |
923 | |
924 | this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset); |
925 | dst_page = target_pl[pl_index].page; |
926 | src_pages[0] = source_pl[pl_index].page; |
927 | src_pages[1] = ic->journal_xor[pl_index].page; |
928 | |
		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);
930 | |
931 | pl_index++; |
932 | pl_offset = 0; |
933 | n_bytes -= this_step; |
934 | } while (n_bytes); |
935 | |
936 | BUG_ON(n_sections); |
937 | |
938 | async_tx_issue_pending_all(); |
939 | } |
940 | |
941 | static void complete_journal_encrypt(void *data, int err) |
942 | { |
943 | struct journal_completion *comp = data; |
944 | |
945 | if (unlikely(err)) { |
946 | if (likely(err == -EINPROGRESS)) { |
947 | complete(&comp->ic->crypto_backoff); |
948 | return; |
949 | } |
950 | dm_integrity_io_error(ic: comp->ic, msg: "asynchronous encrypt" , err); |
951 | } |
952 | complete_journal_op(context: comp); |
953 | } |
954 | |
955 | static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp) |
956 | { |
957 | int r; |
958 | |
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      complete_journal_encrypt, comp);
961 | if (likely(encrypt)) |
962 | r = crypto_skcipher_encrypt(req); |
963 | else |
964 | r = crypto_skcipher_decrypt(req); |
965 | if (likely(!r)) |
966 | return false; |
967 | if (likely(r == -EINPROGRESS)) |
968 | return true; |
969 | if (likely(r == -EBUSY)) { |
970 | wait_for_completion(&comp->ic->crypto_backoff); |
971 | reinit_completion(x: &comp->ic->crypto_backoff); |
972 | return true; |
973 | } |
974 | dm_integrity_io_error(ic: comp->ic, msg: "encrypt" , err: r); |
975 | return false; |
976 | } |
977 | |
978 | static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section, |
979 | unsigned int n_sections, struct journal_completion *comp) |
980 | { |
981 | struct scatterlist **source_sg; |
982 | struct scatterlist **target_sg; |
983 | |
	atomic_add(2, &comp->in_flight);
985 | |
986 | if (likely(encrypt)) { |
987 | source_sg = ic->journal_scatterlist; |
988 | target_sg = ic->journal_io_scatterlist; |
989 | } else { |
990 | source_sg = ic->journal_io_scatterlist; |
991 | target_sg = ic->journal_scatterlist; |
992 | } |
993 | |
994 | do { |
995 | struct skcipher_request *req; |
996 | unsigned int ivsize; |
997 | char *iv; |
998 | |
999 | if (likely(encrypt)) |
			rw_section_mac(ic, section, true);
1001 | |
1002 | req = ic->sk_requests[section]; |
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
1004 | iv = req->iv; |
1005 | |
1006 | memcpy(iv, iv + ivsize, ivsize); |
1007 | |
1008 | req->src = source_sg[section]; |
1009 | req->dst = target_sg[section]; |
1010 | |
1011 | if (unlikely(do_crypt(encrypt, req, comp))) |
			atomic_inc(&comp->in_flight);
1013 | |
1014 | section++; |
1015 | n_sections--; |
1016 | } while (n_sections); |
1017 | |
	atomic_dec(&comp->in_flight);
	complete_journal_op(comp);
1020 | } |
1021 | |
1022 | static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section, |
1023 | unsigned int n_sections, struct journal_completion *comp) |
1024 | { |
1025 | if (ic->journal_xor) |
1026 | return xor_journal(ic, encrypt, section, n_sections, comp); |
1027 | else |
1028 | return crypt_journal(ic, encrypt, section, n_sections, comp); |
1029 | } |
1030 | |
1031 | static void complete_journal_io(unsigned long error, void *context) |
1032 | { |
1033 | struct journal_completion *comp = context; |
1034 | |
1035 | if (unlikely(error != 0)) |
1036 | dm_integrity_io_error(ic: comp->ic, msg: "writing journal" , err: -EIO); |
1037 | complete_journal_op(context: comp); |
1038 | } |
1039 | |
1040 | static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf, |
1041 | unsigned int sector, unsigned int n_sectors, |
1042 | struct journal_completion *comp) |
1043 | { |
1044 | struct dm_io_request io_req; |
1045 | struct dm_io_region io_loc; |
1046 | unsigned int pl_index, pl_offset; |
1047 | int r; |
1048 | |
1049 | if (unlikely(dm_integrity_failed(ic))) { |
1050 | if (comp) |
			complete_journal_io(-1UL, comp);
1052 | return; |
1053 | } |
1054 | |
1055 | pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); |
1056 | pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); |
1057 | |
1058 | io_req.bi_opf = opf; |
1059 | io_req.mem.type = DM_IO_PAGE_LIST; |
1060 | if (ic->journal_io) |
1061 | io_req.mem.ptr.pl = &ic->journal_io[pl_index]; |
1062 | else |
1063 | io_req.mem.ptr.pl = &ic->journal[pl_index]; |
1064 | io_req.mem.offset = pl_offset; |
1065 | if (likely(comp != NULL)) { |
1066 | io_req.notify.fn = complete_journal_io; |
1067 | io_req.notify.context = comp; |
1068 | } else { |
1069 | io_req.notify.fn = NULL; |
1070 | } |
1071 | io_req.client = ic->io; |
1072 | io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev; |
1073 | io_loc.sector = ic->start + SB_SECTORS + sector; |
1074 | io_loc.count = n_sectors; |
1075 | |
1076 | r = dm_io(io_req: &io_req, num_regions: 1, region: &io_loc, NULL, IOPRIO_DEFAULT); |
1077 | if (unlikely(r)) { |
1078 | dm_integrity_io_error(ic, msg: (opf & REQ_OP_MASK) == REQ_OP_READ ? |
1079 | "reading journal" : "writing journal" , err: r); |
1080 | if (comp) { |
1081 | WARN_ONCE(1, "asynchronous dm_io failed: %d" , r); |
1082 | complete_journal_io(error: -1UL, context: comp); |
1083 | } |
1084 | } |
1085 | } |
1086 | |
1087 | static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf, |
1088 | unsigned int section, unsigned int n_sections, |
1089 | struct journal_completion *comp) |
1090 | { |
1091 | unsigned int sector, n_sectors; |
1092 | |
1093 | sector = section * ic->journal_section_sectors; |
1094 | n_sectors = n_sections * ic->journal_section_sectors; |
1095 | |
1096 | rw_journal_sectors(ic, opf, sector, n_sectors, comp); |
1097 | } |
1098 | |
1099 | static void write_journal(struct dm_integrity_c *ic, unsigned int commit_start, unsigned int commit_sections) |
1100 | { |
1101 | struct journal_completion io_comp; |
1102 | struct journal_completion crypt_comp_1; |
1103 | struct journal_completion crypt_comp_2; |
1104 | unsigned int i; |
1105 | |
1106 | io_comp.ic = ic; |
1107 | init_completion(x: &io_comp.comp); |
1108 | |
1109 | if (commit_start + commit_sections <= ic->journal_sections) { |
1110 | io_comp.in_flight = (atomic_t)ATOMIC_INIT(1); |
1111 | if (ic->journal_io) { |
1112 | crypt_comp_1.ic = ic; |
1113 | init_completion(x: &crypt_comp_1.comp); |
1114 | crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); |
1115 | encrypt_journal(ic, encrypt: true, section: commit_start, n_sections: commit_sections, comp: &crypt_comp_1); |
1116 | wait_for_completion_io(&crypt_comp_1.comp); |
1117 | } else { |
1118 | for (i = 0; i < commit_sections; i++) |
				rw_section_mac(ic, commit_start + i, true);
1120 | } |
		rw_journal(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, commit_start,
			   commit_sections, &io_comp);
1123 | } else { |
1124 | unsigned int to_end; |
1125 | |
1126 | io_comp.in_flight = (atomic_t)ATOMIC_INIT(2); |
1127 | to_end = ic->journal_sections - commit_start; |
1128 | if (ic->journal_io) { |
1129 | crypt_comp_1.ic = ic; |
1130 | init_completion(x: &crypt_comp_1.comp); |
1131 | crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); |
1132 | encrypt_journal(ic, encrypt: true, section: commit_start, n_sections: to_end, comp: &crypt_comp_1); |
1133 | if (try_wait_for_completion(x: &crypt_comp_1.comp)) { |
1134 | rw_journal(ic, opf: REQ_OP_WRITE | REQ_FUA, |
1135 | section: commit_start, n_sections: to_end, comp: &io_comp); |
1136 | reinit_completion(x: &crypt_comp_1.comp); |
1137 | crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0); |
1138 | encrypt_journal(ic, encrypt: true, section: 0, n_sections: commit_sections - to_end, comp: &crypt_comp_1); |
1139 | wait_for_completion_io(&crypt_comp_1.comp); |
1140 | } else { |
1141 | crypt_comp_2.ic = ic; |
1142 | init_completion(x: &crypt_comp_2.comp); |
1143 | crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0); |
1144 | encrypt_journal(ic, encrypt: true, section: 0, n_sections: commit_sections - to_end, comp: &crypt_comp_2); |
1145 | wait_for_completion_io(&crypt_comp_1.comp); |
1146 | rw_journal(ic, opf: REQ_OP_WRITE | REQ_FUA, section: commit_start, n_sections: to_end, comp: &io_comp); |
1147 | wait_for_completion_io(&crypt_comp_2.comp); |
1148 | } |
1149 | } else { |
1150 | for (i = 0; i < to_end; i++) |
				rw_section_mac(ic, commit_start + i, true);
			rw_journal(ic, REQ_OP_WRITE | REQ_FUA, commit_start, to_end, &io_comp);
			for (i = 0; i < commit_sections - to_end; i++)
				rw_section_mac(ic, i, true);
		}
		rw_journal(ic, REQ_OP_WRITE | REQ_FUA, 0, commit_sections - to_end, &io_comp);
1157 | } |
1158 | |
1159 | wait_for_completion_io(&io_comp.comp); |
1160 | } |
1161 | |
1162 | static void copy_from_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset, |
1163 | unsigned int n_sectors, sector_t target, io_notify_fn fn, void *data) |
1164 | { |
1165 | struct dm_io_request io_req; |
1166 | struct dm_io_region io_loc; |
1167 | int r; |
1168 | unsigned int sector, pl_index, pl_offset; |
1169 | |
1170 | BUG_ON((target | n_sectors | offset) & (unsigned int)(ic->sectors_per_block - 1)); |
1171 | |
1172 | if (unlikely(dm_integrity_failed(ic))) { |
1173 | fn(-1UL, data); |
1174 | return; |
1175 | } |
1176 | |
1177 | sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset; |
1178 | |
1179 | pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); |
1180 | pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); |
1181 | |
1182 | io_req.bi_opf = REQ_OP_WRITE; |
1183 | io_req.mem.type = DM_IO_PAGE_LIST; |
1184 | io_req.mem.ptr.pl = &ic->journal[pl_index]; |
1185 | io_req.mem.offset = pl_offset; |
1186 | io_req.notify.fn = fn; |
1187 | io_req.notify.context = data; |
1188 | io_req.client = ic->io; |
1189 | io_loc.bdev = ic->dev->bdev; |
1190 | io_loc.sector = target; |
1191 | io_loc.count = n_sectors; |
1192 | |
1193 | r = dm_io(io_req: &io_req, num_regions: 1, region: &io_loc, NULL, IOPRIO_DEFAULT); |
1194 | if (unlikely(r)) { |
1195 | WARN_ONCE(1, "asynchronous dm_io failed: %d" , r); |
1196 | fn(-1UL, data); |
1197 | } |
1198 | } |
1199 | |
1200 | static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2) |
1201 | { |
1202 | return range1->logical_sector < range2->logical_sector + range2->n_sectors && |
1203 | range1->logical_sector + range1->n_sectors > range2->logical_sector; |
1204 | } |
1205 | |
1206 | static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting) |
1207 | { |
1208 | struct rb_node **n = &ic->in_progress.rb_node; |
1209 | struct rb_node *parent; |
1210 | |
1211 | BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned int)(ic->sectors_per_block - 1)); |
1212 | |
1213 | if (likely(check_waiting)) { |
1214 | struct dm_integrity_range *range; |
1215 | |
1216 | list_for_each_entry(range, &ic->wait_list, wait_entry) { |
1217 | if (unlikely(ranges_overlap(range, new_range))) |
1218 | return false; |
1219 | } |
1220 | } |
1221 | |
1222 | parent = NULL; |
1223 | |
1224 | while (*n) { |
1225 | struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node); |
1226 | |
1227 | parent = *n; |
1228 | if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) |
1229 | n = &range->node.rb_left; |
1230 | else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) |
1231 | n = &range->node.rb_right; |
1232 | else |
1233 | return false; |
1234 | } |
1235 | |
1236 | rb_link_node(node: &new_range->node, parent, rb_link: n); |
1237 | rb_insert_color(&new_range->node, &ic->in_progress); |
1238 | |
1239 | return true; |
1240 | } |
1241 | |
1242 | static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range) |
1243 | { |
1244 | rb_erase(&range->node, &ic->in_progress); |
1245 | while (unlikely(!list_empty(&ic->wait_list))) { |
1246 | struct dm_integrity_range *last_range = |
1247 | list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry); |
1248 | struct task_struct *last_range_task; |
1249 | |
1250 | last_range_task = last_range->task; |
		list_del(&last_range->wait_entry);
		if (!add_new_range(ic, last_range, false)) {
			last_range->task = last_range_task;
			list_add(&last_range->wait_entry, &ic->wait_list);
1255 | break; |
1256 | } |
1257 | last_range->waiting = false; |
		wake_up_process(last_range_task);
1259 | } |
1260 | } |
1261 | |
1262 | static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range) |
1263 | { |
1264 | unsigned long flags; |
1265 | |
1266 | spin_lock_irqsave(&ic->endio_wait.lock, flags); |
1267 | remove_range_unlocked(ic, range); |
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1269 | } |
1270 | |
1271 | static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range) |
1272 | { |
1273 | new_range->waiting = true; |
	list_add_tail(&new_range->wait_entry, &ic->wait_list);
1275 | new_range->task = current; |
1276 | do { |
1277 | __set_current_state(TASK_UNINTERRUPTIBLE); |
		spin_unlock_irq(&ic->endio_wait.lock);
		io_schedule();
		spin_lock_irq(&ic->endio_wait.lock);
1281 | } while (unlikely(new_range->waiting)); |
1282 | } |
1283 | |
1284 | static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range) |
1285 | { |
1286 | if (unlikely(!add_new_range(ic, new_range, true))) |
1287 | wait_and_add_new_range(ic, new_range); |
1288 | } |
1289 | |
1290 | static void init_journal_node(struct journal_node *node) |
1291 | { |
1292 | RB_CLEAR_NODE(&node->node); |
1293 | node->sector = (sector_t)-1; |
1294 | } |
1295 | |
1296 | static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector) |
1297 | { |
1298 | struct rb_node **link; |
1299 | struct rb_node *parent; |
1300 | |
1301 | node->sector = sector; |
1302 | BUG_ON(!RB_EMPTY_NODE(&node->node)); |
1303 | |
1304 | link = &ic->journal_tree_root.rb_node; |
1305 | parent = NULL; |
1306 | |
1307 | while (*link) { |
1308 | struct journal_node *j; |
1309 | |
1310 | parent = *link; |
1311 | j = container_of(parent, struct journal_node, node); |
1312 | if (sector < j->sector) |
1313 | link = &j->node.rb_left; |
1314 | else |
1315 | link = &j->node.rb_right; |
1316 | } |
1317 | |
1318 | rb_link_node(node: &node->node, parent, rb_link: link); |
1319 | rb_insert_color(&node->node, &ic->journal_tree_root); |
1320 | } |
1321 | |
1322 | static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node) |
1323 | { |
1324 | BUG_ON(RB_EMPTY_NODE(&node->node)); |
1325 | rb_erase(&node->node, &ic->journal_tree_root); |
1326 | init_journal_node(node); |
1327 | } |
1328 | |
1329 | #define NOT_FOUND (-1U) |
1330 | |
1331 | static unsigned int find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector) |
1332 | { |
1333 | struct rb_node *n = ic->journal_tree_root.rb_node; |
1334 | unsigned int found = NOT_FOUND; |
1335 | |
1336 | *next_sector = (sector_t)-1; |
1337 | while (n) { |
1338 | struct journal_node *j = container_of(n, struct journal_node, node); |
1339 | |
1340 | if (sector == j->sector) |
1341 | found = j - ic->journal_tree; |
1342 | |
1343 | if (sector < j->sector) { |
1344 | *next_sector = j->sector; |
1345 | n = j->node.rb_left; |
1346 | } else |
1347 | n = j->node.rb_right; |
1348 | } |
1349 | |
1350 | return found; |
1351 | } |
1352 | |
1353 | static bool test_journal_node(struct dm_integrity_c *ic, unsigned int pos, sector_t sector) |
1354 | { |
1355 | struct journal_node *node, *next_node; |
1356 | struct rb_node *next; |
1357 | |
1358 | if (unlikely(pos >= ic->journal_entries)) |
1359 | return false; |
1360 | node = &ic->journal_tree[pos]; |
1361 | if (unlikely(RB_EMPTY_NODE(&node->node))) |
1362 | return false; |
1363 | if (unlikely(node->sector != sector)) |
1364 | return false; |
1365 | |
1366 | next = rb_next(&node->node); |
1367 | if (unlikely(!next)) |
1368 | return true; |
1369 | |
1370 | next_node = container_of(next, struct journal_node, node); |
1371 | return next_node->sector != sector; |
1372 | } |
1373 | |
1374 | static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node) |
1375 | { |
1376 | struct rb_node *next; |
1377 | struct journal_node *next_node; |
1378 | unsigned int next_section; |
1379 | |
1380 | BUG_ON(RB_EMPTY_NODE(&node->node)); |
1381 | |
1382 | next = rb_next(&node->node); |
1383 | if (unlikely(!next)) |
1384 | return false; |
1385 | |
1386 | next_node = container_of(next, struct journal_node, node); |
1387 | |
1388 | if (next_node->sector != node->sector) |
1389 | return false; |
1390 | |
1391 | next_section = (unsigned int)(next_node - ic->journal_tree) / ic->journal_section_entries; |
1392 | if (next_section >= ic->committed_section && |
1393 | next_section < ic->committed_section + ic->n_committed_sections) |
1394 | return true; |
1395 | if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections) |
1396 | return true; |
1397 | |
1398 | return false; |
1399 | } |
1400 | |
1401 | #define TAG_READ 0 |
1402 | #define TAG_WRITE 1 |
1403 | #define TAG_CMP 2 |
1404 | |
1405 | static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block, |
1406 | unsigned int *metadata_offset, unsigned int total_size, int op) |
1407 | { |
1408 | #define MAY_BE_FILLER 1 |
1409 | #define MAY_BE_HASH 2 |
1410 | unsigned int hash_offset = 0; |
1411 | unsigned int may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0); |
1412 | |
1413 | do { |
1414 | unsigned char *data, *dp; |
1415 | struct dm_buffer *b; |
1416 | unsigned int to_copy; |
1417 | int r; |
1418 | |
1419 | r = dm_integrity_failed(ic); |
1420 | if (unlikely(r)) |
1421 | return r; |
1422 | |
		data = dm_bufio_read(ic->bufio, *metadata_block, &b);
		if (IS_ERR(data))
			return PTR_ERR(data);
1426 | |
1427 | to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size); |
1428 | dp = data + *metadata_offset; |
1429 | if (op == TAG_READ) { |
1430 | memcpy(tag, dp, to_copy); |
1431 | } else if (op == TAG_WRITE) { |
			if (memcmp(dp, tag, to_copy)) {
				memcpy(dp, tag, to_copy);
				dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
1435 | } |
1436 | } else { |
1437 | /* e.g.: op == TAG_CMP */ |
1438 | |
1439 | if (likely(is_power_of_2(ic->tag_size))) { |
1440 | if (unlikely(memcmp(dp, tag, to_copy))) |
1441 | if (unlikely(!ic->discard) || |
1442 | unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL)) { |
1443 | goto thorough_test; |
1444 | } |
1445 | } else { |
1446 | unsigned int i, ts; |
1447 | thorough_test: |
1448 | ts = total_size; |
1449 | |
1450 | for (i = 0; i < to_copy; i++, ts--) { |
1451 | if (unlikely(dp[i] != tag[i])) |
1452 | may_be &= ~MAY_BE_HASH; |
1453 | if (likely(dp[i] != DISCARD_FILLER)) |
1454 | may_be &= ~MAY_BE_FILLER; |
1455 | hash_offset++; |
1456 | if (unlikely(hash_offset == ic->tag_size)) { |
1457 | if (unlikely(!may_be)) { |
1458 | dm_bufio_release(b); |
1459 | return ts; |
1460 | } |
1461 | hash_offset = 0; |
1462 | may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0); |
1463 | } |
1464 | } |
1465 | } |
1466 | } |
1467 | dm_bufio_release(b); |
1468 | |
1469 | tag += to_copy; |
1470 | *metadata_offset += to_copy; |
1471 | if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) { |
1472 | (*metadata_block)++; |
1473 | *metadata_offset = 0; |
1474 | } |
1475 | |
1476 | if (unlikely(!is_power_of_2(ic->tag_size))) |
1477 | hash_offset = (hash_offset + to_copy) % ic->tag_size; |
1478 | |
1479 | total_size -= to_copy; |
1480 | } while (unlikely(total_size)); |
1481 | |
1482 | return 0; |
1483 | #undef MAY_BE_FILLER |
1484 | #undef MAY_BE_HASH |
1485 | } |
1486 | |
1487 | struct flush_request { |
1488 | struct dm_io_request io_req; |
1489 | struct dm_io_region io_reg; |
1490 | struct dm_integrity_c *ic; |
1491 | struct completion comp; |
1492 | }; |
1493 | |
1494 | static void flush_notify(unsigned long error, void *fr_) |
1495 | { |
1496 | struct flush_request *fr = fr_; |
1497 | |
1498 | if (unlikely(error != 0)) |
1499 | dm_integrity_io_error(ic: fr->ic, msg: "flushing disk cache" , err: -EIO); |
1500 | complete(&fr->comp); |
1501 | } |
1502 | |
1503 | static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data) |
1504 | { |
1505 | int r; |
1506 | struct flush_request fr; |
1507 | |
1508 | if (!ic->meta_dev) |
1509 | flush_data = false; |
1510 | if (flush_data) { |
		fr.io_req.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
		fr.io_req.mem.type = DM_IO_KMEM;
		fr.io_req.mem.ptr.addr = NULL;
		fr.io_req.notify.fn = flush_notify;
		fr.io_req.notify.context = &fr;
		fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio);
		fr.io_reg.bdev = ic->dev->bdev;
		fr.io_reg.sector = 0;
		fr.io_reg.count = 0;
		fr.ic = ic;
		init_completion(&fr.comp);
		r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL, IOPRIO_DEFAULT);
1523 | BUG_ON(r); |
1524 | } |
1525 | |
	r = dm_bufio_write_dirty_buffers(ic->bufio);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing tags", r);
1529 | |
1530 | if (flush_data) |
1531 | wait_for_completion(&fr.comp); |
1532 | } |
1533 | |
1534 | static void sleep_on_endio_wait(struct dm_integrity_c *ic) |
1535 | { |
1536 | DECLARE_WAITQUEUE(wait, current); |
1537 | |
	__add_wait_queue(&ic->endio_wait, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&ic->endio_wait.lock);
	io_schedule();
	spin_lock_irq(&ic->endio_wait.lock);
	__remove_wait_queue(&ic->endio_wait, &wait);
1544 | } |
1545 | |
1546 | static void autocommit_fn(struct timer_list *t) |
1547 | { |
1548 | struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer); |
1549 | |
1550 | if (likely(!dm_integrity_failed(ic))) |
1551 | queue_work(wq: ic->commit_wq, work: &ic->commit_work); |
1552 | } |
1553 | |
1554 | static void schedule_autocommit(struct dm_integrity_c *ic) |
1555 | { |
	if (!timer_pending(&ic->autocommit_timer))
		mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
1558 | } |
1559 | |
1560 | static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio) |
1561 | { |
1562 | struct bio *bio; |
1563 | unsigned long flags; |
1564 | |
1565 | spin_lock_irqsave(&ic->endio_wait.lock, flags); |
	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	bio_list_add(&ic->flush_bio_list, bio);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1569 | |
1570 | queue_work(wq: ic->commit_wq, work: &ic->commit_work); |
1571 | } |
1572 | |
1573 | static void do_endio(struct dm_integrity_c *ic, struct bio *bio) |
1574 | { |
1575 | int r; |
1576 | |
1577 | r = dm_integrity_failed(ic); |
1578 | if (unlikely(r) && !bio->bi_status) |
1579 | bio->bi_status = errno_to_blk_status(errno: r); |
1580 | if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) { |
1581 | unsigned long flags; |
1582 | |
1583 | spin_lock_irqsave(&ic->endio_wait.lock, flags); |
1584 | bio_list_add(bl: &ic->synchronous_bios, bio); |
1585 | queue_delayed_work(wq: ic->commit_wq, dwork: &ic->bitmap_flush_work, delay: 0); |
1586 | spin_unlock_irqrestore(lock: &ic->endio_wait.lock, flags); |
1587 | return; |
1588 | } |
1589 | bio_endio(bio); |
1590 | } |
1591 | |
1592 | static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio) |
1593 | { |
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1595 | |
1596 | if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic))) |
1597 | submit_flush_bio(ic, dio); |
1598 | else |
1599 | do_endio(ic, bio); |
1600 | } |
1601 | |
1602 | static void dec_in_flight(struct dm_integrity_io *dio) |
1603 | { |
	if (atomic_dec_and_test(&dio->in_flight)) {
1605 | struct dm_integrity_c *ic = dio->ic; |
1606 | struct bio *bio; |
1607 | |
		remove_range(ic, &dio->range);
1609 | |
1610 | if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD)) |
1611 | schedule_autocommit(ic); |
1612 | |
		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1614 | if (unlikely(dio->bi_status) && !bio->bi_status) |
1615 | bio->bi_status = dio->bi_status; |
1616 | if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) { |
1617 | dio->range.logical_sector += dio->range.n_sectors; |
			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
1619 | INIT_WORK(&dio->work, integrity_bio_wait); |
1620 | queue_work(wq: ic->offload_wq, work: &dio->work); |
1621 | return; |
1622 | } |
1623 | do_endio_flush(ic, dio); |
1624 | } |
1625 | } |
1626 | |
1627 | static void integrity_end_io(struct bio *bio) |
1628 | { |
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1630 | |
	dm_bio_restore(&dio->bio_details, bio);
1632 | if (bio->bi_integrity) |
1633 | bio->bi_opf |= REQ_INTEGRITY; |
1634 | |
1635 | if (dio->completion) |
1636 | complete(dio->completion); |
1637 | |
1638 | dec_in_flight(dio); |
1639 | } |
1640 | |
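/*
 * Compute the integrity tag for one block: hash the (optional) superblock
 * salt, the little-endian sector number and the block data with the
 * internal hash, then zero-pad the result up to tag_size if the digest is
 * shorter than the tag.
 */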
static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
				      const char *data, char *result)
{
	__le64 sector_le = cpu_to_le64(sector);
	SHASH_DESC_ON_STACK(req, ic->internal_hash);
	int r;
	unsigned int digest_size;

	req->tfm = ic->internal_hash;

	r = crypto_shash_init(req);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto failed;
	}

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
		r = crypto_shash_update(req, (__u8 *)&ic->sb->salt, SALT_SIZE);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto failed;
		}
	}

	r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof(sector_le));
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_final(req, result);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_final", r);
		goto failed;
	}

	digest_size = crypto_shash_digestsize(ic->internal_hash);
	if (unlikely(digest_size < ic->tag_size))
		memset(result + digest_size, 0, ic->tag_size - digest_size);

	return;

failed:
	/* this shouldn't happen anyway, the hash functions have no reason to fail */
	get_random_bytes(result, ic->tag_size);
}

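/*
 * Slow path taken when a tag comparison fails: re-read the data directly
 * from the backing device with dm-io (aligned to the device's logical
 * block size), recompute the checksum and compare it against the stored
 * tag again; only if the mismatch persists is the error reported.
 */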
static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checksum)
{
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	struct dm_integrity_c *ic = dio->ic;
	struct bvec_iter iter;
	struct bio_vec bv;
	sector_t sector, logical_sector, area, offset;
	struct page *page;

	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset,
							     &dio->metadata_offset);
	sector = get_data_sector(ic, area, offset);
	logical_sector = dio->range.logical_sector;

	page = mempool_alloc(&ic->recheck_pool, GFP_NOIO);

	__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
		unsigned pos = 0;

		do {
			sector_t alignment;
			char *mem;
			char *buffer = page_to_virt(page);
			int r;
			struct dm_io_request io_req;
			struct dm_io_region io_loc;
			io_req.bi_opf = REQ_OP_READ;
			io_req.mem.type = DM_IO_KMEM;
			io_req.mem.ptr.addr = buffer;
			io_req.notify.fn = NULL;
			io_req.client = ic->io;
			io_loc.bdev = ic->dev->bdev;
			io_loc.sector = sector;
			io_loc.count = ic->sectors_per_block;

			/* Align the bio to logical block size */
			alignment = dio->range.logical_sector | bio_sectors(bio) | (PAGE_SIZE >> SECTOR_SHIFT);
			alignment &= -alignment;
			io_loc.sector = round_down(io_loc.sector, alignment);
			io_loc.count += sector - io_loc.sector;
			buffer += (sector - io_loc.sector) << SECTOR_SHIFT;
			io_loc.count = round_up(io_loc.count, alignment);

			r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
			if (unlikely(r)) {
				dio->bi_status = errno_to_blk_status(r);
				goto free_ret;
			}

			integrity_sector_checksum(ic, logical_sector, buffer, checksum);
			r = dm_integrity_rw_tag(ic, checksum, &dio->metadata_block,
						&dio->metadata_offset, ic->tag_size, TAG_CMP);
			if (r) {
				if (r > 0) {
					DMERR_LIMIT("%pg: Checksum failed at sector 0x%llx",
						    bio->bi_bdev, logical_sector);
					atomic64_inc(&ic->number_of_mismatches);
					dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
							 bio, logical_sector, 0);
					r = -EILSEQ;
				}
				dio->bi_status = errno_to_blk_status(r);
				goto free_ret;
			}

			mem = bvec_kmap_local(&bv);
			memcpy(mem + pos, buffer, ic->sectors_per_block << SECTOR_SHIFT);
			kunmap_local(mem);

			pos += ic->sectors_per_block << SECTOR_SHIFT;
			sector += ic->sectors_per_block;
			logical_sector += ic->sectors_per_block;
		} while (pos < bv.bv_len);
	}
free_ret:
	mempool_free(page, &ic->recheck_pool);
}

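/*
 * Work item that handles the metadata for one bio. With an internal hash,
 * checksums are computed (writes) or verified (reads) for every block and
 * stored in or compared against the tag area; discards fill the tag area
 * with DISCARD_FILLER. Without an internal hash, tags are copied between
 * the bio integrity payload and the tag area.
 */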
static void integrity_metadata(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
	struct dm_integrity_c *ic = dio->ic;

	int r;

	if (ic->internal_hash) {
		struct bvec_iter iter;
		struct bio_vec bv;
		unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
		char *checksums;
		unsigned int extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
		char checksums_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
		sector_t sector;
		unsigned int sectors_to_process;

		if (unlikely(ic->mode == 'R'))
			goto skip_io;

		if (likely(dio->op != REQ_OP_DISCARD))
			checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
					    GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
		else
			checksums = kmalloc(PAGE_SIZE, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
		if (!checksums) {
			checksums = checksums_onstack;
			if (WARN_ON(extra_space &&
				    digest_size > sizeof(checksums_onstack))) {
				r = -EINVAL;
				goto error;
			}
		}

		if (unlikely(dio->op == REQ_OP_DISCARD)) {
			unsigned int bi_size = dio->bio_details.bi_iter.bi_size;
			unsigned int max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
			unsigned int max_blocks = max_size / ic->tag_size;

			memset(checksums, DISCARD_FILLER, max_size);

			while (bi_size) {
				unsigned int this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);

				this_step_blocks = min(this_step_blocks, max_blocks);
				r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
							this_step_blocks * ic->tag_size, TAG_WRITE);
				if (unlikely(r)) {
					if (likely(checksums != checksums_onstack))
						kfree(checksums);
					goto error;
				}

				bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
			}

			if (likely(checksums != checksums_onstack))
				kfree(checksums);
			goto skip_io;
		}

		sector = dio->range.logical_sector;
		sectors_to_process = dio->range.n_sectors;

		__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
			struct bio_vec bv_copy = bv;
			unsigned int pos;
			char *mem, *checksums_ptr;

again:
			mem = bvec_kmap_local(&bv_copy);
			pos = 0;
			checksums_ptr = checksums;
			do {
				integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
				checksums_ptr += ic->tag_size;
				sectors_to_process -= ic->sectors_per_block;
				pos += ic->sectors_per_block << SECTOR_SHIFT;
				sector += ic->sectors_per_block;
			} while (pos < bv_copy.bv_len && sectors_to_process && checksums != checksums_onstack);
			kunmap_local(mem);

			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
						checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
			if (unlikely(r)) {
				if (likely(checksums != checksums_onstack))
					kfree(checksums);
				if (r > 0) {
					integrity_recheck(dio, checksums_onstack);
					goto skip_io;
				}
				goto error;
			}

			if (!sectors_to_process)
				break;

			if (unlikely(pos < bv_copy.bv_len)) {
				bv_copy.bv_offset += pos;
				bv_copy.bv_len -= pos;
				goto again;
			}
		}

		if (likely(checksums != checksums_onstack))
			kfree(checksums);
	} else {
		struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;

		if (bip) {
			struct bio_vec biv;
			struct bvec_iter iter;
			unsigned int data_to_process = dio->range.n_sectors;

			sector_to_block(ic, data_to_process);
			data_to_process *= ic->tag_size;

			bip_for_each_vec(biv, bip, iter) {
				unsigned char *tag;
				unsigned int this_len;

				BUG_ON(PageHighMem(biv.bv_page));
				tag = bvec_virt(&biv);
				this_len = min(biv.bv_len, data_to_process);
				r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
							this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
				if (unlikely(r))
					goto error;
				data_to_process -= this_len;
				if (!data_to_process)
					break;
			}
		}
	}
skip_io:
	dec_in_flight(dio);
	return;
error:
	dio->bi_status = errno_to_blk_status(r);
	dec_in_flight(dio);
}

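/*
 * The target's map callback: validate alignment and bounds, split discards
 * on max_io_len boundaries, route empty flushes to the flush list, strip
 * REQ_FUA (the journal flush provides the same guarantee) and remap the
 * bio from the target's logical sector to the data sector on the backing
 * device before handing it to dm_integrity_map_continue.
 */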
static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_integrity_c *ic = ti->private;
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
	struct bio_integrity_payload *bip;

	sector_t area, offset;

	dio->ic = ic;
	dio->bi_status = 0;
	dio->op = bio_op(bio);

	if (unlikely(dio->op == REQ_OP_DISCARD)) {
		if (ti->max_io_len) {
			sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
			unsigned int log2_max_io_len = __fls(ti->max_io_len);
			sector_t start_boundary = sec >> log2_max_io_len;
			sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;

			if (start_boundary < end_boundary) {
				sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1));

				dm_accept_partial_bio(bio, len);
			}
		}
	}

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		submit_flush_bio(ic, dio);
		return DM_MAPIO_SUBMITTED;
	}

	dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
	dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA;
	if (unlikely(dio->fua)) {
		/*
		 * Don't pass down the FUA flag because we have to flush
		 * disk cache anyway.
		 */
		bio->bi_opf &= ~REQ_FUA;
	}
	if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
		DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
		      dio->range.logical_sector, bio_sectors(bio),
		      ic->provided_data_sectors);
		return DM_MAPIO_KILL;
	}
	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned int)(ic->sectors_per_block - 1))) {
		DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
		      ic->sectors_per_block,
		      dio->range.logical_sector, bio_sectors(bio));
		return DM_MAPIO_KILL;
	}

	if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
		struct bvec_iter iter;
		struct bio_vec bv;

		bio_for_each_segment(bv, bio, iter) {
			if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
				DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
				      bv.bv_offset, bv.bv_len, ic->sectors_per_block);
				return DM_MAPIO_KILL;
			}
		}
	}

	bip = bio_integrity(bio);
	if (!ic->internal_hash) {
		if (bip) {
			unsigned int wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;

			if (ic->log2_tag_size >= 0)
				wanted_tag_size <<= ic->log2_tag_size;
			else
				wanted_tag_size *= ic->tag_size;
			if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
				DMERR("Invalid integrity data size %u, expected %u",
				      bip->bip_iter.bi_size, wanted_tag_size);
				return DM_MAPIO_KILL;
			}
		}
	} else {
		if (unlikely(bip != NULL)) {
			DMERR("Unexpected integrity data when using internal hash");
			return DM_MAPIO_KILL;
		}
	}

	if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
		return DM_MAPIO_KILL;

	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
	bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);

	dm_integrity_map_continue(dio, true);
	return DM_MAPIO_SUBMITTED;
}

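/*
 * Copy data between a bio and the in-memory journal. For writes, the block
 * data and its tag are stored in the journal entry and the entry's sector
 * is published last; for reads, data is copied out of the journal, waiting
 * for in-progress entries to be filled in. Returns true if the bio spans
 * past the journal entries just processed and must be continued.
 */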
static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
				 unsigned int journal_section, unsigned int journal_entry)
{
	struct dm_integrity_c *ic = dio->ic;
	sector_t logical_sector;
	unsigned int n_sectors;

	logical_sector = dio->range.logical_sector;
	n_sectors = dio->range.n_sectors;
	do {
		struct bio_vec bv = bio_iovec(bio);
		char *mem;

		if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
			bv.bv_len = n_sectors << SECTOR_SHIFT;
		n_sectors -= bv.bv_len >> SECTOR_SHIFT;
		bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
retry_kmap:
		mem = kmap_local_page(bv.bv_page);
		if (likely(dio->op == REQ_OP_WRITE))
			flush_dcache_page(bv.bv_page);

		do {
			struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);

			if (unlikely(dio->op == REQ_OP_READ)) {
				struct journal_sector *js;
				char *mem_ptr;
				unsigned int s;

				if (unlikely(journal_entry_is_inprogress(je))) {
					flush_dcache_page(bv.bv_page);
					kunmap_local(mem);

					__io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
					goto retry_kmap;
				}
				smp_rmb();
				BUG_ON(journal_entry_get_sector(je) != logical_sector);
				js = access_journal_data(ic, journal_section, journal_entry);
				mem_ptr = mem + bv.bv_offset;
				s = 0;
				do {
					memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
					*(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
					js++;
					mem_ptr += 1 << SECTOR_SHIFT;
				} while (++s < ic->sectors_per_block);
#ifdef INTERNAL_VERIFY
				if (ic->internal_hash) {
					char checksums_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];

					integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
					if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
						DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
							    logical_sector);
						dm_audit_log_bio(DM_MSG_PREFIX, "journal-checksum",
								 bio, logical_sector, 0);
					}
				}
#endif
			}

			if (!ic->internal_hash) {
				struct bio_integrity_payload *bip = bio_integrity(bio);
				unsigned int tag_todo = ic->tag_size;
				char *tag_ptr = journal_entry_tag(ic, je);

				if (bip) {
					do {
						struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
						unsigned int tag_now = min(biv.bv_len, tag_todo);
						char *tag_addr;

						BUG_ON(PageHighMem(biv.bv_page));
						tag_addr = bvec_virt(&biv);
						if (likely(dio->op == REQ_OP_WRITE))
							memcpy(tag_ptr, tag_addr, tag_now);
						else
							memcpy(tag_addr, tag_ptr, tag_now);
						bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
						tag_ptr += tag_now;
						tag_todo -= tag_now;
					} while (unlikely(tag_todo));
				} else if (likely(dio->op == REQ_OP_WRITE))
					memset(tag_ptr, 0, tag_todo);
			}

			if (likely(dio->op == REQ_OP_WRITE)) {
				struct journal_sector *js;
				unsigned int s;

				js = access_journal_data(ic, journal_section, journal_entry);
				memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);

				s = 0;
				do {
					je->last_bytes[s] = js[s].commit_id;
				} while (++s < ic->sectors_per_block);

				if (ic->internal_hash) {
					unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);

					if (unlikely(digest_size > ic->tag_size)) {
						char checksums_onstack[HASH_MAX_DIGESTSIZE];

						integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
						memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
					} else
						integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
				}

				journal_entry_set_sector(je, logical_sector);
			}
			logical_sector += ic->sectors_per_block;

			journal_entry++;
			if (unlikely(journal_entry == ic->journal_section_entries)) {
				journal_entry = 0;
				journal_section++;
				wraparound_section(ic, &journal_section);
			}

			bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
		} while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);

		if (unlikely(dio->op == REQ_OP_READ))
			flush_dcache_page(bv.bv_page);
		kunmap_local(mem);
	} while (n_sectors);

	if (likely(dio->op == REQ_OP_WRITE)) {
		smp_mb();
		if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
			wake_up(&ic->copy_to_journal_wait);
		if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
			queue_work(ic->commit_wq, &ic->commit_work);
		else
			schedule_autocommit(ic);
	} else
		remove_range(ic, &dio->range);

	if (unlikely(bio->bi_iter.bi_size)) {
		sector_t area, offset;

		dio->range.logical_sector = logical_sector;
		get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
		dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
		return true;
	}

	return false;
}

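/*
 * The main I/O path after mapping: reserve journal space for writes in 'J'
 * mode, look up reads in the journal tree, take the range lock (offloading
 * to a workqueue when it would have to sleep in the map callback), check
 * the write bitmap in 'B' mode, and finally either service the bio through
 * the journal or submit it to the backing device and schedule tag
 * processing in integrity_metadata.
 */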
static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
{
	struct dm_integrity_c *ic = dio->ic;
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	unsigned int journal_section, journal_entry;
	unsigned int journal_read_pos;
	struct completion read_comp;
	bool discard_retried = false;
	bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;

	if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
		need_sync_io = true;

	if (need_sync_io && from_map) {
		INIT_WORK(&dio->work, integrity_bio_wait);
		queue_work(ic->offload_wq, &dio->work);
		return;
	}

lock_retry:
	spin_lock_irq(&ic->endio_wait.lock);
retry:
	if (unlikely(dm_integrity_failed(ic))) {
		spin_unlock_irq(&ic->endio_wait.lock);
		do_endio(ic, bio);
		return;
	}
	dio->range.n_sectors = bio_sectors(bio);
	journal_read_pos = NOT_FOUND;
	if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
		if (dio->op == REQ_OP_WRITE) {
			unsigned int next_entry, i, pos;
			unsigned int ws, we, range_sectors;

			dio->range.n_sectors = min(dio->range.n_sectors,
						   (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
			if (unlikely(!dio->range.n_sectors)) {
				if (from_map)
					goto offload_to_thread;
				sleep_on_endio_wait(ic);
				goto retry;
			}
			range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
			ic->free_sectors -= range_sectors;
			journal_section = ic->free_section;
			journal_entry = ic->free_section_entry;

			next_entry = ic->free_section_entry + range_sectors;
			ic->free_section_entry = next_entry % ic->journal_section_entries;
			ic->free_section += next_entry / ic->journal_section_entries;
			ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
			wraparound_section(ic, &ic->free_section);

			pos = journal_section * ic->journal_section_entries + journal_entry;
			ws = journal_section;
			we = journal_entry;
			i = 0;
			do {
				struct journal_entry *je;

				add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
				pos++;
				if (unlikely(pos >= ic->journal_entries))
					pos = 0;

				je = access_journal_entry(ic, ws, we);
				BUG_ON(!journal_entry_is_unused(je));
				journal_entry_set_inprogress(je);
				we++;
				if (unlikely(we == ic->journal_section_entries)) {
					we = 0;
					ws++;
					wraparound_section(ic, &ws);
				}
			} while ((i += ic->sectors_per_block) < dio->range.n_sectors);

			spin_unlock_irq(&ic->endio_wait.lock);
			goto journal_read_write;
		} else {
			sector_t next_sector;

			journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
			if (likely(journal_read_pos == NOT_FOUND)) {
				if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
					dio->range.n_sectors = next_sector - dio->range.logical_sector;
			} else {
				unsigned int i;
				unsigned int jp = journal_read_pos + 1;

				for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
					if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
						break;
				}
				dio->range.n_sectors = i;
			}
		}
	}
	if (unlikely(!add_new_range(ic, &dio->range, true))) {
		/*
		 * We must not sleep in the request routine because it could
		 * stall bios on current->bio_list.
		 * So, we offload the bio to a workqueue if we have to sleep.
		 */
		if (from_map) {
offload_to_thread:
			spin_unlock_irq(&ic->endio_wait.lock);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->wait_wq, &dio->work);
			return;
		}
		if (journal_read_pos != NOT_FOUND)
			dio->range.n_sectors = ic->sectors_per_block;
		wait_and_add_new_range(ic, &dio->range);
		/*
		 * wait_and_add_new_range drops the spinlock, so the journal
		 * may have been changed arbitrarily. We need to recheck.
		 * To simplify the code, we restrict I/O size to just one block.
		 */
		if (journal_read_pos != NOT_FOUND) {
			sector_t next_sector;
			unsigned int new_pos;

			new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
			if (unlikely(new_pos != journal_read_pos)) {
				remove_range_unlocked(ic, &dio->range);
				goto retry;
			}
		}
	}
	if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
		sector_t next_sector;
		unsigned int new_pos;

		new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
		if (unlikely(new_pos != NOT_FOUND) ||
		    unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
			remove_range_unlocked(ic, &dio->range);
			spin_unlock_irq(&ic->endio_wait.lock);
			queue_work(ic->commit_wq, &ic->commit_work);
			flush_workqueue(ic->commit_wq);
			queue_work(ic->writer_wq, &ic->writer_work);
			flush_workqueue(ic->writer_wq);
			discard_retried = true;
			goto lock_retry;
		}
	}
	spin_unlock_irq(&ic->endio_wait.lock);

	if (unlikely(journal_read_pos != NOT_FOUND)) {
		journal_section = journal_read_pos / ic->journal_section_entries;
		journal_entry = journal_read_pos % ic->journal_section_entries;
		goto journal_read_write;
	}

	if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
		if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
				     dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
			struct bitmap_block_status *bbs;

			bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
			spin_lock(&bbs->bio_queue_lock);
			bio_list_add(&bbs->bio_queue, bio);
			spin_unlock(&bbs->bio_queue_lock);
			queue_work(ic->writer_wq, &bbs->work);
			return;
		}
	}

	dio->in_flight = (atomic_t)ATOMIC_INIT(2);

	if (need_sync_io) {
		init_completion(&read_comp);
		dio->completion = &read_comp;
	} else
		dio->completion = NULL;

	dm_bio_record(&dio->bio_details, bio);
	bio_set_dev(bio, ic->dev->bdev);
	bio->bi_integrity = NULL;
	bio->bi_opf &= ~REQ_INTEGRITY;
	bio->bi_end_io = integrity_end_io;
	bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;

	if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
		integrity_metadata(&dio->work);
		dm_integrity_flush_buffers(ic, false);

		dio->in_flight = (atomic_t)ATOMIC_INIT(1);
		dio->completion = NULL;

		submit_bio_noacct(bio);

		return;
	}

	submit_bio_noacct(bio);

	if (need_sync_io) {
		wait_for_completion_io(&read_comp);
		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
		    dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
			goto skip_check;
		if (ic->mode == 'B') {
			if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
					     dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
				goto skip_check;
		}

		if (likely(!bio->bi_status))
			integrity_metadata(&dio->work);
		else
skip_check:
			dec_in_flight(dio);
	} else {
		INIT_WORK(&dio->work, integrity_metadata);
		queue_work(ic->metadata_wq, &dio->work);
	}

	return;

journal_read_write:
	if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
		goto lock_retry;

	do_endio_flush(ic, dio);
}


static void integrity_bio_wait(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);

	dm_integrity_map_continue(dio, false);
}

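/*
 * Close a partially filled journal section: account the unused entries as
 * consumed so that the next write starts a fresh section, and sanity-check
 * the journal space bookkeeping.
 */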
static void pad_uncommitted(struct dm_integrity_c *ic)
{
	if (ic->free_section_entry) {
		ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
		ic->free_section_entry = 0;
		ic->free_section++;
		wraparound_section(ic, &ic->free_section);
		ic->n_uncommitted_sections++;
	}
	if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
		    (ic->n_uncommitted_sections + ic->n_committed_sections) *
		    ic->journal_section_entries + ic->free_sectors)) {
		DMCRIT("journal_sections %u, journal_section_entries %u, "
		       "n_uncommitted_sections %u, n_committed_sections %u, "
		       "journal_section_entries %u, free_sectors %u",
		       ic->journal_sections, ic->journal_section_entries,
		       ic->n_uncommitted_sections, ic->n_committed_sections,
		       ic->journal_section_entries, ic->free_sectors);
	}
}

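/*
 * Commit worker: in 'J' mode, wait until all uncommitted journal entries
 * have been copied into the in-memory journal, stamp every sector of those
 * sections with the current commit id, write them to the on-disk journal
 * and only then complete any queued flush bios. In other modes, just flush
 * the buffers.
 */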
static void integrity_commit(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
	unsigned int commit_start, commit_sections;
	unsigned int i, j, n;
	struct bio *flushes;

	del_timer(&ic->autocommit_timer);

	spin_lock_irq(&ic->endio_wait.lock);
	flushes = bio_list_get(&ic->flush_bio_list);
	if (unlikely(ic->mode != 'J')) {
		spin_unlock_irq(&ic->endio_wait.lock);
		dm_integrity_flush_buffers(ic, true);
		goto release_flush_bios;
	}

	pad_uncommitted(ic);
	commit_start = ic->uncommitted_section;
	commit_sections = ic->n_uncommitted_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (!commit_sections)
		goto release_flush_bios;

	ic->wrote_to_journal = true;

	i = commit_start;
	for (n = 0; n < commit_sections; n++) {
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je;

			je = access_journal_entry(ic, i, j);
			io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
		}
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js;

			js = access_journal(ic, i, j);
			js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
		}
		i++;
		if (unlikely(i >= ic->journal_sections))
			ic->commit_seq = next_commit_seq(ic->commit_seq);
		wraparound_section(ic, &i);
	}
	smp_rmb();

	write_journal(ic, commit_start, commit_sections);

	spin_lock_irq(&ic->endio_wait.lock);
	ic->uncommitted_section += commit_sections;
	wraparound_section(ic, &ic->uncommitted_section);
	ic->n_uncommitted_sections -= commit_sections;
	ic->n_committed_sections += commit_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
		queue_work(ic->writer_wq, &ic->writer_work);

release_flush_bios:
	while (flushes) {
		struct bio *next = flushes->bi_next;

		flushes->bi_next = NULL;
		do_endio(ic, flushes);
		flushes = next;
	}
}

static void complete_copy_from_journal(unsigned long error, void *context)
{
	struct journal_io *io = context;
	struct journal_completion *comp = io->comp;
	struct dm_integrity_c *ic = comp->ic;

	remove_range(ic, &io->range);
	mempool_free(io, &ic->journal_io_mempool);
	if (unlikely(error != 0))
		dm_integrity_io_error(ic, "copying from journal", -EIO);
	complete_journal_op(comp);
}

static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
			       struct journal_entry *je)
{
	unsigned int s = 0;

	do {
		js->commit_id = je->last_bytes[s];
		js++;
	} while (++s < ic->sectors_per_block);
}

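/*
 * Write committed journal sections back to their final location on the
 * data device. Consecutive entries that map to adjacent data sectors are
 * merged into one copy operation, entries superseded by a newer committed
 * sector are skipped, and the corresponding tags are written to the
 * metadata area before the data is copied out of the journal.
 */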
static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start,
			     unsigned int write_sections, bool from_replay)
{
	unsigned int i, j, n;
	struct journal_completion comp;
	struct blk_plug plug;

	blk_start_plug(&plug);

	comp.ic = ic;
	comp.in_flight = (atomic_t)ATOMIC_INIT(1);
	init_completion(&comp.comp);

	i = write_start;
	for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
#ifndef INTERNAL_VERIFY
		if (unlikely(from_replay))
#endif
			rw_section_mac(ic, i, false);
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je = access_journal_entry(ic, i, j);
			sector_t sec, area, offset;
			unsigned int k, l, next_loop;
			sector_t metadata_block;
			unsigned int metadata_offset;
			struct journal_io *io;

			if (journal_entry_is_unused(je))
				continue;
			BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
			sec = journal_entry_get_sector(je);
			if (unlikely(from_replay)) {
				if (unlikely(sec & (unsigned int)(ic->sectors_per_block - 1))) {
					dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
					sec &= ~(sector_t)(ic->sectors_per_block - 1);
				}
				if (unlikely(sec >= ic->provided_data_sectors)) {
					journal_entry_set_unused(je);
					continue;
				}
			}
			get_area_and_offset(ic, sec, &area, &offset);
			restore_last_bytes(ic, access_journal_data(ic, i, j), je);
			for (k = j + 1; k < ic->journal_section_entries; k++) {
				struct journal_entry *je2 = access_journal_entry(ic, i, k);
				sector_t sec2, area2, offset2;

				if (journal_entry_is_unused(je2))
					break;
				BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
				sec2 = journal_entry_get_sector(je2);
				if (unlikely(sec2 >= ic->provided_data_sectors))
					break;
				get_area_and_offset(ic, sec2, &area2, &offset2);
				if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
					break;
				restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
			}
			next_loop = k - 1;

			io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
			io->comp = &comp;
			io->range.logical_sector = sec;
			io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;

			spin_lock_irq(&ic->endio_wait.lock);
			add_new_range_and_wait(ic, &io->range);

			if (likely(!from_replay)) {
				struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];

				/* don't write if there is newer committed sector */
				while (j < k && find_newer_committed_node(ic, &section_node[j])) {
					struct journal_entry *je2 = access_journal_entry(ic, i, j);

					journal_entry_set_unused(je2);
					remove_journal_node(ic, &section_node[j]);
					j++;
					sec += ic->sectors_per_block;
					offset += ic->sectors_per_block;
				}
				while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
					struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);

					journal_entry_set_unused(je2);
					remove_journal_node(ic, &section_node[k - 1]);
					k--;
				}
				if (j == k) {
					remove_range_unlocked(ic, &io->range);
					spin_unlock_irq(&ic->endio_wait.lock);
					mempool_free(io, &ic->journal_io_mempool);
					goto skip_io;
				}
				for (l = j; l < k; l++)
					remove_journal_node(ic, &section_node[l]);
			}
			spin_unlock_irq(&ic->endio_wait.lock);

			metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
			for (l = j; l < k; l++) {
				int r;
				struct journal_entry *je2 = access_journal_entry(ic, i, l);

				if (
#ifndef INTERNAL_VERIFY
				    unlikely(from_replay) &&
#endif
				    ic->internal_hash) {
					char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];

					integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
								  (char *)access_journal_data(ic, i, l), test_tag);
					if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) {
						dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
						dm_audit_log_target(DM_MSG_PREFIX, "integrity-replay-journal", ic->ti, 0);
					}
				}

				journal_entry_set_unused(je2);
				r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
							ic->tag_size, TAG_WRITE);
				if (unlikely(r))
					dm_integrity_io_error(ic, "reading tags", r);
			}

			atomic_inc(&comp.in_flight);
			copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
					  (k - j) << ic->sb->log2_sectors_per_block,
					  get_data_sector(ic, area, offset),
					  complete_copy_from_journal, io);
skip_io:
			j = next_loop;
		}
	}

	dm_bufio_write_dirty_buffers_async(ic->bufio);

	blk_finish_plug(&plug);

	complete_journal_op(&comp);
	wait_for_completion_io(&comp.comp);

	dm_integrity_flush_buffers(ic, true);
}

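/*
 * Writer worker: flush committed journal sections to the data device via
 * do_journal_write, then return the space to the journal and wake any
 * writers that were blocked waiting for free sectors.
 */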
static void integrity_writer(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
	unsigned int write_start, write_sections;
	unsigned int prev_free_sectors;

	spin_lock_irq(&ic->endio_wait.lock);
	write_start = ic->committed_section;
	write_sections = ic->n_committed_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (!write_sections)
		return;

	do_journal_write(ic, write_start, write_sections, false);

	spin_lock_irq(&ic->endio_wait.lock);

	ic->committed_section += write_sections;
	wraparound_section(ic, &ic->committed_section);
	ic->n_committed_sections -= write_sections;

	prev_free_sectors = ic->free_sectors;
	ic->free_sectors += write_sections * ic->journal_section_entries;
	if (unlikely(!prev_free_sectors))
		wake_up_locked(&ic->endio_wait);

	spin_unlock_irq(&ic->endio_wait.lock);
}

static void recalc_write_super(struct dm_integrity_c *ic)
{
	int r;

	dm_integrity_flush_buffers(ic, false);
	if (dm_integrity_failed(ic))
		return;

	r = sync_rw_sb(ic, REQ_OP_WRITE);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing superblock", r);
}

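/*
 * Background recalculation: walk the device from sb->recalc_sector in
 * chunks of up to RECALC_SECTORS, read the data, compute the tags and
 * write them to the metadata area, periodically persisting the progress
 * in the superblock. The buffers are halved and retried on allocation
 * failure; in 'B' mode, already-clean bitmap regions are skipped.
 */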
static void integrity_recalc(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
	size_t recalc_tags_size;
	u8 *recalc_buffer = NULL;
	u8 *recalc_tags = NULL;
	struct dm_integrity_range range;
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	sector_t area, offset;
	sector_t metadata_block;
	unsigned int metadata_offset;
	sector_t logical_sector, n_sectors;
	__u8 *t;
	unsigned int i;
	int r;
	unsigned int super_counter = 0;
	unsigned recalc_sectors = RECALC_SECTORS;

retry:
	recalc_buffer = __vmalloc(recalc_sectors << SECTOR_SHIFT, GFP_NOIO);
	if (!recalc_buffer) {
oom:
		recalc_sectors >>= 1;
		if (recalc_sectors >= 1U << ic->sb->log2_sectors_per_block)
			goto retry;
		DMCRIT("out of memory for recalculate buffer - recalculation disabled");
		goto free_ret;
	}
	recalc_tags_size = (recalc_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
	if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size)
		recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size;
	recalc_tags = kvmalloc(recalc_tags_size, GFP_NOIO);
	if (!recalc_tags) {
		vfree(recalc_buffer);
		recalc_buffer = NULL;
		goto oom;
	}

	DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));

	spin_lock_irq(&ic->endio_wait.lock);

next_chunk:

	if (unlikely(dm_post_suspending(ic->ti)))
		goto unlock_ret;

	range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
	if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
		if (ic->mode == 'B') {
			block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
			DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
			queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
		}
		goto unlock_ret;
	}

	get_area_and_offset(ic, range.logical_sector, &area, &offset);
	range.n_sectors = min((sector_t)recalc_sectors, ic->provided_data_sectors - range.logical_sector);
	if (!ic->meta_dev)
		range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned int)offset);

	add_new_range_and_wait(ic, &range);
	spin_unlock_irq(&ic->endio_wait.lock);
	logical_sector = range.logical_sector;
	n_sectors = range.n_sectors;

	if (ic->mode == 'B') {
		if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
			goto advance_and_next;

		while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
				       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
			logical_sector += ic->sectors_per_block;
			n_sectors -= ic->sectors_per_block;
			cond_resched();
		}
		while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
				       ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
			n_sectors -= ic->sectors_per_block;
			cond_resched();
		}
		get_area_and_offset(ic, logical_sector, &area, &offset);
	}

	DEBUG_print("recalculating: %llx, %llx\n", logical_sector, n_sectors);

	if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
		recalc_write_super(ic);
		if (ic->mode == 'B')
			queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);

		super_counter = 0;
	}

	if (unlikely(dm_integrity_failed(ic)))
		goto err;

	io_req.bi_opf = REQ_OP_READ;
	io_req.mem.type = DM_IO_VMA;
	io_req.mem.ptr.addr = recalc_buffer;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = get_data_sector(ic, area, offset);
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "reading data", r);
		goto err;
	}

	t = recalc_tags;
	for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
		integrity_sector_checksum(ic, logical_sector + i, recalc_buffer + (i << SECTOR_SHIFT), t);
		t += ic->tag_size;
	}

	metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);

	r = dm_integrity_rw_tag(ic, recalc_tags, &metadata_block, &metadata_offset, t - recalc_tags, TAG_WRITE);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "writing tags", r);
		goto err;
	}

	if (ic->mode == 'B') {
		sector_t start, end;

		start = (range.logical_sector >>
			 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
			(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
		end = ((range.logical_sector + range.n_sectors) >>
		       (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
			(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
		block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
	}

advance_and_next:
	cond_resched();

	spin_lock_irq(&ic->endio_wait.lock);
	remove_range_unlocked(ic, &range);
	ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
	goto next_chunk;

err:
	remove_range(ic, &range);
	goto free_ret;

unlock_ret:
	spin_unlock_irq(&ic->endio_wait.lock);

	recalc_write_super(ic);

free_ret:
	vfree(recalc_buffer);
	kvfree(recalc_tags);
}

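/*
 * Worker for one bitmap block in 'B' mode: bios whose region is already
 * marked in may_write_bitmap proceed immediately; for the rest, the bits
 * are set in the journal bitmap, the bitmap block is written out with FUA,
 * and only then are the bios allowed to continue.
 */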
static void bitmap_block_work(struct work_struct *w)
{
	struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
	struct dm_integrity_c *ic = bbs->ic;
	struct bio *bio;
	struct bio_list bio_queue;
	struct bio_list waiting;

	bio_list_init(&waiting);

	spin_lock(&bbs->bio_queue_lock);
	bio_queue = bbs->bio_queue;
	bio_list_init(&bbs->bio_queue);
	spin_unlock(&bbs->bio_queue_lock);

	while ((bio = bio_list_pop(&bio_queue))) {
		struct dm_integrity_io *dio;

		dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

		if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
				    dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
			remove_range(ic, &dio->range);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->offload_wq, &dio->work);
		} else {
			block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
					dio->range.n_sectors, BITMAP_OP_SET);
			bio_list_add(&waiting, bio);
		}
	}

	if (bio_list_empty(&waiting))
		return;

	rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC,
			   bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
			   BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);

	while ((bio = bio_list_pop(&waiting))) {
		struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

		block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
				dio->range.n_sectors, BITMAP_OP_SET);

		remove_range(ic, &dio->range);
		INIT_WORK(&dio->work, integrity_bio_wait);
		queue_work(ic->offload_wq, &dio->work);
	}

	queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
}

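/*
 * Periodic bitmap flush in 'B' mode: lock the whole device range, flush
 * data and metadata, clear the dirty bits (up to the recalculation
 * position while recalculation is in progress), write the bitmap out and
 * complete any bios held for synchronous mode.
 */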
static void bitmap_flush_work(struct work_struct *work)
{
	struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
	struct dm_integrity_range range;
	unsigned long limit;
	struct bio *bio;

	dm_integrity_flush_buffers(ic, false);

	range.logical_sector = 0;
	range.n_sectors = ic->provided_data_sectors;

	spin_lock_irq(&ic->endio_wait.lock);
	add_new_range_and_wait(ic, &range);
	spin_unlock_irq(&ic->endio_wait.lock);

	dm_integrity_flush_buffers(ic, true);

	limit = ic->provided_data_sectors;
	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
		limit = le64_to_cpu(ic->sb->recalc_sector)
			>> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
			<< (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	}
	/*DEBUG_print("zeroing journal\n");*/
	block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
	block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);

	rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
			   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);

	spin_lock_irq(&ic->endio_wait.lock);
	remove_range_unlocked(ic, &range);
	while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
		bio_endio(bio);
		spin_unlock_irq(&ic->endio_wait.lock);
		spin_lock_irq(&ic->endio_wait.lock);
	}
	spin_unlock_irq(&ic->endio_wait.lock);
}


static void init_journal(struct dm_integrity_c *ic, unsigned int start_section,
			 unsigned int n_sections, unsigned char commit_seq)
{
	unsigned int i, j, n;

	if (!n_sections)
		return;

	for (n = 0; n < n_sections; n++) {
		i = start_section + n;
		wraparound_section(ic, &i);
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js = access_journal(ic, i, j);

			BUILD_BUG_ON(sizeof(js->sectors) != JOURNAL_SECTOR_DATA);
			memset(&js->sectors, 0, sizeof(js->sectors));
			js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
		}
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je = access_journal_entry(ic, i, j);

			journal_entry_set_unused(je);
		}
	}

	write_journal(ic, start_section, n_sections);
}

static int find_commit_seq(struct dm_integrity_c *ic, unsigned int i, unsigned int j, commit_id_t id)
{
	unsigned char k;

	for (k = 0; k < N_COMMIT_IDS; k++) {
		if (dm_integrity_commit_id(ic, i, j, k) == id)
			return k;
	}
	dm_integrity_io_error(ic, "journal commit id", -EIO);
	return -EIO;
}

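/*
 * Replay the on-disk journal after an unclean shutdown: read (and, if
 * configured, decrypt) the journal, determine the most recent commit seq
 * from the per-sector commit ids, replay the consistent sections through
 * do_journal_write, and otherwise reinitialize the journal; finally reset
 * the in-memory journal state to continue from the right section.
 */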
static void replay_journal(struct dm_integrity_c *ic)
{
	unsigned int i, j;
	bool used_commit_ids[N_COMMIT_IDS];
	unsigned int max_commit_id_sections[N_COMMIT_IDS];
	unsigned int write_start, write_sections;
	unsigned int continue_section;
	bool journal_empty;
	unsigned char unused, last_used, want_commit_seq;

	if (ic->mode == 'R')
		return;

	if (ic->journal_uptodate)
		return;

	last_used = 0;
	write_start = 0;

	if (!ic->just_formatted) {
		DEBUG_print("reading journal\n");
		rw_journal(ic, REQ_OP_READ, 0, ic->journal_sections, NULL);
		if (ic->journal_io)
			DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
		if (ic->journal_io) {
			struct journal_completion crypt_comp;

			crypt_comp.ic = ic;
			init_completion(&crypt_comp.comp);
			crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
			wait_for_completion(&crypt_comp.comp);
		}
		DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
	}

	if (dm_integrity_failed(ic))
		goto clear_journal;

	journal_empty = true;
	memset(used_commit_ids, 0, sizeof(used_commit_ids));
	memset(max_commit_id_sections, 0, sizeof(max_commit_id_sections));
	for (i = 0; i < ic->journal_sections; i++) {
		for (j = 0; j < ic->journal_section_sectors; j++) {
			int k;
			struct journal_sector *js = access_journal(ic, i, j);

			k = find_commit_seq(ic, i, j, js->commit_id);
			if (k < 0)
				goto clear_journal;
			used_commit_ids[k] = true;
			max_commit_id_sections[k] = i;
		}
		if (journal_empty) {
			for (j = 0; j < ic->journal_section_entries; j++) {
				struct journal_entry *je = access_journal_entry(ic, i, j);

				if (!journal_entry_is_unused(je)) {
					journal_empty = false;
					break;
				}
			}
		}
	}

	if (!used_commit_ids[N_COMMIT_IDS - 1]) {
		unused = N_COMMIT_IDS - 1;
		while (unused && !used_commit_ids[unused - 1])
			unused--;
	} else {
		for (unused = 0; unused < N_COMMIT_IDS; unused++)
			if (!used_commit_ids[unused])
				break;
		if (unused == N_COMMIT_IDS) {
			dm_integrity_io_error(ic, "journal commit ids", -EIO);
			goto clear_journal;
		}
	}
	DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
		    unused, used_commit_ids[0], used_commit_ids[1],
		    used_commit_ids[2], used_commit_ids[3]);

	last_used = prev_commit_seq(unused);
	want_commit_seq = prev_commit_seq(last_used);

	if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
		journal_empty = true;

	write_start = max_commit_id_sections[last_used] + 1;
	if (unlikely(write_start >= ic->journal_sections))
		want_commit_seq = next_commit_seq(want_commit_seq);
	wraparound_section(ic, &write_start);

	i = write_start;
	for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js = access_journal(ic, i, j);

			if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
				/*
				 * This could be caused by crash during writing.
				 * We won't replay the inconsistent part of the
				 * journal.
				 */
				DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
					    i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
				goto brk;
			}
		}
		i++;
		if (unlikely(i >= ic->journal_sections))
			want_commit_seq = next_commit_seq(want_commit_seq);
		wraparound_section(ic, &i);
	}
brk:

	if (!journal_empty) {
		DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
			    write_sections, write_start, want_commit_seq);
		do_journal_write(ic, write_start, write_sections, true);
	}

	if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
		continue_section = write_start;
		ic->commit_seq = want_commit_seq;
		DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
	} else {
		unsigned int s;
		unsigned char erase_seq;

clear_journal:
		DEBUG_print("clearing journal\n");

		erase_seq = prev_commit_seq(prev_commit_seq(last_used));
		s = write_start;
		init_journal(ic, s, 1, erase_seq);
		s++;
		wraparound_section(ic, &s);
		if (ic->journal_sections >= 2) {
			init_journal(ic, s, ic->journal_sections - 2, erase_seq);
			s += ic->journal_sections - 2;
			wraparound_section(ic, &s);
			init_journal(ic, s, 1, erase_seq);
		}

		continue_section = 0;
		ic->commit_seq = next_commit_seq(erase_seq);
	}

	ic->committed_section = continue_section;
	ic->n_committed_sections = 0;

	ic->uncommitted_section = continue_section;
	ic->n_uncommitted_sections = 0;

	ic->free_section = continue_section;
	ic->free_section_entry = 0;
	ic->free_sectors = ic->journal_entries;

	ic->journal_tree_root = RB_ROOT;
	for (i = 0; i < ic->journal_entries; i++)
		init_journal_node(&ic->journal_tree[i]);
}

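/*
 * In bitmap mode, switch to (nearly) synchronous operation: shorten the
 * bitmap flush interval to the minimum and flush the pending bitmap work.
 * Used by the reboot notifier below so that a reboot doesn't race with
 * delayed bitmap writeback.
 */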
static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
{
	DEBUG_print("%s\n", __func__);

	if (ic->mode == 'B') {
		ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
		ic->synchronous_mode = 1;

		cancel_delayed_work_sync(&ic->bitmap_flush_work);
		queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
		flush_workqueue(ic->commit_wq);
	}
}

static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);

	DEBUG_print("%s\n", __func__);

	dm_integrity_enter_synchronous_mode(ic);

	return NOTIFY_DONE;
}

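/*
 * Quiesce the target on suspend: stop the autocommit timer, drain the
 * recalculation, commit and writer workqueues, flush outstanding buffers
 * and mark the journal clean (or the bitmap clear) so that the next resume
 * does not need to replay anything.
 */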
static void dm_integrity_postsuspend(struct dm_target *ti)
{
	struct dm_integrity_c *ic = ti->private;
	int r;

	WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));

	del_timer_sync(&ic->autocommit_timer);

	if (ic->recalc_wq)
		drain_workqueue(ic->recalc_wq);

	if (ic->mode == 'B')
		cancel_delayed_work_sync(&ic->bitmap_flush_work);

	queue_work(ic->commit_wq, &ic->commit_work);
	drain_workqueue(ic->commit_wq);

	if (ic->mode == 'J') {
		queue_work(ic->writer_wq, &ic->writer_work);
		drain_workqueue(ic->writer_wq);
		dm_integrity_flush_buffers(ic, true);
		if (ic->wrote_to_journal) {
			init_journal(ic, ic->free_section,
				     ic->journal_sections - ic->free_section, ic->commit_seq);
			if (ic->free_section) {
				init_journal(ic, 0, ic->free_section,
					     next_commit_seq(ic->commit_seq));
			}
		}
	}

	if (ic->mode == 'B') {
		dm_integrity_flush_buffers(ic, true);
#if 1
		/* set to 0 to test bitmap replay code */
		init_journal(ic, 0, ic->journal_sections, 0);
		ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
		r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
		if (unlikely(r))
			dm_integrity_io_error(ic, "writing superblock", r);
#endif
	}

	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));

	ic->journal_uptodate = true;
}

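/*
 * Bring the target back up: grow the bitmap if the device grew, replay the
 * journal or recover from the dirty bitmap, restart recalculation if it was
 * interrupted, and register the reboot notifier.
 */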
static void dm_integrity_resume(struct dm_target *ti)
{
	struct dm_integrity_c *ic = ti->private;
	__u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
	int r;

	DEBUG_print("resume\n");

	ic->wrote_to_journal = false;

	if (ic->provided_data_sectors != old_provided_data_sectors) {
		if (ic->provided_data_sectors > old_provided_data_sectors &&
		    ic->mode == 'B' &&
		    ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
			rw_journal_sectors(ic, REQ_OP_READ, 0,
					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
			block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
					ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
			rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
		}

		ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
		r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
		if (unlikely(r))
			dm_integrity_io_error(ic, "writing superblock", r);
	}

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
		DEBUG_print("resume dirty_bitmap\n");
		rw_journal_sectors(ic, REQ_OP_READ, 0,
				   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
		if (ic->mode == 'B') {
			if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
			    !ic->reset_recalculate_flag) {
				block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
				block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
				if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
						     BITMAP_OP_TEST_ALL_CLEAR)) {
					ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
					ic->sb->recalc_sector = cpu_to_le64(0);
				}
			} else {
				DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
					    ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
				ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
				block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
				block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
				block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
				rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
						   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
				ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
				ic->sb->recalc_sector = cpu_to_le64(0);
			}
		} else {
			if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
			      block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR)) ||
			    ic->reset_recalculate_flag) {
				ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
				ic->sb->recalc_sector = cpu_to_le64(0);
			}
			init_journal(ic, 0, ic->journal_sections, 0);
			replay_journal(ic);
			ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
		}
		r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
		if (unlikely(r))
			dm_integrity_io_error(ic, "writing superblock", r);
	} else {
		replay_journal(ic);
		if (ic->reset_recalculate_flag) {
			ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
			ic->sb->recalc_sector = cpu_to_le64(0);
		}
		if (ic->mode == 'B') {
			ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
			ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
			r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
			if (unlikely(r))
				dm_integrity_io_error(ic, "writing superblock", r);

			block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
			block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
			block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
			if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
			    le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
				block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
				block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
				block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
						ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
			}
			rw_journal_sectors(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, 0,
					   ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
		}
	}

	DEBUG_print("testing recalc: %x\n", ic->sb->flags);
	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
		__u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);

		DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors);
		if (recalc_pos < ic->provided_data_sectors) {
			queue_work(ic->recalc_wq, &ic->recalc_work);
		} else if (recalc_pos > ic->provided_data_sectors) {
			ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
			recalc_write_super(ic);
		}
	}

	ic->reboot_notifier.notifier_call = dm_integrity_reboot;
	ic->reboot_notifier.next = NULL;
	ic->reboot_notifier.priority = INT_MAX - 1;	/* be notified after md and before hardware drivers */
	WARN_ON(register_reboot_notifier(&ic->reboot_notifier));

#if 0
	/* set to 1 to stress test synchronous mode */
	dm_integrity_enter_synchronous_mode(ic);
#endif
}

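/*
 * Report status: STATUSTYPE_INFO reports the mismatch count, the number of
 * provided sectors and the recalculation position; STATUSTYPE_TABLE
 * reconstructs the constructor arguments; STATUSTYPE_IMA emits the
 * measurement string.
 */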
static void dm_integrity_status(struct dm_target *ti, status_type_t type,
				unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct dm_integrity_c *ic = ti->private;
	unsigned int arg_count;
	size_t sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%llu %llu",
		       (unsigned long long)atomic64_read(&ic->number_of_mismatches),
		       ic->provided_data_sectors);
		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
			DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector));
		else
			DMEMIT(" -");
		break;

	case STATUSTYPE_TABLE: {
		__u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;

		watermark_percentage += ic->journal_entries / 2;
		do_div(watermark_percentage, ic->journal_entries);
		arg_count = 3;
		arg_count += !!ic->meta_dev;
		arg_count += ic->sectors_per_block != 1;
		arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
		arg_count += ic->reset_recalculate_flag;
		arg_count += ic->discard;
		arg_count += ic->mode == 'J';
		arg_count += ic->mode == 'J';
		arg_count += ic->mode == 'B';
		arg_count += ic->mode == 'B';
		arg_count += !!ic->internal_hash_alg.alg_string;
		arg_count += !!ic->journal_crypt_alg.alg_string;
		arg_count += !!ic->journal_mac_alg.alg_string;
		arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
		arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0;
		arg_count += ic->legacy_recalculate;
		DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
		       ic->tag_size, ic->mode, arg_count);
		if (ic->meta_dev)
			DMEMIT(" meta_device:%s", ic->meta_dev->name);
		if (ic->sectors_per_block != 1)
			DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
			DMEMIT(" recalculate");
		if (ic->reset_recalculate_flag)
			DMEMIT(" reset_recalculate");
		if (ic->discard)
			DMEMIT(" allow_discards");
		DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
		DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
		DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
		if (ic->mode == 'J') {
			DMEMIT(" journal_watermark:%u", (unsigned int)watermark_percentage);
			DMEMIT(" commit_time:%u", ic->autocommit_msec);
		}
		if (ic->mode == 'B') {
			DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
			DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
		}
		if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
			DMEMIT(" fix_padding");
		if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0)
			DMEMIT(" fix_hmac");
		if (ic->legacy_recalculate)
			DMEMIT(" legacy_recalculate");

#define EMIT_ALG(a, n)							\
		do {							\
			if (ic->a.alg_string) {				\
				DMEMIT(" %s:%s", n, ic->a.alg_string);	\
				if (ic->a.key_string)			\
					DMEMIT(":%s", ic->a.key_string);\
			}						\
		} while (0)
		EMIT_ALG(internal_hash_alg, "internal_hash");
		EMIT_ALG(journal_crypt_alg, "journal_crypt");
		EMIT_ALG(journal_mac_alg, "journal_mac");
		break;
	}
	case STATUSTYPE_IMA:
		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",dev_name=%s,start=%llu,tag_size=%u,mode=%c",
		       ic->dev->name, ic->start, ic->tag_size, ic->mode);

		if (ic->meta_dev)
			DMEMIT(",meta_device=%s", ic->meta_dev->name);
		if (ic->sectors_per_block != 1)
			DMEMIT(",block_size=%u", ic->sectors_per_block << SECTOR_SHIFT);

		DMEMIT(",recalculate=%c", (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) ?
		       'y' : 'n');
		DMEMIT(",allow_discards=%c", ic->discard ? 'y' : 'n');
		DMEMIT(",fix_padding=%c",
		       ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0) ? 'y' : 'n');
		DMEMIT(",fix_hmac=%c",
		       ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0) ? 'y' : 'n');
		DMEMIT(",legacy_recalculate=%c", ic->legacy_recalculate ? 'y' : 'n');

		DMEMIT(",journal_sectors=%u", ic->initial_sectors - SB_SECTORS);
		DMEMIT(",interleave_sectors=%u", 1U << ic->sb->log2_interleave_sectors);
		DMEMIT(",buffer_sectors=%u", 1U << ic->log2_buffer_sectors);
		DMEMIT(";");
		break;
	}
}

static int dm_integrity_iterate_devices(struct dm_target *ti,
					iterate_devices_callout_fn fn, void *data)
{
	struct dm_integrity_c *ic = ti->private;

	if (!ic->meta_dev)
		return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
	else
		return fn(ti, ic->dev, 0, ti->len, data);
}

static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_integrity_c *ic = ti->private;

	if (ic->sectors_per_block > 1) {
		limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
		limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
		blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
		limits->dma_alignment = limits->logical_block_size - 1;
	}
	limits->max_integrity_segments = USHRT_MAX;
}

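/*
 * Derive the journal geometry from the superblock: how many journal entries
 * fit into one journal sector (minus the per-sector MAC, if any) and, from
 * that, the number of sectors and entries per section and in total.
 */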
static void calculate_journal_section_size(struct dm_integrity_c *ic)
{
	unsigned int sector_space = JOURNAL_SECTOR_DATA;

	ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
	ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
					 JOURNAL_ENTRY_ROUNDUP);

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
		sector_space -= JOURNAL_MAC_PER_SECTOR;
	ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
	ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
	ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
	ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
}

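/*
 * Compute the on-disk layout (initial sectors, metadata run) and check that
 * it fits into the data/metadata device. Returns -EINVAL if the device is
 * too small for the requested geometry.
 */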
static int calculate_device_limits(struct dm_integrity_c *ic)
{
	__u64 initial_sectors;

	calculate_journal_section_size(ic);
	initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
	if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
		return -EINVAL;
	ic->initial_sectors = initial_sectors;

	if (!ic->meta_dev) {
		sector_t last_sector, last_area, last_offset;

		/* we have to maintain excessive padding for compatibility with existing volumes */
		__u64 metadata_run_padding =
			ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ?
			(__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT) :
			(__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS);

		ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
					    metadata_run_padding) >> SECTOR_SHIFT;
		if (!(ic->metadata_run & (ic->metadata_run - 1)))
			ic->log2_metadata_run = __ffs(ic->metadata_run);
		else
			ic->log2_metadata_run = -1;

		get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
		last_sector = get_data_sector(ic, last_area, last_offset);
		if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
			return -EINVAL;
	} else {
		__u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;

		meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
			>> (ic->log2_buffer_sectors + SECTOR_SHIFT);
		meta_size <<= ic->log2_buffer_sectors;
		if (ic->initial_sectors + meta_size < ic->initial_sectors ||
		    ic->initial_sectors + meta_size > ic->meta_device_sectors)
			return -EINVAL;
		ic->metadata_run = 1;
		ic->log2_metadata_run = 0;
	}

	return 0;
}

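/*
 * Determine how many data sectors can be provided. Without a separate
 * metadata device this is found by a bitwise search over candidate sizes
 * (setting bits from the top and testing the resulting layout); with one,
 * it is the data device size rounded down to a multiple of the block size.
 */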
static void get_provided_data_sectors(struct dm_integrity_c *ic)
{
	if (!ic->meta_dev) {
		int test_bit;

		ic->provided_data_sectors = 0;
		for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
			__u64 prev_data_sectors = ic->provided_data_sectors;

			ic->provided_data_sectors |= (sector_t)1 << test_bit;
			if (calculate_device_limits(ic))
				ic->provided_data_sectors = prev_data_sectors;
		}
	} else {
		ic->provided_data_sectors = ic->data_device_sectors;
		ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
	}
}

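/*
 * Format a new superblock for an uninitialized device. The journal size is
 * rounded to whole sections; with a separate metadata device the largest
 * journal that still leaves room for the tags is searched for, shrinking
 * the buffer size if necessary.
 */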
static int initialize_superblock(struct dm_integrity_c *ic,
				 unsigned int journal_sectors, unsigned int interleave_sectors)
{
	unsigned int journal_sections;
	int test_bit;

	memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
	memcpy(ic->sb->magic, SB_MAGIC, 8);
	ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
	ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
	if (ic->journal_mac_alg.alg_string)
		ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);

	calculate_journal_section_size(ic);
	journal_sections = journal_sectors / ic->journal_section_sectors;
	if (!journal_sections)
		journal_sections = 1;

	if (ic->fix_hmac && (ic->internal_hash_alg.alg_string || ic->journal_mac_alg.alg_string)) {
		ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_HMAC);
		get_random_bytes(ic->sb->salt, SALT_SIZE);
	}

	if (!ic->meta_dev) {
		if (ic->fix_padding)
			ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
		ic->sb->journal_sections = cpu_to_le32(journal_sections);
		if (!interleave_sectors)
			interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
		ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
		ic->sb->log2_interleave_sectors = max_t(__u8, MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
		ic->sb->log2_interleave_sectors = min_t(__u8, MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);

		get_provided_data_sectors(ic);
		if (!ic->provided_data_sectors)
			return -EINVAL;
	} else {
		ic->sb->log2_interleave_sectors = 0;

		get_provided_data_sectors(ic);
		if (!ic->provided_data_sectors)
			return -EINVAL;

try_smaller_buffer:
		ic->sb->journal_sections = cpu_to_le32(0);
		for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
			__u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
			__u32 test_journal_sections = prev_journal_sections | (1U << test_bit);

			if (test_journal_sections > journal_sections)
				continue;
			ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
			if (calculate_device_limits(ic))
				ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);

		}
		if (!le32_to_cpu(ic->sb->journal_sections)) {
			if (ic->log2_buffer_sectors > 3) {
				ic->log2_buffer_sectors--;
				goto try_smaller_buffer;
			}
			return -EINVAL;
		}
	}

	ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);

	sb_set_version(ic);

	return 0;
}

static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
{
	struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
	struct blk_integrity bi;

	memset(&bi, 0, sizeof(bi));
	bi.profile = &dm_integrity_profile;
	bi.tuple_size = ic->tag_size;
	bi.tag_size = bi.tuple_size;
	bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;

	blk_integrity_register(disk, &bi);
}

static void dm_integrity_free_page_list(struct page_list *pl)
{
	unsigned int i;

	if (!pl)
		return;
	for (i = 0; pl[i].page; i++)
		__free_page(pl[i].page);
	kvfree(pl);
}

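/*
 * Allocate a NULL-terminated, chained page_list of n_pages pages, as used
 * by dm-io. Frees everything and returns NULL on allocation failure.
 */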
static struct page_list *dm_integrity_alloc_page_list(unsigned int n_pages)
{
	struct page_list *pl;
	unsigned int i;

	pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
	if (!pl)
		return NULL;

	for (i = 0; i < n_pages; i++) {
		pl[i].page = alloc_page(GFP_KERNEL);
		if (!pl[i].page) {
			dm_integrity_free_page_list(pl);
			return NULL;
		}
		if (i)
			pl[i - 1].next = &pl[i];
	}
	pl[i].page = NULL;
	pl[i].next = NULL;

	return pl;
}

static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
{
	unsigned int i;

	for (i = 0; i < ic->journal_sections; i++)
		kvfree(sl[i]);
	kvfree(sl);
}

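/*
 * Build one scatterlist per journal section over the given page list, so
 * that a whole section can be passed to the crypto API in one request.
 */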
static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
								   struct page_list *pl)
{
	struct scatterlist **sl;
	unsigned int i;

	sl = kvmalloc_array(ic->journal_sections,
			    sizeof(struct scatterlist *),
			    GFP_KERNEL | __GFP_ZERO);
	if (!sl)
		return NULL;

	for (i = 0; i < ic->journal_sections; i++) {
		struct scatterlist *s;
		unsigned int start_index, start_offset;
		unsigned int end_index, end_offset;
		unsigned int n_pages;
		unsigned int idx;

		page_list_location(ic, i, 0, &start_index, &start_offset);
		page_list_location(ic, i, ic->journal_section_sectors - 1,
				   &end_index, &end_offset);

		n_pages = (end_index - start_index + 1);

		s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
				   GFP_KERNEL);
		if (!s) {
			dm_integrity_free_journal_scatterlist(ic, sl);
			return NULL;
		}

		sg_init_table(s, n_pages);
		for (idx = start_index; idx <= end_index; idx++) {
			char *va = lowmem_page_address(pl[idx].page);
			unsigned int start = 0, end = PAGE_SIZE;

			if (idx == start_index)
				start = start_offset;
			if (idx == end_index)
				end = end_offset + (1 << SECTOR_SHIFT);
			sg_set_buf(&s[idx - start_index], va + start, end - start);
		}

		sl[i] = s;
	}

	return sl;
}

static void free_alg(struct alg_spec *a)
{
	kfree_sensitive(a->alg_string);
	kfree_sensitive(a->key);
	memset(a, 0, sizeof(*a));
}

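/*
 * Parse an "xxx:algorithm[:key]" argument into an alg_spec. The optional
 * key is given as a hex string and is decoded into a->key.
 */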
static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
{
	char *k;

	free_alg(a);

	a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
	if (!a->alg_string)
		goto nomem;

	k = strchr(a->alg_string, ':');
	if (k) {
		*k = 0;
		a->key_string = k + 1;
		if (strlen(a->key_string) & 1)
			goto inval;

		a->key_size = strlen(a->key_string) / 2;
		a->key = kmalloc(a->key_size, GFP_KERNEL);
		if (!a->key)
			goto nomem;
		if (hex2bin(a->key, a->key_string, a->key_size))
			goto inval;
	}

	return 0;
inval:
	*error = error_inval;
	return -EINVAL;
nomem:
	*error = "Out of memory for an argument";
	return -ENOMEM;
}

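/*
 * Allocate the hash/MAC transform described by an alg_spec and set its key.
 * Fails with -ENOKEY if the algorithm requires a key but none was supplied.
 */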
static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
		   char *error_alg, char *error_key)
{
	int r;

	if (a->alg_string) {
		*hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
		if (IS_ERR(*hash)) {
			*error = error_alg;
			r = PTR_ERR(*hash);
			*hash = NULL;
			return r;
		}

		if (a->key) {
			r = crypto_shash_setkey(*hash, a->key, a->key_size);
			if (r) {
				*error = error_key;
				return r;
			}
		} else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
			*error = error_key;
			return -ENOKEY;
		}
	}

	return 0;
}

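/*
 * Allocate the in-memory journal and set up its protection. With
 * journal_crypt, either precompute an XOR keystream (ciphers with block
 * size 1) or allocate per-section skcipher requests with derived IVs.
 * Also makes the four commit ids distinct after possible encryption and
 * allocates the journal node tree.
 */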
static int create_journal(struct dm_integrity_c *ic, char **error)
{
	int r = 0;
	unsigned int i;
	__u64 journal_pages, journal_desc_size, journal_tree_size;
	unsigned char *crypt_data = NULL, *crypt_iv = NULL;
	struct skcipher_request *req = NULL;

	ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
	ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
	ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
	ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);

	journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
				PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
	journal_desc_size = journal_pages * sizeof(struct page_list);
	if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
		*error = "Journal doesn't fit into memory";
		r = -ENOMEM;
		goto bad;
	}
	ic->journal_pages = journal_pages;

	ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
	if (!ic->journal) {
		*error = "Could not allocate memory for journal";
		r = -ENOMEM;
		goto bad;
	}
	if (ic->journal_crypt_alg.alg_string) {
		unsigned int ivsize, blocksize;
		struct journal_completion comp;

		comp.ic = ic;
		ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
		if (IS_ERR(ic->journal_crypt)) {
			*error = "Invalid journal cipher";
			r = PTR_ERR(ic->journal_crypt);
			ic->journal_crypt = NULL;
			goto bad;
		}
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		blocksize = crypto_skcipher_blocksize(ic->journal_crypt);

		if (ic->journal_crypt_alg.key) {
			r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
						   ic->journal_crypt_alg.key_size);
			if (r) {
				*error = "Error setting encryption key";
				goto bad;
			}
		}
		DEBUG_print("cipher %s, block size %u iv size %u\n",
			    ic->journal_crypt_alg.alg_string, blocksize, ivsize);

		ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
		if (!ic->journal_io) {
			*error = "Could not allocate memory for journal io";
			r = -ENOMEM;
			goto bad;
		}

		if (blocksize == 1) {
			struct scatterlist *sg;

			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
			if (!req) {
				*error = "Could not allocate crypt request";
				r = -ENOMEM;
				goto bad;
			}

			crypt_iv = kzalloc(ivsize, GFP_KERNEL);
			if (!crypt_iv) {
				*error = "Could not allocate iv";
				r = -ENOMEM;
				goto bad;
			}

			ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
			if (!ic->journal_xor) {
				*error = "Could not allocate memory for journal xor";
				r = -ENOMEM;
				goto bad;
			}

			sg = kvmalloc_array(ic->journal_pages + 1,
					    sizeof(struct scatterlist),
					    GFP_KERNEL);
			if (!sg) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			sg_init_table(sg, ic->journal_pages + 1);
			for (i = 0; i < ic->journal_pages; i++) {
				char *va = lowmem_page_address(ic->journal_xor[i].page);

				clear_page(va);
				sg_set_buf(&sg[i], va, PAGE_SIZE);
			}
			sg_set_buf(&sg[i], &ic->commit_ids, sizeof(ic->commit_ids));

			skcipher_request_set_crypt(req, sg, sg,
						   PAGE_SIZE * ic->journal_pages + sizeof(ic->commit_ids), crypt_iv);
			init_completion(&comp.comp);
			comp.in_flight = (atomic_t)ATOMIC_INIT(1);
			if (do_crypt(true, req, &comp))
				wait_for_completion(&comp.comp);
			kvfree(sg);
			r = dm_integrity_failed(ic);
			if (r) {
				*error = "Unable to encrypt journal";
				goto bad;
			}
			DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");

			crypto_free_skcipher(ic->journal_crypt);
			ic->journal_crypt = NULL;
		} else {
			unsigned int crypt_len = roundup(ivsize, blocksize);

			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
			if (!req) {
				*error = "Could not allocate crypt request";
				r = -ENOMEM;
				goto bad;
			}

			crypt_iv = kmalloc(ivsize, GFP_KERNEL);
			if (!crypt_iv) {
				*error = "Could not allocate iv";
				r = -ENOMEM;
				goto bad;
			}

			crypt_data = kmalloc(crypt_len, GFP_KERNEL);
			if (!crypt_data) {
				*error = "Unable to allocate crypt data";
				r = -ENOMEM;
				goto bad;
			}

			ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
			if (!ic->journal_scatterlist) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
			if (!ic->journal_io_scatterlist) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			ic->sk_requests = kvmalloc_array(ic->journal_sections,
							 sizeof(struct skcipher_request *),
							 GFP_KERNEL | __GFP_ZERO);
			if (!ic->sk_requests) {
				*error = "Unable to allocate sk requests";
				r = -ENOMEM;
				goto bad;
			}
			for (i = 0; i < ic->journal_sections; i++) {
				struct scatterlist sg;
				struct skcipher_request *section_req;
				__le32 section_le = cpu_to_le32(i);

				memset(crypt_iv, 0x00, ivsize);
				memset(crypt_data, 0x00, crypt_len);
				memcpy(crypt_data, &section_le, min_t(size_t, crypt_len, sizeof(section_le)));

				sg_init_one(&sg, crypt_data, crypt_len);
				skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
				init_completion(&comp.comp);
				comp.in_flight = (atomic_t)ATOMIC_INIT(1);
				if (do_crypt(true, req, &comp))
					wait_for_completion(&comp.comp);

				r = dm_integrity_failed(ic);
				if (r) {
					*error = "Unable to generate iv";
					goto bad;
				}

				section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
				if (!section_req) {
					*error = "Unable to allocate crypt request";
					r = -ENOMEM;
					goto bad;
				}
				section_req->iv = kmalloc_array(ivsize, 2,
								GFP_KERNEL);
				if (!section_req->iv) {
					skcipher_request_free(section_req);
					*error = "Unable to allocate iv";
					r = -ENOMEM;
					goto bad;
				}
				memcpy(section_req->iv + ivsize, crypt_data, ivsize);
				section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
				ic->sk_requests[i] = section_req;
				DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
			}
		}
	}

	for (i = 0; i < N_COMMIT_IDS; i++) {
		unsigned int j;

retest_commit_id:
		for (j = 0; j < i; j++) {
			if (ic->commit_ids[j] == ic->commit_ids[i]) {
				ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
				goto retest_commit_id;
			}
		}
		DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
	}

	journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
	if (journal_tree_size > ULONG_MAX) {
		*error = "Journal doesn't fit into memory";
		r = -ENOMEM;
		goto bad;
	}
	ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
	if (!ic->journal_tree) {
		*error = "Could not allocate memory for journal tree";
		r = -ENOMEM;
	}
bad:
	kfree(crypt_data);
	kfree(crypt_iv);
	skcipher_request_free(req);

	return r;
}

/*
 * Construct an integrity mapping
 *
 * Arguments:
 *	device
 *	offset from the start of the device
 *	tag size
 *	D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
 *	number of optional arguments
 *	optional arguments:
 *		journal_sectors
 *		interleave_sectors
 *		buffer_sectors
 *		journal_watermark
 *		commit_time
 *		meta_device
 *		block_size
 *		sectors_per_bit
 *		bitmap_flush_interval
 *		internal_hash
 *		journal_crypt
 *		journal_mac
 *		recalculate
 */
static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_integrity_c *ic;
	char dummy;
	int r;
	unsigned int extra_args;
	struct dm_arg_set as;
	static const struct dm_arg _args[] = {
		{0, 18, "Invalid number of feature args"},
	};
	unsigned int journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
	bool should_write_sb;
	__u64 threshold;
	unsigned long long start;
	__s8 log2_sectors_per_bitmap_bit = -1;
	__s8 log2_blocks_per_bitmap_bit;
	__u64 bits_in_journal;
	__u64 n_bitmap_bits;

#define DIRECT_ARGUMENTS	4

	if (argc <= DIRECT_ARGUMENTS) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
	if (!ic) {
		ti->error = "Cannot allocate integrity context";
		return -ENOMEM;
	}
	ti->private = ic;
	ti->per_io_data_size = sizeof(struct dm_integrity_io);
	ic->ti = ti;

	ic->in_progress = RB_ROOT;
	INIT_LIST_HEAD(&ic->wait_list);
	init_waitqueue_head(&ic->endio_wait);
	bio_list_init(&ic->flush_bio_list);
	init_waitqueue_head(&ic->copy_to_journal_wait);
	init_completion(&ic->crypto_backoff);
	atomic64_set(&ic->number_of_mismatches, 0);
	ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
	if (r) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
		ti->error = "Invalid starting offset";
		r = -EINVAL;
		goto bad;
	}
	ic->start = start;

	if (strcmp(argv[2], "-")) {
		if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
			ti->error = "Invalid tag size";
			r = -EINVAL;
			goto bad;
		}
	}

	if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
	    !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
		ic->mode = argv[3][0];
	} else {
		ti->error = "Invalid mode (expecting J, B, D, R)";
		r = -EINVAL;
		goto bad;
	}

	journal_sectors = 0;
	interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
	buffer_sectors = DEFAULT_BUFFER_SECTORS;
	journal_watermark = DEFAULT_JOURNAL_WATERMARK;
	sync_msec = DEFAULT_SYNC_MSEC;
	ic->sectors_per_block = 1;

	as.argc = argc - DIRECT_ARGUMENTS;
	as.argv = argv + DIRECT_ARGUMENTS;
	r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
	if (r)
		goto bad;

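	/*
	 * Parse the optional feature arguments. Numeric options overwrite
	 * the defaults set above; string options (internal_hash,
	 * journal_crypt, journal_mac) are parsed into alg_spec structures
	 * and instantiated later.
	 */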
	while (extra_args--) {
		const char *opt_string;
		unsigned int val;
		unsigned long long llval;

		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			r = -EINVAL;
			ti->error = "Not enough feature arguments";
			goto bad;
		}
		if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
			journal_sectors = val ? val : 1;
		else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
			interleave_sectors = val;
		else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
			buffer_sectors = val;
		else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
			journal_watermark = val;
		else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
			sync_msec = val;
		else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
			if (ic->meta_dev) {
				dm_put_device(ti, ic->meta_dev);
				ic->meta_dev = NULL;
			}
			r = dm_get_device(ti, strchr(opt_string, ':') + 1,
					  dm_table_get_mode(ti->table), &ic->meta_dev);
			if (r) {
				ti->error = "Device lookup failed";
				goto bad;
			}
		} else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
			if (val < 1 << SECTOR_SHIFT ||
			    val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
			    (val & (val - 1))) {
				r = -EINVAL;
				ti->error = "Invalid block_size argument";
				goto bad;
			}
			ic->sectors_per_block = val >> SECTOR_SHIFT;
		} else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
			log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
		} else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
			if ((uint64_t)val >= (uint64_t)UINT_MAX * 1000 / HZ) {
				r = -EINVAL;
				ti->error = "Invalid bitmap_flush_interval argument";
				goto bad;
			}
			ic->bitmap_flush_interval = msecs_to_jiffies(val);
		} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
			r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
					    "Invalid internal_hash argument");
			if (r)
				goto bad;
		} else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
			r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
					    "Invalid journal_crypt argument");
			if (r)
				goto bad;
		} else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
			r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
					    "Invalid journal_mac argument");
			if (r)
				goto bad;
		} else if (!strcmp(opt_string, "recalculate")) {
			ic->recalculate_flag = true;
		} else if (!strcmp(opt_string, "reset_recalculate")) {
			ic->recalculate_flag = true;
			ic->reset_recalculate_flag = true;
		} else if (!strcmp(opt_string, "allow_discards")) {
			ic->discard = true;
		} else if (!strcmp(opt_string, "fix_padding")) {
			ic->fix_padding = true;
		} else if (!strcmp(opt_string, "fix_hmac")) {
			ic->fix_hmac = true;
		} else if (!strcmp(opt_string, "legacy_recalculate")) {
			ic->legacy_recalculate = true;
		} else {
			r = -EINVAL;
			ti->error = "Invalid argument";
			goto bad;
		}
	}

	ic->data_device_sectors = bdev_nr_sectors(ic->dev->bdev);
	if (!ic->meta_dev)
		ic->meta_device_sectors = ic->data_device_sectors;
	else
		ic->meta_device_sectors = bdev_nr_sectors(ic->meta_dev->bdev);

	if (!journal_sectors) {
		journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
				      ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
	}

	if (!buffer_sectors)
		buffer_sectors = 1;
	ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);

	r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
		    "Invalid internal hash", "Error setting internal hash key");
	if (r)
		goto bad;

	r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
		    "Invalid journal mac", "Error setting journal mac key");
	if (r)
		goto bad;

	if (!ic->tag_size) {
		if (!ic->internal_hash) {
			ti->error = "Unknown tag size";
			r = -EINVAL;
			goto bad;
		}
		ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
	}
	if (ic->tag_size > MAX_TAG_SIZE) {
		ti->error = "Too big tag size";
		r = -EINVAL;
		goto bad;
	}
	if (!(ic->tag_size & (ic->tag_size - 1)))
		ic->log2_tag_size = __ffs(ic->tag_size);
	else
		ic->log2_tag_size = -1;

	if (ic->mode == 'B' && !ic->internal_hash) {
		r = -EINVAL;
		ti->error = "Bitmap mode can be only used with internal hash";
		goto bad;
	}

	if (ic->discard && !ic->internal_hash) {
		r = -EINVAL;
		ti->error = "Discard can be only used with internal hash";
		goto bad;
	}

	ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
	ic->autocommit_msec = sync_msec;
	timer_setup(&ic->autocommit_timer, autocommit_fn, 0);

	ic->io = dm_io_client_create();
	if (IS_ERR(ic->io)) {
		r = PTR_ERR(ic->io);
		ic->io = NULL;
		ti->error = "Cannot allocate dm io";
		goto bad;
	}

	r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
	if (r) {
		ti->error = "Cannot allocate mempool";
		goto bad;
	}

	r = mempool_init_page_pool(&ic->recheck_pool, 1, 0);
	if (r) {
		ti->error = "Cannot allocate mempool";
		goto bad;
	}

	ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
					  WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
	if (!ic->metadata_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	/*
	 * If this workqueue weren't ordered, it would cause bio reordering
	 * and reduced performance.
	 */
	ic->wait_wq = alloc_ordered_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM);
	if (!ic->wait_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
					 METADATA_WORKQUEUE_MAX_ACTIVE);
	if (!ic->offload_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
	if (!ic->commit_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}
	INIT_WORK(&ic->commit_work, integrity_commit);

	if (ic->mode == 'J' || ic->mode == 'B') {
		ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
		if (!ic->writer_wq) {
			ti->error = "Cannot allocate workqueue";
			r = -ENOMEM;
			goto bad;
		}
		INIT_WORK(&ic->writer_work, integrity_writer);
	}

	ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
	if (!ic->sb) {
		r = -ENOMEM;
		ti->error = "Cannot allocate superblock area";
		goto bad;
	}

	r = sync_rw_sb(ic, REQ_OP_READ);
	if (r) {
		ti->error = "Error reading superblock";
		goto bad;
	}
	should_write_sb = false;
	if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
		if (ic->mode != 'R') {
			if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
				r = -EINVAL;
				ti->error = "The device is not initialized";
				goto bad;
			}
		}

		r = initialize_superblock(ic, journal_sectors, interleave_sectors);
		if (r) {
			ti->error = "Could not initialize superblock";
			goto bad;
		}
		if (ic->mode != 'R')
			should_write_sb = true;
	}

	if (!ic->sb->version || ic->sb->version > SB_VERSION_5) {
		r = -EINVAL;
		ti->error = "Unknown version";
		goto bad;
	}
	if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
		r = -EINVAL;
		ti->error = "Tag size doesn't match the information in superblock";
		goto bad;
	}
	if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
		r = -EINVAL;
		ti->error = "Block size doesn't match the information in superblock";
		goto bad;
	}
	if (!le32_to_cpu(ic->sb->journal_sections)) {
		r = -EINVAL;
		ti->error = "Corrupted superblock, journal_sections is 0";
		goto bad;
	}
	/* make sure that ti->max_io_len doesn't overflow */
	if (!ic->meta_dev) {
		if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
		    ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
			r = -EINVAL;
			ti->error = "Invalid interleave_sectors in the superblock";
			goto bad;
		}
	} else {
		if (ic->sb->log2_interleave_sectors) {
			r = -EINVAL;
			ti->error = "Invalid interleave_sectors in the superblock";
			goto bad;
		}
	}
	if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
		r = -EINVAL;
		ti->error = "Journal mac mismatch";
		goto bad;
	}

	get_provided_data_sectors(ic);
	if (!ic->provided_data_sectors) {
		r = -EINVAL;
		ti->error = "The device is too small";
		goto bad;
	}

try_smaller_buffer:
	r = calculate_device_limits(ic);
	if (r) {
		if (ic->meta_dev) {
			if (ic->log2_buffer_sectors > 3) {
				ic->log2_buffer_sectors--;
				goto try_smaller_buffer;
			}
		}
		ti->error = "The device is too small";
		goto bad;
	}

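	/*
	 * Pick the bitmap granularity: every bitmap bit covers
	 * 2^log2_sectors_per_bitmap_bit data sectors, and the granularity is
	 * raised until the whole bitmap fits into the journal area.
	 */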
4481 | if (log2_sectors_per_bitmap_bit < 0) |
4482 | log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT); |
4483 | if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block) |
4484 | log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block; |
4485 | |
4486 | bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3); |
4487 | if (bits_in_journal > UINT_MAX) |
4488 | bits_in_journal = UINT_MAX; |
4489 | while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit) |
4490 | log2_sectors_per_bitmap_bit++; |
4491 | |
4492 | log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block; |
4493 | ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit; |
4494 | if (should_write_sb) |
4495 | ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit; |
4496 | |
4497 | n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) |
4498 | + (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit; |
4499 | ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8); |
4500 | |
4501 | if (!ic->meta_dev) |
4502 | ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run)); |
4503 | |
4504 | if (ti->len > ic->provided_data_sectors) { |
4505 | r = -EINVAL; |
4506 | ti->error = "Not enough provided sectors for requested mapping size" ; |
4507 | goto bad; |
4508 | } |
4509 | |
4510 | |
4511 | threshold = (__u64)ic->journal_entries * (100 - journal_watermark); |
4512 | threshold += 50; |
4513 | do_div(threshold, 100); |
4514 | ic->free_sectors_threshold = threshold; |
4515 | |
	DEBUG_print("initialized:\n");
	DEBUG_print("	integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
	DEBUG_print("	journal_entry_size %u\n", ic->journal_entry_size);
	DEBUG_print("	journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
	DEBUG_print("	journal_section_entries %u\n", ic->journal_section_entries);
	DEBUG_print("	journal_section_sectors %u\n", ic->journal_section_sectors);
	DEBUG_print("	journal_sections %u\n", (unsigned int)le32_to_cpu(ic->sb->journal_sections));
	DEBUG_print("	journal_entries %u\n", ic->journal_entries);
	DEBUG_print("	log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
	DEBUG_print("	data_device_sectors 0x%llx\n", bdev_nr_sectors(ic->dev->bdev));
	DEBUG_print("	initial_sectors 0x%x\n", ic->initial_sectors);
	DEBUG_print("	metadata_run 0x%x\n", ic->metadata_run);
	DEBUG_print("	log2_metadata_run %d\n", ic->log2_metadata_run);
	DEBUG_print("	provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data_sectors);
	DEBUG_print("	log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
	DEBUG_print("	bits_in_journal %llu\n", bits_in_journal);
4532 | |
4533 | if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) { |
4534 | ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); |
4535 | ic->sb->recalc_sector = cpu_to_le64(0); |
4536 | } |
4537 | |
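	/*
	 * Background recalculation is only possible when the target computes
	 * the tags itself, so the recalc workqueue exists only when an
	 * internal hash is configured.
	 */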
4538 | if (ic->internal_hash) { |
		ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
4540 | if (!ic->recalc_wq) { |
			ti->error = "Cannot allocate workqueue";
4542 | r = -ENOMEM; |
4543 | goto bad; |
4544 | } |
4545 | INIT_WORK(&ic->recalc_work, integrity_recalc); |
4546 | } else { |
4547 | if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { |
			ti->error = "Recalculate can only be specified with internal_hash";
4549 | r = -EINVAL; |
4550 | goto bad; |
4551 | } |
4552 | } |
4553 | |
4554 | if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && |
4555 | le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors && |
4556 | dm_integrity_disable_recalculate(ic)) { |
		ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
4558 | r = -EOPNOTSUPP; |
4559 | goto bad; |
4560 | } |
4561 | |
	ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
			1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL, 0);
	if (IS_ERR(ic->bufio)) {
		r = PTR_ERR(ic->bufio);
		ti->error = "Cannot initialize dm-bufio";
		ic->bufio = NULL;
		goto bad;
	}
	dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
4571 | |
4572 | if (ic->mode != 'R') { |
		r = create_journal(ic, &ti->error);
		if (r)
			goto bad;
	}
4578 | |
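	/*
	 * Bitmap mode: allocate the in-core recalc bitmap, the "may write"
	 * shadow bitmap, and one bitmap_block_status per BITMAP_BLOCK_SIZE
	 * block for queuing bios against each bitmap block.
	 */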
4579 | if (ic->mode == 'B') { |
4580 | unsigned int i; |
4581 | unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE); |
4582 | |
		ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4584 | if (!ic->recalc_bitmap) { |
4585 | r = -ENOMEM; |
4586 | goto bad; |
4587 | } |
		ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4589 | if (!ic->may_write_bitmap) { |
4590 | r = -ENOMEM; |
4591 | goto bad; |
4592 | } |
		ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
4594 | if (!ic->bbs) { |
4595 | r = -ENOMEM; |
4596 | goto bad; |
4597 | } |
4598 | INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work); |
4599 | for (i = 0; i < ic->n_bitmap_blocks; i++) { |
4600 | struct bitmap_block_status *bbs = &ic->bbs[i]; |
4601 | unsigned int sector, pl_index, pl_offset; |
4602 | |
4603 | INIT_WORK(&bbs->work, bitmap_block_work); |
4604 | bbs->ic = ic; |
4605 | bbs->idx = i; |
			bio_list_init(&bbs->bio_queue);
4607 | spin_lock_init(&bbs->bio_queue_lock); |
4608 | |
4609 | sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT); |
4610 | pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT); |
4611 | pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1); |
4612 | |
			bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
4614 | } |
4615 | } |
4616 | |
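	/*
	 * A freshly formatted device gets a fully initialized journal and the
	 * new superblock written with FUA before any I/O is accepted.
	 */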
4617 | if (should_write_sb) { |
		init_journal(ic, 0, ic->journal_sections, 0);
4619 | r = dm_integrity_failed(ic); |
4620 | if (unlikely(r)) { |
			ti->error = "Error initializing journal";
4622 | goto bad; |
4623 | } |
		r = sync_rw_sb(ic, REQ_OP_WRITE | REQ_FUA);
4625 | if (r) { |
			ti->error = "Error initializing superblock";
4627 | goto bad; |
4628 | } |
4629 | ic->just_formatted = true; |
4630 | } |
4631 | |
4632 | if (!ic->meta_dev) { |
		r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
4634 | if (r) |
4635 | goto bad; |
4636 | } |
4637 | if (ic->mode == 'B') { |
4638 | unsigned int max_io_len; |
4639 | |
4640 | max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8); |
4641 | if (!max_io_len) |
4642 | max_io_len = 1U << 31; |
		DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
		if (!ti->max_io_len || ti->max_io_len > max_io_len) {
			r = dm_set_target_max_io_len(ti, max_io_len);
4646 | if (r) |
4647 | goto bad; |
4648 | } |
4649 | } |
4650 | |
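	/*
	 * Without an internal hash the tags come from an upper layer, so
	 * register this target's integrity profile for bio-integrity pairing.
	 */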
4651 | if (!ic->internal_hash) |
4652 | dm_integrity_set(ti, ic); |
4653 | |
4654 | ti->num_flush_bios = 1; |
4655 | ti->flush_supported = true; |
4656 | if (ic->discard) |
4657 | ti->num_discard_bios = 1; |
4658 | |
	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);
4660 | return 0; |
4661 | |
4662 | bad: |
	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0);
4664 | dm_integrity_dtr(ti); |
4665 | return r; |
4666 | } |
4667 | |
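/*
 * Tear down everything the constructor may have allocated. This is also
 * called from the constructor's error path, so every release below must
 * tolerate a partially initialized *ic.
 */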
4668 | static void dm_integrity_dtr(struct dm_target *ti) |
4669 | { |
4670 | struct dm_integrity_c *ic = ti->private; |
4671 | |
4672 | BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); |
4673 | BUG_ON(!list_empty(&ic->wait_list)); |
4674 | |
4675 | if (ic->mode == 'B') |
		cancel_delayed_work_sync(&ic->bitmap_flush_work);
	if (ic->metadata_wq)
		destroy_workqueue(ic->metadata_wq);
	if (ic->wait_wq)
		destroy_workqueue(ic->wait_wq);
	if (ic->offload_wq)
		destroy_workqueue(ic->offload_wq);
	if (ic->commit_wq)
		destroy_workqueue(ic->commit_wq);
	if (ic->writer_wq)
		destroy_workqueue(ic->writer_wq);
	if (ic->recalc_wq)
		destroy_workqueue(ic->recalc_wq);
	kvfree(ic->bbs);
	if (ic->bufio)
		dm_bufio_client_destroy(ic->bufio);
	mempool_exit(&ic->recheck_pool);
	mempool_exit(&ic->journal_io_mempool);
	if (ic->io)
		dm_io_client_destroy(ic->io);
	if (ic->dev)
		dm_put_device(ti, ic->dev);
	if (ic->meta_dev)
		dm_put_device(ti, ic->meta_dev);
	dm_integrity_free_page_list(ic->journal);
	dm_integrity_free_page_list(ic->journal_io);
	dm_integrity_free_page_list(ic->journal_xor);
	dm_integrity_free_page_list(ic->recalc_bitmap);
	dm_integrity_free_page_list(ic->may_write_bitmap);
	if (ic->journal_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
	if (ic->journal_io_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
	if (ic->sk_requests) {
		unsigned int i;

		for (i = 0; i < ic->journal_sections; i++) {
			struct skcipher_request *req;

			req = ic->sk_requests[i];
			if (req) {
				kfree_sensitive(req->iv);
				skcipher_request_free(req);
			}
		}
		kvfree(ic->sk_requests);
	}
	kvfree(ic->journal_tree);
	if (ic->sb)
		free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);

	if (ic->internal_hash)
		crypto_free_shash(ic->internal_hash);
	free_alg(&ic->internal_hash_alg);

	if (ic->journal_crypt)
		crypto_free_skcipher(ic->journal_crypt);
	free_alg(&ic->journal_crypt_alg);

	if (ic->journal_mac)
		crypto_free_shash(ic->journal_mac);
	free_alg(&ic->journal_mac_alg);

	kfree(ic);
	dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1);
4741 | } |
4742 | |
4743 | static struct target_type integrity_target = { |
	.name = "integrity",
4745 | .version = {1, 11, 0}, |
4746 | .module = THIS_MODULE, |
4747 | .features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY, |
4748 | .ctr = dm_integrity_ctr, |
4749 | .dtr = dm_integrity_dtr, |
4750 | .map = dm_integrity_map, |
4751 | .postsuspend = dm_integrity_postsuspend, |
4752 | .resume = dm_integrity_resume, |
4753 | .status = dm_integrity_status, |
4754 | .iterate_devices = dm_integrity_iterate_devices, |
4755 | .io_hints = dm_integrity_io_hints, |
4756 | }; |
4757 | |
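/*
 * Module init: create the slab cache for per-bio journal I/O tracking,
 * then register the target; destroy the cache if registration fails.
 */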
4758 | static int __init dm_integrity_init(void) |
4759 | { |
4760 | int r; |
4761 | |
	journal_io_cache = kmem_cache_create("integrity_journal_io",
					     sizeof(struct journal_io), 0, 0, NULL);
	if (!journal_io_cache) {
		DMERR("can't allocate journal io cache");
4766 | return -ENOMEM; |
4767 | } |
4768 | |
	r = dm_register_target(&integrity_target);
	if (r < 0) {
		kmem_cache_destroy(journal_io_cache);
4772 | return r; |
4773 | } |
4774 | |
4775 | return 0; |
4776 | } |
4777 | |
4778 | static void __exit dm_integrity_exit(void) |
4779 | { |
	dm_unregister_target(&integrity_target);
	kmem_cache_destroy(journal_io_cache);
4782 | } |
4783 | |
4784 | module_init(dm_integrity_init); |
4785 | module_exit(dm_integrity_exit); |
4786 | |
MODULE_AUTHOR("Milan Broz");
MODULE_AUTHOR("Mikulas Patocka");
MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
MODULE_LICENSE("GPL");
4791 | |