1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2012 Red Hat, Inc. |
4 | * |
5 | * This file is released under the GPL. |
6 | */ |
7 | |
8 | #include "dm-cache-metadata.h" |
9 | |
10 | #include "persistent-data/dm-array.h" |
11 | #include "persistent-data/dm-bitset.h" |
12 | #include "persistent-data/dm-space-map.h" |
13 | #include "persistent-data/dm-space-map-disk.h" |
14 | #include "persistent-data/dm-transaction-manager.h" |
15 | |
16 | #include <linux/device-mapper.h> |
17 | #include <linux/refcount.h> |
18 | |
19 | /*----------------------------------------------------------------*/ |
20 | |
21 | #define DM_MSG_PREFIX "cache metadata" |
22 | |
23 | #define CACHE_SUPERBLOCK_MAGIC 06142003 |
24 | #define CACHE_SUPERBLOCK_LOCATION 0 |
25 | |
26 | /* |
27 | * defines a range of metadata versions that this module can handle. |
28 | */ |
29 | #define MIN_CACHE_VERSION 1 |
30 | #define MAX_CACHE_VERSION 2 |
31 | |
32 | /* |
33 | * 3 for btree insert + |
34 | * 2 for btree lookup used within space map |
35 | */ |
36 | #define CACHE_MAX_CONCURRENT_LOCKS 5 |
37 | #define SPACE_MAP_ROOT_SIZE 128 |
38 | |
/* Flag bits stored in the superblock's on-disk 'flags' word. */
enum superblock_flag_bits {
	/* for spotting crashes that would invalidate the dirty bitset */
	CLEAN_SHUTDOWN,
	/* metadata must be checked using the tools */
	NEEDS_CHECK,
};
45 | |
46 | /* |
47 | * Each mapping from cache block -> origin block carries a set of flags. |
48 | */ |
49 | enum mapping_bits { |
50 | /* |
51 | * A valid mapping. Because we're using an array we clear this |
52 | * flag for an non existant mapping. |
53 | */ |
54 | M_VALID = 1, |
55 | |
56 | /* |
57 | * The data on the cache is different from that on the origin. |
58 | * This flag is only used by metadata format 1. |
59 | */ |
60 | M_DIRTY = 2 |
61 | }; |
62 | |
/*
 * On-disk layout of the cache superblock, all fields little-endian.
 * Must not exceed a 512-byte sector (asserted in __commit_transaction()).
 */
struct cache_disk_superblock {
	__le32 csum;		/* checksum of everything from 'flags' onwards */
	__le32 flags;		/* enum superblock_flag_bits */
	__le64 blocknr;		/* this block's own location, checked on read */

	__u8 uuid[16];
	__le64 magic;		/* CACHE_SUPERBLOCK_MAGIC */
	__le32 version;		/* metadata format version */

	__u8 policy_name[CACHE_POLICY_NAME_SIZE];
	__le32 policy_hint_size;

	/* serialized metadata space map, copied in via __copy_sm_root() */
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
	__le64 mapping_root;	/* root of cblock -> oblock array */
	__le64 hint_root;	/* root of per-cblock policy hint array */

	__le64 discard_root;	/* root of the discard bitset */
	__le64 discard_block_size;
	__le64 discard_nr_blocks;

	__le32 data_block_size;
	__le32 metadata_block_size;
	__le32 cache_blocks;

	__le32 compat_flags;
	__le32 compat_ro_flags;
	__le32 incompat_flags;

	/* persisted hit/miss statistics */
	__le32 read_hits;
	__le32 read_misses;
	__le32 write_hits;
	__le32 write_misses;

	__le32 policy_version[CACHE_POLICY_VERSION_SIZE];

	/*
	 * Metadata format 2 fields.
	 */
	__le64 dirty_root;	/* root of the separate dirty bitset */
} __packed;
103 | |
/*
 * In-core state for one open cache metadata device.  Instances are
 * ref-counted and kept on the global 'table' list so that two targets
 * opening the same bdev share a single object.
 */
struct dm_cache_metadata {
	refcount_t ref_count;
	struct list_head list;	/* entry in the global 'table' list */

	unsigned int version;	/* metadata format version in use */
	struct block_device *bdev;
	struct dm_block_manager *bm;
	struct dm_space_map *metadata_sm;
	struct dm_transaction_manager *tm;

	struct dm_array_info info;	/* mapping array (cblock -> packed value) */
	struct dm_array_info hint_info;	/* policy hint array */
	struct dm_disk_bitset discard_info;

	struct rw_semaphore root_lock;	/* guards the fields below */
	unsigned long flags;		/* cached superblock flags */
	dm_block_t root;		/* mapping array root */
	dm_block_t hint_root;
	dm_block_t discard_root;

	sector_t discard_block_size;
	dm_dblock_t discard_nr_blocks;

	sector_t data_block_size;
	dm_cblock_t cache_blocks;
	bool changed:1;			/* uncommitted changes pending? */
	bool clean_when_opened:1;	/* CLEAN_SHUTDOWN was set at open time */

	char policy_name[CACHE_POLICY_NAME_SIZE];
	unsigned int policy_version[CACHE_POLICY_VERSION_SIZE];
	size_t policy_hint_size;
	struct dm_cache_statistics stats;

	/*
	 * Reading the space map root can fail, so we read it into this
	 * buffer before the superblock is locked and updated.
	 */
	__u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];

	/*
	 * Set if a transaction has to be aborted but the attempt to roll
	 * back to the previous (good) transaction failed.  The only
	 * metadata operation permissible in this state is the closing of
	 * the device.
	 */
	bool fail_io:1;

	/*
	 * Metadata format 2 fields.
	 */
	dm_block_t dirty_root;
	struct dm_disk_bitset dirty_info;

	/*
	 * These structures are used when loading metadata.  They're too
	 * big to put on the stack.
	 */
	struct dm_array_cursor mapping_cursor;
	struct dm_array_cursor hint_cursor;
	struct dm_bitset_cursor dirty_cursor;
};
165 | |
166 | /* |
167 | *----------------------------------------------------------------- |
168 | * superblock validator |
169 | *----------------------------------------------------------------- |
170 | */ |
171 | #define SUPERBLOCK_CSUM_XOR 9031977 |
172 | |
173 | static void sb_prepare_for_write(struct dm_block_validator *v, |
174 | struct dm_block *b, |
175 | size_t sb_block_size) |
176 | { |
177 | struct cache_disk_superblock *disk_super = dm_block_data(b); |
178 | |
179 | disk_super->blocknr = cpu_to_le64(dm_block_location(b)); |
180 | disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags, |
181 | sb_block_size - sizeof(__le32), |
182 | SUPERBLOCK_CSUM_XOR)); |
183 | } |
184 | |
185 | static int check_metadata_version(struct cache_disk_superblock *disk_super) |
186 | { |
187 | uint32_t metadata_version = le32_to_cpu(disk_super->version); |
188 | |
189 | if (metadata_version < MIN_CACHE_VERSION || metadata_version > MAX_CACHE_VERSION) { |
190 | DMERR("Cache metadata version %u found, but only versions between %u and %u supported." , |
191 | metadata_version, MIN_CACHE_VERSION, MAX_CACHE_VERSION); |
192 | return -EINVAL; |
193 | } |
194 | |
195 | return 0; |
196 | } |
197 | |
198 | static int sb_check(struct dm_block_validator *v, |
199 | struct dm_block *b, |
200 | size_t sb_block_size) |
201 | { |
202 | struct cache_disk_superblock *disk_super = dm_block_data(b); |
203 | __le32 csum_le; |
204 | |
205 | if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) { |
206 | DMERR("%s failed: blocknr %llu: wanted %llu" , |
207 | __func__, le64_to_cpu(disk_super->blocknr), |
208 | (unsigned long long)dm_block_location(b)); |
209 | return -ENOTBLK; |
210 | } |
211 | |
212 | if (le64_to_cpu(disk_super->magic) != CACHE_SUPERBLOCK_MAGIC) { |
213 | DMERR("%s failed: magic %llu: wanted %llu" , |
214 | __func__, le64_to_cpu(disk_super->magic), |
215 | (unsigned long long)CACHE_SUPERBLOCK_MAGIC); |
216 | return -EILSEQ; |
217 | } |
218 | |
219 | csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags, |
220 | sb_block_size - sizeof(__le32), |
221 | SUPERBLOCK_CSUM_XOR)); |
222 | if (csum_le != disk_super->csum) { |
223 | DMERR("%s failed: csum %u: wanted %u" , |
224 | __func__, le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum)); |
225 | return -EILSEQ; |
226 | } |
227 | |
228 | return check_metadata_version(disk_super); |
229 | } |
230 | |
/* Validator passed to the block manager for all superblock accesses. */
static struct dm_block_validator sb_validator = {
	.name = "superblock" ,
	.prepare_for_write = sb_prepare_for_write,
	.check = sb_check
};
236 | |
237 | /*----------------------------------------------------------------*/ |
238 | |
239 | static int superblock_read_lock(struct dm_cache_metadata *cmd, |
240 | struct dm_block **sblock) |
241 | { |
242 | return dm_bm_read_lock(bm: cmd->bm, CACHE_SUPERBLOCK_LOCATION, |
243 | v: &sb_validator, result: sblock); |
244 | } |
245 | |
246 | static int superblock_lock_zero(struct dm_cache_metadata *cmd, |
247 | struct dm_block **sblock) |
248 | { |
249 | return dm_bm_write_lock_zero(bm: cmd->bm, CACHE_SUPERBLOCK_LOCATION, |
250 | v: &sb_validator, result: sblock); |
251 | } |
252 | |
253 | static int superblock_lock(struct dm_cache_metadata *cmd, |
254 | struct dm_block **sblock) |
255 | { |
256 | return dm_bm_write_lock(bm: cmd->bm, CACHE_SUPERBLOCK_LOCATION, |
257 | v: &sb_validator, result: sblock); |
258 | } |
259 | |
260 | /*----------------------------------------------------------------*/ |
261 | |
262 | static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result) |
263 | { |
264 | int r; |
265 | unsigned int i; |
266 | struct dm_block *b; |
267 | __le64 *data_le, zero = cpu_to_le64(0); |
268 | unsigned int sb_block_size = dm_bm_block_size(bm) / sizeof(__le64); |
269 | |
270 | /* |
271 | * We can't use a validator here - it may be all zeroes. |
272 | */ |
273 | r = dm_bm_read_lock(bm, CACHE_SUPERBLOCK_LOCATION, NULL, result: &b); |
274 | if (r) |
275 | return r; |
276 | |
277 | data_le = dm_block_data(b); |
278 | *result = true; |
279 | for (i = 0; i < sb_block_size; i++) { |
280 | if (data_le[i] != zero) { |
281 | *result = false; |
282 | break; |
283 | } |
284 | } |
285 | |
286 | dm_bm_unlock(b); |
287 | |
288 | return 0; |
289 | } |
290 | |
291 | static void __setup_mapping_info(struct dm_cache_metadata *cmd) |
292 | { |
293 | struct dm_btree_value_type vt; |
294 | |
295 | vt.context = NULL; |
296 | vt.size = sizeof(__le64); |
297 | vt.inc = NULL; |
298 | vt.dec = NULL; |
299 | vt.equal = NULL; |
300 | dm_array_info_init(info: &cmd->info, tm: cmd->tm, vt: &vt); |
301 | |
302 | if (cmd->policy_hint_size) { |
303 | vt.size = sizeof(__le32); |
304 | dm_array_info_init(info: &cmd->hint_info, tm: cmd->tm, vt: &vt); |
305 | } |
306 | } |
307 | |
308 | static int __save_sm_root(struct dm_cache_metadata *cmd) |
309 | { |
310 | int r; |
311 | size_t metadata_len; |
312 | |
313 | r = dm_sm_root_size(sm: cmd->metadata_sm, result: &metadata_len); |
314 | if (r < 0) |
315 | return r; |
316 | |
317 | return dm_sm_copy_root(sm: cmd->metadata_sm, copy_to_here_le: &cmd->metadata_space_map_root, |
318 | len: metadata_len); |
319 | } |
320 | |
321 | static void __copy_sm_root(struct dm_cache_metadata *cmd, |
322 | struct cache_disk_superblock *disk_super) |
323 | { |
324 | memcpy(&disk_super->metadata_space_map_root, |
325 | &cmd->metadata_space_map_root, |
326 | sizeof(cmd->metadata_space_map_root)); |
327 | } |
328 | |
329 | static bool separate_dirty_bits(struct dm_cache_metadata *cmd) |
330 | { |
331 | return cmd->version >= 2; |
332 | } |
333 | |
/*
 * Write the very first superblock for a freshly formatted device and
 * commit the transaction.  The empty mapping/hint/discard (and, for
 * format 2, dirty) roots must already have been created by the caller.
 */
static int __write_initial_superblock(struct dm_cache_metadata *cmd)
{
	int r;
	struct dm_block *sblock;
	struct cache_disk_superblock *disk_super;
	sector_t bdev_size = bdev_nr_sectors(bdev: cmd->bdev);

	/* FIXME: see if we can lose the max sectors limit */
	/* NOTE(review): bdev_size is clamped but not otherwise used below —
	 * confirm whether the clamp is still needed. */
	if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS)
		bdev_size = DM_CACHE_METADATA_MAX_SECTORS;

	r = dm_tm_pre_commit(tm: cmd->tm);
	if (r < 0)
		return r;

	/*
	 * dm_sm_copy_root() can fail. So we need to do it before we start
	 * updating the superblock.
	 */
	r = __save_sm_root(cmd);
	if (r)
		return r;

	r = superblock_lock_zero(cmd, sblock: &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(b: sblock);
	disk_super->flags = 0;
	memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
	disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC);
	disk_super->version = cpu_to_le32(cmd->version);
	memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
	memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
	disk_super->policy_hint_size = cpu_to_le32(0);

	__copy_sm_root(cmd, disk_super);

	disk_super->mapping_root = cpu_to_le64(cmd->root);
	disk_super->hint_root = cpu_to_le64(cmd->hint_root);
	disk_super->discard_root = cpu_to_le64(cmd->discard_root);
	disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
	disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
	disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE);
	disk_super->data_block_size = cpu_to_le32(cmd->data_block_size);
	disk_super->cache_blocks = cpu_to_le32(0);

	/* A new device starts with zeroed statistics. */
	disk_super->read_hits = cpu_to_le32(0);
	disk_super->read_misses = cpu_to_le32(0);
	disk_super->write_hits = cpu_to_le32(0);
	disk_super->write_misses = cpu_to_le32(0);

	if (separate_dirty_bits(cmd))
		disk_super->dirty_root = cpu_to_le64(cmd->dirty_root);

	/* Commit writes the superblock out and releases sblock. */
	return dm_tm_commit(tm: cmd->tm, superblock: sblock);
}
391 | |
/*
 * Format a brand-new metadata device: create the transaction manager and
 * space map, then empty mapping, dirty (format 2) and discard structures,
 * and finally write the initial superblock.  On failure everything
 * created here is torn down.
 */
static int __format_metadata(struct dm_cache_metadata *cmd)
{
	int r;

	r = dm_tm_create_with_sm(bm: cmd->bm, CACHE_SUPERBLOCK_LOCATION,
				 tm: &cmd->tm, sm: &cmd->metadata_sm);
	if (r < 0) {
		DMERR("tm_create_with_sm failed" );
		return r;
	}

	__setup_mapping_info(cmd);

	/* Empty cblock -> oblock mapping array. */
	r = dm_array_empty(info: &cmd->info, root: &cmd->root);
	if (r < 0)
		goto bad;

	if (separate_dirty_bits(cmd)) {
		dm_disk_bitset_init(tm: cmd->tm, info: &cmd->dirty_info);
		r = dm_bitset_empty(info: &cmd->dirty_info, new_root: &cmd->dirty_root);
		if (r < 0)
			goto bad;
	}

	dm_disk_bitset_init(tm: cmd->tm, info: &cmd->discard_info);
	r = dm_bitset_empty(info: &cmd->discard_info, new_root: &cmd->discard_root);
	if (r < 0)
		goto bad;

	/* Discard geometry is configured later by the target. */
	cmd->discard_block_size = 0;
	cmd->discard_nr_blocks = 0;

	r = __write_initial_superblock(cmd);
	if (r)
		goto bad;

	cmd->clean_when_opened = true;
	return 0;

bad:
	dm_tm_destroy(tm: cmd->tm);
	dm_sm_destroy(sm: cmd->metadata_sm);

	return r;
}
437 | |
438 | static int __check_incompat_features(struct cache_disk_superblock *disk_super, |
439 | struct dm_cache_metadata *cmd) |
440 | { |
441 | uint32_t incompat_flags, features; |
442 | |
443 | incompat_flags = le32_to_cpu(disk_super->incompat_flags); |
444 | features = incompat_flags & ~DM_CACHE_FEATURE_INCOMPAT_SUPP; |
445 | if (features) { |
446 | DMERR("could not access metadata due to unsupported optional features (%lx)." , |
447 | (unsigned long)features); |
448 | return -EINVAL; |
449 | } |
450 | |
451 | /* |
452 | * Check for read-only metadata to skip the following RDWR checks. |
453 | */ |
454 | if (bdev_read_only(bdev: cmd->bdev)) |
455 | return 0; |
456 | |
457 | features = le32_to_cpu(disk_super->compat_ro_flags) & ~DM_CACHE_FEATURE_COMPAT_RO_SUPP; |
458 | if (features) { |
459 | DMERR("could not access metadata RDWR due to unsupported optional features (%lx)." , |
460 | (unsigned long)features); |
461 | return -EINVAL; |
462 | } |
463 | |
464 | return 0; |
465 | } |
466 | |
/*
 * Open previously formatted metadata: validate the superblock against the
 * expected data block size and feature flags, then attach the transaction
 * manager / space map and initialise the bitset infos.
 */
static int __open_metadata(struct dm_cache_metadata *cmd)
{
	int r;
	struct dm_block *sblock;
	struct cache_disk_superblock *disk_super;
	unsigned long sb_flags;

	r = superblock_read_lock(cmd, sblock: &sblock);
	if (r < 0) {
		DMERR("couldn't read lock superblock" );
		return r;
	}

	disk_super = dm_block_data(b: sblock);

	/* Verify the data block size hasn't changed */
	if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
		DMERR("changing the data block size (from %u to %llu) is not supported" ,
		      le32_to_cpu(disk_super->data_block_size),
		      (unsigned long long)cmd->data_block_size);
		r = -EINVAL;
		goto bad;
	}

	r = __check_incompat_features(disk_super, cmd);
	if (r < 0)
		goto bad;

	r = dm_tm_open_with_sm(bm: cmd->bm, CACHE_SUPERBLOCK_LOCATION,
			       sm_root: disk_super->metadata_space_map_root,
			       root_len: sizeof(disk_super->metadata_space_map_root),
			       tm: &cmd->tm, sm: &cmd->metadata_sm);
	if (r < 0) {
		DMERR("tm_open_with_sm failed" );
		goto bad;
	}

	__setup_mapping_info(cmd);
	dm_disk_bitset_init(tm: cmd->tm, info: &cmd->dirty_info);
	dm_disk_bitset_init(tm: cmd->tm, info: &cmd->discard_info);
	/* Copy flags to a local so test_bit() gets a writable address. */
	sb_flags = le32_to_cpu(disk_super->flags);
	cmd->clean_when_opened = test_bit(CLEAN_SHUTDOWN, &sb_flags);
	dm_bm_unlock(b: sblock);

	return 0;

bad:
	dm_bm_unlock(b: sblock);
	return r;
}
517 | |
518 | static int __open_or_format_metadata(struct dm_cache_metadata *cmd, |
519 | bool format_device) |
520 | { |
521 | int r; |
522 | bool unformatted = false; |
523 | |
524 | r = __superblock_all_zeroes(bm: cmd->bm, result: &unformatted); |
525 | if (r) |
526 | return r; |
527 | |
528 | if (unformatted) |
529 | return format_device ? __format_metadata(cmd) : -EPERM; |
530 | |
531 | return __open_metadata(cmd); |
532 | } |
533 | |
534 | static int __create_persistent_data_objects(struct dm_cache_metadata *cmd, |
535 | bool may_format_device) |
536 | { |
537 | int r; |
538 | |
539 | cmd->bm = dm_block_manager_create(bdev: cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT, |
540 | CACHE_MAX_CONCURRENT_LOCKS); |
541 | if (IS_ERR(ptr: cmd->bm)) { |
542 | DMERR("could not create block manager" ); |
543 | r = PTR_ERR(ptr: cmd->bm); |
544 | cmd->bm = NULL; |
545 | return r; |
546 | } |
547 | |
548 | r = __open_or_format_metadata(cmd, format_device: may_format_device); |
549 | if (r) { |
550 | dm_block_manager_destroy(bm: cmd->bm); |
551 | cmd->bm = NULL; |
552 | } |
553 | |
554 | return r; |
555 | } |
556 | |
557 | static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd, |
558 | bool destroy_bm) |
559 | { |
560 | dm_sm_destroy(sm: cmd->metadata_sm); |
561 | dm_tm_destroy(tm: cmd->tm); |
562 | if (destroy_bm) |
563 | dm_block_manager_destroy(bm: cmd->bm); |
564 | } |
565 | |
/* Transforms a set of superblock flags; applied under the superblock lock. */
typedef unsigned long (*flags_mutator)(unsigned long);
567 | |
568 | static void update_flags(struct cache_disk_superblock *disk_super, |
569 | flags_mutator mutator) |
570 | { |
571 | uint32_t sb_flags = mutator(le32_to_cpu(disk_super->flags)); |
572 | |
573 | disk_super->flags = cpu_to_le32(sb_flags); |
574 | } |
575 | |
576 | static unsigned long set_clean_shutdown(unsigned long flags) |
577 | { |
578 | set_bit(nr: CLEAN_SHUTDOWN, addr: &flags); |
579 | return flags; |
580 | } |
581 | |
582 | static unsigned long clear_clean_shutdown(unsigned long flags) |
583 | { |
584 | clear_bit(nr: CLEAN_SHUTDOWN, addr: &flags); |
585 | return flags; |
586 | } |
587 | |
/*
 * Populate the in-core cmd fields from an on-disk superblock, converting
 * from little-endian.  Clears 'changed' since core state now matches disk.
 */
static void read_superblock_fields(struct dm_cache_metadata *cmd,
				   struct cache_disk_superblock *disk_super)
{
	cmd->version = le32_to_cpu(disk_super->version);
	cmd->flags = le32_to_cpu(disk_super->flags);
	cmd->root = le64_to_cpu(disk_super->mapping_root);
	cmd->hint_root = le64_to_cpu(disk_super->hint_root);
	cmd->discard_root = le64_to_cpu(disk_super->discard_root);
	cmd->discard_block_size = le64_to_cpu(disk_super->discard_block_size);
	cmd->discard_nr_blocks = to_dblock(le64_to_cpu(disk_super->discard_nr_blocks));
	cmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
	cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks));
	strscpy(p: cmd->policy_name, q: disk_super->policy_name, size: sizeof(cmd->policy_name));
	cmd->policy_version[0] = le32_to_cpu(disk_super->policy_version[0]);
	cmd->policy_version[1] = le32_to_cpu(disk_super->policy_version[1]);
	cmd->policy_version[2] = le32_to_cpu(disk_super->policy_version[2]);
	cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size);

	cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits);
	cmd->stats.read_misses = le32_to_cpu(disk_super->read_misses);
	cmd->stats.write_hits = le32_to_cpu(disk_super->write_hits);
	cmd->stats.write_misses = le32_to_cpu(disk_super->write_misses);

	/* dirty_root only exists on disk for metadata format 2. */
	if (separate_dirty_bits(cmd))
		cmd->dirty_root = le64_to_cpu(disk_super->dirty_root);

	cmd->changed = false;
}
616 | |
617 | /* |
618 | * The mutator updates the superblock flags. |
619 | */ |
620 | static int __begin_transaction_flags(struct dm_cache_metadata *cmd, |
621 | flags_mutator mutator) |
622 | { |
623 | int r; |
624 | struct cache_disk_superblock *disk_super; |
625 | struct dm_block *sblock; |
626 | |
627 | r = superblock_lock(cmd, sblock: &sblock); |
628 | if (r) |
629 | return r; |
630 | |
631 | disk_super = dm_block_data(b: sblock); |
632 | update_flags(disk_super, mutator); |
633 | read_superblock_fields(cmd, disk_super); |
634 | dm_bm_unlock(b: sblock); |
635 | |
636 | return dm_bm_flush(bm: cmd->bm); |
637 | } |
638 | |
/* Begin a transaction by re-loading the in-core state from the superblock. */
static int __begin_transaction(struct dm_cache_metadata *cmd)
{
	struct dm_block *sblock;
	int r;

	/*
	 * We re-read the superblock every time.  Shouldn't need to do this
	 * really.
	 */
	r = superblock_read_lock(cmd, &sblock);
	if (r)
		return r;

	read_superblock_fields(cmd, dm_block_data(sblock));
	dm_bm_unlock(sblock);

	return 0;
}
659 | |
/*
 * Commit the current transaction: flush the dirty (format 2) and discard
 * bitsets, pre-commit the transaction manager, snapshot the space map
 * root, then rewrite the superblock from in-core state and commit.  An
 * optional mutator is applied to the flags while the superblock is held.
 */
static int __commit_transaction(struct dm_cache_metadata *cmd,
				flags_mutator mutator)
{
	int r;
	struct cache_disk_superblock *disk_super;
	struct dm_block *sblock;

	/*
	 * We need to know if the cache_disk_superblock exceeds a 512-byte sector.
	 */
	BUILD_BUG_ON(sizeof(struct cache_disk_superblock) > 512);

	if (separate_dirty_bits(cmd)) {
		r = dm_bitset_flush(info: &cmd->dirty_info, root: cmd->dirty_root,
				    new_root: &cmd->dirty_root);
		if (r)
			return r;
	}

	r = dm_bitset_flush(info: &cmd->discard_info, root: cmd->discard_root,
			    new_root: &cmd->discard_root);
	if (r)
		return r;

	r = dm_tm_pre_commit(tm: cmd->tm);
	if (r < 0)
		return r;

	/* Copy the space map root before locking; the copy can fail. */
	r = __save_sm_root(cmd);
	if (r)
		return r;

	r = superblock_lock(cmd, sblock: &sblock);
	if (r)
		return r;

	disk_super = dm_block_data(b: sblock);

	disk_super->flags = cpu_to_le32(cmd->flags);
	if (mutator)
		update_flags(disk_super, mutator);

	disk_super->mapping_root = cpu_to_le64(cmd->root);
	if (separate_dirty_bits(cmd))
		disk_super->dirty_root = cpu_to_le64(cmd->dirty_root);
	disk_super->hint_root = cpu_to_le64(cmd->hint_root);
	disk_super->discard_root = cpu_to_le64(cmd->discard_root);
	disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
	disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
	disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks));
	strscpy(p: disk_super->policy_name, q: cmd->policy_name, size: sizeof(disk_super->policy_name));
	disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
	disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
	disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
	disk_super->policy_hint_size = cpu_to_le32(cmd->policy_hint_size);

	disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
	disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
	disk_super->write_hits = cpu_to_le32(cmd->stats.write_hits);
	disk_super->write_misses = cpu_to_le32(cmd->stats.write_misses);
	__copy_sm_root(cmd, disk_super);

	/* Commit writes the superblock out and releases sblock. */
	return dm_tm_commit(tm: cmd->tm, superblock: sblock);
}
724 | |
725 | /*----------------------------------------------------------------*/ |
726 | |
727 | /* |
728 | * The mappings are held in a dm-array that has 64-bit values stored in |
729 | * little-endian format. The index is the cblock, the high 48bits of the |
730 | * value are the oblock and the low 16 bit the flags. |
731 | */ |
732 | #define FLAGS_MASK ((1 << 16) - 1) |
733 | |
734 | static __le64 pack_value(dm_oblock_t block, unsigned int flags) |
735 | { |
736 | uint64_t value = from_oblock(b: block); |
737 | |
738 | value <<= 16; |
739 | value = value | (flags & FLAGS_MASK); |
740 | return cpu_to_le64(value); |
741 | } |
742 | |
743 | static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned int *flags) |
744 | { |
745 | uint64_t value = le64_to_cpu(value_le); |
746 | uint64_t b = value >> 16; |
747 | |
748 | *block = to_oblock(b); |
749 | *flags = value & FLAGS_MASK; |
750 | } |
751 | |
752 | /*----------------------------------------------------------------*/ |
753 | |
/*
 * Allocate and fully initialise a dm_cache_metadata object for bdev,
 * opening (or formatting, if permitted) the on-disk metadata and
 * beginning the first transaction.  Returns ERR_PTR on failure.
 */
static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
					       sector_t data_block_size,
					       bool may_format_device,
					       size_t policy_hint_size,
					       unsigned int metadata_version)
{
	int r;
	struct dm_cache_metadata *cmd;

	cmd = kzalloc(size: sizeof(*cmd), GFP_KERNEL);
	if (!cmd) {
		DMERR("could not allocate metadata struct" );
		return ERR_PTR(error: -ENOMEM);
	}

	cmd->version = metadata_version;
	refcount_set(r: &cmd->ref_count, n: 1);
	init_rwsem(&cmd->root_lock);
	cmd->bdev = bdev;
	cmd->data_block_size = data_block_size;
	cmd->cache_blocks = 0;
	cmd->policy_hint_size = policy_hint_size;
	cmd->changed = true;
	cmd->fail_io = false;

	r = __create_persistent_data_objects(cmd, may_format_device);
	if (r) {
		kfree(objp: cmd);
		return ERR_PTR(error: r);
	}

	/* Clear CLEAN_SHUTDOWN while the device is live. */
	r = __begin_transaction_flags(cmd, mutator: clear_clean_shutdown);
	if (r < 0) {
		/*
		 * NOTE(review): cmd has not been added to 'table' yet, but
		 * dm_cache_metadata_close() does list_del(&cmd->list) on a
		 * zero-initialised list_head — verify this path is safe.
		 */
		dm_cache_metadata_close(cmd);
		return ERR_PTR(error: r);
	}

	return cmd;
}
793 | |
794 | /* |
795 | * We keep a little list of ref counted metadata objects to prevent two |
796 | * different target instances creating separate bufio instances. This is |
797 | * an issue if a table is reloaded before the suspend. |
798 | */ |
static DEFINE_MUTEX(table_lock);	/* protects 'table' below */
static LIST_HEAD(table);		/* all open dm_cache_metadata objects */
801 | |
802 | static struct dm_cache_metadata *lookup(struct block_device *bdev) |
803 | { |
804 | struct dm_cache_metadata *cmd; |
805 | |
806 | list_for_each_entry(cmd, &table, list) |
807 | if (cmd->bdev == bdev) { |
808 | refcount_inc(r: &cmd->ref_count); |
809 | return cmd; |
810 | } |
811 | |
812 | return NULL; |
813 | } |
814 | |
/*
 * Return the shared metadata object for bdev, opening a new one if none
 * exists.  The open happens outside table_lock, so we must re-check for
 * a concurrently created object before inserting ours.
 */
static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
						sector_t data_block_size,
						bool may_format_device,
						size_t policy_hint_size,
						unsigned int metadata_version)
{
	struct dm_cache_metadata *cmd, *cmd2;

	mutex_lock(&table_lock);
	cmd = lookup(bdev);
	mutex_unlock(lock: &table_lock);

	if (cmd)
		return cmd;

	cmd = metadata_open(bdev, data_block_size, may_format_device,
			    policy_hint_size, metadata_version);
	if (!IS_ERR(ptr: cmd)) {
		mutex_lock(&table_lock);
		cmd2 = lookup(bdev);
		if (cmd2) {
			/* Lost the race: discard ours, use the winner's. */
			mutex_unlock(lock: &table_lock);
			__destroy_persistent_data_objects(cmd, destroy_bm: true);
			kfree(objp: cmd);
			return cmd2;
		}
		list_add(new: &cmd->list, head: &table);
		mutex_unlock(lock: &table_lock);
	}

	return cmd;
}
847 | |
848 | static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size) |
849 | { |
850 | if (cmd->data_block_size != data_block_size) { |
851 | DMERR("data_block_size (%llu) different from that in metadata (%llu)" , |
852 | (unsigned long long) data_block_size, |
853 | (unsigned long long) cmd->data_block_size); |
854 | return false; |
855 | } |
856 | |
857 | return true; |
858 | } |
859 | |
860 | struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev, |
861 | sector_t data_block_size, |
862 | bool may_format_device, |
863 | size_t policy_hint_size, |
864 | unsigned int metadata_version) |
865 | { |
866 | struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, may_format_device, |
867 | policy_hint_size, metadata_version); |
868 | |
869 | if (!IS_ERR(ptr: cmd) && !same_params(cmd, data_block_size)) { |
870 | dm_cache_metadata_close(cmd); |
871 | return ERR_PTR(error: -EINVAL); |
872 | } |
873 | |
874 | return cmd; |
875 | } |
876 | |
877 | void dm_cache_metadata_close(struct dm_cache_metadata *cmd) |
878 | { |
879 | if (refcount_dec_and_test(r: &cmd->ref_count)) { |
880 | mutex_lock(&table_lock); |
881 | list_del(entry: &cmd->list); |
882 | mutex_unlock(lock: &table_lock); |
883 | |
884 | if (!cmd->fail_io) |
885 | __destroy_persistent_data_objects(cmd, destroy_bm: true); |
886 | kfree(objp: cmd); |
887 | } |
888 | } |
889 | |
890 | /* |
891 | * Checks that the given cache block is either unmapped or clean. |
892 | */ |
893 | static int block_clean_combined_dirty(struct dm_cache_metadata *cmd, dm_cblock_t b, |
894 | bool *result) |
895 | { |
896 | int r; |
897 | __le64 value; |
898 | dm_oblock_t ob; |
899 | unsigned int flags; |
900 | |
901 | r = dm_array_get_value(info: &cmd->info, root: cmd->root, index: from_cblock(b), value: &value); |
902 | if (r) |
903 | return r; |
904 | |
905 | unpack_value(value_le: value, block: &ob, flags: &flags); |
906 | *result = !((flags & M_VALID) && (flags & M_DIRTY)); |
907 | |
908 | return 0; |
909 | } |
910 | |
911 | static int blocks_are_clean_combined_dirty(struct dm_cache_metadata *cmd, |
912 | dm_cblock_t begin, dm_cblock_t end, |
913 | bool *result) |
914 | { |
915 | int r; |
916 | *result = true; |
917 | |
918 | while (begin != end) { |
919 | r = block_clean_combined_dirty(cmd, b: begin, result); |
920 | if (r) { |
921 | DMERR("block_clean_combined_dirty failed" ); |
922 | return r; |
923 | } |
924 | |
925 | if (!*result) { |
926 | DMERR("cache block %llu is dirty" , |
927 | (unsigned long long) from_cblock(begin)); |
928 | return 0; |
929 | } |
930 | |
931 | begin = to_cblock(b: from_cblock(b: begin) + 1); |
932 | } |
933 | |
934 | return 0; |
935 | } |
936 | |
/*
 * Format 2 scan: walk the separate dirty bitset over [begin, end) using
 * the preallocated cursor; the cursor must always be ended before return.
 */
static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
					   dm_cblock_t begin, dm_cblock_t end,
					   bool *result)
{
	int r;
	bool dirty_flag;
	*result = true;

	if (from_cblock(b: cmd->cache_blocks) == 0)
		/* Nothing to do */
		return 0;

	r = dm_bitset_cursor_begin(info: &cmd->dirty_info, root: cmd->dirty_root,
				   nr_entries: from_cblock(b: cmd->cache_blocks), c: &cmd->dirty_cursor);
	if (r) {
		DMERR("%s: dm_bitset_cursor_begin for dirty failed" , __func__);
		return r;
	}

	/* Position the cursor at 'begin'. */
	r = dm_bitset_cursor_skip(c: &cmd->dirty_cursor, count: from_cblock(b: begin));
	if (r) {
		DMERR("%s: dm_bitset_cursor_skip for dirty failed" , __func__);
		dm_bitset_cursor_end(c: &cmd->dirty_cursor);
		return r;
	}

	while (begin != end) {
		/*
		 * We assume that unmapped blocks have their dirty bit
		 * cleared.
		 */
		dirty_flag = dm_bitset_cursor_get_value(c: &cmd->dirty_cursor);
		if (dirty_flag) {
			DMERR("%s: cache block %llu is dirty" , __func__,
			      (unsigned long long) from_cblock(begin));
			dm_bitset_cursor_end(c: &cmd->dirty_cursor);
			*result = false;
			return 0;
		}

		begin = to_cblock(b: from_cblock(b: begin) + 1);
		/* Don't step the cursor past the last block we examined. */
		if (begin == end)
			break;

		r = dm_bitset_cursor_next(c: &cmd->dirty_cursor);
		if (r) {
			DMERR("%s: dm_bitset_cursor_next for dirty failed" , __func__);
			dm_bitset_cursor_end(c: &cmd->dirty_cursor);
			return r;
		}
	}

	dm_bitset_cursor_end(c: &cmd->dirty_cursor);

	return 0;
}
993 | |
994 | static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd, |
995 | dm_cblock_t begin, dm_cblock_t end, |
996 | bool *result) |
997 | { |
998 | if (separate_dirty_bits(cmd)) |
999 | return blocks_are_clean_separate_dirty(cmd, begin, end, result); |
1000 | else |
1001 | return blocks_are_clean_combined_dirty(cmd, begin, end, result); |
1002 | } |
1003 | |
1004 | static bool cmd_write_lock(struct dm_cache_metadata *cmd) |
1005 | { |
1006 | down_write(sem: &cmd->root_lock); |
1007 | if (cmd->fail_io || dm_bm_is_read_only(bm: cmd->bm)) { |
1008 | up_write(sem: &cmd->root_lock); |
1009 | return false; |
1010 | } |
1011 | return true; |
1012 | } |
1013 | |
/*
 * Take the metadata write lock, or bail out of the *calling* function
 * with -EINVAL if the metadata is failed/read-only.  Note the hidden
 * 'return' inside the macro.
 */
#define WRITE_LOCK(cmd) \
	do { \
		if (!cmd_write_lock((cmd))) \
			return -EINVAL; \
	} while (0)

/* As WRITE_LOCK, but for functions returning void. */
#define WRITE_LOCK_VOID(cmd) \
	do { \
		if (!cmd_write_lock((cmd))) \
			return; \
	} while (0)

#define WRITE_UNLOCK(cmd) \
	up_write(&(cmd)->root_lock)
1028 | |
1029 | static bool cmd_read_lock(struct dm_cache_metadata *cmd) |
1030 | { |
1031 | down_read(sem: &cmd->root_lock); |
1032 | if (cmd->fail_io) { |
1033 | up_read(sem: &cmd->root_lock); |
1034 | return false; |
1035 | } |
1036 | return true; |
1037 | } |
1038 | |
/*
 * Take the metadata read lock, or bail out of the *calling* function
 * with -EINVAL if the metadata has failed.  Contains a hidden 'return'.
 */
#define READ_LOCK(cmd) \
	do { \
		if (!cmd_read_lock((cmd))) \
			return -EINVAL; \
	} while (0)

/* As READ_LOCK, but for functions returning void. */
#define READ_LOCK_VOID(cmd) \
	do { \
		if (!cmd_read_lock((cmd))) \
			return; \
	} while (0)

#define READ_UNLOCK(cmd) \
	up_read(&(cmd)->root_lock)
1053 | |
/*
 * Grow or shrink the mapping array (and, for format 2, the dirty
 * bitset) to new_cache_size blocks.  Shrinking is refused with -EINVAL
 * if any block being dropped is dirty.  New entries are initialised to
 * the null (invalid) mapping.
 */
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
{
	int r;
	bool clean;
	__le64 null_mapping = pack_value(block: 0, flags: 0);

	WRITE_LOCK(cmd);
	/*
	 * bless/unbless bracket the on-disk endian value; every early
	 * exit before dm_array_resize() consumes it must unbless.
	 */
	__dm_bless_for_disk(&null_mapping);

	if (from_cblock(b: new_cache_size) < from_cblock(b: cmd->cache_blocks)) {
		/* Shrinking: the truncated tail must be clean. */
		r = blocks_are_unmapped_or_clean(cmd, begin: new_cache_size, end: cmd->cache_blocks, result: &clean);
		if (r) {
			__dm_unbless_for_disk(&null_mapping);
			goto out;
		}

		if (!clean) {
			DMERR("unable to shrink cache due to dirty blocks" );
			r = -EINVAL;
			__dm_unbless_for_disk(&null_mapping);
			goto out;
		}
	}

	r = dm_array_resize(info: &cmd->info, root: cmd->root, old_size: from_cblock(b: cmd->cache_blocks),
			    new_size: from_cblock(b: new_cache_size),
			    value: &null_mapping, new_root: &cmd->root);
	if (r)
		goto out;

	if (separate_dirty_bits(cmd)) {
		/* Format 2: keep the separate dirty bitset in step. */
		r = dm_bitset_resize(info: &cmd->dirty_info, old_root: cmd->dirty_root,
				     old_nr_entries: from_cblock(b: cmd->cache_blocks), new_nr_entries: from_cblock(b: new_cache_size),
				     default_value: false, new_root: &cmd->dirty_root);
		if (r)
			goto out;
	}

	cmd->cache_blocks = new_cache_size;
	cmd->changed = true;

out:
	WRITE_UNLOCK(cmd);

	return r;
}
1100 | |
1101 | int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd, |
1102 | sector_t discard_block_size, |
1103 | dm_dblock_t new_nr_entries) |
1104 | { |
1105 | int r; |
1106 | |
1107 | WRITE_LOCK(cmd); |
1108 | r = dm_bitset_resize(info: &cmd->discard_info, |
1109 | old_root: cmd->discard_root, |
1110 | old_nr_entries: from_dblock(b: cmd->discard_nr_blocks), |
1111 | new_nr_entries: from_dblock(b: new_nr_entries), |
1112 | default_value: false, new_root: &cmd->discard_root); |
1113 | if (!r) { |
1114 | cmd->discard_block_size = discard_block_size; |
1115 | cmd->discard_nr_blocks = new_nr_entries; |
1116 | } |
1117 | |
1118 | cmd->changed = true; |
1119 | WRITE_UNLOCK(cmd); |
1120 | |
1121 | return r; |
1122 | } |
1123 | |
/* Set the discard bit for block b.  Caller holds the write lock. */
static int __set_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
{
	return dm_bitset_set_bit(info: &cmd->discard_info, root: cmd->discard_root,
				 index: from_dblock(b), new_root: &cmd->discard_root);
}
1129 | |
/* Clear the discard bit for block b.  Caller holds the write lock. */
static int __clear_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
{
	return dm_bitset_clear_bit(info: &cmd->discard_info, root: cmd->discard_root,
				   index: from_dblock(b), new_root: &cmd->discard_root);
}
1135 | |
1136 | static int __discard(struct dm_cache_metadata *cmd, |
1137 | dm_dblock_t dblock, bool discard) |
1138 | { |
1139 | int r; |
1140 | |
1141 | r = (discard ? __set_discard : __clear_discard)(cmd, dblock); |
1142 | if (r) |
1143 | return r; |
1144 | |
1145 | cmd->changed = true; |
1146 | return 0; |
1147 | } |
1148 | |
/* Locked wrapper around __discard(). */
int dm_cache_set_discard(struct dm_cache_metadata *cmd,
			 dm_dblock_t dblock, bool discard)
{
	int r;

	WRITE_LOCK(cmd);
	r = __discard(cmd, dblock, discard);
	WRITE_UNLOCK(cmd);

	return r;
}
1160 | |
/*
 * Replay the discard bitset into the caller via fn().
 *
 * If the previous shutdown was unclean the on-disk bits cannot be
 * trusted, so every block is reported as not-discarded instead of
 * reading the bitset.
 */
static int __load_discards(struct dm_cache_metadata *cmd,
			   load_discard_fn fn, void *context)
{
	int r = 0;
	uint32_t b;
	struct dm_bitset_cursor c;

	if (from_dblock(b: cmd->discard_nr_blocks) == 0)
		/* nothing to do */
		return 0;

	if (cmd->clean_when_opened) {
		/* Flush pending bitset changes before cursoring over it. */
		r = dm_bitset_flush(info: &cmd->discard_info, root: cmd->discard_root, new_root: &cmd->discard_root);
		if (r)
			return r;

		r = dm_bitset_cursor_begin(info: &cmd->discard_info, root: cmd->discard_root,
					   nr_entries: from_dblock(b: cmd->discard_nr_blocks), c: &c);
		if (r)
			return r;

		for (b = 0; ; b++) {
			r = fn(context, cmd->discard_block_size, to_dblock(b),
			       dm_bitset_cursor_get_value(c: &c));
			if (r)
				break;

			/* Stop before stepping the cursor past the end. */
			if (b >= (from_dblock(b: cmd->discard_nr_blocks) - 1))
				break;

			r = dm_bitset_cursor_next(c: &c);
			if (r)
				break;
		}

		dm_bitset_cursor_end(c: &c);

	} else {
		/* Unclean shutdown: conservatively report no discards. */
		for (b = 0; b < from_dblock(b: cmd->discard_nr_blocks); b++) {
			r = fn(context, cmd->discard_block_size, to_dblock(b), false);
			if (r)
				return r;
		}
	}

	return r;
}
1208 | |
/* Locked wrapper around __load_discards(). */
int dm_cache_load_discards(struct dm_cache_metadata *cmd,
			   load_discard_fn fn, void *context)
{
	int r;

	READ_LOCK(cmd);
	r = __load_discards(cmd, fn, context);
	READ_UNLOCK(cmd);

	return r;
}
1220 | |
/* Report the current number of cache blocks.  Always returns 0. */
int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result)
{
	READ_LOCK(cmd);
	*result = cmd->cache_blocks;
	READ_UNLOCK(cmd);

	return 0;
}
1229 | |
/*
 * Remove the mapping for cblock by overwriting it with the null value
 * (no M_VALID flag).  Caller holds the write lock.
 */
static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
{
	int r;
	__le64 value = pack_value(block: 0, flags: 0);

	__dm_bless_for_disk(&value);
	r = dm_array_set_value(info: &cmd->info, root: cmd->root, index: from_cblock(b: cblock),
			       value: &value, new_root: &cmd->root);
	if (r)
		return r;

	cmd->changed = true;
	return 0;
}
1244 | |
/* Locked wrapper around __remove(). */
int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
{
	int r;

	WRITE_LOCK(cmd);
	r = __remove(cmd, cblock);
	WRITE_UNLOCK(cmd);

	return r;
}
1255 | |
/*
 * Record a cache-block -> origin-block mapping (flags = M_VALID).
 * Caller holds the write lock.
 */
static int __insert(struct dm_cache_metadata *cmd,
		    dm_cblock_t cblock, dm_oblock_t oblock)
{
	int r;
	__le64 value = pack_value(block: oblock, flags: M_VALID);

	__dm_bless_for_disk(&value);

	r = dm_array_set_value(info: &cmd->info, root: cmd->root, index: from_cblock(b: cblock),
			       value: &value, new_root: &cmd->root);
	if (r)
		return r;

	cmd->changed = true;
	return 0;
}
1272 | |
/* Locked wrapper around __insert(). */
int dm_cache_insert_mapping(struct dm_cache_metadata *cmd,
			    dm_cblock_t cblock, dm_oblock_t oblock)
{
	int r;

	WRITE_LOCK(cmd);
	r = __insert(cmd, cblock, oblock);
	WRITE_UNLOCK(cmd);

	return r;
}
1284 | |
/*
 * Argument bundle for walking the mapping array.
 *
 * NOTE(review): nothing in this portion of the file instantiates
 * struct thunk; it looks like a leftover from an older
 * dm_array_walk()-based loader — confirm before removing.
 */
struct thunk {
	load_mapping_fn fn;
	void *context;

	struct dm_cache_metadata *cmd;
	bool respect_dirty_flags;
	bool hints_valid;
};
1293 | |
/*
 * Decide whether the policy recorded in the metadata matches the
 * currently-loaded policy (name, major version and hint size); if not,
 * any stored hints belong to a different policy and must be discarded.
 */
static bool policy_unchanged(struct dm_cache_metadata *cmd,
			     struct dm_cache_policy *policy)
{
	const char *policy_name = dm_cache_policy_get_name(p: policy);
	const unsigned int *policy_version = dm_cache_policy_get_version(p: policy);
	size_t policy_hint_size = dm_cache_policy_get_hint_size(p: policy);

	/*
	 * Ensure policy names match.
	 */
	if (strncmp(cmd->policy_name, policy_name, sizeof(cmd->policy_name)))
		return false;

	/*
	 * Ensure policy major versions match.
	 */
	if (cmd->policy_version[0] != policy_version[0])
		return false;

	/*
	 * Ensure policy hint sizes match.
	 */
	if (cmd->policy_hint_size != policy_hint_size)
		return false;

	return true;
}
1321 | |
1322 | static bool hints_array_initialized(struct dm_cache_metadata *cmd) |
1323 | { |
1324 | return cmd->hint_root && cmd->policy_hint_size; |
1325 | } |
1326 | |
1327 | static bool hints_array_available(struct dm_cache_metadata *cmd, |
1328 | struct dm_cache_policy *policy) |
1329 | { |
1330 | return cmd->clean_when_opened && policy_unchanged(cmd, policy) && |
1331 | hints_array_initialized(cmd); |
1332 | } |
1333 | |
/*
 * Feed one mapping to the policy callback, format 1 layout: the dirty
 * state comes from the M_DIRTY flag packed in the mapping itself.
 * Invalid (unmapped) entries are silently skipped.  After an unclean
 * shutdown every valid block is assumed dirty.
 */
static int __load_mapping_v1(struct dm_cache_metadata *cmd,
			     uint64_t cb, bool hints_valid,
			     struct dm_array_cursor *mapping_cursor,
			     struct dm_array_cursor *hint_cursor,
			     load_mapping_fn fn, void *context)
{
	int r = 0;

	__le64 mapping;
	__le32 hint = 0;

	__le64 *mapping_value_le;
	__le32 *hint_value_le;

	dm_oblock_t oblock;
	unsigned int flags;
	bool dirty = true;

	/* Copy out of the btree block before unpacking. */
	dm_array_cursor_get_value(c: mapping_cursor, value_le: (void **) &mapping_value_le);
	memcpy(&mapping, mapping_value_le, sizeof(mapping));
	unpack_value(value_le: mapping, block: &oblock, flags: &flags);

	if (flags & M_VALID) {
		if (hints_valid) {
			dm_array_cursor_get_value(c: hint_cursor, value_le: (void **) &hint_value_le);
			memcpy(&hint, hint_value_le, sizeof(hint));
		}
		if (cmd->clean_when_opened)
			dirty = flags & M_DIRTY;

		r = fn(context, oblock, to_cblock(b: cb), dirty,
		       le32_to_cpu(hint), hints_valid);
		if (r) {
			DMERR("policy couldn't load cache block %llu" ,
			      (unsigned long long) from_cblock(to_cblock(cb)));
		}
	}

	return r;
}
1374 | |
/*
 * Feed one mapping to the policy callback, format 2 layout: the dirty
 * state comes from the separate dirty bitset via dirty_cursor rather
 * than the mapping flags.  Invalid entries are silently skipped; after
 * an unclean shutdown every valid block is assumed dirty.
 */
static int __load_mapping_v2(struct dm_cache_metadata *cmd,
			     uint64_t cb, bool hints_valid,
			     struct dm_array_cursor *mapping_cursor,
			     struct dm_array_cursor *hint_cursor,
			     struct dm_bitset_cursor *dirty_cursor,
			     load_mapping_fn fn, void *context)
{
	int r = 0;

	__le64 mapping;
	__le32 hint = 0;

	__le64 *mapping_value_le;
	__le32 *hint_value_le;

	dm_oblock_t oblock;
	unsigned int flags;
	bool dirty = true;

	/* Copy out of the btree block before unpacking. */
	dm_array_cursor_get_value(c: mapping_cursor, value_le: (void **) &mapping_value_le);
	memcpy(&mapping, mapping_value_le, sizeof(mapping));
	unpack_value(value_le: mapping, block: &oblock, flags: &flags);

	if (flags & M_VALID) {
		if (hints_valid) {
			dm_array_cursor_get_value(c: hint_cursor, value_le: (void **) &hint_value_le);
			memcpy(&hint, hint_value_le, sizeof(hint));
		}
		if (cmd->clean_when_opened)
			dirty = dm_bitset_cursor_get_value(c: dirty_cursor);

		r = fn(context, oblock, to_cblock(b: cb), dirty,
		       le32_to_cpu(hint), hints_valid);
		if (r) {
			DMERR("policy couldn't load cache block %llu" ,
			      (unsigned long long) from_cblock(to_cblock(cb)));
		}
	}

	return r;
}
1416 | |
1417 | static int __load_mappings(struct dm_cache_metadata *cmd, |
1418 | struct dm_cache_policy *policy, |
1419 | load_mapping_fn fn, void *context) |
1420 | { |
1421 | int r; |
1422 | uint64_t cb; |
1423 | |
1424 | bool hints_valid = hints_array_available(cmd, policy); |
1425 | |
1426 | if (from_cblock(b: cmd->cache_blocks) == 0) |
1427 | /* Nothing to do */ |
1428 | return 0; |
1429 | |
1430 | r = dm_array_cursor_begin(info: &cmd->info, root: cmd->root, c: &cmd->mapping_cursor); |
1431 | if (r) |
1432 | return r; |
1433 | |
1434 | if (hints_valid) { |
1435 | r = dm_array_cursor_begin(info: &cmd->hint_info, root: cmd->hint_root, c: &cmd->hint_cursor); |
1436 | if (r) { |
1437 | dm_array_cursor_end(c: &cmd->mapping_cursor); |
1438 | return r; |
1439 | } |
1440 | } |
1441 | |
1442 | if (separate_dirty_bits(cmd)) { |
1443 | r = dm_bitset_cursor_begin(info: &cmd->dirty_info, root: cmd->dirty_root, |
1444 | nr_entries: from_cblock(b: cmd->cache_blocks), |
1445 | c: &cmd->dirty_cursor); |
1446 | if (r) { |
1447 | dm_array_cursor_end(c: &cmd->hint_cursor); |
1448 | dm_array_cursor_end(c: &cmd->mapping_cursor); |
1449 | return r; |
1450 | } |
1451 | } |
1452 | |
1453 | for (cb = 0; ; cb++) { |
1454 | if (separate_dirty_bits(cmd)) |
1455 | r = __load_mapping_v2(cmd, cb, hints_valid, |
1456 | mapping_cursor: &cmd->mapping_cursor, |
1457 | hint_cursor: &cmd->hint_cursor, |
1458 | dirty_cursor: &cmd->dirty_cursor, |
1459 | fn, context); |
1460 | else |
1461 | r = __load_mapping_v1(cmd, cb, hints_valid, |
1462 | mapping_cursor: &cmd->mapping_cursor, hint_cursor: &cmd->hint_cursor, |
1463 | fn, context); |
1464 | if (r) |
1465 | goto out; |
1466 | |
1467 | /* |
1468 | * We need to break out before we move the cursors. |
1469 | */ |
1470 | if (cb >= (from_cblock(b: cmd->cache_blocks) - 1)) |
1471 | break; |
1472 | |
1473 | r = dm_array_cursor_next(c: &cmd->mapping_cursor); |
1474 | if (r) { |
1475 | DMERR("dm_array_cursor_next for mapping failed" ); |
1476 | goto out; |
1477 | } |
1478 | |
1479 | if (hints_valid) { |
1480 | r = dm_array_cursor_next(c: &cmd->hint_cursor); |
1481 | if (r) { |
1482 | dm_array_cursor_end(c: &cmd->hint_cursor); |
1483 | hints_valid = false; |
1484 | } |
1485 | } |
1486 | |
1487 | if (separate_dirty_bits(cmd)) { |
1488 | r = dm_bitset_cursor_next(c: &cmd->dirty_cursor); |
1489 | if (r) { |
1490 | DMERR("dm_bitset_cursor_next for dirty failed" ); |
1491 | goto out; |
1492 | } |
1493 | } |
1494 | } |
1495 | out: |
1496 | dm_array_cursor_end(c: &cmd->mapping_cursor); |
1497 | if (hints_valid) |
1498 | dm_array_cursor_end(c: &cmd->hint_cursor); |
1499 | |
1500 | if (separate_dirty_bits(cmd)) |
1501 | dm_bitset_cursor_end(c: &cmd->dirty_cursor); |
1502 | |
1503 | return r; |
1504 | } |
1505 | |
/* Locked wrapper around __load_mappings(). */
int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
			   struct dm_cache_policy *policy,
			   load_mapping_fn fn, void *context)
{
	int r;

	READ_LOCK(cmd);
	r = __load_mappings(cmd, policy, fn, context);
	READ_UNLOCK(cmd);

	return r;
}
1518 | |
/*
 * dm_array_walk() callback used by dm_cache_dump().
 *
 * NOTE(review): the unpacked oblock/flags are currently discarded —
 * presumably a debug print was removed or is meant to be added here;
 * as written the walk only validates that each leaf can be unpacked.
 */
static int __dump_mapping(void *context, uint64_t cblock, void *leaf)
{
	__le64 value;
	dm_oblock_t oblock;
	unsigned int flags;

	memcpy(&value, leaf, sizeof(value));
	unpack_value(value_le: value, block: &oblock, flags: &flags);

	return 0;
}
1530 | |
/* Walk the whole mapping array through __dump_mapping(). */
static int __dump_mappings(struct dm_cache_metadata *cmd)
{
	return dm_array_walk(info: &cmd->info, root: cmd->root, fn: __dump_mapping, NULL);
}
1535 | |
/* Debug helper: walk all mappings under the read lock. */
void dm_cache_dump(struct dm_cache_metadata *cmd)
{
	READ_LOCK_VOID(cmd);
	__dump_mappings(cmd);
	READ_UNLOCK(cmd);
}
1542 | |
/*
 * Returns non-zero if the metadata has been modified since the current
 * transaction began (i.e. a commit would write something).
 */
int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
{
	int r;

	READ_LOCK(cmd);
	r = cmd->changed;
	READ_UNLOCK(cmd);

	return r;
}
1553 | |
1554 | static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty) |
1555 | { |
1556 | int r; |
1557 | unsigned int flags; |
1558 | dm_oblock_t oblock; |
1559 | __le64 value; |
1560 | |
1561 | r = dm_array_get_value(info: &cmd->info, root: cmd->root, index: from_cblock(b: cblock), value: &value); |
1562 | if (r) |
1563 | return r; |
1564 | |
1565 | unpack_value(value_le: value, block: &oblock, flags: &flags); |
1566 | |
1567 | if (((flags & M_DIRTY) && dirty) || (!(flags & M_DIRTY) && !dirty)) |
1568 | /* nothing to be done */ |
1569 | return 0; |
1570 | |
1571 | value = pack_value(block: oblock, flags: (flags & ~M_DIRTY) | (dirty ? M_DIRTY : 0)); |
1572 | __dm_bless_for_disk(&value); |
1573 | |
1574 | r = dm_array_set_value(info: &cmd->info, root: cmd->root, index: from_cblock(b: cblock), |
1575 | value: &value, new_root: &cmd->root); |
1576 | if (r) |
1577 | return r; |
1578 | |
1579 | cmd->changed = true; |
1580 | return 0; |
1581 | |
1582 | } |
1583 | |
/*
 * Format 1: push each bit of the caller's bitmap into the
 * corresponding mapping's M_DIRTY flag, one block at a time.
 */
static int __set_dirty_bits_v1(struct dm_cache_metadata *cmd, unsigned int nr_bits, unsigned long *bits)
{
	unsigned int i;

	for (i = 0; i < nr_bits; i++) {
		int r = __dirty(cmd, to_cblock(i), test_bit(i, bits));

		if (r)
			return r;
	}

	return 0;
}
1597 | |
1598 | static int is_dirty_callback(uint32_t index, bool *value, void *context) |
1599 | { |
1600 | unsigned long *bits = context; |
1601 | *value = test_bit(index, bits); |
1602 | return 0; |
1603 | } |
1604 | |
/*
 * Format 2: replace the whole on-disk dirty bitset with the caller's
 * bitmap.  Deleting and recreating via is_dirty_callback() is cheaper
 * than setting bits one at a time.
 */
static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned int nr_bits, unsigned long *bits)
{
	int r = 0;

	/* nr_bits is really just a sanity check */
	if (nr_bits != from_cblock(b: cmd->cache_blocks)) {
		DMERR("dirty bitset is wrong size" );
		return -EINVAL;
	}

	/* Old bitset must be deleted before dirty_root is overwritten. */
	r = dm_bitset_del(info: &cmd->dirty_info, root: cmd->dirty_root);
	if (r)
		return r;

	cmd->changed = true;
	return dm_bitset_new(info: &cmd->dirty_info, root: &cmd->dirty_root, size: nr_bits, fn: is_dirty_callback, context: bits);
}
1622 | |
/*
 * Persist the in-core dirty bitmap, choosing the per-format strategy
 * under the write lock.
 */
int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
			    unsigned int nr_bits,
			    unsigned long *bits)
{
	int r;

	WRITE_LOCK(cmd);
	if (separate_dirty_bits(cmd))
		r = __set_dirty_bits_v2(cmd, nr_bits, bits);
	else
		r = __set_dirty_bits_v1(cmd, nr_bits, bits);
	WRITE_UNLOCK(cmd);

	return r;
}
1638 | |
/* Copy out the cached hit/miss statistics under the read lock. */
void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
				 struct dm_cache_statistics *stats)
{
	READ_LOCK_VOID(cmd);
	*stats = cmd->stats;
	READ_UNLOCK(cmd);
}
1646 | |
/* Overwrite the cached statistics under the write lock. */
void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
				 struct dm_cache_statistics *stats)
{
	WRITE_LOCK_VOID(cmd);
	cmd->stats = *stats;
	WRITE_UNLOCK(cmd);
}
1654 | |
/*
 * Commit the current transaction and immediately begin a new one.
 * clean_shutdown selects whether the CLEAN_SHUTDOWN superblock flag is
 * set, which controls whether dirty bits / hints are trusted on the
 * next open.
 */
int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
{
	int r = -EINVAL;
	flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
				 clear_clean_shutdown);

	WRITE_LOCK(cmd);
	if (cmd->fail_io)
		goto out;

	r = __commit_transaction(cmd, mutator);
	if (r)
		goto out;

	r = __begin_transaction(cmd);
out:
	WRITE_UNLOCK(cmd);
	return r;
}
1674 | |
1675 | int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd, |
1676 | dm_block_t *result) |
1677 | { |
1678 | int r = -EINVAL; |
1679 | |
1680 | READ_LOCK(cmd); |
1681 | if (!cmd->fail_io) |
1682 | r = dm_sm_get_nr_free(sm: cmd->metadata_sm, count: result); |
1683 | READ_UNLOCK(cmd); |
1684 | |
1685 | return r; |
1686 | } |
1687 | |
1688 | int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd, |
1689 | dm_block_t *result) |
1690 | { |
1691 | int r = -EINVAL; |
1692 | |
1693 | READ_LOCK(cmd); |
1694 | if (!cmd->fail_io) |
1695 | r = dm_sm_get_nr_blocks(sm: cmd->metadata_sm, count: result); |
1696 | READ_UNLOCK(cmd); |
1697 | |
1698 | return r; |
1699 | } |
1700 | |
1701 | /*----------------------------------------------------------------*/ |
1702 | |
1703 | static int get_hint(uint32_t index, void *value_le, void *context) |
1704 | { |
1705 | uint32_t value; |
1706 | struct dm_cache_policy *policy = context; |
1707 | |
1708 | value = policy_get_hint(p: policy, cblock: to_cblock(b: index)); |
1709 | *((__le32 *) value_le) = cpu_to_le32(value); |
1710 | |
1711 | return 0; |
1712 | } |
1713 | |
1714 | /* |
1715 | * It's quicker to always delete the hint array, and recreate with |
1716 | * dm_array_new(). |
1717 | */ |
1718 | static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy) |
1719 | { |
1720 | int r; |
1721 | size_t hint_size; |
1722 | const char *policy_name = dm_cache_policy_get_name(p: policy); |
1723 | const unsigned int *policy_version = dm_cache_policy_get_version(p: policy); |
1724 | |
1725 | if (!policy_name[0] || |
1726 | (strlen(policy_name) > sizeof(cmd->policy_name) - 1)) |
1727 | return -EINVAL; |
1728 | |
1729 | strscpy(p: cmd->policy_name, q: policy_name, size: sizeof(cmd->policy_name)); |
1730 | memcpy(cmd->policy_version, policy_version, sizeof(cmd->policy_version)); |
1731 | |
1732 | hint_size = dm_cache_policy_get_hint_size(p: policy); |
1733 | if (!hint_size) |
1734 | return 0; /* short-circuit hints initialization */ |
1735 | cmd->policy_hint_size = hint_size; |
1736 | |
1737 | if (cmd->hint_root) { |
1738 | r = dm_array_del(info: &cmd->hint_info, root: cmd->hint_root); |
1739 | if (r) |
1740 | return r; |
1741 | } |
1742 | |
1743 | return dm_array_new(info: &cmd->hint_info, root: &cmd->hint_root, |
1744 | size: from_cblock(b: cmd->cache_blocks), |
1745 | fn: get_hint, context: policy); |
1746 | } |
1747 | |
/* Locked wrapper around write_hints(). */
int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
{
	int r;

	WRITE_LOCK(cmd);
	r = write_hints(cmd, policy);
	WRITE_UNLOCK(cmd);

	return r;
}
1758 | |
/* Set *result true iff every cache block is unmapped or clean. */
int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
{
	int r;

	READ_LOCK(cmd);
	r = blocks_are_unmapped_or_clean(cmd, begin: 0, end: cmd->cache_blocks, result);
	READ_UNLOCK(cmd);

	return r;
}
1769 | |
/* Switch the underlying block manager to read-only mode. */
void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd)
{
	WRITE_LOCK_VOID(cmd);
	dm_bm_set_read_only(bm: cmd->bm);
	WRITE_UNLOCK(cmd);
}
1776 | |
/* Switch the underlying block manager back to read-write mode. */
void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd)
{
	WRITE_LOCK_VOID(cmd);
	dm_bm_set_read_write(bm: cmd->bm);
	WRITE_UNLOCK(cmd);
}
1783 | |
/*
 * Set the NEEDS_CHECK flag in core and push it straight into the
 * on-disk superblock, so cache_check is forced on the next activation.
 * If the superblock can't be locked the in-core flag remains set but
 * is not persisted, and the error is returned.
 */
int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
{
	int r;
	struct dm_block *sblock;
	struct cache_disk_superblock *disk_super;

	WRITE_LOCK(cmd);
	set_bit(nr: NEEDS_CHECK, addr: &cmd->flags);

	r = superblock_lock(cmd, sblock: &sblock);
	if (r) {
		DMERR("couldn't read superblock" );
		goto out;
	}

	disk_super = dm_block_data(b: sblock);
	disk_super->flags = cpu_to_le32(cmd->flags);

	dm_bm_unlock(b: sblock);

out:
	WRITE_UNLOCK(cmd);
	return r;
}
1808 | |
1809 | int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result) |
1810 | { |
1811 | READ_LOCK(cmd); |
1812 | *result = !!test_bit(NEEDS_CHECK, &cmd->flags); |
1813 | READ_UNLOCK(cmd); |
1814 | |
1815 | return 0; |
1816 | } |
1817 | |
/*
 * Abandon the current transaction by destroying the persistent-data
 * objects and re-opening the metadata from its last committed state,
 * swapping in a freshly created block manager.  On any failure the
 * metadata is marked fail_io.
 */
int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
{
	int r = -EINVAL;
	struct dm_block_manager *old_bm = NULL, *new_bm = NULL;

	/* fail_io is double-checked with cmd->root_lock held below */
	if (unlikely(cmd->fail_io))
		return r;

	/*
	 * Replacement block manager (new_bm) is created and old_bm destroyed outside of
	 * cmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of
	 * shrinker associated with the block manager's bufio client vs cmd root_lock).
	 * - must take shrinker_mutex without holding cmd->root_lock
	 */
	new_bm = dm_block_manager_create(bdev: cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
					 CACHE_MAX_CONCURRENT_LOCKS);

	WRITE_LOCK(cmd);
	if (cmd->fail_io) {
		WRITE_UNLOCK(cmd);
		goto out;
	}

	/* Tear down tm/sm but keep the old bm alive until after unlock. */
	__destroy_persistent_data_objects(cmd, destroy_bm: false);
	old_bm = cmd->bm;
	if (IS_ERR(ptr: new_bm)) {
		DMERR("could not create block manager during abort" );
		cmd->bm = NULL;
		r = PTR_ERR(ptr: new_bm);
		goto out_unlock;
	}

	cmd->bm = new_bm;
	r = __open_or_format_metadata(cmd, format_device: false);
	if (r) {
		cmd->bm = NULL;
		goto out_unlock;
	}
	/* Ownership transferred to cmd->bm; don't destroy it below. */
	new_bm = NULL;
out_unlock:
	if (r)
		cmd->fail_io = true;
	WRITE_UNLOCK(cmd);
	/* Destroyed outside the lock for the same shrinker reason as above. */
	dm_block_manager_destroy(bm: old_bm);
out:
	if (new_bm && !IS_ERR(ptr: new_bm))
		dm_block_manager_destroy(bm: new_bm);

	return r;
}
1869 | |