// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/sched/mm.h>

#define DM_MSG_PREFIX		"zoned metadata"

/*
 * Metadata version.
 */
#define DMZ_META_VER	2

/*
 * On-disk super block magic.
 */
#define DMZ_MAGIC	((((unsigned int)('D')) << 24) | \
			 (((unsigned int)('Z')) << 16) | \
			 (((unsigned int)('B')) << 8) | \
			 ((unsigned int)('D')))
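
/* The packed 32-bit value reads as "DZBD" when viewed most-significant byte first. */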

/*
 * On-disk super block.
 * Only the first 512 B are used, but the super block occupies a full 4KB
 * block on disk. This block is followed on disk by the mapping table of
 * chunks to zones and the bitmap blocks indicating zone block validity.
 * The overall resulting metadata format is:
 *    (1) Super block (1 block)
 *    (2) Chunk mapping table (nr_map_blocks)
 *    (3) Bitmap blocks (nr_bitmap_blocks)
 * All metadata blocks are stored in conventional zones, starting from
 * the first conventional zone found on disk.
 */
struct dmz_super {
	/* Magic number */
	__le32		magic;			/*   4 */

	/* Metadata version number */
	__le32		version;		/*   8 */

	/* Generation number */
	__le64		gen;			/*  16 */

	/* This block number */
	__le64		sb_block;		/*  24 */

	/* The number of metadata blocks, including this super block */
	__le32		nr_meta_blocks;		/*  28 */

	/* The number of sequential zones reserved for reclaim */
	__le32		nr_reserved_seq;	/*  32 */

	/* The number of entries in the mapping table */
	__le32		nr_chunks;		/*  36 */

	/* The number of blocks used for the chunk mapping table */
	__le32		nr_map_blocks;		/*  40 */

	/* The number of blocks used for the block bitmaps */
	__le32		nr_bitmap_blocks;	/*  44 */

	/* Checksum */
	__le32		crc;			/*  48 */

	/* DM-Zoned label */
	u8		dmz_label[32];		/*  80 */

	/* DM-Zoned UUID */
	u8		dmz_uuid[16];		/*  96 */

	/* Device UUID */
	u8		dev_uuid[16];		/* 112 */

	/* Padding to full 512B sector */
	u8		reserved[400];		/* 512 */
};
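
/*
 * The offsets noted after each field above add up to exactly 512 B; the
 * remaining 3584 B of the 4KB on-disk metadata block are left unused.
 */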

/*
 * Chunk mapping entry: entries are indexed by chunk number
 * and give the zone ID (dzone_id) mapping the chunk on disk.
 * This zone may be sequential or random. If it is a sequential
 * zone, a second zone (bzone_id) used as a write buffer may
 * also be specified. This second zone will always be a randomly
 * writable zone.
 */
struct dmz_map {
	__le32			dzone_id;
	__le32			bzone_id;
};

/*
 * Chunk mapping table metadata: 512 8-byte entries per 4KB block.
 */
#define DMZ_MAP_ENTRIES		(DMZ_BLOCK_SIZE / sizeof(struct dmz_map))
#define DMZ_MAP_ENTRIES_SHIFT	(ilog2(DMZ_MAP_ENTRIES))
#define DMZ_MAP_ENTRIES_MASK	(DMZ_MAP_ENTRIES - 1)
#define DMZ_MAP_UNMAPPED	UINT_MAX
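
/*
 * Illustration: with 4KB blocks and 8-byte entries, DMZ_MAP_ENTRIES is 512,
 * so chunk 1000 is described by entry 1000 & DMZ_MAP_ENTRIES_MASK = 488 of
 * map block 1000 >> DMZ_MAP_ENTRIES_SHIFT = 1, exactly as
 * dmz_set_chunk_mapping() below computes it:
 *
 *	mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
 *	dmap = (struct dmz_map *)mblk->data;
 *	entry = &dmap[chunk & DMZ_MAP_ENTRIES_MASK];
 */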

/*
 * Metadata block descriptor (for cached metadata blocks).
 */
struct dmz_mblock {
	struct rb_node		node;
	struct list_head	link;
	sector_t		no;
	unsigned int		ref;
	unsigned long		state;
	struct page		*page;
	void			*data;
};

/*
 * Metadata block state flags.
 */
enum {
	DMZ_META_DIRTY,
	DMZ_META_READING,
	DMZ_META_WRITING,
	DMZ_META_ERROR,
};
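
/*
 * Protocol used below: the submitter sets DMZ_META_READING or
 * DMZ_META_WRITING before issuing a BIO, and dmz_mblock_bio_end_io()
 * clears the bit and wakes up waiters blocked in wait_on_bit_io().
 * DMZ_META_ERROR records a failed BIO for the waiter to check.
 */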

/*
 * Super block information (one per metadata set).
 */
struct dmz_sb {
	sector_t		block;
	struct dmz_dev		*dev;
	struct dmz_mblock	*mblk;
	struct dmz_super	*sb;
	struct dm_zone		*zone;
};

/*
 * In-memory metadata.
 */
struct dmz_metadata {
	struct dmz_dev		*dev;
	unsigned int		nr_devs;

	char			devname[BDEVNAME_SIZE];
	char			label[BDEVNAME_SIZE];
	uuid_t			uuid;

	sector_t		zone_bitmap_size;
	unsigned int		zone_nr_bitmap_blocks;
	unsigned int		zone_bits_per_mblk;

	sector_t		zone_nr_blocks;
	sector_t		zone_nr_blocks_shift;

	sector_t		zone_nr_sectors;
	sector_t		zone_nr_sectors_shift;

	unsigned int		nr_bitmap_blocks;
	unsigned int		nr_map_blocks;

	unsigned int		nr_zones;
	unsigned int		nr_useable_zones;
	unsigned int		nr_meta_blocks;
	unsigned int		nr_meta_zones;
	unsigned int		nr_data_zones;
	unsigned int		nr_cache_zones;
	unsigned int		nr_rnd_zones;
	unsigned int		nr_reserved_seq;
	unsigned int		nr_chunks;

	/* Zone information array */
	struct xarray		zones;

	struct dmz_sb		sb[2];
	unsigned int		mblk_primary;
	unsigned int		sb_version;
	u64			sb_gen;
	unsigned int		min_nr_mblks;
	unsigned int		max_nr_mblks;
	atomic_t		nr_mblks;
	struct rw_semaphore	mblk_sem;
	struct mutex		mblk_flush_lock;
	spinlock_t		mblk_lock;
	struct rb_root		mblk_rbtree;
	struct list_head	mblk_lru_list;
	struct list_head	mblk_dirty_list;
	struct shrinker		*mblk_shrinker;

	/* Zone allocation management */
	struct mutex		map_lock;
	struct dmz_mblock	**map_mblk;

	unsigned int		nr_cache;
	atomic_t		unmap_nr_cache;
	struct list_head	unmap_cache_list;
	struct list_head	map_cache_list;

	atomic_t		nr_reserved_seq_zones;
	struct list_head	reserved_seq_zones_list;

	wait_queue_head_t	free_wq;
};

#define dmz_zmd_info(zmd, format, args...)	\
	DMINFO("(%s): " format, (zmd)->label, ## args)

#define dmz_zmd_err(zmd, format, args...)	\
	DMERR("(%s): " format, (zmd)->label, ## args)

#define dmz_zmd_warn(zmd, format, args...)	\
	DMWARN("(%s): " format, (zmd)->label, ## args)

#define dmz_zmd_debug(zmd, format, args...)	\
	DMDEBUG("(%s): " format, (zmd)->label, ## args)

/*
 * Various accessors
 */
static unsigned int dmz_dev_zone_id(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	if (WARN_ON(!zone))
		return 0;

	return zone->id - zone->dev->zone_offset;
}

sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	unsigned int zone_id = dmz_dev_zone_id(zmd, zone);

	return (sector_t)zone_id << zmd->zone_nr_sectors_shift;
}

sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	unsigned int zone_id = dmz_dev_zone_id(zmd, zone);

	return (sector_t)zone_id << zmd->zone_nr_blocks_shift;
}

unsigned int dmz_zone_nr_blocks(struct dmz_metadata *zmd)
{
	return zmd->zone_nr_blocks;
}

unsigned int dmz_zone_nr_blocks_shift(struct dmz_metadata *zmd)
{
	return zmd->zone_nr_blocks_shift;
}

unsigned int dmz_zone_nr_sectors(struct dmz_metadata *zmd)
{
	return zmd->zone_nr_sectors;
}

unsigned int dmz_zone_nr_sectors_shift(struct dmz_metadata *zmd)
{
	return zmd->zone_nr_sectors_shift;
}

unsigned int dmz_nr_zones(struct dmz_metadata *zmd)
{
	return zmd->nr_zones;
}

unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
{
	return zmd->nr_chunks;
}

unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd, int idx)
{
	return zmd->dev[idx].nr_rnd;
}

unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd, int idx)
{
	return atomic_read(&zmd->dev[idx].unmap_nr_rnd);
}

unsigned int dmz_nr_cache_zones(struct dmz_metadata *zmd)
{
	return zmd->nr_cache;
}

unsigned int dmz_nr_unmap_cache_zones(struct dmz_metadata *zmd)
{
	return atomic_read(&zmd->unmap_nr_cache);
}

unsigned int dmz_nr_seq_zones(struct dmz_metadata *zmd, int idx)
{
	return zmd->dev[idx].nr_seq;
}

unsigned int dmz_nr_unmap_seq_zones(struct dmz_metadata *zmd, int idx)
{
	return atomic_read(&zmd->dev[idx].unmap_nr_seq);
}

static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
{
	return xa_load(&zmd->zones, zone_id);
}

static struct dm_zone *dmz_insert(struct dmz_metadata *zmd,
				  unsigned int zone_id, struct dmz_dev *dev)
{
	struct dm_zone *zone = kzalloc(sizeof(struct dm_zone), GFP_KERNEL);

	if (!zone)
		return ERR_PTR(-ENOMEM);

	if (xa_insert(&zmd->zones, zone_id, zone, GFP_KERNEL)) {
		kfree(zone);
		return ERR_PTR(-EBUSY);
	}

	INIT_LIST_HEAD(&zone->link);
	atomic_set(&zone->refcount, 0);
	zone->id = zone_id;
	zone->chunk = DMZ_MAP_UNMAPPED;
	zone->dev = dev;

	return zone;
}

const char *dmz_metadata_label(struct dmz_metadata *zmd)
{
	return (const char *)zmd->label;
}

bool dmz_check_dev(struct dmz_metadata *zmd)
{
	unsigned int i;

	for (i = 0; i < zmd->nr_devs; i++) {
		if (!dmz_check_bdev(&zmd->dev[i]))
			return false;
	}
	return true;
}

bool dmz_dev_is_dying(struct dmz_metadata *zmd)
{
	unsigned int i;

	for (i = 0; i < zmd->nr_devs; i++) {
		if (dmz_bdev_is_dying(&zmd->dev[i]))
			return true;
	}
	return false;
}

/*
 * Lock/unlock mapping table.
 * The map lock also protects all the zone lists.
 */
void dmz_lock_map(struct dmz_metadata *zmd)
{
	mutex_lock(&zmd->map_lock);
}

void dmz_unlock_map(struct dmz_metadata *zmd)
{
	mutex_unlock(&zmd->map_lock);
}

/*
 * Lock/unlock metadata access. This is a "read" lock on a semaphore
 * that prevents metadata flush from running while metadata are being
 * modified. The actual metadata write mutual exclusion is achieved with
 * the map lock and zone state management (active and reclaim state are
 * mutually exclusive).
 */
void dmz_lock_metadata(struct dmz_metadata *zmd)
{
	down_read(&zmd->mblk_sem);
}

void dmz_unlock_metadata(struct dmz_metadata *zmd)
{
	up_read(&zmd->mblk_sem);
}

/*
 * Lock/unlock flush: prevent concurrent executions
 * of dmz_flush_metadata as well as metadata modification in reclaim
 * while flush is being executed.
 */
void dmz_lock_flush(struct dmz_metadata *zmd)
{
	mutex_lock(&zmd->mblk_flush_lock);
}

void dmz_unlock_flush(struct dmz_metadata *zmd)
{
	mutex_unlock(&zmd->mblk_flush_lock);
}

/*
 * Allocate a metadata block.
 */
static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
					   sector_t mblk_no)
{
	struct dmz_mblock *mblk = NULL;

	/* See if we can reuse cached blocks */
	if (zmd->max_nr_mblks && atomic_read(&zmd->nr_mblks) > zmd->max_nr_mblks) {
		spin_lock(&zmd->mblk_lock);
		mblk = list_first_entry_or_null(&zmd->mblk_lru_list,
						struct dmz_mblock, link);
		if (mblk) {
			list_del_init(&mblk->link);
			rb_erase(&mblk->node, &zmd->mblk_rbtree);
			mblk->no = mblk_no;
		}
		spin_unlock(&zmd->mblk_lock);
		if (mblk)
			return mblk;
	}

	/* Allocate a new block */
	mblk = kmalloc(sizeof(struct dmz_mblock), GFP_NOIO);
	if (!mblk)
		return NULL;

	mblk->page = alloc_page(GFP_NOIO);
	if (!mblk->page) {
		kfree(mblk);
		return NULL;
	}

	RB_CLEAR_NODE(&mblk->node);
	INIT_LIST_HEAD(&mblk->link);
	mblk->ref = 0;
	mblk->state = 0;
	mblk->no = mblk_no;
	mblk->data = page_address(mblk->page);

	atomic_inc(&zmd->nr_mblks);

	return mblk;
}

/*
 * Free a metadata block.
 */
static void dmz_free_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	__free_pages(mblk->page, 0);
	kfree(mblk);

	atomic_dec(&zmd->nr_mblks);
}

/*
 * Insert a metadata block in the rbtree.
 */
static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	struct rb_root *root = &zmd->mblk_rbtree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct dmz_mblock *b;

	/* Figure out where to put the new node */
	while (*new) {
		b = container_of(*new, struct dmz_mblock, node);
		parent = *new;
		new = (b->no < mblk->no) ? &((*new)->rb_left) : &((*new)->rb_right);
	}

	/* Add new node and rebalance tree */
	rb_link_node(&mblk->node, parent, new);
	rb_insert_color(&mblk->node, root);
}
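
/*
 * Note: the comparison above places larger block numbers in the left
 * subtree. dmz_get_mblock_fast() below walks the tree with the same
 * inverted comparison, so insertion and lookup remain consistent.
 */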

/*
 * Lookup a metadata block in the rbtree. If the block is found, increment
 * its reference count.
 */
static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd,
					      sector_t mblk_no)
{
	struct rb_root *root = &zmd->mblk_rbtree;
	struct rb_node *node = root->rb_node;
	struct dmz_mblock *mblk;

	while (node) {
		mblk = container_of(node, struct dmz_mblock, node);
		if (mblk->no == mblk_no) {
			/*
			 * If this is the first reference to the block,
			 * remove it from the LRU list.
			 */
			mblk->ref++;
			if (mblk->ref == 1 &&
			    !test_bit(DMZ_META_DIRTY, &mblk->state))
				list_del_init(&mblk->link);
			return mblk;
		}
		node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
	}

	return NULL;
}

/*
 * Metadata block BIO end callback.
 */
static void dmz_mblock_bio_end_io(struct bio *bio)
{
	struct dmz_mblock *mblk = bio->bi_private;
	int flag;

	if (bio->bi_status)
		set_bit(DMZ_META_ERROR, &mblk->state);

	if (bio_op(bio) == REQ_OP_WRITE)
		flag = DMZ_META_WRITING;
	else
		flag = DMZ_META_READING;

	clear_bit_unlock(flag, &mblk->state);
	smp_mb__after_atomic();
	wake_up_bit(&mblk->state, flag);

	bio_put(bio);
}

/*
 * Read an uncached metadata block from disk and add it to the cache.
 */
static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
					      sector_t mblk_no)
{
	struct dmz_mblock *mblk, *m;
	sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
	struct dmz_dev *dev = zmd->sb[zmd->mblk_primary].dev;
	struct bio *bio;

	if (dmz_bdev_is_dying(dev))
		return ERR_PTR(-EIO);

	/* Get a new block and a BIO to read it */
	mblk = dmz_alloc_mblock(zmd, mblk_no);
	if (!mblk)
		return ERR_PTR(-ENOMEM);

	bio = bio_alloc(dev->bdev, 1, REQ_OP_READ | REQ_META | REQ_PRIO,
			GFP_NOIO);

	spin_lock(&zmd->mblk_lock);

	/*
	 * Make sure that another context did not start reading
	 * the block already.
	 */
	m = dmz_get_mblock_fast(zmd, mblk_no);
	if (m) {
		spin_unlock(&zmd->mblk_lock);
		dmz_free_mblock(zmd, mblk);
		bio_put(bio);
		return m;
	}

	mblk->ref++;
	set_bit(DMZ_META_READING, &mblk->state);
	dmz_insert_mblock(zmd, mblk);

	spin_unlock(&zmd->mblk_lock);

	/* Submit read BIO */
	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio->bi_private = mblk;
	bio->bi_end_io = dmz_mblock_bio_end_io;
	__bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
	submit_bio(bio);

	return mblk;
}

/*
 * Free metadata blocks.
 */
static unsigned long dmz_shrink_mblock_cache(struct dmz_metadata *zmd,
					     unsigned long limit)
{
	struct dmz_mblock *mblk;
	unsigned long count = 0;

	if (!zmd->max_nr_mblks)
		return 0;

	while (!list_empty(&zmd->mblk_lru_list) &&
	       atomic_read(&zmd->nr_mblks) > zmd->min_nr_mblks &&
	       count < limit) {
		mblk = list_first_entry(&zmd->mblk_lru_list,
					struct dmz_mblock, link);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
		count++;
	}

	return count;
}

/*
 * For mblock shrinker: get the number of unused metadata blocks in the cache.
 */
static unsigned long dmz_mblock_shrinker_count(struct shrinker *shrink,
					       struct shrink_control *sc)
{
	struct dmz_metadata *zmd = shrink->private_data;

	return atomic_read(&zmd->nr_mblks);
}

/*
 * For mblock shrinker: scan unused metadata blocks and shrink the cache.
 */
static unsigned long dmz_mblock_shrinker_scan(struct shrinker *shrink,
					      struct shrink_control *sc)
{
	struct dmz_metadata *zmd = shrink->private_data;
	unsigned long count;

	spin_lock(&zmd->mblk_lock);
	count = dmz_shrink_mblock_cache(zmd, sc->nr_to_scan);
	spin_unlock(&zmd->mblk_lock);

	return count ? count : SHRINK_STOP;
}

/*
 * Release a metadata block.
 */
static void dmz_release_mblock(struct dmz_metadata *zmd,
			       struct dmz_mblock *mblk)
{
	if (!mblk)
		return;

	spin_lock(&zmd->mblk_lock);

	mblk->ref--;
	if (mblk->ref == 0) {
		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
			rb_erase(&mblk->node, &zmd->mblk_rbtree);
			dmz_free_mblock(zmd, mblk);
		} else if (!test_bit(DMZ_META_DIRTY, &mblk->state)) {
			list_add_tail(&mblk->link, &zmd->mblk_lru_list);
			dmz_shrink_mblock_cache(zmd, 1);
		}
	}

	spin_unlock(&zmd->mblk_lock);
}

/*
 * Get a metadata block from the rbtree. If the block
 * is not present, read it from disk.
 */
static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
					 sector_t mblk_no)
{
	struct dmz_mblock *mblk;
	struct dmz_dev *dev = zmd->sb[zmd->mblk_primary].dev;

	/* Check rbtree */
	spin_lock(&zmd->mblk_lock);
	mblk = dmz_get_mblock_fast(zmd, mblk_no);
	spin_unlock(&zmd->mblk_lock);

	if (!mblk) {
		/* Cache miss: read the block from disk */
		mblk = dmz_get_mblock_slow(zmd, mblk_no);
		if (IS_ERR(mblk))
			return mblk;
	}

	/* Wait for on-going read I/O and check for error */
	wait_on_bit_io(&mblk->state, DMZ_META_READING,
		       TASK_UNINTERRUPTIBLE);
	if (test_bit(DMZ_META_ERROR, &mblk->state)) {
		dmz_release_mblock(zmd, mblk);
		dmz_check_bdev(dev);
		return ERR_PTR(-EIO);
	}

	return mblk;
}

/*
 * Mark a metadata block dirty.
 */
static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	spin_lock(&zmd->mblk_lock);
	if (!test_and_set_bit(DMZ_META_DIRTY, &mblk->state))
		list_add_tail(&mblk->link, &zmd->mblk_dirty_list);
	spin_unlock(&zmd->mblk_lock);
}

/*
 * Issue a metadata block write BIO.
 */
static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
			    unsigned int set)
{
	struct dmz_dev *dev = zmd->sb[set].dev;
	sector_t block = zmd->sb[set].block + mblk->no;
	struct bio *bio;

	if (dmz_bdev_is_dying(dev))
		return -EIO;

	bio = bio_alloc(dev->bdev, 1, REQ_OP_WRITE | REQ_META | REQ_PRIO,
			GFP_NOIO);

	set_bit(DMZ_META_WRITING, &mblk->state);

	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio->bi_private = mblk;
	bio->bi_end_io = dmz_mblock_bio_end_io;
	__bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
	submit_bio(bio);

	return 0;
}

/*
 * Read/write a metadata block.
 */
static int dmz_rdwr_block(struct dmz_dev *dev, enum req_op op,
			  sector_t block, struct page *page)
{
	struct bio *bio;
	int ret;

	if (WARN_ON(!dev))
		return -EIO;

	if (dmz_bdev_is_dying(dev))
		return -EIO;

	bio = bio_alloc(dev->bdev, 1, op | REQ_SYNC | REQ_META | REQ_PRIO,
			GFP_NOIO);
	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	__bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
	ret = submit_bio_wait(bio);
	bio_put(bio);

	if (ret)
		dmz_check_bdev(dev);
	return ret;
}

/*
 * Write super block of the specified metadata set.
 */
static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
{
	struct dmz_mblock *mblk = zmd->sb[set].mblk;
	struct dmz_super *sb = zmd->sb[set].sb;
	struct dmz_dev *dev = zmd->sb[set].dev;
	sector_t sb_block;
	u64 sb_gen = zmd->sb_gen + 1;
	int ret;

	sb->magic = cpu_to_le32(DMZ_MAGIC);

	sb->version = cpu_to_le32(zmd->sb_version);
	if (zmd->sb_version > 1) {
		BUILD_BUG_ON(UUID_SIZE != 16);
		export_uuid(sb->dmz_uuid, &zmd->uuid);
		memcpy(sb->dmz_label, zmd->label, BDEVNAME_SIZE);
		export_uuid(sb->dev_uuid, &dev->uuid);
	}

	sb->gen = cpu_to_le64(sb_gen);

	/*
	 * The metadata always references the absolute block address,
	 * i.e. relative to the entire block range, not the per-device
	 * block address.
	 */
	sb_block = zmd->sb[set].zone->id << zmd->zone_nr_blocks_shift;
	sb->sb_block = cpu_to_le64(sb_block);
	sb->nr_meta_blocks = cpu_to_le32(zmd->nr_meta_blocks);
	sb->nr_reserved_seq = cpu_to_le32(zmd->nr_reserved_seq);
	sb->nr_chunks = cpu_to_le32(zmd->nr_chunks);

	sb->nr_map_blocks = cpu_to_le32(zmd->nr_map_blocks);
	sb->nr_bitmap_blocks = cpu_to_le32(zmd->nr_bitmap_blocks);

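	/*
	 * Checksum convention (mirrored by dmz_check_sb()): the crc field
	 * is zeroed first, then the CRC is computed over the full 4KB
	 * block, seeded with the generation number.
	 */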
	sb->crc = 0;
	sb->crc = cpu_to_le32(crc32_le(sb_gen, (unsigned char *)sb, DMZ_BLOCK_SIZE));

	ret = dmz_rdwr_block(dev, REQ_OP_WRITE, zmd->sb[set].block,
			     mblk->page);
	if (ret == 0)
		ret = blkdev_issue_flush(dev->bdev);

	return ret;
}

/*
 * Write dirty metadata blocks to the specified set.
 */
static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
				   struct list_head *write_list,
				   unsigned int set)
{
	struct dmz_mblock *mblk;
	struct dmz_dev *dev = zmd->sb[set].dev;
	struct blk_plug plug;
	int ret = 0, nr_mblks_submitted = 0;

	/* Issue writes */
	blk_start_plug(&plug);
	list_for_each_entry(mblk, write_list, link) {
		ret = dmz_write_mblock(zmd, mblk, set);
		if (ret)
			break;
		nr_mblks_submitted++;
	}
	blk_finish_plug(&plug);

	/* Wait for completion */
	list_for_each_entry(mblk, write_list, link) {
		if (!nr_mblks_submitted)
			break;
		wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
			       TASK_UNINTERRUPTIBLE);
		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
			clear_bit(DMZ_META_ERROR, &mblk->state);
			dmz_check_bdev(dev);
			ret = -EIO;
		}
		nr_mblks_submitted--;
	}

	/* Flush drive cache (this will also sync data) */
	if (ret == 0)
		ret = blkdev_issue_flush(dev->bdev);

	return ret;
}

/*
 * Log dirty metadata blocks.
 */
static int dmz_log_dirty_mblocks(struct dmz_metadata *zmd,
				 struct list_head *write_list)
{
	unsigned int log_set = zmd->mblk_primary ^ 0x1;
	int ret;

	/* Write dirty blocks to the log */
	ret = dmz_write_dirty_mblocks(zmd, write_list, log_set);
	if (ret)
		return ret;

	/*
	 * No error so far: now validate the log by updating the
	 * log index super block generation.
	 */
	ret = dmz_write_sb(zmd, log_set);
	if (ret)
		return ret;

	return 0;
}

/*
 * Flush dirty metadata blocks.
 */
int dmz_flush_metadata(struct dmz_metadata *zmd)
{
	struct dmz_mblock *mblk;
	struct list_head write_list;
	struct dmz_dev *dev;
	int ret;

	if (WARN_ON(!zmd))
		return 0;

	INIT_LIST_HEAD(&write_list);

	/*
	 * Make sure that metadata blocks are stable before logging: take
	 * the write lock on the metadata semaphore to prevent target BIOs
	 * from modifying metadata.
	 */
	down_write(&zmd->mblk_sem);
	dev = zmd->sb[zmd->mblk_primary].dev;

	/*
	 * This is called from the target flush work and reclaim work.
	 * Concurrent execution is not allowed.
	 */
	dmz_lock_flush(zmd);

	if (dmz_bdev_is_dying(dev)) {
		ret = -EIO;
		goto out;
	}

	/* Get dirty blocks */
	spin_lock(&zmd->mblk_lock);
	list_splice_init(&zmd->mblk_dirty_list, &write_list);
	spin_unlock(&zmd->mblk_lock);

	/* If there are no dirty metadata blocks, just flush the device cache */
	if (list_empty(&write_list)) {
		ret = blkdev_issue_flush(dev->bdev);
		goto err;
	}

	/*
	 * The primary metadata set is still clean. Keep it this way until
	 * all updates are successful in the secondary set. That is, use
	 * the secondary set as a log.
	 */
	ret = dmz_log_dirty_mblocks(zmd, &write_list);
	if (ret)
		goto err;

	/*
	 * The log is on disk. It is now safe to update in place
	 * in the primary metadata set.
	 */
	ret = dmz_write_dirty_mblocks(zmd, &write_list, zmd->mblk_primary);
	if (ret)
		goto err;

	ret = dmz_write_sb(zmd, zmd->mblk_primary);
	if (ret)
		goto err;

	while (!list_empty(&write_list)) {
		mblk = list_first_entry(&write_list, struct dmz_mblock, link);
		list_del_init(&mblk->link);

		spin_lock(&zmd->mblk_lock);
		clear_bit(DMZ_META_DIRTY, &mblk->state);
		if (mblk->ref == 0)
			list_add_tail(&mblk->link, &zmd->mblk_lru_list);
		spin_unlock(&zmd->mblk_lock);
	}

	zmd->sb_gen++;
out:
	dmz_unlock_flush(zmd);
	up_write(&zmd->mblk_sem);

	return ret;

err:
	if (!list_empty(&write_list)) {
		spin_lock(&zmd->mblk_lock);
		list_splice(&write_list, &zmd->mblk_dirty_list);
		spin_unlock(&zmd->mblk_lock);
	}
	if (!dmz_check_bdev(dev))
		ret = -EIO;
	goto out;
}

/*
 * Check super block.
 */
static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb,
			bool tertiary)
{
	struct dmz_super *sb = dsb->sb;
	struct dmz_dev *dev = dsb->dev;
	unsigned int nr_meta_zones, nr_data_zones;
	u32 crc, stored_crc;
	u64 gen, sb_block;

	if (le32_to_cpu(sb->magic) != DMZ_MAGIC) {
		dmz_dev_err(dev, "Invalid meta magic (needed 0x%08x, got 0x%08x)",
			    DMZ_MAGIC, le32_to_cpu(sb->magic));
		return -ENXIO;
	}

	zmd->sb_version = le32_to_cpu(sb->version);
	if (zmd->sb_version > DMZ_META_VER) {
		dmz_dev_err(dev, "Invalid meta version (needed %d, got %d)",
			    DMZ_META_VER, zmd->sb_version);
		return -EINVAL;
	}
	if (zmd->sb_version < 2 && tertiary) {
		dmz_dev_err(dev, "Tertiary superblocks are not supported");
		return -EINVAL;
	}

	gen = le64_to_cpu(sb->gen);
	stored_crc = le32_to_cpu(sb->crc);
	sb->crc = 0;
	crc = crc32_le(gen, (unsigned char *)sb, DMZ_BLOCK_SIZE);
	if (crc != stored_crc) {
		dmz_dev_err(dev, "Invalid checksum (needed 0x%08x, got 0x%08x)",
			    crc, stored_crc);
		return -ENXIO;
	}

	sb_block = le64_to_cpu(sb->sb_block);
	if (sb_block != (u64)dsb->zone->id << zmd->zone_nr_blocks_shift) {
		dmz_dev_err(dev, "Invalid superblock position (is %llu expected %llu)",
			    sb_block, (u64)dsb->zone->id << zmd->zone_nr_blocks_shift);
		return -EINVAL;
	}
	if (zmd->sb_version > 1) {
		uuid_t sb_uuid;

		import_uuid(&sb_uuid, sb->dmz_uuid);
		if (uuid_is_null(&sb_uuid)) {
			dmz_dev_err(dev, "NULL DM-Zoned uuid");
			return -ENXIO;
		} else if (uuid_is_null(&zmd->uuid)) {
			uuid_copy(&zmd->uuid, &sb_uuid);
		} else if (!uuid_equal(&zmd->uuid, &sb_uuid)) {
			dmz_dev_err(dev, "mismatching DM-Zoned uuid, is %pUl expected %pUl",
				    &sb_uuid, &zmd->uuid);
			return -ENXIO;
		}
		if (!strlen(zmd->label))
			memcpy(zmd->label, sb->dmz_label, BDEVNAME_SIZE);
		else if (memcmp(zmd->label, sb->dmz_label, BDEVNAME_SIZE)) {
			dmz_dev_err(dev, "mismatching DM-Zoned label, is %s expected %s",
				    sb->dmz_label, zmd->label);
			return -ENXIO;
		}
		import_uuid(&dev->uuid, sb->dev_uuid);
		if (uuid_is_null(&dev->uuid)) {
			dmz_dev_err(dev, "NULL device uuid");
			return -ENXIO;
		}

		if (tertiary) {
			/*
			 * Generation number should be 0, but it doesn't
			 * really matter if it isn't.
			 */
			if (gen != 0)
				dmz_dev_warn(dev, "Invalid generation %llu",
					     gen);
			return 0;
		}
	}

	nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + zmd->zone_nr_blocks - 1)
		>> zmd->zone_nr_blocks_shift;
	if (!nr_meta_zones ||
	    (zmd->nr_devs <= 1 && nr_meta_zones >= zmd->nr_rnd_zones) ||
	    (zmd->nr_devs > 1 && nr_meta_zones >= zmd->nr_cache_zones)) {
		dmz_dev_err(dev, "Invalid number of metadata blocks");
		return -ENXIO;
	}

	if (!le32_to_cpu(sb->nr_reserved_seq) ||
	    le32_to_cpu(sb->nr_reserved_seq) >= (zmd->nr_useable_zones - nr_meta_zones)) {
		dmz_dev_err(dev, "Invalid number of reserved sequential zones");
		return -ENXIO;
	}

	nr_data_zones = zmd->nr_useable_zones -
		(nr_meta_zones * 2 + le32_to_cpu(sb->nr_reserved_seq));
	if (le32_to_cpu(sb->nr_chunks) > nr_data_zones) {
		dmz_dev_err(dev, "Invalid number of chunks %u / %u",
			    le32_to_cpu(sb->nr_chunks), nr_data_zones);
		return -ENXIO;
	}

	/* OK */
	zmd->nr_meta_blocks = le32_to_cpu(sb->nr_meta_blocks);
	zmd->nr_reserved_seq = le32_to_cpu(sb->nr_reserved_seq);
	zmd->nr_chunks = le32_to_cpu(sb->nr_chunks);
	zmd->nr_map_blocks = le32_to_cpu(sb->nr_map_blocks);
	zmd->nr_bitmap_blocks = le32_to_cpu(sb->nr_bitmap_blocks);
	zmd->nr_meta_zones = nr_meta_zones;
	zmd->nr_data_zones = nr_data_zones;

	return 0;
}

/*
 * Read the first or second super block from disk.
 */
static int dmz_read_sb(struct dmz_metadata *zmd, struct dmz_sb *sb, int set)
{
	dmz_zmd_debug(zmd, "read superblock set %d dev %pg block %llu",
		      set, sb->dev->bdev, sb->block);

	return dmz_rdwr_block(sb->dev, REQ_OP_READ,
			      sb->block, sb->mblk->page);
}

/*
 * Determine the position of the secondary super blocks on disk.
 * This is used only if a corruption of the primary super block
 * is detected.
 */
static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd)
{
	unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
	struct dmz_mblock *mblk;
	unsigned int zone_id = zmd->sb[0].zone->id;
	int i;

	/* Allocate a block */
	mblk = dmz_alloc_mblock(zmd, 0);
	if (!mblk)
		return -ENOMEM;

	zmd->sb[1].mblk = mblk;
	zmd->sb[1].sb = mblk->data;

	/* Bad first super block: search for the second one */
	zmd->sb[1].block = zmd->sb[0].block + zone_nr_blocks;
	zmd->sb[1].zone = dmz_get(zmd, zone_id + 1);
	zmd->sb[1].dev = zmd->sb[0].dev;
	for (i = 1; i < zmd->nr_rnd_zones; i++) {
		if (dmz_read_sb(zmd, &zmd->sb[1], 1) != 0)
			break;
		if (le32_to_cpu(zmd->sb[1].sb->magic) == DMZ_MAGIC)
			return 0;
		zmd->sb[1].block += zone_nr_blocks;
		zmd->sb[1].zone = dmz_get(zmd, zone_id + i);
	}

	dmz_free_mblock(zmd, mblk);
	zmd->sb[1].mblk = NULL;
	zmd->sb[1].zone = NULL;
	zmd->sb[1].dev = NULL;

	return -EIO;
}

/*
 * Read a super block from disk.
 */
static int dmz_get_sb(struct dmz_metadata *zmd, struct dmz_sb *sb, int set)
{
	struct dmz_mblock *mblk;
	int ret;

	/* Allocate a block */
	mblk = dmz_alloc_mblock(zmd, 0);
	if (!mblk)
		return -ENOMEM;

	sb->mblk = mblk;
	sb->sb = mblk->data;

	/* Read super block */
	ret = dmz_read_sb(zmd, sb, set);
	if (ret) {
		dmz_free_mblock(zmd, mblk);
		sb->mblk = NULL;
		return ret;
	}

	return 0;
}

/*
 * Recover a metadata set.
 */
static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
{
	unsigned int src_set = dst_set ^ 0x1;
	struct page *page;
	int i, ret;

	dmz_dev_warn(zmd->sb[dst_set].dev,
		     "Metadata set %u invalid: recovering", dst_set);

	if (dst_set == 0)
		zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
	else
		zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);

	page = alloc_page(GFP_NOIO);
	if (!page)
		return -ENOMEM;

	/* Copy metadata blocks */
	for (i = 1; i < zmd->nr_meta_blocks; i++) {
		ret = dmz_rdwr_block(zmd->sb[src_set].dev, REQ_OP_READ,
				     zmd->sb[src_set].block + i, page);
		if (ret)
			goto out;
		ret = dmz_rdwr_block(zmd->sb[dst_set].dev, REQ_OP_WRITE,
				     zmd->sb[dst_set].block + i, page);
		if (ret)
			goto out;
	}

	/* Finalize with the super block */
	if (!zmd->sb[dst_set].mblk) {
		zmd->sb[dst_set].mblk = dmz_alloc_mblock(zmd, 0);
		if (!zmd->sb[dst_set].mblk) {
			ret = -ENOMEM;
			goto out;
		}
		zmd->sb[dst_set].sb = zmd->sb[dst_set].mblk->data;
	}

	ret = dmz_write_sb(zmd, dst_set);
out:
	__free_pages(page, 0);

	return ret;
}

/*
 * Get super block from disk.
 */
static int dmz_load_sb(struct dmz_metadata *zmd)
{
	bool sb_good[2] = {false, false};
	u64 sb_gen[2] = {0, 0};
	int ret;

	if (!zmd->sb[0].zone) {
		dmz_zmd_err(zmd, "Primary super block zone not set");
		return -ENXIO;
	}

	/* Read and check the primary super block */
	zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
	zmd->sb[0].dev = zmd->sb[0].zone->dev;
	ret = dmz_get_sb(zmd, &zmd->sb[0], 0);
	if (ret) {
		dmz_dev_err(zmd->sb[0].dev, "Read primary super block failed");
		return ret;
	}

	ret = dmz_check_sb(zmd, &zmd->sb[0], false);

	/* Read and check secondary super block */
	if (ret == 0) {
		sb_good[0] = true;
		if (!zmd->sb[1].zone) {
			unsigned int zone_id =
				zmd->sb[0].zone->id + zmd->nr_meta_zones;

			zmd->sb[1].zone = dmz_get(zmd, zone_id);
		}
		zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);
		zmd->sb[1].dev = zmd->sb[0].dev;
		ret = dmz_get_sb(zmd, &zmd->sb[1], 1);
	} else
		ret = dmz_lookup_secondary_sb(zmd);

	if (ret) {
		dmz_dev_err(zmd->sb[1].dev, "Read secondary super block failed");
		return ret;
	}

	ret = dmz_check_sb(zmd, &zmd->sb[1], false);
	if (ret == 0)
		sb_good[1] = true;

	/* Use highest generation sb first */
	if (!sb_good[0] && !sb_good[1]) {
		dmz_zmd_err(zmd, "No valid super block found");
		return -EIO;
	}

	if (sb_good[0])
		sb_gen[0] = le64_to_cpu(zmd->sb[0].sb->gen);
	else {
		ret = dmz_recover_mblocks(zmd, 0);
		if (ret) {
			dmz_dev_err(zmd->sb[0].dev,
				    "Recovery of superblock 0 failed");
			return -EIO;
		}
	}

	if (sb_good[1])
		sb_gen[1] = le64_to_cpu(zmd->sb[1].sb->gen);
	else {
		ret = dmz_recover_mblocks(zmd, 1);

		if (ret) {
			dmz_dev_err(zmd->sb[1].dev,
				    "Recovery of superblock 1 failed");
			return -EIO;
		}
	}

	if (sb_gen[0] >= sb_gen[1]) {
		zmd->sb_gen = sb_gen[0];
		zmd->mblk_primary = 0;
	} else {
		zmd->sb_gen = sb_gen[1];
		zmd->mblk_primary = 1;
	}

	dmz_dev_debug(zmd->sb[zmd->mblk_primary].dev,
		      "Using super block %u (gen %llu)",
		      zmd->mblk_primary, zmd->sb_gen);

	if (zmd->sb_version > 1) {
		int i;
		struct dmz_sb *sb;

		sb = kzalloc(sizeof(struct dmz_sb), GFP_KERNEL);
		if (!sb)
			return -ENOMEM;
		for (i = 1; i < zmd->nr_devs; i++) {
			sb->block = 0;
			sb->zone = dmz_get(zmd, zmd->dev[i].zone_offset);
			sb->dev = &zmd->dev[i];
			if (!dmz_is_meta(sb->zone)) {
				dmz_dev_err(sb->dev,
					    "Tertiary super block zone %u not marked as metadata zone",
					    sb->zone->id);
				ret = -EINVAL;
				goto out_kfree;
			}
			ret = dmz_get_sb(zmd, sb, i + 1);
			if (ret) {
				dmz_dev_err(sb->dev,
					    "Read tertiary super block failed");
				dmz_free_mblock(zmd, sb->mblk);
				goto out_kfree;
			}
			ret = dmz_check_sb(zmd, sb, true);
			dmz_free_mblock(zmd, sb->mblk);
			if (ret == -EINVAL)
				goto out_kfree;
		}
out_kfree:
		kfree(sb);
	}
	return ret;
}

/*
 * Initialize a zone descriptor.
 */
static int dmz_init_zone(struct blk_zone *blkz, unsigned int num, void *data)
{
	struct dmz_dev *dev = data;
	struct dmz_metadata *zmd = dev->metadata;
	int idx = num + dev->zone_offset;
	struct dm_zone *zone;

	zone = dmz_insert(zmd, idx, dev);
	if (IS_ERR(zone))
		return PTR_ERR(zone);

	if (blkz->len != zmd->zone_nr_sectors) {
		if (zmd->sb_version > 1) {
			/* Ignore any runt (smaller) zone */
			set_bit(DMZ_OFFLINE, &zone->flags);
			return 0;
		} else if (blkz->start + blkz->len == dev->capacity)
			return 0;
		return -ENXIO;
	}

	/*
	 * Devices that have zones with a capacity smaller than the zone size
	 * (e.g. NVMe zoned namespaces) are not supported.
	 */
	if (blkz->capacity != blkz->len)
		return -ENXIO;

	switch (blkz->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
		set_bit(DMZ_RND, &zone->flags);
		break;
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		set_bit(DMZ_SEQ, &zone->flags);
		break;
	default:
		return -ENXIO;
	}

	if (dmz_is_rnd(zone))
		zone->wp_block = 0;
	else
		zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);

	if (blkz->cond == BLK_ZONE_COND_OFFLINE)
		set_bit(DMZ_OFFLINE, &zone->flags);
	else if (blkz->cond == BLK_ZONE_COND_READONLY)
		set_bit(DMZ_READ_ONLY, &zone->flags);
	else {
		zmd->nr_useable_zones++;
		if (dmz_is_rnd(zone)) {
			zmd->nr_rnd_zones++;
			if (zmd->nr_devs == 1 && !zmd->sb[0].zone) {
				/* Primary super block zone */
				zmd->sb[0].zone = zone;
			}
		}
		if (zmd->nr_devs > 1 && num == 0) {
			/*
			 * Tertiary superblock zones are always at the
			 * start of the zoned devices, so mark them
			 * as metadata zone.
			 */
			set_bit(DMZ_META, &zone->flags);
		}
	}
	return 0;
}

static int dmz_emulate_zones(struct dmz_metadata *zmd, struct dmz_dev *dev)
{
	int idx;
	sector_t zone_offset = 0;

	for (idx = 0; idx < dev->nr_zones; idx++) {
		struct dm_zone *zone;

		zone = dmz_insert(zmd, idx, dev);
		if (IS_ERR(zone))
			return PTR_ERR(zone);
		set_bit(DMZ_CACHE, &zone->flags);
		zone->wp_block = 0;
		zmd->nr_cache_zones++;
		zmd->nr_useable_zones++;
		if (dev->capacity - zone_offset < zmd->zone_nr_sectors) {
			/* Disable runt zone */
			set_bit(DMZ_OFFLINE, &zone->flags);
			break;
		}
		zone_offset += zmd->zone_nr_sectors;
	}
	return 0;
}

/*
 * Free zone descriptors.
 */
static void dmz_drop_zones(struct dmz_metadata *zmd)
{
	int idx;

	for (idx = 0; idx < zmd->nr_zones; idx++) {
		struct dm_zone *zone = xa_load(&zmd->zones, idx);

		kfree(zone);
		xa_erase(&zmd->zones, idx);
	}
	xa_destroy(&zmd->zones);
}

/*
 * Allocate and initialize zone descriptors using the zone
 * information from disk.
 */
static int dmz_init_zones(struct dmz_metadata *zmd)
{
	int i, ret;
	struct dmz_dev *zoned_dev = &zmd->dev[0];

	/* Init */
	zmd->zone_nr_sectors = zmd->dev[0].zone_nr_sectors;
	zmd->zone_nr_sectors_shift = ilog2(zmd->zone_nr_sectors);
	zmd->zone_nr_blocks = dmz_sect2blk(zmd->zone_nr_sectors);
	zmd->zone_nr_blocks_shift = ilog2(zmd->zone_nr_blocks);
	zmd->zone_bitmap_size = zmd->zone_nr_blocks >> 3;
	zmd->zone_nr_bitmap_blocks =
		max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT);
	zmd->zone_bits_per_mblk = min_t(sector_t, zmd->zone_nr_blocks,
					DMZ_BLOCK_SIZE_BITS);

	/* Allocate zone array */
	zmd->nr_zones = 0;
	for (i = 0; i < zmd->nr_devs; i++) {
		struct dmz_dev *dev = &zmd->dev[i];

		dev->metadata = zmd;
		zmd->nr_zones += dev->nr_zones;

		atomic_set(&dev->unmap_nr_rnd, 0);
		INIT_LIST_HEAD(&dev->unmap_rnd_list);
		INIT_LIST_HEAD(&dev->map_rnd_list);

		atomic_set(&dev->unmap_nr_seq, 0);
		INIT_LIST_HEAD(&dev->unmap_seq_list);
		INIT_LIST_HEAD(&dev->map_seq_list);
	}

	if (!zmd->nr_zones) {
		DMERR("(%s): No zones found", zmd->devname);
		return -ENXIO;
	}
	xa_init(&zmd->zones);

	DMDEBUG("(%s): Using %zu B for zone information",
		zmd->devname, sizeof(struct dm_zone) * zmd->nr_zones);

	if (zmd->nr_devs > 1) {
		ret = dmz_emulate_zones(zmd, &zmd->dev[0]);
		if (ret < 0) {
			DMDEBUG("(%s): Failed to emulate zones, error %d",
				zmd->devname, ret);
			dmz_drop_zones(zmd);
			return ret;
		}

		/*
		 * Primary superblock zone is always at zone 0 when multiple
		 * drives are present.
		 */
		zmd->sb[0].zone = dmz_get(zmd, 0);

		for (i = 1; i < zmd->nr_devs; i++) {
			zoned_dev = &zmd->dev[i];

			ret = blkdev_report_zones(zoned_dev->bdev, 0,
						  BLK_ALL_ZONES,
						  dmz_init_zone, zoned_dev);
			if (ret < 0) {
				DMDEBUG("(%s): Failed to report zones, error %d",
					zmd->devname, ret);
				dmz_drop_zones(zmd);
				return ret;
			}
		}
		return 0;
	}

	/*
	 * Get zone information and initialize zone descriptors. At the same
	 * time, determine where the super block should be: first block of the
	 * first randomly writable zone.
	 */
	ret = blkdev_report_zones(zoned_dev->bdev, 0, BLK_ALL_ZONES,
				  dmz_init_zone, zoned_dev);
	if (ret < 0) {
		DMDEBUG("(%s): Failed to report zones, error %d",
			zmd->devname, ret);
		dmz_drop_zones(zmd);
		return ret;
	}

	return 0;
}

static int dmz_update_zone_cb(struct blk_zone *blkz, unsigned int idx,
			      void *data)
{
	struct dm_zone *zone = data;

	clear_bit(DMZ_OFFLINE, &zone->flags);
	clear_bit(DMZ_READ_ONLY, &zone->flags);
	if (blkz->cond == BLK_ZONE_COND_OFFLINE)
		set_bit(DMZ_OFFLINE, &zone->flags);
	else if (blkz->cond == BLK_ZONE_COND_READONLY)
		set_bit(DMZ_READ_ONLY, &zone->flags);

	if (dmz_is_seq(zone))
		zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
	else
		zone->wp_block = 0;
	return 0;
}

/*
 * Update a zone's information.
 */
static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	struct dmz_dev *dev = zone->dev;
	unsigned int noio_flag;
	int ret;

	if (dev->flags & DMZ_BDEV_REGULAR)
		return 0;

	/*
	 * Get zone information from disk. Since blkdev_report_zones() uses
	 * GFP_KERNEL by default for memory allocations, set the per-task
	 * PF_MEMALLOC_NOIO flag so that all allocations are done as if
	 * GFP_NOIO was specified.
	 */
	noio_flag = memalloc_noio_save();
	ret = blkdev_report_zones(dev->bdev, dmz_start_sect(zmd, zone), 1,
				  dmz_update_zone_cb, zone);
	memalloc_noio_restore(noio_flag);

	if (ret == 0)
		ret = -EIO;
	if (ret < 0) {
		dmz_dev_err(dev, "Get zone %u report failed",
			    zone->id);
		dmz_check_bdev(dev);
		return ret;
	}

	return 0;
}

/*
 * Check a zone write pointer position when the zone is marked
 * with the sequential write error flag.
 */
static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
				    struct dm_zone *zone)
{
	struct dmz_dev *dev = zone->dev;
	unsigned int wp = 0;
	int ret;

	wp = zone->wp_block;
	ret = dmz_update_zone(zmd, zone);
	if (ret)
		return ret;

	dmz_dev_warn(dev, "Processing zone %u write error (zone wp %u/%u)",
		     zone->id, zone->wp_block, wp);

	if (zone->wp_block < wp) {
		dmz_invalidate_blocks(zmd, zone, zone->wp_block,
				      wp - zone->wp_block);
	}

	return 0;
}

/*
 * Reset a zone write pointer.
 */
static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	int ret;

	/*
	 * Ignore offline zones, read only zones,
	 * and conventional zones.
	 */
	if (dmz_is_offline(zone) ||
	    dmz_is_readonly(zone) ||
	    dmz_is_rnd(zone))
		return 0;

	if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
		struct dmz_dev *dev = zone->dev;

		ret = blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET,
				       dmz_start_sect(zmd, zone),
				       zmd->zone_nr_sectors, GFP_NOIO);
		if (ret) {
			dmz_dev_err(dev, "Reset zone %u failed %d",
				    zone->id, ret);
			return ret;
		}
	}

	/* Clear write error bit and rewind write pointer position */
	clear_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
	zone->wp_block = 0;

	return 0;
}

static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone);

/*
 * Initialize chunk mapping.
 */
static int dmz_load_mapping(struct dmz_metadata *zmd)
{
	struct dm_zone *dzone, *bzone;
	struct dmz_mblock *dmap_mblk = NULL;
	struct dmz_map *dmap;
	unsigned int i = 0, e = 0, chunk = 0;
	unsigned int dzone_id;
	unsigned int bzone_id;

	/* Metadata block array for the chunk mapping table */
	zmd->map_mblk = kcalloc(zmd->nr_map_blocks,
				sizeof(struct dmz_mblk *), GFP_KERNEL);
	if (!zmd->map_mblk)
		return -ENOMEM;

	/* Get chunk mapping table blocks and initialize zone mapping */
	while (chunk < zmd->nr_chunks) {
		if (!dmap_mblk) {
			/* Get mapping block */
			dmap_mblk = dmz_get_mblock(zmd, i + 1);
			if (IS_ERR(dmap_mblk))
				return PTR_ERR(dmap_mblk);
			zmd->map_mblk[i] = dmap_mblk;
			dmap = dmap_mblk->data;
			i++;
			e = 0;
		}

		/* Check data zone */
		dzone_id = le32_to_cpu(dmap[e].dzone_id);
		if (dzone_id == DMZ_MAP_UNMAPPED)
			goto next;

		if (dzone_id >= zmd->nr_zones) {
			dmz_zmd_err(zmd, "Chunk %u mapping: invalid data zone ID %u",
				    chunk, dzone_id);
			return -EIO;
		}

		dzone = dmz_get(zmd, dzone_id);
		if (!dzone) {
			dmz_zmd_err(zmd, "Chunk %u mapping: data zone %u not present",
				    chunk, dzone_id);
			return -EIO;
		}
		set_bit(DMZ_DATA, &dzone->flags);
		dzone->chunk = chunk;
		dmz_get_zone_weight(zmd, dzone);

		if (dmz_is_cache(dzone))
			list_add_tail(&dzone->link, &zmd->map_cache_list);
		else if (dmz_is_rnd(dzone))
			list_add_tail(&dzone->link, &dzone->dev->map_rnd_list);
		else
			list_add_tail(&dzone->link, &dzone->dev->map_seq_list);

		/* Check buffer zone */
		bzone_id = le32_to_cpu(dmap[e].bzone_id);
		if (bzone_id == DMZ_MAP_UNMAPPED)
			goto next;

		if (bzone_id >= zmd->nr_zones) {
			dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone ID %u",
				    chunk, bzone_id);
			return -EIO;
		}

		bzone = dmz_get(zmd, bzone_id);
		if (!bzone) {
			dmz_zmd_err(zmd, "Chunk %u mapping: buffer zone %u not present",
				    chunk, bzone_id);
			return -EIO;
		}
		if (!dmz_is_rnd(bzone) && !dmz_is_cache(bzone)) {
			dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone %u",
				    chunk, bzone_id);
			return -EIO;
		}

		set_bit(DMZ_DATA, &bzone->flags);
		set_bit(DMZ_BUF, &bzone->flags);
		bzone->chunk = chunk;
		bzone->bzone = dzone;
		dzone->bzone = bzone;
		dmz_get_zone_weight(zmd, bzone);
		if (dmz_is_cache(bzone))
			list_add_tail(&bzone->link, &zmd->map_cache_list);
		else
			list_add_tail(&bzone->link, &bzone->dev->map_rnd_list);
next:
		chunk++;
		e++;
		if (e >= DMZ_MAP_ENTRIES)
			dmap_mblk = NULL;
	}

	/*
	 * At this point, only meta zones and mapped data zones were
	 * fully initialized. All remaining zones are unmapped data
	 * zones. Finish initializing those here.
	 */
	for (i = 0; i < zmd->nr_zones; i++) {
		dzone = dmz_get(zmd, i);
		if (!dzone)
			continue;
		if (dmz_is_meta(dzone))
			continue;
		if (dmz_is_offline(dzone))
			continue;

		if (dmz_is_cache(dzone))
			zmd->nr_cache++;
		else if (dmz_is_rnd(dzone))
			dzone->dev->nr_rnd++;
		else
			dzone->dev->nr_seq++;

		if (dmz_is_data(dzone)) {
			/* Already initialized */
			continue;
		}

		/* Unmapped data zone */
		set_bit(DMZ_DATA, &dzone->flags);
		dzone->chunk = DMZ_MAP_UNMAPPED;
		if (dmz_is_cache(dzone)) {
			list_add_tail(&dzone->link, &zmd->unmap_cache_list);
			atomic_inc(&zmd->unmap_nr_cache);
		} else if (dmz_is_rnd(dzone)) {
			list_add_tail(&dzone->link,
				      &dzone->dev->unmap_rnd_list);
			atomic_inc(&dzone->dev->unmap_nr_rnd);
		} else if (atomic_read(&zmd->nr_reserved_seq_zones) < zmd->nr_reserved_seq) {
			list_add_tail(&dzone->link, &zmd->reserved_seq_zones_list);
			set_bit(DMZ_RESERVED, &dzone->flags);
			atomic_inc(&zmd->nr_reserved_seq_zones);
			dzone->dev->nr_seq--;
		} else {
			list_add_tail(&dzone->link,
				      &dzone->dev->unmap_seq_list);
			atomic_inc(&dzone->dev->unmap_nr_seq);
		}
	}

	return 0;
}

/*
 * Set a data chunk mapping.
 */
static void dmz_set_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk,
				  unsigned int dzone_id, unsigned int bzone_id)
{
	struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
	struct dmz_map *dmap = dmap_mblk->data;
	int map_idx = chunk & DMZ_MAP_ENTRIES_MASK;

	dmap[map_idx].dzone_id = cpu_to_le32(dzone_id);
	dmap[map_idx].bzone_id = cpu_to_le32(bzone_id);
	dmz_dirty_mblock(zmd, dmap_mblk);
}
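
/*
 * Worked example (illustrative only, not part of the driver): with
 * 4 KB metadata blocks and 8-byte entries, DMZ_MAP_ENTRIES is 512, so
 * for chunk 1000 the entry lives in map block 1000 >> 9 = 1, at index
 * 1000 & 511 = 488. A hypothetical caller mapping that chunk to data
 * zone 42 with no buffer zone would therefore do:
 *
 *	dmz_set_chunk_mapping(zmd, 1000, 42, DMZ_MAP_UNMAPPED);
 *
 * which dirties only the single cached mapping block holding entry 488.
 */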

/*
 * The list of mapped zones is maintained in LRU order.
 * This rotates a zone to the end of its map list.
 */
static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	if (list_empty(&zone->link))
		return;

	list_del_init(&zone->link);
	if (dmz_is_seq(zone)) {
		/* LRU rotate sequential zone */
		list_add_tail(&zone->link, &zone->dev->map_seq_list);
	} else if (dmz_is_cache(zone)) {
		/* LRU rotate cache zone */
		list_add_tail(&zone->link, &zmd->map_cache_list);
	} else {
		/* LRU rotate random zone */
		list_add_tail(&zone->link, &zone->dev->map_rnd_list);
	}
}

/*
 * The lists of mapped zones are maintained in LRU order.
 * This rotates a data zone, and its buffer zone if one is
 * attached, to the end of their respective lists.
 */
static void dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	__dmz_lru_zone(zmd, zone);
	if (zone->bzone)
		__dmz_lru_zone(zmd, zone->bzone);
}

/*
 * Wait for any zone to be freed.
 */
static void dmz_wait_for_free_zones(struct dmz_metadata *zmd)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&zmd->free_wq, &wait, TASK_UNINTERRUPTIBLE);
	dmz_unlock_map(zmd);
	dmz_unlock_metadata(zmd);

	io_schedule_timeout(HZ);

	dmz_lock_metadata(zmd);
	dmz_lock_map(zmd);
	finish_wait(&zmd->free_wq, &wait);
}

/*
 * Lock a zone for reclaim (set the zone RECLAIM bit).
 * Returns 0 if the zone cannot be locked or if it is already locked,
 * and 1 otherwise.
 */
int dmz_lock_zone_reclaim(struct dm_zone *zone)
{
	/* Active zones cannot be reclaimed */
	if (dmz_is_active(zone))
		return 0;

	return !test_and_set_bit(DMZ_RECLAIM, &zone->flags);
}
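
/*
 * Usage sketch (hypothetical caller, illustrative only): reclaim must
 * hold the RECLAIM bit for the entire operation and release it with
 * dmz_unlock_zone_reclaim() when done:
 *
 *	if (dmz_lock_zone_reclaim(zone)) {
 *		... move valid blocks, remap the chunk ...
 *		dmz_unlock_zone_reclaim(zone);
 *	}
 */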

/*
 * Clear a zone reclaim flag.
 */
void dmz_unlock_zone_reclaim(struct dm_zone *zone)
{
	WARN_ON(dmz_is_active(zone));
	WARN_ON(!dmz_in_reclaim(zone));

	clear_bit_unlock(DMZ_RECLAIM, &zone->flags);
	smp_mb__after_atomic();
	wake_up_bit(&zone->flags, DMZ_RECLAIM);
}

/*
 * Wait for a zone reclaim to complete.
 */
static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	dmz_unlock_map(zmd);
	dmz_unlock_metadata(zmd);
	set_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
	wait_on_bit_timeout(&zone->flags, DMZ_RECLAIM, TASK_UNINTERRUPTIBLE, HZ);
	clear_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
	dmz_lock_metadata(zmd);
	dmz_lock_map(zmd);
}

/*
 * Select a cache or random write zone for reclaim.
 */
static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd,
						    unsigned int idx, bool idle)
{
	struct dm_zone *dzone = NULL;
	struct dm_zone *zone, *maxw_z = NULL;
	struct list_head *zone_list;

	/* If we have cache zones select from the cache zone list */
	if (zmd->nr_cache) {
		zone_list = &zmd->map_cache_list;
		/* Try to reclaim random zones, too, when idle */
		if (idle && list_empty(zone_list))
			zone_list = &zmd->dev[idx].map_rnd_list;
	} else
		zone_list = &zmd->dev[idx].map_rnd_list;

	/*
	 * Find the buffer zone with the heaviest weight or the first (oldest)
	 * data zone that can be reclaimed.
	 */
	list_for_each_entry(zone, zone_list, link) {
		if (dmz_is_buf(zone)) {
			dzone = zone->bzone;
			if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx)
				continue;
			if (!maxw_z || maxw_z->weight < dzone->weight)
				maxw_z = dzone;
		} else {
			dzone = zone;
			if (dmz_lock_zone_reclaim(dzone))
				return dzone;
		}
	}

	if (maxw_z && dmz_lock_zone_reclaim(maxw_z))
		return maxw_z;

	/*
	 * If we come here, none of the zones inspected could be locked for
	 * reclaim. Try again, being more aggressive, that is, find the
	 * first zone that can be reclaimed regardless of its weight.
	 */
	list_for_each_entry(zone, zone_list, link) {
		if (dmz_is_buf(zone)) {
			dzone = zone->bzone;
			if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx)
				continue;
		} else
			dzone = zone;
		if (dmz_lock_zone_reclaim(dzone))
			return dzone;
	}

	return NULL;
}

/*
 * Select a buffered sequential zone for reclaim.
 */
static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd,
						    unsigned int idx)
{
	struct dm_zone *zone;

	list_for_each_entry(zone, &zmd->dev[idx].map_seq_list, link) {
		if (!zone->bzone)
			continue;
		if (dmz_lock_zone_reclaim(zone))
			return zone;
	}

	return NULL;
}

/*
 * Select a zone for reclaim.
 */
struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
					 unsigned int dev_idx, bool idle)
{
	struct dm_zone *zone = NULL;

	/*
	 * Search for a zone candidate to reclaim: 2 cases are possible.
	 * (1) There are no free sequential zones. Then a random data zone
	 *     cannot be reclaimed. So choose a sequential zone to reclaim so
	 *     that afterward a random zone can be reclaimed.
	 * (2) At least one free sequential zone is available. Then choose
	 *     the oldest random zone (data or buffer) that can be locked.
	 */
	dmz_lock_map(zmd);
	if (list_empty(&zmd->reserved_seq_zones_list))
		zone = dmz_get_seq_zone_for_reclaim(zmd, dev_idx);
	if (!zone)
		zone = dmz_get_rnd_zone_for_reclaim(zmd, dev_idx, idle);
	dmz_unlock_map(zmd);

	return zone;
}

/*
 * Get the zone mapping a chunk, if the chunk is mapped already.
 * If no mapping exists and the operation is WRITE, a zone is
 * allocated and used to map the chunk.
 * The zone returned will be set to the active state.
 */
struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd,
				      unsigned int chunk, enum req_op op)
{
	struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
	struct dmz_map *dmap = dmap_mblk->data;
	int dmap_idx = chunk & DMZ_MAP_ENTRIES_MASK;
	unsigned int dzone_id;
	struct dm_zone *dzone = NULL;
	int ret = 0;
	int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;

	dmz_lock_map(zmd);
again:
	/* Get the chunk mapping */
	dzone_id = le32_to_cpu(dmap[dmap_idx].dzone_id);
	if (dzone_id == DMZ_MAP_UNMAPPED) {
		/*
		 * Read or discard in unmapped chunks are fine. But for
		 * writes, we need a mapping, so get one.
		 */
		if (op != REQ_OP_WRITE)
			goto out;

		/* Allocate a random zone */
		dzone = dmz_alloc_zone(zmd, 0, alloc_flags);
		if (!dzone) {
			if (dmz_dev_is_dying(zmd)) {
				dzone = ERR_PTR(-EIO);
				goto out;
			}
			dmz_wait_for_free_zones(zmd);
			goto again;
		}

		dmz_map_zone(zmd, dzone, chunk);

	} else {
		/* The chunk is already mapped: get the mapping zone */
		dzone = dmz_get(zmd, dzone_id);
		if (!dzone) {
			dzone = ERR_PTR(-EIO);
			goto out;
		}
		if (dzone->chunk != chunk) {
			dzone = ERR_PTR(-EIO);
			goto out;
		}

		/* Repair write pointer if the sequential dzone has error */
		if (dmz_seq_write_err(dzone)) {
			ret = dmz_handle_seq_write_err(zmd, dzone);
			if (ret) {
				dzone = ERR_PTR(-EIO);
				goto out;
			}
			clear_bit(DMZ_SEQ_WRITE_ERR, &dzone->flags);
		}
	}

	/*
	 * If the zone is being reclaimed, the chunk mapping may change
	 * to a different zone. So wait for reclaim and retry. Otherwise,
	 * activate the zone (this will prevent reclaim from touching it).
	 */
	if (dmz_in_reclaim(dzone)) {
		dmz_wait_for_reclaim(zmd, dzone);
		goto again;
	}
	dmz_activate_zone(dzone);
	dmz_lru_zone(zmd, dzone);
out:
	dmz_unlock_map(zmd);

	return dzone;
}
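
/*
 * Usage sketch (hypothetical I/O path, illustrative only): a BIO
 * handler typically brackets its work with the get/put pair so that
 * reclaim stays away from the zone in between:
 *
 *	zone = dmz_get_chunk_mapping(zmd, chunk, bio_op(bio));
 *	if (IS_ERR(zone))
 *		return PTR_ERR(zone);
 *	if (zone) {
 *		... issue reads/writes against the zone ...
 *		dmz_put_chunk_mapping(zmd, zone);
 *	}
 *
 * A NULL return is possible for reads and discards of unmapped chunks.
 */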

/*
 * Write and discard change the block validity of data zones and their buffer
 * zones. Check here that valid blocks are still present. If all blocks are
 * invalid, the zones can be unmapped on the fly without waiting for reclaim
 * to do it.
 */
void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *dzone)
{
	struct dm_zone *bzone;

	dmz_lock_map(zmd);

	bzone = dzone->bzone;
	if (bzone) {
		if (dmz_weight(bzone))
			dmz_lru_zone(zmd, bzone);
		else {
			/* Empty buffer zone: reclaim it */
			dmz_unmap_zone(zmd, bzone);
			dmz_free_zone(zmd, bzone);
			bzone = NULL;
		}
	}

	/* Deactivate the data zone */
	dmz_deactivate_zone(dzone);
	if (dmz_is_active(dzone) || bzone || dmz_weight(dzone))
		dmz_lru_zone(zmd, dzone);
	else {
		/* Unbuffered inactive empty data zone: reclaim it */
		dmz_unmap_zone(zmd, dzone);
		dmz_free_zone(zmd, dzone);
	}

	dmz_unlock_map(zmd);
}

/*
 * Allocate and map a random zone to buffer a chunk
 * already mapped to a sequential zone.
 */
struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
				     struct dm_zone *dzone)
{
	struct dm_zone *bzone;
	int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;

	dmz_lock_map(zmd);
again:
	bzone = dzone->bzone;
	if (bzone)
		goto out;

	/* Allocate a random zone */
	bzone = dmz_alloc_zone(zmd, 0, alloc_flags);
	if (!bzone) {
		if (dmz_dev_is_dying(zmd)) {
			bzone = ERR_PTR(-EIO);
			goto out;
		}
		dmz_wait_for_free_zones(zmd);
		goto again;
	}

	/* Update the chunk mapping */
	dmz_set_chunk_mapping(zmd, dzone->chunk, dzone->id, bzone->id);

	set_bit(DMZ_BUF, &bzone->flags);
	bzone->chunk = dzone->chunk;
	bzone->bzone = dzone;
	dzone->bzone = bzone;
	if (dmz_is_cache(bzone))
		list_add_tail(&bzone->link, &zmd->map_cache_list);
	else
		list_add_tail(&bzone->link, &bzone->dev->map_rnd_list);
out:
	dmz_unlock_map(zmd);

	return bzone;
}
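
/*
 * Usage sketch (hypothetical caller, illustrative only; chunk_block is
 * an assumed local): a write that does not land on the write pointer
 * of a sequential data zone has to go through a buffer zone:
 *
 *	if (dmz_is_seq(dzone) && chunk_block != dzone->wp_block) {
 *		bzone = dmz_get_chunk_buffer(zmd, dzone);
 *		if (IS_ERR(bzone))
 *			return PTR_ERR(bzone);
 *		... write the blocks to bzone instead ...
 *	}
 */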

/*
 * Get an unmapped (free) zone.
 * This must be called with the mapping lock held.
 */
struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned int dev_idx,
			       unsigned long flags)
{
	struct list_head *list;
	struct dm_zone *zone;
	int i;

	/* Schedule reclaim to ensure free zones are available */
	if (!(flags & DMZ_ALLOC_RECLAIM)) {
		for (i = 0; i < zmd->nr_devs; i++)
			dmz_schedule_reclaim(zmd->dev[i].reclaim);
	}

	i = 0;
again:
	if (flags & DMZ_ALLOC_CACHE)
		list = &zmd->unmap_cache_list;
	else if (flags & DMZ_ALLOC_RND)
		list = &zmd->dev[dev_idx].unmap_rnd_list;
	else
		list = &zmd->dev[dev_idx].unmap_seq_list;

	if (list_empty(list)) {
		/*
		 * No free zone: return NULL if this is not for reclaim.
		 */
		if (!(flags & DMZ_ALLOC_RECLAIM))
			return NULL;
		/*
		 * Try to allocate from other devices
		 */
		if (i < zmd->nr_devs) {
			dev_idx = (dev_idx + 1) % zmd->nr_devs;
			i++;
			goto again;
		}

		/*
		 * Fallback to the reserved sequential zones
		 */
		zone = list_first_entry_or_null(&zmd->reserved_seq_zones_list,
						struct dm_zone, link);
		if (zone) {
			list_del_init(&zone->link);
			atomic_dec(&zmd->nr_reserved_seq_zones);
		}
		return zone;
	}

	zone = list_first_entry(list, struct dm_zone, link);
	list_del_init(&zone->link);

	if (dmz_is_cache(zone))
		atomic_dec(&zmd->unmap_nr_cache);
	else if (dmz_is_rnd(zone))
		atomic_dec(&zone->dev->unmap_nr_rnd);
	else
		atomic_dec(&zone->dev->unmap_nr_seq);

	if (dmz_is_offline(zone)) {
		dmz_zmd_warn(zmd, "Zone %u is offline", zone->id);
		zone = NULL;
		goto again;
	}
	if (dmz_is_meta(zone)) {
		dmz_zmd_warn(zmd, "Zone %u has metadata", zone->id);
		zone = NULL;
		goto again;
	}
	return zone;
}
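
/*
 * Usage sketch (illustrative only; the flag combination is an assumed
 * reclaim-style call): a caller allocating a sequential reclaim target
 * on device 0 would pass DMZ_ALLOC_RECLAIM so that, when no unmapped
 * sequential zone is left on any device, the allocation falls back to
 * the reserved zone list:
 *
 *	zone = dmz_alloc_zone(zmd, 0, DMZ_ALLOC_SEQ | DMZ_ALLOC_RECLAIM);
 */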

/*
 * Free a zone.
 * This must be called with the mapping lock held.
 */
void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	/* If this is a sequential zone, reset it */
	if (dmz_is_seq(zone))
		dmz_reset_zone(zmd, zone);

	/* Return the zone to its type unmap list */
	if (dmz_is_cache(zone)) {
		list_add_tail(&zone->link, &zmd->unmap_cache_list);
		atomic_inc(&zmd->unmap_nr_cache);
	} else if (dmz_is_rnd(zone)) {
		list_add_tail(&zone->link, &zone->dev->unmap_rnd_list);
		atomic_inc(&zone->dev->unmap_nr_rnd);
	} else if (dmz_is_reserved(zone)) {
		list_add_tail(&zone->link, &zmd->reserved_seq_zones_list);
		atomic_inc(&zmd->nr_reserved_seq_zones);
	} else {
		list_add_tail(&zone->link, &zone->dev->unmap_seq_list);
		atomic_inc(&zone->dev->unmap_nr_seq);
	}

	wake_up_all(&zmd->free_wq);
}

/*
 * Map a chunk to a zone.
 * This must be called with the mapping lock held.
 */
void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *dzone,
		  unsigned int chunk)
{
	/* Set the chunk mapping */
	dmz_set_chunk_mapping(zmd, chunk, dzone->id,
			      DMZ_MAP_UNMAPPED);
	dzone->chunk = chunk;
	if (dmz_is_cache(dzone))
		list_add_tail(&dzone->link, &zmd->map_cache_list);
	else if (dmz_is_rnd(dzone))
		list_add_tail(&dzone->link, &dzone->dev->map_rnd_list);
	else
		list_add_tail(&dzone->link, &dzone->dev->map_seq_list);
}

/*
 * Unmap a zone.
 * This must be called with the mapping lock held.
 */
void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	unsigned int chunk = zone->chunk;
	unsigned int dzone_id;

	if (chunk == DMZ_MAP_UNMAPPED) {
		/* Already unmapped */
		return;
	}

	if (test_and_clear_bit(DMZ_BUF, &zone->flags)) {
		/*
		 * Unmapping the chunk buffer zone: clear only
		 * the chunk buffer mapping
		 */
		dzone_id = zone->bzone->id;
		zone->bzone->bzone = NULL;
		zone->bzone = NULL;

	} else {
		/*
		 * Unmapping the chunk data zone: the zone must
		 * not be buffered.
		 */
		if (WARN_ON(zone->bzone)) {
			zone->bzone->bzone = NULL;
			zone->bzone = NULL;
		}
		dzone_id = DMZ_MAP_UNMAPPED;
	}

	dmz_set_chunk_mapping(zmd, chunk, dzone_id, DMZ_MAP_UNMAPPED);

	zone->chunk = DMZ_MAP_UNMAPPED;
	list_del_init(&zone->link);
}

/*
 * Set @nr_bits bits in @bitmap starting from @bit.
 * Return the number of bits changed from 0 to 1.
 */
static unsigned int dmz_set_bits(unsigned long *bitmap,
				 unsigned int bit, unsigned int nr_bits)
{
	unsigned long *addr;
	unsigned int end = bit + nr_bits;
	unsigned int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			/* Try to set the whole word at once */
			addr = bitmap + BIT_WORD(bit);
			if (*addr == 0) {
				*addr = ULONG_MAX;
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (!test_and_set_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}
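
/*
 * Worked example (illustrative only): on a 64-bit build, setting bits
 * [60, 200) of an all-zero bitmap handles bits 60-63 one at a time
 * with test_and_set_bit(), takes the whole-word fast path for words 1
 * and 2 (bits 64-191), then finishes bits 192-199 bit by bit,
 * returning n = 140 newly set bits.
 */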

/*
 * Get the bitmap block storing the bit for chunk_block in zone.
 */
static struct dmz_mblock *dmz_get_bitmap(struct dmz_metadata *zmd,
					 struct dm_zone *zone,
					 sector_t chunk_block)
{
	sector_t bitmap_block = 1 + zmd->nr_map_blocks +
		(sector_t)(zone->id * zmd->zone_nr_bitmap_blocks) +
		(chunk_block >> DMZ_BLOCK_SHIFT_BITS);

	return dmz_get_mblock(zmd, bitmap_block);
}
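
/*
 * Worked example (hypothetical geometry, illustrative only): with 4 KB
 * blocks a bitmap block holds 32768 bits, so DMZ_BLOCK_SHIFT_BITS is
 * 15. For nr_map_blocks = 8, zone->id = 3, zone_nr_bitmap_blocks = 2
 * and chunk_block = 40000, the bit lives in metadata block
 * 1 + 8 + 3 * 2 + (40000 >> 15) = 16, i.e. the second bitmap block of
 * zone 3.
 */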

/*
 * Copy the valid blocks bitmap of from_zone to the bitmap of to_zone.
 */
int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
			  struct dm_zone *to_zone)
{
	struct dmz_mblock *from_mblk, *to_mblk;
	sector_t chunk_block = 0;

	/* Get the zones bitmap blocks */
	while (chunk_block < zmd->zone_nr_blocks) {
		from_mblk = dmz_get_bitmap(zmd, from_zone, chunk_block);
		if (IS_ERR(from_mblk))
			return PTR_ERR(from_mblk);
		to_mblk = dmz_get_bitmap(zmd, to_zone, chunk_block);
		if (IS_ERR(to_mblk)) {
			dmz_release_mblock(zmd, from_mblk);
			return PTR_ERR(to_mblk);
		}

		memcpy(to_mblk->data, from_mblk->data, DMZ_BLOCK_SIZE);
		dmz_dirty_mblock(zmd, to_mblk);

		dmz_release_mblock(zmd, to_mblk);
		dmz_release_mblock(zmd, from_mblk);

		chunk_block += zmd->zone_bits_per_mblk;
	}

	to_zone->weight = from_zone->weight;

	return 0;
}

/*
 * Merge the valid blocks bitmap of from_zone into the bitmap of to_zone,
 * starting from chunk_block.
 */
int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
			   struct dm_zone *to_zone, sector_t chunk_block)
{
	unsigned int nr_blocks;
	int ret;

	/* Get the zones bitmap blocks */
	while (chunk_block < zmd->zone_nr_blocks) {
		/* Get a valid region from the source zone */
		ret = dmz_first_valid_block(zmd, from_zone, &chunk_block);
		if (ret <= 0)
			return ret;

		nr_blocks = ret;
		ret = dmz_validate_blocks(zmd, to_zone, chunk_block, nr_blocks);
		if (ret)
			return ret;

		chunk_block += nr_blocks;
	}

	return 0;
}

/*
 * Validate all the blocks in the range [block..block+nr_blocks-1].
 */
int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
			sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int count, bit, nr_bits;
	unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
	struct dmz_mblock *mblk;
	unsigned int n = 0;

	dmz_zmd_debug(zmd, "=> VALIDATE zone %u, block %llu, %u blocks",
		      zone->id, (unsigned long long)chunk_block,
		      nr_blocks);

	WARN_ON(chunk_block + nr_blocks > zone_nr_blocks);

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Set bits */
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);

		count = dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits);
		if (count) {
			dmz_dirty_mblock(zmd, mblk);
			n += count;
		}
		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	if (likely(zone->weight + n <= zone_nr_blocks))
		zone->weight += n;
	else {
		dmz_zmd_warn(zmd, "Zone %u: weight %u should be <= %u",
			     zone->id, zone->weight,
			     zone_nr_blocks - n);
		zone->weight = zone_nr_blocks;
	}

	return 0;
}
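
/*
 * Usage sketch (hypothetical caller, illustrative only): after
 * successfully writing 8 blocks at offset 100 into a zone, the I/O
 * path marks them valid:
 *
 *	ret = dmz_validate_blocks(zmd, zone, 100, 8);
 *
 * When the same chunk blocks were valid in another zone (e.g. in the
 * data zone while writing to its buffer zone), the caller pairs this
 * with dmz_invalidate_blocks() on that other zone.
 */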

/*
 * Clear nr_bits bits in bitmap starting from bit.
 * Return the number of bits cleared.
 */
static int dmz_clear_bits(unsigned long *bitmap, int bit, int nr_bits)
{
	unsigned long *addr;
	int end = bit + nr_bits;
	int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			/* Try to clear whole word at once */
			addr = bitmap + BIT_WORD(bit);
			if (*addr == ULONG_MAX) {
				*addr = 0;
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (test_and_clear_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}

/*
 * Invalidate all the blocks in the range [block..block+nr_blocks-1].
 */
int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int count, bit, nr_bits;
	struct dmz_mblock *mblk;
	unsigned int n = 0;

	dmz_zmd_debug(zmd, "=> INVALIDATE zone %u, block %llu, %u blocks",
		      zone->id, (u64)chunk_block, nr_blocks);

	WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Clear bits */
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);

		count = dmz_clear_bits((unsigned long *)mblk->data,
				       bit, nr_bits);
		if (count) {
			dmz_dirty_mblock(zmd, mblk);
			n += count;
		}
		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	if (zone->weight >= n)
		zone->weight -= n;
	else {
		dmz_zmd_warn(zmd, "Zone %u: weight %u should be >= %u",
			     zone->id, zone->weight, n);
		zone->weight = 0;
	}

	return 0;
}

/*
 * Get a block bit value.
 */
static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t chunk_block)
{
	struct dmz_mblock *mblk;
	int ret;

	WARN_ON(chunk_block >= zmd->zone_nr_blocks);

	/* Get bitmap block */
	mblk = dmz_get_bitmap(zmd, zone, chunk_block);
	if (IS_ERR(mblk))
		return PTR_ERR(mblk);

	/* Get offset */
	ret = test_bit(chunk_block & DMZ_BLOCK_MASK_BITS,
		       (unsigned long *) mblk->data) != 0;

	dmz_release_mblock(zmd, mblk);

	return ret;
}

/*
 * Return the number of blocks from chunk_block to the first block with a bit
 * value specified by set. Search at most nr_blocks blocks from chunk_block.
 */
static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
				 sector_t chunk_block, unsigned int nr_blocks,
				 int set)
{
	struct dmz_mblock *mblk;
	unsigned int bit, set_bit, nr_bits;
	unsigned int zone_bits = zmd->zone_bits_per_mblk;
	unsigned long *bitmap;
	int n = 0;

	WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Get offset */
		bitmap = (unsigned long *) mblk->data;
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zone_bits - bit);
		if (set)
			set_bit = find_next_bit(bitmap, zone_bits, bit);
		else
			set_bit = find_next_zero_bit(bitmap, zone_bits, bit);
		dmz_release_mblock(zmd, mblk);

		n += set_bit - bit;
		if (set_bit < zone_bits)
			break;

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	return n;
}

/*
 * Test if chunk_block is valid. If it is, the number of consecutive
 * valid blocks from chunk_block will be returned.
 */
int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
		    sector_t chunk_block)
{
	int valid;

	valid = dmz_test_block(zmd, zone, chunk_block);
	if (valid <= 0)
		return valid;

	/* The block is valid: get the number of valid blocks from block */
	return dmz_to_next_set_block(zmd, zone, chunk_block,
				     zmd->zone_nr_blocks - chunk_block, 0);
}
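
/*
 * Worked example (hypothetical bitmap contents, illustrative only):
 * if blocks 20-27 of a zone are valid and block 28 is not, then
 * dmz_block_valid(zmd, zone, 20) returns 8 (the length of the valid
 * run), while dmz_block_valid(zmd, zone, 28) returns 0.
 */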

/*
 * Find the first valid block from @chunk_block in @zone.
 * If such a block is found, its number is returned using
 * @chunk_block and the total number of valid blocks from @chunk_block
 * is returned.
 */
int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t *chunk_block)
{
	sector_t start_block = *chunk_block;
	int ret;

	ret = dmz_to_next_set_block(zmd, zone, start_block,
				    zmd->zone_nr_blocks - start_block, 1);
	if (ret < 0)
		return ret;

	start_block += ret;
	*chunk_block = start_block;

	return dmz_to_next_set_block(zmd, zone, start_block,
				     zmd->zone_nr_blocks - start_block, 0);
}
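
/*
 * Worked example (hypothetical bitmap contents, illustrative only):
 * if a zone's validity bitmap is 0 for blocks 0-9, 1 for blocks 10-14
 * and 0 afterwards, then calling dmz_first_valid_block() with
 * *chunk_block == 0 skips the 10 invalid blocks, updates *chunk_block
 * to 10 and returns 5, the length of the valid run.
 */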

/*
 * Count the number of bits set starting from bit up to bit + nr_bits - 1.
 */
static int dmz_count_bits(void *bitmap, int bit, int nr_bits)
{
	unsigned long *addr;
	int end = bit + nr_bits;
	int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			addr = (unsigned long *)bitmap + BIT_WORD(bit);
			if (*addr == ULONG_MAX) {
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (test_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}

/*
 * Get a zone weight, that is, the number of valid blocks in the zone.
 */
static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	struct dmz_mblock *mblk;
	sector_t chunk_block = 0;
	unsigned int bit, nr_bits;
	unsigned int nr_blocks = zmd->zone_nr_blocks;
	void *bitmap;
	int n = 0;

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk)) {
			n = 0;
			break;
		}

		/* Count bits in this block */
		bitmap = mblk->data;
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
		n += dmz_count_bits(bitmap, bit, nr_bits);

		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	zone->weight = n;
}

/*
 * Cleanup the zoned metadata resources.
 */
static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
{
	struct rb_root *root;
	struct dmz_mblock *mblk, *next;
	int i;

	/* Release zone mapping resources */
	if (zmd->map_mblk) {
		for (i = 0; i < zmd->nr_map_blocks; i++)
			dmz_release_mblock(zmd, zmd->map_mblk[i]);
		kfree(zmd->map_mblk);
		zmd->map_mblk = NULL;
	}

	/* Release super blocks */
	for (i = 0; i < 2; i++) {
		if (zmd->sb[i].mblk) {
			dmz_free_mblock(zmd, zmd->sb[i].mblk);
			zmd->sb[i].mblk = NULL;
		}
	}

	/* Free cached blocks */
	while (!list_empty(&zmd->mblk_dirty_list)) {
		mblk = list_first_entry(&zmd->mblk_dirty_list,
					struct dmz_mblock, link);
		dmz_zmd_warn(zmd, "mblock %llu still in dirty list (ref %u)",
			     (u64)mblk->no, mblk->ref);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
	}

	while (!list_empty(&zmd->mblk_lru_list)) {
		mblk = list_first_entry(&zmd->mblk_lru_list,
					struct dmz_mblock, link);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
	}

	/* Sanity checks: the mblock rbtree should now be empty */
	root = &zmd->mblk_rbtree;
	rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
		dmz_zmd_warn(zmd, "mblock %llu ref %u still in rbtree",
			     (u64)mblk->no, mblk->ref);
		mblk->ref = 0;
		dmz_free_mblock(zmd, mblk);
	}

	/* Free the zone descriptors */
	dmz_drop_zones(zmd);

	mutex_destroy(&zmd->mblk_flush_lock);
	mutex_destroy(&zmd->map_lock);
}

static void dmz_print_dev(struct dmz_metadata *zmd, int num)
{
	struct dmz_dev *dev = &zmd->dev[num];

	if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE)
		dmz_dev_info(dev, "Regular block device");
	else
		dmz_dev_info(dev, "Host-%s zoned block device",
			     bdev_zoned_model(dev->bdev) == BLK_ZONED_HA ?
			     "aware" : "managed");
	if (zmd->sb_version > 1) {
		sector_t sector_offset =
			dev->zone_offset << zmd->zone_nr_sectors_shift;

		dmz_dev_info(dev, "  %llu 512-byte logical sectors (offset %llu)",
			     (u64)dev->capacity, (u64)sector_offset);
		dmz_dev_info(dev, "  %u zones of %llu 512-byte logical sectors (offset %llu)",
			     dev->nr_zones, (u64)zmd->zone_nr_sectors,
			     (u64)dev->zone_offset);
	} else {
		dmz_dev_info(dev, "  %llu 512-byte logical sectors",
			     (u64)dev->capacity);
		dmz_dev_info(dev, "  %u zones of %llu 512-byte logical sectors",
			     dev->nr_zones, (u64)zmd->zone_nr_sectors);
	}
}

/*
 * Initialize the zoned metadata.
 */
int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
		     struct dmz_metadata **metadata,
		     const char *devname)
{
	struct dmz_metadata *zmd;
	unsigned int i;
	struct dm_zone *zone;
	int ret;

	zmd = kzalloc(sizeof(struct dmz_metadata), GFP_KERNEL);
	if (!zmd)
		return -ENOMEM;

	strcpy(zmd->devname, devname);
	zmd->dev = dev;
	zmd->nr_devs = num_dev;
	zmd->mblk_rbtree = RB_ROOT;
	init_rwsem(&zmd->mblk_sem);
	mutex_init(&zmd->mblk_flush_lock);
	spin_lock_init(&zmd->mblk_lock);
	INIT_LIST_HEAD(&zmd->mblk_lru_list);
	INIT_LIST_HEAD(&zmd->mblk_dirty_list);

	mutex_init(&zmd->map_lock);

	atomic_set(&zmd->unmap_nr_cache, 0);
	INIT_LIST_HEAD(&zmd->unmap_cache_list);
	INIT_LIST_HEAD(&zmd->map_cache_list);

	atomic_set(&zmd->nr_reserved_seq_zones, 0);
	INIT_LIST_HEAD(&zmd->reserved_seq_zones_list);

	init_waitqueue_head(&zmd->free_wq);

	/* Initialize zone descriptors */
	ret = dmz_init_zones(zmd);
	if (ret)
		goto err;

	/* Get super block */
	ret = dmz_load_sb(zmd);
	if (ret)
		goto err;

	/* Set metadata zones starting from sb_zone */
	for (i = 0; i < zmd->nr_meta_zones << 1; i++) {
		zone = dmz_get(zmd, zmd->sb[0].zone->id + i);
		if (!zone) {
			dmz_zmd_err(zmd,
				    "metadata zone %u not present", i);
			ret = -ENXIO;
			goto err;
		}
		if (!dmz_is_rnd(zone) && !dmz_is_cache(zone)) {
			dmz_zmd_err(zmd,
				    "metadata zone %d is not random", i);
			ret = -ENXIO;
			goto err;
		}
		set_bit(DMZ_META, &zone->flags);
	}
	/* Load mapping table */
	ret = dmz_load_mapping(zmd);
	if (ret)
		goto err;

	/*
	 * Cache size boundaries: allow at least 2 super blocks, the chunk map
	 * blocks and enough blocks to be able to cache the bitmap blocks of
	 * up to 16 zones when idle (min_nr_mblks). Otherwise, if busy, allow
	 * the cache to add 512 more metadata blocks.
	 */
	zmd->min_nr_mblks = 2 + zmd->nr_map_blocks + zmd->zone_nr_bitmap_blocks * 16;
	zmd->max_nr_mblks = zmd->min_nr_mblks + 512;
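
	/*
	 * Worked example (hypothetical geometry, illustrative only): with
	 * 256 MB zones (65536 blocks, i.e. 2 bitmap blocks per zone) and
	 * 16 chunk mapping blocks, min_nr_mblks is 2 + 16 + 2 * 16 = 50
	 * cached blocks and max_nr_mblks is 562.
	 */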

	/* Metadata cache shrinker */
	zmd->mblk_shrinker = shrinker_alloc(0, "dm-zoned-meta:(%u:%u)",
					    MAJOR(dev->bdev->bd_dev),
					    MINOR(dev->bdev->bd_dev));
	if (!zmd->mblk_shrinker) {
		ret = -ENOMEM;
		dmz_zmd_err(zmd, "Allocate metadata cache shrinker failed");
		goto err;
	}

	zmd->mblk_shrinker->count_objects = dmz_mblock_shrinker_count;
	zmd->mblk_shrinker->scan_objects = dmz_mblock_shrinker_scan;
	zmd->mblk_shrinker->private_data = zmd;

	shrinker_register(zmd->mblk_shrinker);

	dmz_zmd_info(zmd, "DM-Zoned metadata version %d", zmd->sb_version);
	for (i = 0; i < zmd->nr_devs; i++)
		dmz_print_dev(zmd, i);

	dmz_zmd_info(zmd, "  %u zones of %llu 512-byte logical sectors",
		     zmd->nr_zones, (u64)zmd->zone_nr_sectors);
	dmz_zmd_debug(zmd, "  %u metadata zones",
		      zmd->nr_meta_zones * 2);
	dmz_zmd_debug(zmd, "  %u data zones for %u chunks",
		      zmd->nr_data_zones, zmd->nr_chunks);
	dmz_zmd_debug(zmd, "    %u cache zones (%u unmapped)",
		      zmd->nr_cache, atomic_read(&zmd->unmap_nr_cache));
	for (i = 0; i < zmd->nr_devs; i++) {
		dmz_zmd_debug(zmd, "    %u random zones (%u unmapped)",
			      dmz_nr_rnd_zones(zmd, i),
			      dmz_nr_unmap_rnd_zones(zmd, i));
		dmz_zmd_debug(zmd, "    %u sequential zones (%u unmapped)",
			      dmz_nr_seq_zones(zmd, i),
			      dmz_nr_unmap_seq_zones(zmd, i));
	}
	dmz_zmd_debug(zmd, "  %u reserved sequential data zones",
		      zmd->nr_reserved_seq);
	dmz_zmd_debug(zmd, "Format:");
	dmz_zmd_debug(zmd, "%u metadata blocks per set (%u max cache)",
		      zmd->nr_meta_blocks, zmd->max_nr_mblks);
	dmz_zmd_debug(zmd, "  %u data zone mapping blocks",
		      zmd->nr_map_blocks);
	dmz_zmd_debug(zmd, "  %u bitmap blocks",
		      zmd->nr_bitmap_blocks);

	*metadata = zmd;

	return 0;
err:
	dmz_cleanup_metadata(zmd);
	kfree(zmd);
	*metadata = NULL;

	return ret;
}

/*
 * Cleanup the zoned metadata resources.
 */
void dmz_dtr_metadata(struct dmz_metadata *zmd)
{
	shrinker_free(zmd->mblk_shrinker);
	dmz_cleanup_metadata(zmd);
	kfree(zmd);
}

/*
 * Check zone information on resume.
 */
int dmz_resume_metadata(struct dmz_metadata *zmd)
{
	struct dm_zone *zone;
	sector_t wp_block;
	unsigned int i;
	int ret;

	/* Check zones */
	for (i = 0; i < zmd->nr_zones; i++) {
		zone = dmz_get(zmd, i);
		if (!zone) {
			dmz_zmd_err(zmd, "Unable to get zone %u", i);
			return -EIO;
		}
		wp_block = zone->wp_block;

		ret = dmz_update_zone(zmd, zone);
		if (ret) {
			dmz_zmd_err(zmd, "Broken zone %u", i);
			return ret;
		}

		if (dmz_is_offline(zone)) {
			dmz_zmd_warn(zmd, "Zone %u is offline", i);
			continue;
		}

		/* Check write pointer */
		if (!dmz_is_seq(zone))
			zone->wp_block = 0;
		else if (zone->wp_block != wp_block) {
			dmz_zmd_err(zmd, "Zone %u: Invalid wp (%llu / %llu)",
				    i, (u64)zone->wp_block, (u64)wp_block);
			zone->wp_block = wp_block;
			dmz_invalidate_blocks(zmd, zone, zone->wp_block,
					      zmd->zone_nr_blocks - zone->wp_block);
		}
	}

	return 0;
}