1 | // SPDX-License-Identifier: GPL-2.0 |
2 | |
3 | #include <linux/bitops.h> |
4 | #include <linux/slab.h> |
5 | #include <linux/blkdev.h> |
6 | #include <linux/sched/mm.h> |
7 | #include <linux/atomic.h> |
8 | #include <linux/vmalloc.h> |
9 | #include "ctree.h" |
10 | #include "volumes.h" |
11 | #include "zoned.h" |
12 | #include "rcu-string.h" |
13 | #include "disk-io.h" |
14 | #include "block-group.h" |
15 | #include "dev-replace.h" |
16 | #include "space-info.h" |
17 | #include "fs.h" |
18 | #include "accessors.h" |
19 | #include "bio.h" |
20 | |
21 | /* Maximum number of zones to report per blkdev_report_zones() call */ |
22 | #define BTRFS_REPORT_NR_ZONES 4096 |
23 | /* Invalid allocation pointer value for missing devices */ |
24 | #define WP_MISSING_DEV ((u64)-1) |
25 | /* Pseudo write pointer value for conventional zone */ |
26 | #define WP_CONVENTIONAL ((u64)-2) |
27 | |
28 | /* |
29 | * Location of the first zone of superblock logging zone pairs. |
30 | * |
31 | * - primary superblock: 0B (zone 0) |
32 | * - first copy: 512G (zone starting at that offset) |
33 | * - second copy: 4T (zone starting at that offset) |
34 | */ |
35 | #define BTRFS_SB_LOG_PRIMARY_OFFSET (0ULL) |
36 | #define BTRFS_SB_LOG_FIRST_OFFSET (512ULL * SZ_1G) |
37 | #define BTRFS_SB_LOG_SECOND_OFFSET (4096ULL * SZ_1G) |
38 | |
39 | #define BTRFS_SB_LOG_FIRST_SHIFT const_ilog2(BTRFS_SB_LOG_FIRST_OFFSET) |
40 | #define BTRFS_SB_LOG_SECOND_SHIFT const_ilog2(BTRFS_SB_LOG_SECOND_OFFSET) |
41 | |
42 | /* Number of superblock log zones */ |
43 | #define BTRFS_NR_SB_LOG_ZONES 2 |
44 | |
45 | /* |
 * Minimum number of active zones we need:
47 | * |
48 | * - BTRFS_SUPER_MIRROR_MAX zones for superblock mirrors |
49 | * - 3 zones to ensure at least one zone per SYSTEM, META and DATA block group |
50 | * - 1 zone for tree-log dedicated block group |
51 | * - 1 zone for relocation |
52 | */ |
53 | #define BTRFS_MIN_ACTIVE_ZONES (BTRFS_SUPER_MIRROR_MAX + 5) |
54 | |
55 | /* |
56 | * Minimum / maximum supported zone size. Currently, SMR disks have a zone |
57 | * size of 256MiB, and we are expecting ZNS drives to be in the 1-4GiB range. |
58 | * We do not expect the zone size to become larger than 8GiB or smaller than |
59 | * 4MiB in the near future. |
60 | */ |
61 | #define BTRFS_MAX_ZONE_SIZE SZ_8G |
62 | #define BTRFS_MIN_ZONE_SIZE SZ_4M |
63 | |
64 | #define SUPER_INFO_SECTORS ((u64)BTRFS_SUPER_INFO_SIZE >> SECTOR_SHIFT) |
65 | |
66 | static void wait_eb_writebacks(struct btrfs_block_group *block_group); |
67 | static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written); |
68 | |
69 | static inline bool sb_zone_is_full(const struct blk_zone *zone) |
70 | { |
71 | return (zone->cond == BLK_ZONE_COND_FULL) || |
72 | (zone->wp + SUPER_INFO_SECTORS > zone->start + zone->capacity); |
73 | } |
74 | |
75 | static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data) |
76 | { |
77 | struct blk_zone *zones = data; |
78 | |
79 | memcpy(&zones[idx], zone, sizeof(*zone)); |
80 | |
81 | return 0; |
82 | } |
83 | |
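/*
 * Determine the superblock log write pointer from a pair of superblock log
 * zones. Returns 0 with *wp_ret set to the next write position, -ENOENT when
 * both zones are empty (no superblock written yet), or -EUCLEAN on an invalid
 * combination of zone states. See the state table in the function body.
 */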
84 | static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones, |
85 | u64 *wp_ret) |
86 | { |
87 | bool empty[BTRFS_NR_SB_LOG_ZONES]; |
88 | bool full[BTRFS_NR_SB_LOG_ZONES]; |
89 | sector_t sector; |
90 | int i; |
91 | |
92 | for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) { |
93 | ASSERT(zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL); |
94 | empty[i] = (zones[i].cond == BLK_ZONE_COND_EMPTY); |
		full[i] = sb_zone_is_full(&zones[i]);
96 | } |
97 | |
98 | /* |
99 | * Possible states of log buffer zones |
100 | * |
101 | * Empty[0] In use[0] Full[0] |
102 | * Empty[1] * 0 1 |
103 | * In use[1] x x 1 |
104 | * Full[1] 0 0 C |
105 | * |
106 | * Log position: |
107 | * *: Special case, no superblock is written |
108 | * 0: Use write pointer of zones[0] |
109 | * 1: Use write pointer of zones[1] |
110 | * C: Compare super blocks from zones[0] and zones[1], use the latest |
111 | * one determined by generation |
112 | * x: Invalid state |
113 | */ |
114 | |
115 | if (empty[0] && empty[1]) { |
116 | /* Special case to distinguish no superblock to read */ |
117 | *wp_ret = zones[0].start << SECTOR_SHIFT; |
118 | return -ENOENT; |
119 | } else if (full[0] && full[1]) { |
120 | /* Compare two super blocks */ |
121 | struct address_space *mapping = bdev->bd_inode->i_mapping; |
122 | struct page *page[BTRFS_NR_SB_LOG_ZONES]; |
123 | struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES]; |
124 | int i; |
125 | |
126 | for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) { |
127 | u64 zone_end = (zones[i].start + zones[i].capacity) << SECTOR_SHIFT; |
128 | u64 bytenr = ALIGN_DOWN(zone_end, BTRFS_SUPER_INFO_SIZE) - |
129 | BTRFS_SUPER_INFO_SIZE; |
130 | |
			page[i] = read_cache_page_gfp(mapping,
					bytenr >> PAGE_SHIFT, GFP_NOFS);
			if (IS_ERR(page[i])) {
				if (i == 1)
					btrfs_release_disk_super(super[0]);
				return PTR_ERR(page[i]);
137 | } |
138 | super[i] = page_address(page[i]); |
139 | } |
140 | |
		if (btrfs_super_generation(super[0]) >
		    btrfs_super_generation(super[1]))
143 | sector = zones[1].start; |
144 | else |
145 | sector = zones[0].start; |
146 | |
147 | for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) |
			btrfs_release_disk_super(super[i]);
149 | } else if (!full[0] && (empty[1] || full[1])) { |
150 | sector = zones[0].wp; |
151 | } else if (full[0]) { |
152 | sector = zones[1].wp; |
153 | } else { |
154 | return -EUCLEAN; |
155 | } |
156 | *wp_ret = sector << SECTOR_SHIFT; |
157 | return 0; |
158 | } |
159 | |
160 | /* |
161 | * Get the first zone number of the superblock mirror |
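 *
 * For example, with a 256MiB zone size (shift == 28), mirror 1 at the
 * 512GiB offset (BTRFS_SB_LOG_FIRST_SHIFT == 39) maps to zone
 * 1ULL << (39 - 28) == 2048.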
162 | */ |
163 | static inline u32 sb_zone_number(int shift, int mirror) |
164 | { |
165 | u64 zone = U64_MAX; |
166 | |
167 | ASSERT(mirror < BTRFS_SUPER_MIRROR_MAX); |
168 | switch (mirror) { |
169 | case 0: zone = 0; break; |
170 | case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break; |
171 | case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break; |
172 | } |
173 | |
174 | ASSERT(zone <= U32_MAX); |
175 | |
176 | return (u32)zone; |
177 | } |
178 | |
179 | static inline sector_t zone_start_sector(u32 zone_number, |
180 | struct block_device *bdev) |
181 | { |
182 | return (sector_t)zone_number << ilog2(bdev_zone_sectors(bdev)); |
183 | } |
184 | |
185 | static inline u64 zone_start_physical(u32 zone_number, |
186 | struct btrfs_zoned_device_info *zone_info) |
187 | { |
188 | return (u64)zone_number << zone_info->zone_size_shift; |
189 | } |
190 | |
191 | /* |
192 | * Emulate blkdev_report_zones() for a non-zoned device. It slices up the block |
 * device into static-sized chunks and fakes a conventional zone on each of
194 | * them. |
195 | */ |
196 | static int emulate_report_zones(struct btrfs_device *device, u64 pos, |
197 | struct blk_zone *zones, unsigned int nr_zones) |
198 | { |
199 | const sector_t zone_sectors = device->fs_info->zone_size >> SECTOR_SHIFT; |
	sector_t bdev_size = bdev_nr_sectors(device->bdev);
201 | unsigned int i; |
202 | |
203 | pos >>= SECTOR_SHIFT; |
204 | for (i = 0; i < nr_zones; i++) { |
205 | zones[i].start = i * zone_sectors + pos; |
206 | zones[i].len = zone_sectors; |
207 | zones[i].capacity = zone_sectors; |
208 | zones[i].wp = zones[i].start + zone_sectors; |
209 | zones[i].type = BLK_ZONE_TYPE_CONVENTIONAL; |
210 | zones[i].cond = BLK_ZONE_COND_NOT_WP; |
211 | |
212 | if (zones[i].wp >= bdev_size) { |
213 | i++; |
214 | break; |
215 | } |
216 | } |
217 | |
218 | return i; |
219 | } |
220 | |
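/*
 * Report zones starting at @pos. Prefer the in-memory zone cache when it is
 * populated, fall back to blkdev_report_zones() otherwise, and emulate
 * conventional zones for non-zoned devices.
 */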
221 | static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos, |
222 | struct blk_zone *zones, unsigned int *nr_zones) |
223 | { |
224 | struct btrfs_zoned_device_info *zinfo = device->zone_info; |
225 | int ret; |
226 | |
227 | if (!*nr_zones) |
228 | return 0; |
229 | |
	if (!bdev_is_zoned(device->bdev)) {
		ret = emulate_report_zones(device, pos, zones, *nr_zones);
232 | *nr_zones = ret; |
233 | return 0; |
234 | } |
235 | |
236 | /* Check cache */ |
237 | if (zinfo->zone_cache) { |
238 | unsigned int i; |
239 | u32 zno; |
240 | |
241 | ASSERT(IS_ALIGNED(pos, zinfo->zone_size)); |
242 | zno = pos >> zinfo->zone_size_shift; |
243 | /* |
		 * We cannot report zones beyond the zone end. So, it is OK to cap
		 * *nr_zones to the number of zones remaining until the device end.
246 | */ |
247 | *nr_zones = min_t(u32, *nr_zones, zinfo->nr_zones - zno); |
248 | |
249 | for (i = 0; i < *nr_zones; i++) { |
250 | struct blk_zone *zone_info; |
251 | |
252 | zone_info = &zinfo->zone_cache[zno + i]; |
253 | if (!zone_info->len) |
254 | break; |
255 | } |
256 | |
257 | if (i == *nr_zones) { |
258 | /* Cache hit on all the zones */ |
259 | memcpy(zones, zinfo->zone_cache + zno, |
260 | sizeof(*zinfo->zone_cache) * *nr_zones); |
261 | return 0; |
262 | } |
263 | } |
264 | |
	ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
				  copy_zone_info_cb, zones);
	if (ret < 0) {
		btrfs_err_in_rcu(device->fs_info,
				 "zoned: failed to read zone %llu on %s (devid %llu)",
				 pos, rcu_str_deref(device->name),
				 device->devid);
272 | return ret; |
273 | } |
274 | *nr_zones = ret; |
275 | if (!ret) |
276 | return -EIO; |
277 | |
278 | /* Populate cache */ |
279 | if (zinfo->zone_cache) { |
280 | u32 zno = pos >> zinfo->zone_size_shift; |
281 | |
282 | memcpy(zinfo->zone_cache + zno, zones, |
283 | sizeof(*zinfo->zone_cache) * *nr_zones); |
284 | } |
285 | |
286 | return 0; |
287 | } |
288 | |
/* The emulated zone size is determined from the size of the first device extent */
290 | static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info) |
291 | { |
292 | struct btrfs_path *path; |
293 | struct btrfs_root *root = fs_info->dev_root; |
294 | struct btrfs_key key; |
295 | struct extent_buffer *leaf; |
296 | struct btrfs_dev_extent *dext; |
297 | int ret = 0; |
298 | |
299 | key.objectid = 1; |
300 | key.type = BTRFS_DEV_EXTENT_KEY; |
301 | key.offset = 0; |
302 | |
303 | path = btrfs_alloc_path(); |
304 | if (!path) |
305 | return -ENOMEM; |
306 | |
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
308 | if (ret < 0) |
309 | goto out; |
310 | |
	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
312 | ret = btrfs_next_leaf(root, path); |
313 | if (ret < 0) |
314 | goto out; |
315 | /* No dev extents at all? Not good */ |
316 | if (ret > 0) { |
317 | ret = -EUCLEAN; |
318 | goto out; |
319 | } |
320 | } |
321 | |
322 | leaf = path->nodes[0]; |
323 | dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); |
	fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);
325 | ret = 0; |
326 | |
327 | out: |
	btrfs_free_path(path);
329 | |
330 | return ret; |
331 | } |
332 | |
333 | int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info) |
334 | { |
335 | struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; |
336 | struct btrfs_device *device; |
337 | int ret = 0; |
338 | |
	/* fs_info->zone_size might not be set yet. Use the incompat flag here. */
340 | if (!btrfs_fs_incompat(fs_info, ZONED)) |
341 | return 0; |
342 | |
343 | mutex_lock(&fs_devices->device_list_mutex); |
344 | list_for_each_entry(device, &fs_devices->devices, dev_list) { |
		/* We can skip reading zone info for missing devices */
346 | if (!device->bdev) |
347 | continue; |
348 | |
		ret = btrfs_get_dev_zone_info(device, true);
350 | if (ret) |
351 | break; |
352 | } |
	mutex_unlock(&fs_devices->device_list_mutex);
354 | |
355 | return ret; |
356 | } |
357 | |
358 | int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache) |
359 | { |
360 | struct btrfs_fs_info *fs_info = device->fs_info; |
361 | struct btrfs_zoned_device_info *zone_info = NULL; |
362 | struct block_device *bdev = device->bdev; |
363 | unsigned int max_active_zones; |
364 | unsigned int nactive; |
365 | sector_t nr_sectors; |
366 | sector_t sector = 0; |
367 | struct blk_zone *zones = NULL; |
368 | unsigned int i, nreported = 0, nr_zones; |
369 | sector_t zone_sectors; |
370 | char *model, *emulated; |
371 | int ret; |
372 | |
373 | /* |
374 | * Cannot use btrfs_is_zoned here, since fs_info::zone_size might not |
375 | * yet be set. |
376 | */ |
377 | if (!btrfs_fs_incompat(fs_info, ZONED)) |
378 | return 0; |
379 | |
380 | if (device->zone_info) |
381 | return 0; |
382 | |
	zone_info = kzalloc(sizeof(*zone_info), GFP_KERNEL);
384 | if (!zone_info) |
385 | return -ENOMEM; |
386 | |
387 | device->zone_info = zone_info; |
388 | |
389 | if (!bdev_is_zoned(bdev)) { |
390 | if (!fs_info->zone_size) { |
391 | ret = calculate_emulated_zone_size(fs_info); |
392 | if (ret) |
393 | goto out; |
394 | } |
395 | |
396 | ASSERT(fs_info->zone_size); |
397 | zone_sectors = fs_info->zone_size >> SECTOR_SHIFT; |
398 | } else { |
399 | zone_sectors = bdev_zone_sectors(bdev); |
400 | } |
401 | |
402 | ASSERT(is_power_of_two_u64(zone_sectors)); |
403 | zone_info->zone_size = zone_sectors << SECTOR_SHIFT; |
404 | |
	/* We reject devices with a zone size larger than 8GiB */
406 | if (zone_info->zone_size > BTRFS_MAX_ZONE_SIZE) { |
		btrfs_err_in_rcu(fs_info,
		"zoned: %s: zone size %llu larger than supported maximum %llu",
				 rcu_str_deref(device->name),
				 zone_info->zone_size, BTRFS_MAX_ZONE_SIZE);
411 | ret = -EINVAL; |
412 | goto out; |
413 | } else if (zone_info->zone_size < BTRFS_MIN_ZONE_SIZE) { |
		btrfs_err_in_rcu(fs_info,
		"zoned: %s: zone size %llu smaller than supported minimum %u",
				 rcu_str_deref(device->name),
				 zone_info->zone_size, BTRFS_MIN_ZONE_SIZE);
418 | ret = -EINVAL; |
419 | goto out; |
420 | } |
421 | |
422 | nr_sectors = bdev_nr_sectors(bdev); |
423 | zone_info->zone_size_shift = ilog2(zone_info->zone_size); |
424 | zone_info->nr_zones = nr_sectors >> ilog2(zone_sectors); |
425 | if (!IS_ALIGNED(nr_sectors, zone_sectors)) |
426 | zone_info->nr_zones++; |
427 | |
428 | max_active_zones = bdev_max_active_zones(bdev); |
429 | if (max_active_zones && max_active_zones < BTRFS_MIN_ACTIVE_ZONES) { |
		btrfs_err_in_rcu(fs_info,
"zoned: %s: max active zones %u is too small, need at least %u active zones",
				 rcu_str_deref(device->name), max_active_zones,
				 BTRFS_MIN_ACTIVE_ZONES);
434 | ret = -EINVAL; |
435 | goto out; |
436 | } |
437 | zone_info->max_active_zones = max_active_zones; |
438 | |
	zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
440 | if (!zone_info->seq_zones) { |
441 | ret = -ENOMEM; |
442 | goto out; |
443 | } |
444 | |
	zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
446 | if (!zone_info->empty_zones) { |
447 | ret = -ENOMEM; |
448 | goto out; |
449 | } |
450 | |
	zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
452 | if (!zone_info->active_zones) { |
453 | ret = -ENOMEM; |
454 | goto out; |
455 | } |
456 | |
	zones = kvcalloc(BTRFS_REPORT_NR_ZONES, sizeof(struct blk_zone), GFP_KERNEL);
458 | if (!zones) { |
459 | ret = -ENOMEM; |
460 | goto out; |
461 | } |
462 | |
463 | /* |
464 | * Enable zone cache only for a zoned device. On a non-zoned device, we |
465 | * fill the zone info with emulated CONVENTIONAL zones, so no need to |
466 | * use the cache. |
467 | */ |
	if (populate_cache && bdev_is_zoned(device->bdev)) {
		zone_info->zone_cache = vcalloc(zone_info->nr_zones,
						sizeof(struct blk_zone));
		if (!zone_info->zone_cache) {
			btrfs_err_in_rcu(device->fs_info,
				"zoned: failed to allocate zone cache for %s",
				rcu_str_deref(device->name));
475 | ret = -ENOMEM; |
476 | goto out; |
477 | } |
478 | } |
479 | |
	/* Get zone types */
481 | nactive = 0; |
482 | while (sector < nr_sectors) { |
483 | nr_zones = BTRFS_REPORT_NR_ZONES; |
		ret = btrfs_get_dev_zones(device, sector << SECTOR_SHIFT, zones,
					  &nr_zones);
486 | if (ret) |
487 | goto out; |
488 | |
489 | for (i = 0; i < nr_zones; i++) { |
490 | if (zones[i].type == BLK_ZONE_TYPE_SEQWRITE_REQ) |
491 | __set_bit(nreported, zone_info->seq_zones); |
492 | switch (zones[i].cond) { |
493 | case BLK_ZONE_COND_EMPTY: |
494 | __set_bit(nreported, zone_info->empty_zones); |
495 | break; |
496 | case BLK_ZONE_COND_IMP_OPEN: |
497 | case BLK_ZONE_COND_EXP_OPEN: |
498 | case BLK_ZONE_COND_CLOSED: |
499 | __set_bit(nreported, zone_info->active_zones); |
500 | nactive++; |
501 | break; |
502 | } |
503 | nreported++; |
504 | } |
505 | sector = zones[nr_zones - 1].start + zones[nr_zones - 1].len; |
506 | } |
507 | |
508 | if (nreported != zone_info->nr_zones) { |
		btrfs_err_in_rcu(device->fs_info,
				 "inconsistent number of zones on %s (%u/%u)",
				 rcu_str_deref(device->name), nreported,
				 zone_info->nr_zones);
513 | ret = -EIO; |
514 | goto out; |
515 | } |
516 | |
517 | if (max_active_zones) { |
518 | if (nactive > max_active_zones) { |
			btrfs_err_in_rcu(device->fs_info,
			"zoned: %u active zones on %s exceeds max_active_zones %u",
				nactive, rcu_str_deref(device->name),
				max_active_zones);
523 | ret = -EIO; |
524 | goto out; |
525 | } |
		atomic_set(&zone_info->active_zones_left,
			   max_active_zones - nactive);
		set_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags);
529 | } |
530 | |
531 | /* Validate superblock log */ |
532 | nr_zones = BTRFS_NR_SB_LOG_ZONES; |
533 | for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { |
534 | u32 sb_zone; |
535 | u64 sb_wp; |
536 | int sb_pos = BTRFS_NR_SB_LOG_ZONES * i; |
537 | |
		sb_zone = sb_zone_number(zone_info->zone_size_shift, i);
539 | if (sb_zone + 1 >= zone_info->nr_zones) |
540 | continue; |
541 | |
		ret = btrfs_get_dev_zones(device,
					  zone_start_physical(sb_zone, zone_info),
					  &zone_info->sb_zones[sb_pos],
					  &nr_zones);
546 | if (ret) |
547 | goto out; |
548 | |
549 | if (nr_zones != BTRFS_NR_SB_LOG_ZONES) { |
			btrfs_err_in_rcu(device->fs_info,
	"zoned: failed to read super block log zone info at devid %llu zone %u",
				device->devid, sb_zone);
553 | ret = -EUCLEAN; |
554 | goto out; |
555 | } |
556 | |
557 | /* |
558 | * If zones[0] is conventional, always use the beginning of the |
559 | * zone to record superblock. No need to validate in that case. |
560 | */ |
561 | if (zone_info->sb_zones[BTRFS_NR_SB_LOG_ZONES * i].type == |
562 | BLK_ZONE_TYPE_CONVENTIONAL) |
563 | continue; |
564 | |
		ret = sb_write_pointer(device->bdev,
				       &zone_info->sb_zones[sb_pos], &sb_wp);
567 | if (ret != -ENOENT && ret) { |
			btrfs_err_in_rcu(device->fs_info,
			"zoned: super block log zone corrupted devid %llu zone %u",
				device->devid, sb_zone);
571 | ret = -EUCLEAN; |
572 | goto out; |
573 | } |
574 | } |
575 | |
	kvfree(zones);
578 | |
579 | if (bdev_is_zoned(bdev)) { |
		model = "host-managed zoned";
		emulated = "";
	} else {
		model = "regular";
		emulated = "emulated ";
585 | } |
586 | |
	btrfs_info_in_rcu(fs_info,
		"%s block device %s, %u %szones of %llu bytes",
		model, rcu_str_deref(device->name), zone_info->nr_zones,
		emulated, zone_info->zone_size);
591 | |
592 | return 0; |
593 | |
594 | out: |
	kvfree(zones);
596 | btrfs_destroy_dev_zone_info(device); |
597 | return ret; |
598 | } |
599 | |
600 | void btrfs_destroy_dev_zone_info(struct btrfs_device *device) |
601 | { |
602 | struct btrfs_zoned_device_info *zone_info = device->zone_info; |
603 | |
604 | if (!zone_info) |
605 | return; |
606 | |
	bitmap_free(zone_info->active_zones);
	bitmap_free(zone_info->seq_zones);
	bitmap_free(zone_info->empty_zones);
	vfree(zone_info->zone_cache);
	kfree(zone_info);
612 | device->zone_info = NULL; |
613 | } |
614 | |
615 | struct btrfs_zoned_device_info *btrfs_clone_dev_zone_info(struct btrfs_device *orig_dev) |
616 | { |
617 | struct btrfs_zoned_device_info *zone_info; |
618 | |
	zone_info = kmemdup(orig_dev->zone_info, sizeof(*zone_info), GFP_KERNEL);
620 | if (!zone_info) |
621 | return NULL; |
622 | |
	zone_info->seq_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
624 | if (!zone_info->seq_zones) |
625 | goto out; |
626 | |
	bitmap_copy(zone_info->seq_zones, orig_dev->zone_info->seq_zones,
		    zone_info->nr_zones);
629 | |
	zone_info->empty_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
631 | if (!zone_info->empty_zones) |
632 | goto out; |
633 | |
	bitmap_copy(zone_info->empty_zones, orig_dev->zone_info->empty_zones,
		    zone_info->nr_zones);
636 | |
	zone_info->active_zones = bitmap_zalloc(zone_info->nr_zones, GFP_KERNEL);
638 | if (!zone_info->active_zones) |
639 | goto out; |
640 | |
	bitmap_copy(zone_info->active_zones, orig_dev->zone_info->active_zones,
		    zone_info->nr_zones);
643 | zone_info->zone_cache = NULL; |
644 | |
645 | return zone_info; |
646 | |
647 | out: |
	bitmap_free(zone_info->seq_zones);
	bitmap_free(zone_info->empty_zones);
	bitmap_free(zone_info->active_zones);
	kfree(zone_info);
652 | return NULL; |
653 | } |
654 | |
655 | int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos, |
656 | struct blk_zone *zone) |
657 | { |
658 | unsigned int nr_zones = 1; |
659 | int ret; |
660 | |
	ret = btrfs_get_dev_zones(device, pos, zone, &nr_zones);
662 | if (ret != 0 || !nr_zones) |
663 | return ret ? ret : -EIO; |
664 | |
665 | return 0; |
666 | } |
667 | |
668 | static int btrfs_check_for_zoned_device(struct btrfs_fs_info *fs_info) |
669 | { |
670 | struct btrfs_device *device; |
671 | |
672 | list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) { |
		if (device->bdev && bdev_is_zoned(device->bdev)) {
			btrfs_err(fs_info,
				"zoned: mode not enabled but zoned device found: %pg",
				device->bdev);
677 | return -EINVAL; |
678 | } |
679 | } |
680 | |
681 | return 0; |
682 | } |
683 | |
684 | int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info) |
685 | { |
686 | struct queue_limits *lim = &fs_info->limits; |
687 | struct btrfs_device *device; |
688 | u64 zone_size = 0; |
689 | int ret; |
690 | |
691 | /* |
692 | * Host-Managed devices can't be used without the ZONED flag. With the |
	 * ZONED flag, all devices can be used, using zone emulation if required.
694 | */ |
695 | if (!btrfs_fs_incompat(fs_info, ZONED)) |
696 | return btrfs_check_for_zoned_device(fs_info); |
697 | |
698 | blk_set_stacking_limits(lim); |
699 | |
700 | list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) { |
701 | struct btrfs_zoned_device_info *zone_info = device->zone_info; |
702 | |
703 | if (!device->bdev) |
704 | continue; |
705 | |
706 | if (!zone_size) { |
707 | zone_size = zone_info->zone_size; |
708 | } else if (zone_info->zone_size != zone_size) { |
			btrfs_err(fs_info,
		"zoned: unequal block device zone sizes: have %llu found %llu",
				  zone_info->zone_size, zone_size);
712 | return -EINVAL; |
713 | } |
714 | |
715 | /* |
		 * With the zoned emulation, we can have a non-zoned device in
717 | * zoned mode. In this case, we don't have a valid max zone |
718 | * append size. |
719 | */ |
720 | if (bdev_is_zoned(bdev: device->bdev)) { |
			blk_stack_limits(lim,
					 &bdev_get_queue(device->bdev)->limits,
					 0);
724 | } |
725 | } |
726 | |
727 | /* |
728 | * stripe_size is always aligned to BTRFS_STRIPE_LEN in |
729 | * btrfs_create_chunk(). Since we want stripe_len == zone_size, |
730 | * check the alignment here. |
731 | */ |
732 | if (!IS_ALIGNED(zone_size, BTRFS_STRIPE_LEN)) { |
		btrfs_err(fs_info,
			  "zoned: zone size %llu not aligned to stripe %u",
			  zone_size, BTRFS_STRIPE_LEN);
736 | return -EINVAL; |
737 | } |
738 | |
739 | if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) { |
		btrfs_err(fs_info, "zoned: mixed block groups not supported");
741 | return -EINVAL; |
742 | } |
743 | |
744 | fs_info->zone_size = zone_size; |
745 | /* |
746 | * Also limit max_zone_append_size by max_segments * PAGE_SIZE. |
747 | * Technically, we can have multiple pages per segment. But, since |
748 | * we add the pages one by one to a bio, and cannot increase the |
749 | * metadata reservation even if it increases the number of extents, it |
750 | * is safe to stick with the limit. |
751 | */ |
752 | fs_info->max_zone_append_size = ALIGN_DOWN( |
753 | min3((u64)lim->max_zone_append_sectors << SECTOR_SHIFT, |
754 | (u64)lim->max_sectors << SECTOR_SHIFT, |
755 | (u64)lim->max_segments << PAGE_SHIFT), |
756 | fs_info->sectorsize); |
757 | fs_info->fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_ZONED; |
758 | if (fs_info->max_zone_append_size < fs_info->max_extent_size) |
759 | fs_info->max_extent_size = fs_info->max_zone_append_size; |
760 | |
761 | /* |
762 | * Check mount options here, because we might change fs_info->zoned |
763 | * from fs_info->zone_size. |
764 | */ |
	ret = btrfs_check_mountopts_zoned(fs_info, &fs_info->mount_opt);
766 | if (ret) |
767 | return ret; |
768 | |
	btrfs_info(fs_info, "zoned mode enabled with zone size %llu", zone_size);
770 | return 0; |
771 | } |
772 | |
773 | int btrfs_check_mountopts_zoned(struct btrfs_fs_info *info, unsigned long *mount_opt) |
774 | { |
	if (!btrfs_is_zoned(info))
776 | return 0; |
777 | |
778 | /* |
779 | * Space cache writing is not COWed. Disable that to avoid write errors |
780 | * in sequential zones. |
781 | */ |
782 | if (btrfs_raw_test_opt(*mount_opt, SPACE_CACHE)) { |
		btrfs_err(info, "zoned: space cache v1 is not supported");
784 | return -EINVAL; |
785 | } |
786 | |
787 | if (btrfs_raw_test_opt(*mount_opt, NODATACOW)) { |
		btrfs_err(info, "zoned: NODATACOW not supported");
789 | return -EINVAL; |
790 | } |
791 | |
792 | if (btrfs_raw_test_opt(*mount_opt, DISCARD_ASYNC)) { |
		btrfs_info(info,
			"zoned: async discard ignored and disabled for zoned mode");
795 | btrfs_clear_opt(*mount_opt, DISCARD_ASYNC); |
796 | } |
797 | |
798 | return 0; |
799 | } |
800 | |
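/*
 * Return the byte offset of the superblock within a log zone pair. For a
 * WRITE, also reset the zone that is about to be overwritten if it is full.
 * For a READ, step back one superblock from the write pointer to point at
 * the latest written copy.
 */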
801 | static int sb_log_location(struct block_device *bdev, struct blk_zone *zones, |
802 | int rw, u64 *bytenr_ret) |
803 | { |
804 | u64 wp; |
805 | int ret; |
806 | |
807 | if (zones[0].type == BLK_ZONE_TYPE_CONVENTIONAL) { |
808 | *bytenr_ret = zones[0].start << SECTOR_SHIFT; |
809 | return 0; |
810 | } |
811 | |
	ret = sb_write_pointer(bdev, zones, &wp);
813 | if (ret != -ENOENT && ret < 0) |
814 | return ret; |
815 | |
816 | if (rw == WRITE) { |
817 | struct blk_zone *reset = NULL; |
818 | |
819 | if (wp == zones[0].start << SECTOR_SHIFT) |
820 | reset = &zones[0]; |
821 | else if (wp == zones[1].start << SECTOR_SHIFT) |
822 | reset = &zones[1]; |
823 | |
824 | if (reset && reset->cond != BLK_ZONE_COND_EMPTY) { |
825 | unsigned int nofs_flags; |
826 | |
827 | ASSERT(sb_zone_is_full(reset)); |
828 | |
829 | nofs_flags = memalloc_nofs_save(); |
			ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
					       reset->start, reset->len);
			memalloc_nofs_restore(nofs_flags);
833 | if (ret) |
834 | return ret; |
835 | |
836 | reset->cond = BLK_ZONE_COND_EMPTY; |
837 | reset->wp = reset->start; |
838 | } |
839 | } else if (ret != -ENOENT) { |
840 | /* |
841 | * For READ, we want the previous one. Move write pointer to |
842 | * the end of a zone, if it is at the head of a zone. |
843 | */ |
844 | u64 zone_end = 0; |
845 | |
846 | if (wp == zones[0].start << SECTOR_SHIFT) |
847 | zone_end = zones[1].start + zones[1].capacity; |
848 | else if (wp == zones[1].start << SECTOR_SHIFT) |
849 | zone_end = zones[0].start + zones[0].capacity; |
850 | if (zone_end) |
851 | wp = ALIGN_DOWN(zone_end << SECTOR_SHIFT, |
852 | BTRFS_SUPER_INFO_SIZE); |
853 | |
854 | wp -= BTRFS_SUPER_INFO_SIZE; |
855 | } |
856 | |
857 | *bytenr_ret = wp; |
858 | return 0; |
}
861 | |
862 | int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw, |
863 | u64 *bytenr_ret) |
864 | { |
865 | struct blk_zone zones[BTRFS_NR_SB_LOG_ZONES]; |
866 | sector_t zone_sectors; |
867 | u32 sb_zone; |
868 | int ret; |
869 | u8 zone_sectors_shift; |
870 | sector_t nr_sectors; |
871 | u32 nr_zones; |
872 | |
873 | if (!bdev_is_zoned(bdev)) { |
874 | *bytenr_ret = btrfs_sb_offset(mirror); |
875 | return 0; |
876 | } |
877 | |
878 | ASSERT(rw == READ || rw == WRITE); |
879 | |
880 | zone_sectors = bdev_zone_sectors(bdev); |
	if (!is_power_of_2(zone_sectors))
882 | return -EINVAL; |
883 | zone_sectors_shift = ilog2(zone_sectors); |
884 | nr_sectors = bdev_nr_sectors(bdev); |
885 | nr_zones = nr_sectors >> zone_sectors_shift; |
886 | |
	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
888 | if (sb_zone + 1 >= nr_zones) |
889 | return -ENOENT; |
890 | |
	ret = blkdev_report_zones(bdev, zone_start_sector(sb_zone, bdev),
				  BTRFS_NR_SB_LOG_ZONES, copy_zone_info_cb,
				  zones);
894 | if (ret < 0) |
895 | return ret; |
896 | if (ret != BTRFS_NR_SB_LOG_ZONES) |
897 | return -EIO; |
898 | |
899 | return sb_log_location(bdev, zones, rw, bytenr_ret); |
900 | } |
901 | |
902 | int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw, |
903 | u64 *bytenr_ret) |
904 | { |
905 | struct btrfs_zoned_device_info *zinfo = device->zone_info; |
906 | u32 zone_num; |
907 | |
908 | /* |
909 | * For a zoned filesystem on a non-zoned block device, use the same |
910 | * super block locations as regular filesystem. Doing so, the super |
911 | * block can always be retrieved and the zoned flag of the volume |
912 | * detected from the super block information. |
913 | */ |
	if (!bdev_is_zoned(device->bdev)) {
915 | *bytenr_ret = btrfs_sb_offset(mirror); |
916 | return 0; |
917 | } |
918 | |
	zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
920 | if (zone_num + 1 >= zinfo->nr_zones) |
921 | return -ENOENT; |
922 | |
	return sb_log_location(device->bdev,
			       &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror],
			       rw, bytenr_ret);
926 | } |
927 | |
928 | static inline bool is_sb_log_zone(struct btrfs_zoned_device_info *zinfo, |
929 | int mirror) |
930 | { |
931 | u32 zone_num; |
932 | |
933 | if (!zinfo) |
934 | return false; |
935 | |
	zone_num = sb_zone_number(zinfo->zone_size_shift, mirror);
937 | if (zone_num + 1 >= zinfo->nr_zones) |
938 | return false; |
939 | |
940 | if (!test_bit(zone_num, zinfo->seq_zones)) |
941 | return false; |
942 | |
943 | return true; |
944 | } |
945 | |
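/*
 * Advance the cached write pointer of the superblock log zones after a
 * superblock write. Explicitly finish a zone when the remaining space is too
 * small to hold another superblock copy.
 */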
946 | int btrfs_advance_sb_log(struct btrfs_device *device, int mirror) |
947 | { |
948 | struct btrfs_zoned_device_info *zinfo = device->zone_info; |
949 | struct blk_zone *zone; |
950 | int i; |
951 | |
952 | if (!is_sb_log_zone(zinfo, mirror)) |
953 | return 0; |
954 | |
955 | zone = &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror]; |
956 | for (i = 0; i < BTRFS_NR_SB_LOG_ZONES; i++) { |
		/* Advance to the next zone if this one is full */
958 | if (zone->cond == BLK_ZONE_COND_FULL) { |
959 | zone++; |
960 | continue; |
961 | } |
962 | |
963 | if (zone->cond == BLK_ZONE_COND_EMPTY) |
964 | zone->cond = BLK_ZONE_COND_IMP_OPEN; |
965 | |
966 | zone->wp += SUPER_INFO_SECTORS; |
967 | |
968 | if (sb_zone_is_full(zone)) { |
969 | /* |
970 | * No room left to write new superblock. Since |
971 | * superblock is written with REQ_SYNC, it is safe to |
972 | * finish the zone now. |
973 | * |
974 | * If the write pointer is exactly at the capacity, |
975 | * explicit ZONE_FINISH is not necessary. |
976 | */ |
977 | if (zone->wp != zone->start + zone->capacity) { |
978 | unsigned int nofs_flags; |
979 | int ret; |
980 | |
981 | nofs_flags = memalloc_nofs_save(); |
				ret = blkdev_zone_mgmt(device->bdev,
						REQ_OP_ZONE_FINISH, zone->start,
						zone->len);
				memalloc_nofs_restore(nofs_flags);
986 | if (ret) |
987 | return ret; |
988 | } |
989 | |
990 | zone->wp = zone->start + zone->len; |
991 | zone->cond = BLK_ZONE_COND_FULL; |
992 | } |
993 | return 0; |
994 | } |
995 | |
996 | /* All the zones are FULL. Should not reach here. */ |
997 | ASSERT(0); |
998 | return -EIO; |
999 | } |
1000 | |
1001 | int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror) |
1002 | { |
1003 | unsigned int nofs_flags; |
1004 | sector_t zone_sectors; |
1005 | sector_t nr_sectors; |
1006 | u8 zone_sectors_shift; |
1007 | u32 sb_zone; |
1008 | u32 nr_zones; |
1009 | int ret; |
1010 | |
1011 | zone_sectors = bdev_zone_sectors(bdev); |
1012 | zone_sectors_shift = ilog2(zone_sectors); |
1013 | nr_sectors = bdev_nr_sectors(bdev); |
1014 | nr_zones = nr_sectors >> zone_sectors_shift; |
1015 | |
	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
1017 | if (sb_zone + 1 >= nr_zones) |
1018 | return -ENOENT; |
1019 | |
1020 | nofs_flags = memalloc_nofs_save(); |
	ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
			       zone_start_sector(sb_zone, bdev),
			       zone_sectors * BTRFS_NR_SB_LOG_ZONES);
	memalloc_nofs_restore(nofs_flags);
1025 | return ret; |
1026 | } |
1027 | |
1028 | /* |
1029 | * Find allocatable zones within a given region. |
1030 | * |
1031 | * @device: the device to allocate a region on |
 * @hole_start: the position of the hole in which to allocate the region
1033 | * @num_bytes: size of wanted region |
1034 | * @hole_end: the end of the hole |
1035 | * @return: position of allocatable zones |
1036 | * |
1037 | * Allocatable region should not contain any superblock locations. |
1038 | */ |
1039 | u64 btrfs_find_allocatable_zones(struct btrfs_device *device, u64 hole_start, |
1040 | u64 hole_end, u64 num_bytes) |
1041 | { |
1042 | struct btrfs_zoned_device_info *zinfo = device->zone_info; |
1043 | const u8 shift = zinfo->zone_size_shift; |
1044 | u64 nzones = num_bytes >> shift; |
1045 | u64 pos = hole_start; |
1046 | u64 begin, end; |
1047 | bool have_sb; |
1048 | int i; |
1049 | |
1050 | ASSERT(IS_ALIGNED(hole_start, zinfo->zone_size)); |
1051 | ASSERT(IS_ALIGNED(num_bytes, zinfo->zone_size)); |
1052 | |
1053 | while (pos < hole_end) { |
1054 | begin = pos >> shift; |
1055 | end = begin + nzones; |
1056 | |
1057 | if (end > zinfo->nr_zones) |
1058 | return hole_end; |
1059 | |
1060 | /* Check if zones in the region are all empty */ |
		if (btrfs_dev_is_sequential(device, pos) &&
		    !bitmap_test_range_all_set(zinfo->empty_zones, begin, nzones)) {
1063 | pos += zinfo->zone_size; |
1064 | continue; |
1065 | } |
1066 | |
1067 | have_sb = false; |
1068 | for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { |
1069 | u32 sb_zone; |
1070 | u64 sb_pos; |
1071 | |
			sb_zone = sb_zone_number(shift, i);
1073 | if (!(end <= sb_zone || |
1074 | sb_zone + BTRFS_NR_SB_LOG_ZONES <= begin)) { |
1075 | have_sb = true; |
				pos = zone_start_physical(
					sb_zone + BTRFS_NR_SB_LOG_ZONES, zinfo);
1078 | break; |
1079 | } |
1080 | |
1081 | /* We also need to exclude regular superblock positions */ |
			sb_pos = btrfs_sb_offset(i);
1083 | if (!(pos + num_bytes <= sb_pos || |
1084 | sb_pos + BTRFS_SUPER_INFO_SIZE <= pos)) { |
1085 | have_sb = true; |
1086 | pos = ALIGN(sb_pos + BTRFS_SUPER_INFO_SIZE, |
1087 | zinfo->zone_size); |
1088 | break; |
1089 | } |
1090 | } |
1091 | if (!have_sb) |
1092 | break; |
1093 | } |
1094 | |
1095 | return pos; |
1096 | } |
1097 | |
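/*
 * Mark the zone containing @pos as active, consuming one slot from
 * active_zones_left. Returns false when the device's active zone limit is
 * already exhausted.
 */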
1098 | static bool btrfs_dev_set_active_zone(struct btrfs_device *device, u64 pos) |
1099 | { |
1100 | struct btrfs_zoned_device_info *zone_info = device->zone_info; |
1101 | unsigned int zno = (pos >> zone_info->zone_size_shift); |
1102 | |
1103 | /* We can use any number of zones */ |
1104 | if (zone_info->max_active_zones == 0) |
1105 | return true; |
1106 | |
1107 | if (!test_bit(zno, zone_info->active_zones)) { |
1108 | /* Active zone left? */ |
		if (atomic_dec_if_positive(&zone_info->active_zones_left) < 0)
			return false;
		if (test_and_set_bit(zno, zone_info->active_zones)) {
			/* Someone already set the bit */
			atomic_inc(&zone_info->active_zones_left);
1114 | } |
1115 | } |
1116 | |
1117 | return true; |
1118 | } |
1119 | |
1120 | static void btrfs_dev_clear_active_zone(struct btrfs_device *device, u64 pos) |
1121 | { |
1122 | struct btrfs_zoned_device_info *zone_info = device->zone_info; |
1123 | unsigned int zno = (pos >> zone_info->zone_size_shift); |
1124 | |
1125 | /* We can use any number of zones */ |
1126 | if (zone_info->max_active_zones == 0) |
1127 | return; |
1128 | |
	if (test_and_clear_bit(zno, zone_info->active_zones))
		atomic_inc(&zone_info->active_zones_left);
1131 | } |
1132 | |
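/*
 * Reset the device zones in the range [@physical, @physical + @length) and
 * update the empty/active zone tracking. On success, *bytes is set to the
 * number of bytes reset.
 */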
1133 | int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical, |
1134 | u64 length, u64 *bytes) |
1135 | { |
1136 | unsigned int nofs_flags; |
1137 | int ret; |
1138 | |
1139 | *bytes = 0; |
1140 | nofs_flags = memalloc_nofs_save(); |
	ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_RESET,
			       physical >> SECTOR_SHIFT, length >> SECTOR_SHIFT);
	memalloc_nofs_restore(nofs_flags);
1144 | if (ret) |
1145 | return ret; |
1146 | |
1147 | *bytes = length; |
1148 | while (length) { |
		btrfs_dev_set_zone_empty(device, physical);
		btrfs_dev_clear_active_zone(device, physical);
1151 | physical += device->zone_info->zone_size; |
1152 | length -= device->zone_info->zone_size; |
1153 | } |
1154 | |
1155 | return 0; |
1156 | } |
1157 | |
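/*
 * Ensure all zones covering the range [@start, @start + @size) are empty,
 * resetting any sequential zone that is unexpectedly non-empty. Conventional
 * zones have no write pointer and need no reset.
 */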
1158 | int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size) |
1159 | { |
1160 | struct btrfs_zoned_device_info *zinfo = device->zone_info; |
1161 | const u8 shift = zinfo->zone_size_shift; |
1162 | unsigned long begin = start >> shift; |
1163 | unsigned long nbits = size >> shift; |
1164 | u64 pos; |
1165 | int ret; |
1166 | |
1167 | ASSERT(IS_ALIGNED(start, zinfo->zone_size)); |
1168 | ASSERT(IS_ALIGNED(size, zinfo->zone_size)); |
1169 | |
1170 | if (begin + nbits > zinfo->nr_zones) |
1171 | return -ERANGE; |
1172 | |
1173 | /* All the zones are conventional */ |
	if (bitmap_test_range_all_zero(zinfo->seq_zones, begin, nbits))
1175 | return 0; |
1176 | |
1177 | /* All the zones are sequential and empty */ |
	if (bitmap_test_range_all_set(zinfo->seq_zones, begin, nbits) &&
	    bitmap_test_range_all_set(zinfo->empty_zones, begin, nbits))
1180 | return 0; |
1181 | |
1182 | for (pos = start; pos < start + size; pos += zinfo->zone_size) { |
1183 | u64 reset_bytes; |
1184 | |
1185 | if (!btrfs_dev_is_sequential(device, pos) || |
1186 | btrfs_dev_is_empty_zone(device, pos)) |
1187 | continue; |
1188 | |
1189 | /* Free regions should be empty */ |
		btrfs_warn_in_rcu(
			device->fs_info,
		"zoned: resetting device %s (devid %llu) zone %llu for allocation",
			rcu_str_deref(device->name), device->devid, pos >> shift);
1194 | WARN_ON_ONCE(1); |
1195 | |
		ret = btrfs_reset_device_zone(device, pos, zinfo->zone_size,
					      &reset_bytes);
1198 | if (ret) |
1199 | return ret; |
1200 | } |
1201 | |
1202 | return 0; |
1203 | } |
1204 | |
1205 | /* |
1206 | * Calculate an allocation pointer from the extent allocation information |
 * for a block group consisting of conventional zones. It points to the
 * end of the highest-addressed extent in the block group as the
 * allocation offset.
1210 | */ |
1211 | static int calculate_alloc_pointer(struct btrfs_block_group *cache, |
1212 | u64 *offset_ret, bool new) |
1213 | { |
1214 | struct btrfs_fs_info *fs_info = cache->fs_info; |
1215 | struct btrfs_root *root; |
1216 | struct btrfs_path *path; |
1217 | struct btrfs_key key; |
1218 | struct btrfs_key found_key; |
1219 | int ret; |
1220 | u64 length; |
1221 | |
1222 | /* |
1223 | * Avoid tree lookups for a new block group, there's no use for it. |
1224 | * It must always be 0. |
1225 | * |
1226 | * Also, we have a lock chain of extent buffer lock -> chunk mutex. |
 * For a new block group, this function is called from
1228 | * btrfs_make_block_group() which is already taking the chunk mutex. |
1229 | * Thus, we cannot call calculate_alloc_pointer() which takes extent |
1230 | * buffer locks to avoid deadlock. |
1231 | */ |
1232 | if (new) { |
1233 | *offset_ret = 0; |
1234 | return 0; |
1235 | } |
1236 | |
1237 | path = btrfs_alloc_path(); |
1238 | if (!path) |
1239 | return -ENOMEM; |
1240 | |
1241 | key.objectid = cache->start + cache->length; |
1242 | key.type = 0; |
1243 | key.offset = 0; |
1244 | |
	root = btrfs_extent_root(fs_info, key.objectid);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1247 | /* We should not find the exact match */ |
1248 | if (!ret) |
1249 | ret = -EUCLEAN; |
1250 | if (ret < 0) |
1251 | goto out; |
1252 | |
	ret = btrfs_previous_extent_item(root, path, cache->start);
1254 | if (ret) { |
1255 | if (ret == 1) { |
1256 | ret = 0; |
1257 | *offset_ret = 0; |
1258 | } |
1259 | goto out; |
1260 | } |
1261 | |
	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
1263 | |
1264 | if (found_key.type == BTRFS_EXTENT_ITEM_KEY) |
1265 | length = found_key.offset; |
1266 | else |
1267 | length = fs_info->nodesize; |
1268 | |
1269 | if (!(found_key.objectid >= cache->start && |
1270 | found_key.objectid + length <= cache->start + cache->length)) { |
1271 | ret = -EUCLEAN; |
1272 | goto out; |
1273 | } |
1274 | *offset_ret = found_key.objectid + length - cache->start; |
1275 | ret = 0; |
1276 | |
1277 | out: |
	btrfs_free_path(path);
1279 | return ret; |
1280 | } |
1281 | |
1282 | struct zone_info { |
1283 | u64 physical; |
1284 | u64 capacity; |
1285 | u64 alloc_offset; |
1286 | }; |
1287 | |
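/*
 * Load the zone backing stripe @zone_idx of @map: record its physical
 * position, capacity and allocation offset (derived from the zone write
 * pointer), and mark the stripe in @active when the zone is active.
 */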
1288 | static int btrfs_load_zone_info(struct btrfs_fs_info *fs_info, int zone_idx, |
1289 | struct zone_info *info, unsigned long *active, |
1290 | struct btrfs_chunk_map *map) |
1291 | { |
1292 | struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; |
1293 | struct btrfs_device *device = map->stripes[zone_idx].dev; |
1294 | int dev_replace_is_ongoing = 0; |
1295 | unsigned int nofs_flag; |
1296 | struct blk_zone zone; |
1297 | int ret; |
1298 | |
1299 | info->physical = map->stripes[zone_idx].physical; |
1300 | |
1301 | if (!device->bdev) { |
1302 | info->alloc_offset = WP_MISSING_DEV; |
1303 | return 0; |
1304 | } |
1305 | |
1306 | /* Consider a zone as active if we can allow any number of active zones. */ |
1307 | if (!device->zone_info->max_active_zones) |
1308 | __set_bit(zone_idx, active); |
1309 | |
	if (!btrfs_dev_is_sequential(device, info->physical)) {
1311 | info->alloc_offset = WP_CONVENTIONAL; |
1312 | return 0; |
1313 | } |
1314 | |
1315 | /* This zone will be used for allocation, so mark this zone non-empty. */ |
	btrfs_dev_clear_zone_empty(device, info->physical);
1317 | |
	down_read(&dev_replace->rwsem);
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
		btrfs_dev_clear_zone_empty(dev_replace->tgtdev, info->physical);
	up_read(&dev_replace->rwsem);
1323 | |
1324 | /* |
1325 | * The group is mapped to a sequential zone. Get the zone write pointer |
1326 | * to determine the allocation offset within the zone. |
1327 | */ |
1328 | WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size)); |
1329 | nofs_flag = memalloc_nofs_save(); |
	ret = btrfs_get_dev_zone(device, info->physical, &zone);
	memalloc_nofs_restore(nofs_flag);
1332 | if (ret) { |
1333 | if (ret != -EIO && ret != -EOPNOTSUPP) |
1334 | return ret; |
1335 | info->alloc_offset = WP_MISSING_DEV; |
1336 | return 0; |
1337 | } |
1338 | |
1339 | if (zone.type == BLK_ZONE_TYPE_CONVENTIONAL) { |
		btrfs_err_in_rcu(fs_info,
		"zoned: unexpected conventional zone %llu on device %s (devid %llu)",
			zone.start << SECTOR_SHIFT, rcu_str_deref(device->name),
			device->devid);
1344 | return -EIO; |
1345 | } |
1346 | |
1347 | info->capacity = (zone.capacity << SECTOR_SHIFT); |
1348 | |
1349 | switch (zone.cond) { |
1350 | case BLK_ZONE_COND_OFFLINE: |
1351 | case BLK_ZONE_COND_READONLY: |
		btrfs_err(fs_info,
		"zoned: offline/readonly zone %llu on device %s (devid %llu)",
			  (info->physical >> device->zone_info->zone_size_shift),
			  rcu_str_deref(device->name), device->devid);
1356 | info->alloc_offset = WP_MISSING_DEV; |
1357 | break; |
1358 | case BLK_ZONE_COND_EMPTY: |
1359 | info->alloc_offset = 0; |
1360 | break; |
1361 | case BLK_ZONE_COND_FULL: |
1362 | info->alloc_offset = info->capacity; |
1363 | break; |
1364 | default: |
1365 | /* Partially used zone. */ |
1366 | info->alloc_offset = ((zone.wp - zone.start) << SECTOR_SHIFT); |
1367 | __set_bit(zone_idx, active); |
1368 | break; |
1369 | } |
1370 | |
1371 | return 0; |
1372 | } |
1373 | |
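/*
 * The per-profile loaders below derive a block group's alloc_offset and
 * zone_capacity from the per-stripe zone_info collected by
 * btrfs_load_zone_info().
 */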
1374 | static int btrfs_load_block_group_single(struct btrfs_block_group *bg, |
1375 | struct zone_info *info, |
1376 | unsigned long *active) |
1377 | { |
1378 | if (info->alloc_offset == WP_MISSING_DEV) { |
		btrfs_err(bg->fs_info,
			  "zoned: cannot recover write pointer for zone %llu",
			  info->physical);
1382 | return -EIO; |
1383 | } |
1384 | |
1385 | bg->alloc_offset = info->alloc_offset; |
1386 | bg->zone_capacity = info->capacity; |
1387 | if (test_bit(0, active)) |
		set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1389 | return 0; |
1390 | } |
1391 | |
1392 | static int btrfs_load_block_group_dup(struct btrfs_block_group *bg, |
1393 | struct btrfs_chunk_map *map, |
1394 | struct zone_info *zone_info, |
1395 | unsigned long *active) |
1396 | { |
1397 | struct btrfs_fs_info *fs_info = bg->fs_info; |
1398 | |
1399 | if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) { |
		btrfs_err(fs_info, "zoned: data DUP profile needs raid-stripe-tree");
1401 | return -EINVAL; |
1402 | } |
1403 | |
1404 | if (zone_info[0].alloc_offset == WP_MISSING_DEV) { |
		btrfs_err(bg->fs_info,
			  "zoned: cannot recover write pointer for zone %llu",
			  zone_info[0].physical);
1408 | return -EIO; |
1409 | } |
1410 | if (zone_info[1].alloc_offset == WP_MISSING_DEV) { |
		btrfs_err(bg->fs_info,
			  "zoned: cannot recover write pointer for zone %llu",
			  zone_info[1].physical);
1414 | return -EIO; |
1415 | } |
1416 | if (zone_info[0].alloc_offset != zone_info[1].alloc_offset) { |
		btrfs_err(bg->fs_info,
			  "zoned: write pointer offset mismatch of zones in DUP profile");
1419 | return -EIO; |
1420 | } |
1421 | |
1422 | if (test_bit(0, active) != test_bit(1, active)) { |
		if (!btrfs_zone_activate(bg))
1424 | return -EIO; |
1425 | } else if (test_bit(0, active)) { |
		set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1427 | } |
1428 | |
1429 | bg->alloc_offset = zone_info[0].alloc_offset; |
1430 | bg->zone_capacity = min(zone_info[0].capacity, zone_info[1].capacity); |
1431 | return 0; |
1432 | } |
1433 | |
1434 | static int btrfs_load_block_group_raid1(struct btrfs_block_group *bg, |
1435 | struct btrfs_chunk_map *map, |
1436 | struct zone_info *zone_info, |
1437 | unsigned long *active) |
1438 | { |
1439 | struct btrfs_fs_info *fs_info = bg->fs_info; |
1440 | int i; |
1441 | |
1442 | if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) { |
		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1444 | btrfs_bg_type_to_raid_name(map->type)); |
1445 | return -EINVAL; |
1446 | } |
1447 | |
1448 | for (i = 0; i < map->num_stripes; i++) { |
1449 | if (zone_info[i].alloc_offset == WP_MISSING_DEV || |
1450 | zone_info[i].alloc_offset == WP_CONVENTIONAL) |
1451 | continue; |
1452 | |
1453 | if ((zone_info[0].alloc_offset != zone_info[i].alloc_offset) && |
1454 | !btrfs_test_opt(fs_info, DEGRADED)) { |
			btrfs_err(fs_info,
		"zoned: write pointer offset mismatch of zones in %s profile",
				  btrfs_bg_type_to_raid_name(map->type));
1458 | return -EIO; |
1459 | } |
1460 | if (test_bit(0, active) != test_bit(i, active)) { |
			if (!btrfs_test_opt(fs_info, DEGRADED) &&
			    !btrfs_zone_activate(bg)) {
1463 | return -EIO; |
1464 | } |
1465 | } else { |
1466 | if (test_bit(0, active)) |
				set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1468 | } |
1469 | /* In case a device is missing we have a cap of 0, so don't use it. */ |
1470 | bg->zone_capacity = min_not_zero(zone_info[0].capacity, |
1471 | zone_info[1].capacity); |
1472 | } |
1473 | |
1474 | if (zone_info[0].alloc_offset != WP_MISSING_DEV) |
1475 | bg->alloc_offset = zone_info[0].alloc_offset; |
1476 | else |
1477 | bg->alloc_offset = zone_info[i - 1].alloc_offset; |
1478 | |
1479 | return 0; |
1480 | } |
1481 | |
1482 | static int btrfs_load_block_group_raid0(struct btrfs_block_group *bg, |
1483 | struct btrfs_chunk_map *map, |
1484 | struct zone_info *zone_info, |
1485 | unsigned long *active) |
1486 | { |
1487 | struct btrfs_fs_info *fs_info = bg->fs_info; |
1488 | |
1489 | if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) { |
		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1491 | btrfs_bg_type_to_raid_name(map->type)); |
1492 | return -EINVAL; |
1493 | } |
1494 | |
1495 | for (int i = 0; i < map->num_stripes; i++) { |
1496 | if (zone_info[i].alloc_offset == WP_MISSING_DEV || |
1497 | zone_info[i].alloc_offset == WP_CONVENTIONAL) |
1498 | continue; |
1499 | |
1500 | if (test_bit(0, active) != test_bit(i, active)) { |
			if (!btrfs_zone_activate(bg))
1502 | return -EIO; |
1503 | } else { |
			if (test_bit(0, active))
				set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1506 | } |
1507 | bg->zone_capacity += zone_info[i].capacity; |
1508 | bg->alloc_offset += zone_info[i].alloc_offset; |
1509 | } |
1510 | |
1511 | return 0; |
1512 | } |
1513 | |
1514 | static int btrfs_load_block_group_raid10(struct btrfs_block_group *bg, |
1515 | struct btrfs_chunk_map *map, |
1516 | struct zone_info *zone_info, |
1517 | unsigned long *active) |
1518 | { |
1519 | struct btrfs_fs_info *fs_info = bg->fs_info; |
1520 | |
1521 | if ((map->type & BTRFS_BLOCK_GROUP_DATA) && !fs_info->stripe_root) { |
		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1523 | btrfs_bg_type_to_raid_name(map->type)); |
1524 | return -EINVAL; |
1525 | } |
1526 | |
1527 | for (int i = 0; i < map->num_stripes; i++) { |
1528 | if (zone_info[i].alloc_offset == WP_MISSING_DEV || |
1529 | zone_info[i].alloc_offset == WP_CONVENTIONAL) |
1530 | continue; |
1531 | |
1532 | if (test_bit(0, active) != test_bit(i, active)) { |
			if (!btrfs_zone_activate(bg))
1534 | return -EIO; |
1535 | } else { |
			if (test_bit(0, active))
				set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &bg->runtime_flags);
1538 | } |
1539 | |
1540 | if ((i % map->sub_stripes) == 0) { |
1541 | bg->zone_capacity += zone_info[i].capacity; |
1542 | bg->alloc_offset += zone_info[i].alloc_offset; |
1543 | } |
1544 | } |
1545 | |
1546 | return 0; |
1547 | } |
1548 | |
1549 | int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new) |
1550 | { |
1551 | struct btrfs_fs_info *fs_info = cache->fs_info; |
1552 | struct btrfs_chunk_map *map; |
1553 | u64 logical = cache->start; |
1554 | u64 length = cache->length; |
1555 | struct zone_info *zone_info = NULL; |
1556 | int ret; |
1557 | int i; |
1558 | unsigned long *active = NULL; |
1559 | u64 last_alloc = 0; |
1560 | u32 num_sequential = 0, num_conventional = 0; |
1561 | |
1562 | if (!btrfs_is_zoned(fs_info)) |
1563 | return 0; |
1564 | |
1565 | /* Sanity check */ |
1566 | if (!IS_ALIGNED(length, fs_info->zone_size)) { |
		btrfs_err(fs_info,
		"zoned: block group %llu len %llu unaligned to zone size %llu",
			  logical, length, fs_info->zone_size);
1570 | return -EIO; |
1571 | } |
1572 | |
1573 | map = btrfs_find_chunk_map(fs_info, logical, length); |
1574 | if (!map) |
1575 | return -EINVAL; |
1576 | |
1577 | cache->physical_map = map; |
1578 | |
	zone_info = kcalloc(map->num_stripes, sizeof(*zone_info), GFP_NOFS);
1580 | if (!zone_info) { |
1581 | ret = -ENOMEM; |
1582 | goto out; |
1583 | } |
1584 | |
	active = bitmap_zalloc(map->num_stripes, GFP_NOFS);
1586 | if (!active) { |
1587 | ret = -ENOMEM; |
1588 | goto out; |
1589 | } |
1590 | |
1591 | for (i = 0; i < map->num_stripes; i++) { |
		ret = btrfs_load_zone_info(fs_info, i, &zone_info[i], active, map);
1593 | if (ret) |
1594 | goto out; |
1595 | |
1596 | if (zone_info[i].alloc_offset == WP_CONVENTIONAL) |
1597 | num_conventional++; |
1598 | else |
1599 | num_sequential++; |
1600 | } |
1601 | |
1602 | if (num_sequential > 0) |
		set_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
1604 | |
1605 | if (num_conventional > 0) { |
1606 | /* Zone capacity is always zone size in emulation */ |
1607 | cache->zone_capacity = cache->length; |
		ret = calculate_alloc_pointer(cache, &last_alloc, new);
1609 | if (ret) { |
			btrfs_err(fs_info,
			"zoned: failed to determine allocation offset of bg %llu",
				  cache->start);
1613 | goto out; |
1614 | } else if (map->num_stripes == num_conventional) { |
1615 | cache->alloc_offset = last_alloc; |
			set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags);
1617 | goto out; |
1618 | } |
1619 | } |
1620 | |
1621 | switch (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { |
1622 | case 0: /* single */ |
		ret = btrfs_load_block_group_single(cache, &zone_info[0], active);
1624 | break; |
1625 | case BTRFS_BLOCK_GROUP_DUP: |
		ret = btrfs_load_block_group_dup(cache, map, zone_info, active);
1627 | break; |
1628 | case BTRFS_BLOCK_GROUP_RAID1: |
1629 | case BTRFS_BLOCK_GROUP_RAID1C3: |
1630 | case BTRFS_BLOCK_GROUP_RAID1C4: |
		ret = btrfs_load_block_group_raid1(cache, map, zone_info, active);
1632 | break; |
1633 | case BTRFS_BLOCK_GROUP_RAID0: |
		ret = btrfs_load_block_group_raid0(cache, map, zone_info, active);
1635 | break; |
1636 | case BTRFS_BLOCK_GROUP_RAID10: |
		ret = btrfs_load_block_group_raid10(cache, map, zone_info, active);
1638 | break; |
1639 | case BTRFS_BLOCK_GROUP_RAID5: |
1640 | case BTRFS_BLOCK_GROUP_RAID6: |
1641 | default: |
		btrfs_err(fs_info, "zoned: profile %s not yet supported",
			  btrfs_bg_type_to_raid_name(map->type));
1644 | ret = -EINVAL; |
1645 | goto out; |
1646 | } |
1647 | |
1648 | out: |
	/* Reject non-SINGLE data profiles without RST */
1650 | if ((map->type & BTRFS_BLOCK_GROUP_DATA) && |
1651 | (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) && |
1652 | !fs_info->stripe_root) { |
		btrfs_err(fs_info, "zoned: data %s needs raid-stripe-tree",
1654 | btrfs_bg_type_to_raid_name(map->type)); |
1655 | return -EINVAL; |
1656 | } |
1657 | |
1658 | if (cache->alloc_offset > cache->zone_capacity) { |
		btrfs_err(fs_info,
"zoned: invalid write pointer %llu (larger than zone capacity %llu) in block group %llu",
			cache->alloc_offset, cache->zone_capacity,
			cache->start);
1663 | ret = -EIO; |
1664 | } |
1665 | |
1666 | /* An extent is allocated after the write pointer */ |
1667 | if (!ret && num_conventional && last_alloc > cache->alloc_offset) { |
		btrfs_err(fs_info,
			  "zoned: got wrong write pointer in BG %llu: %llu > %llu",
			  logical, last_alloc, cache->alloc_offset);
1671 | ret = -EIO; |
1672 | } |
1673 | |
1674 | if (!ret) { |
1675 | cache->meta_write_pointer = cache->alloc_offset + cache->start; |
1676 | if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags)) { |
1677 | btrfs_get_block_group(cache); |
1678 | spin_lock(lock: &fs_info->zone_active_bgs_lock); |
1679 | list_add_tail(new: &cache->active_bg_list, |
1680 | head: &fs_info->zone_active_bgs); |
1681 | spin_unlock(lock: &fs_info->zone_active_bgs_lock); |
1682 | } |
1683 | } else { |
1684 | btrfs_free_chunk_map(map: cache->physical_map); |
1685 | cache->physical_map = NULL; |
1686 | } |
1687 | bitmap_free(bitmap: active); |
1688 | kfree(objp: zone_info); |
1689 | |
1690 | return ret; |
1691 | } |
1692 | |
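/*
 * Calculate the unusable space of a zoned block group.
 *
 * On zoned mode, a region before the write pointer that was allocated and
 * then freed can never be reused until the zone is reset. Account such space
 * (alloc_offset - used), plus the capacity lost beyond zone_capacity, as
 * "zone unusable", and expose only the region between the write pointer and
 * the capacity limit as free space.
 */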
void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
{
	u64 unusable, free;

	if (!btrfs_is_zoned(cache->fs_info))
		return;

	WARN_ON(cache->bytes_super != 0);
	unusable = (cache->alloc_offset - cache->used) +
		   (cache->length - cache->zone_capacity);
	free = cache->zone_capacity - cache->alloc_offset;

	/* We only need ->free_space in ALLOC_SEQ block groups */
	cache->cached = BTRFS_CACHE_FINISHED;
	cache->free_space_ctl->free_space = free;
	cache->zone_unusable = unusable;
}

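/*
 * Decide whether a bio must be submitted with REQ_OP_ZONE_APPEND: only data
 * writes targeting a sequential-zone block group qualify. Metadata, reads,
 * and data relocation writes use regular REQ_OP_WRITE.
 */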
bool btrfs_use_zone_append(struct btrfs_bio *bbio)
{
	u64 start = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT);
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = bbio->fs_info;
	struct btrfs_block_group *cache;
	bool ret = false;

	if (!btrfs_is_zoned(fs_info))
		return false;

	if (!inode || !is_data_inode(&inode->vfs_inode))
		return false;

	if (btrfs_op(&bbio->bio) != BTRFS_MAP_WRITE)
		return false;

	/*
	 * Using REQ_OP_ZONE_APPEND for relocation can break assumptions on the
	 * extent layout the relocation code has.
	 * Furthermore, we have set aside our own block group from which only
	 * the relocation "process" can allocate, and we make sure only one
	 * process at a time can add pages to an extent that gets relocated,
	 * so it's safe to use regular REQ_OP_WRITE for this special case.
	 */
	if (btrfs_is_data_reloc_root(inode->root))
		return false;

	cache = btrfs_lookup_block_group(fs_info, start);
	ASSERT(cache);
	if (!cache)
		return false;

	ret = !!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
	btrfs_put_block_group(cache);

	return ret;
}

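/*
 * A zone-append write may complete at a different physical address than the
 * one the bio was originally mapped to. Shift the recorded logical address in
 * the ordered checksum entry by the delta between the completed and the
 * originally assigned physical address, so it can be fixed up at completion.
 */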
void btrfs_record_physical_zoned(struct btrfs_bio *bbio)
{
	const u64 physical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
	struct btrfs_ordered_sum *sum = bbio->sums;

	if (physical < bbio->orig_physical)
		sum->logical -= bbio->orig_physical - physical;
	else
		sum->logical += physical - bbio->orig_physical;
}

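/*
 * Rewrite the on-disk location of an ordered extent (and its cached extent
 * map) to the logical address the zone-append write actually completed at.
 */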
static void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered,
					u64 logical)
{
	struct extent_map_tree *em_tree = &BTRFS_I(ordered->inode)->extent_tree;
	struct extent_map *em;

	ordered->disk_bytenr = logical;

	write_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, ordered->file_offset,
				   ordered->num_bytes);
	em->block_start = logical;
	free_extent_map(em);
	write_unlock(&em_tree->lock);
}

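/*
 * Split the first @len bytes off an ordered extent (and, for COW writes, off
 * its extent map) so that the front part can be finished at @logical. Returns
 * false if either split fails; the caller then marks the ordered extent as
 * errored.
 */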
static bool btrfs_zoned_split_ordered(struct btrfs_ordered_extent *ordered,
				      u64 logical, u64 len)
{
	struct btrfs_ordered_extent *new;

	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
	    split_extent_map(BTRFS_I(ordered->inode), ordered->file_offset,
			     ordered->num_bytes, len, logical))
		return false;

	new = btrfs_split_ordered_extent(ordered, len);
	if (IS_ERR(new))
		return false;
	new->disk_bytenr = logical;
	btrfs_finish_one_ordered(new);
	return true;
}

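/*
 * Fix up an ordered extent at completion time on zoned filesystems. Walk the
 * recorded checksum entries, merge physically contiguous ranges, split the
 * ordered extent wherever the zone-append writes landed discontiguously, and
 * rewrite the logical address if the remaining part moved.
 */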
void btrfs_finish_ordered_zoned(struct btrfs_ordered_extent *ordered)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_ordered_sum *sum;
	u64 logical, len;

	/*
	 * A write to a pre-allocated region is used for data relocation and
	 * thus uses a regular WRITE operation. No split/rewrite is necessary.
	 */
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
		return;

	/*
	 * The ordered->list could only be empty in the pre-alloc case, which
	 * returned above, so it must hold at least one entry here.
	 */
	ASSERT(!list_empty(&ordered->list));
	sum = list_first_entry(&ordered->list, struct btrfs_ordered_sum, list);
	logical = sum->logical;
	len = sum->len;

	while (len < ordered->disk_num_bytes) {
		sum = list_next_entry(sum, list);
		if (sum->logical == logical + len) {
			len += sum->len;
			continue;
		}
		if (!btrfs_zoned_split_ordered(ordered, logical, len)) {
			set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
			btrfs_err(fs_info, "failed to split ordered extent");
			goto out;
		}
		logical = sum->logical;
		len = sum->len;
	}

	if (ordered->disk_bytenr != logical)
		btrfs_rewrite_logical_zoned(ordered, logical);

out:
	/*
	 * If we end up here for nodatasum I/O, the btrfs_ordered_sum structures
	 * were allocated by btrfs_alloc_dummy_sum only to record the logical
	 * addresses and don't contain actual checksums. We thus must free them
	 * here so that we don't attempt to log the csums later.
	 */
	if ((inode->flags & BTRFS_INODE_NODATASUM) ||
	    test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state)) {
		while ((sum = list_first_entry_or_null(&ordered->list,
						       typeof(*sum), list))) {
			list_del(&sum->list);
			kfree(sum);
		}
	}
}

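/*
 * Check that the block group an extent buffer is written to is active, and
 * try to activate it if not. For the dedicated tree-log block group we may
 * finish another zone to free up an active-zone resource; for regular
 * metadata/system block groups we pivot the tracked active block group
 * (*active_bg) to this one, finishing the previous one first.
 */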
static bool check_bg_is_active(struct btrfs_eb_write_context *ctx,
			       struct btrfs_block_group **active_bg)
{
	const struct writeback_control *wbc = ctx->wbc;
	struct btrfs_block_group *block_group = ctx->zoned_bg;
	struct btrfs_fs_info *fs_info = block_group->fs_info;

	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
		return true;

	if (fs_info->treelog_bg == block_group->start) {
		if (!btrfs_zone_activate(block_group)) {
			int ret_fin = btrfs_zone_finish_one_bg(fs_info);

			if (ret_fin != 1 || !btrfs_zone_activate(block_group))
				return false;
		}
	} else if (*active_bg != block_group) {
		struct btrfs_block_group *tgt = *active_bg;

		/* zoned_meta_io_lock protects fs_info->active_{meta,system}_bg. */
		lockdep_assert_held(&fs_info->zoned_meta_io_lock);

		if (tgt) {
			/*
			 * If there is unsent IO left in the allocated area,
			 * we cannot wait for it, as that may cause a deadlock.
			 */
			if (tgt->meta_write_pointer < tgt->start + tgt->alloc_offset) {
				if (wbc->sync_mode == WB_SYNC_NONE ||
				    (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync))
					return false;
			}

			/* Pivot active metadata/system block group. */
			btrfs_zoned_meta_io_unlock(fs_info);
			wait_eb_writebacks(tgt);
			do_zone_finish(tgt, true);
			btrfs_zoned_meta_io_lock(fs_info);
			if (*active_bg == tgt) {
				btrfs_put_block_group(tgt);
				*active_bg = NULL;
			}
		}
		if (!btrfs_zone_activate(block_group))
			return false;
		if (*active_bg != block_group) {
			ASSERT(*active_bg == NULL);
			*active_bg = block_group;
			btrfs_get_block_group(block_group);
		}
	}

	return true;
}

/*
 * Check if @ctx->eb is aligned to the write pointer.
 *
 * Return:
 *   0: @ctx->eb is at the write pointer. You can write it.
 *   -EAGAIN: There is a hole. The caller should handle the case.
 *   -EBUSY: There is a hole, but the caller can just bail out.
 */
int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
				   struct btrfs_eb_write_context *ctx)
{
	const struct writeback_control *wbc = ctx->wbc;
	const struct extent_buffer *eb = ctx->eb;
	struct btrfs_block_group *block_group = ctx->zoned_bg;

	if (!btrfs_is_zoned(fs_info))
		return 0;

	if (block_group) {
		if (block_group->start > eb->start ||
		    block_group->start + block_group->length <= eb->start) {
			btrfs_put_block_group(block_group);
			block_group = NULL;
			ctx->zoned_bg = NULL;
		}
	}

	if (!block_group) {
		block_group = btrfs_lookup_block_group(fs_info, eb->start);
		if (!block_group)
			return 0;
		ctx->zoned_bg = block_group;
	}

	if (block_group->meta_write_pointer == eb->start) {
		struct btrfs_block_group **tgt;

		if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags))
			return 0;

		if (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM)
			tgt = &fs_info->active_system_bg;
		else
			tgt = &fs_info->active_meta_bg;
		if (check_bg_is_active(ctx, tgt))
			return 0;
	}

	/*
	 * Since we may release fs_info->zoned_meta_io_lock, someone can already
	 * start writing this eb. In that case, we can just bail out.
	 */
	if (block_group->meta_write_pointer > eb->start)
		return -EBUSY;

	/* If for_sync, this hole will be filled with a transaction commit. */
	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
		return -EAGAIN;
	return -EBUSY;
}

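/*
 * Zero out a region of a device's sequential write required zone. Zeroing is
 * only meaningful there; conventional zones get -EOPNOTSUPP.
 */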
int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
{
	if (!btrfs_dev_is_sequential(device, physical))
		return -EOPNOTSUPP;

	return blkdev_issue_zeroout(device->bdev, physical >> SECTOR_SHIFT,
				    length >> SECTOR_SHIFT, GFP_NOFS, 0);
}

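/*
 * Read the zone containing @logical from the first live mirror that responds.
 * RAID5/6 is rejected, as no single device zone backs a logical address there.
 */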
static int read_zone_info(struct btrfs_fs_info *fs_info, u64 logical,
			  struct blk_zone *zone)
{
	struct btrfs_io_context *bioc = NULL;
	u64 mapped_length = PAGE_SIZE;
	unsigned int nofs_flag;
	int nmirrors;
	int i, ret;

	ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			      &mapped_length, &bioc, NULL, NULL);
	if (ret || !bioc || mapped_length < PAGE_SIZE) {
		ret = -EIO;
		goto out_put_bioc;
	}

	if (bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		ret = -EINVAL;
		goto out_put_bioc;
	}

	nofs_flag = memalloc_nofs_save();
	nmirrors = (int)bioc->num_stripes;
	for (i = 0; i < nmirrors; i++) {
		u64 physical = bioc->stripes[i].physical;
		struct btrfs_device *dev = bioc->stripes[i].dev;

		/* Missing device */
		if (!dev->bdev)
			continue;

		ret = btrfs_get_dev_zone(dev, physical, zone);
		/* Failing device */
		if (ret == -EIO || ret == -EOPNOTSUPP)
			continue;
		break;
	}
	memalloc_nofs_restore(nofs_flag);
out_put_bioc:
	btrfs_put_bioc(bioc);
	return ret;
}

/*
 * Synchronize the write pointer in a zone at @physical_start on @tgt_dev, by
 * filling zeros between @physical_pos and the write pointer of the
 * dev-replace source device.
 */
int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
				  u64 physical_start, u64 physical_pos)
{
	struct btrfs_fs_info *fs_info = tgt_dev->fs_info;
	struct blk_zone zone;
	u64 length;
	u64 wp;
	int ret;

	if (!btrfs_dev_is_sequential(tgt_dev, physical_pos))
		return 0;

	ret = read_zone_info(fs_info, logical, &zone);
	if (ret)
		return ret;

	wp = physical_start + ((zone.wp - zone.start) << SECTOR_SHIFT);

	if (physical_pos == wp)
		return 0;

	if (physical_pos > wp)
		return -EUCLEAN;

	length = wp - physical_pos;
	return btrfs_zoned_issue_zeroout(tgt_dev, physical_pos, length);
}

/*
 * Activate block group and underlying device zones
 *
 * @block_group: the block group to activate
 *
 * Return: true on success, false otherwise
 */
bool btrfs_zone_activate(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_chunk_map *map;
	struct btrfs_device *device;
	u64 physical;
	const bool is_data = (block_group->flags & BTRFS_BLOCK_GROUP_DATA);
	bool ret;
	int i;

	if (!btrfs_is_zoned(block_group->fs_info))
		return true;

	map = block_group->physical_map;

	spin_lock(&fs_info->zone_active_bgs_lock);
	spin_lock(&block_group->lock);
	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
		ret = true;
		goto out_unlock;
	}

	/* No space left */
	if (btrfs_zoned_bg_is_full(block_group)) {
		ret = false;
		goto out_unlock;
	}

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_zoned_device_info *zinfo;
		int reserved = 0;

		device = map->stripes[i].dev;
		physical = map->stripes[i].physical;
		zinfo = device->zone_info;

		if (zinfo->max_active_zones == 0)
			continue;

		if (is_data)
			reserved = zinfo->reserved_active_zones;
		/*
		 * For the data block group, leave active zones for one
		 * metadata block group and one system block group.
		 */
		if (atomic_read(&zinfo->active_zones_left) <= reserved) {
			ret = false;
			goto out_unlock;
		}

		if (!btrfs_dev_set_active_zone(device, physical)) {
			/* Cannot activate the zone */
			ret = false;
			goto out_unlock;
		}
		if (!is_data)
			zinfo->reserved_active_zones--;
	}

	/* Successfully activated all the zones */
	set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
	spin_unlock(&block_group->lock);

	/* For the active block group list */
	btrfs_get_block_group(block_group);
	list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
	spin_unlock(&fs_info->zone_active_bgs_lock);

	return true;

out_unlock:
	spin_unlock(&block_group->lock);
	spin_unlock(&fs_info->zone_active_bgs_lock);
	return ret;
}

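/*
 * Wait for writeback of all extent buffers that live in @block_group. The
 * buffer radix tree is walked under RCU, dropping the lock around each wait.
 */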
static void wait_eb_writebacks(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	const u64 end = block_group->start + block_group->length;
	struct radix_tree_iter iter;
	struct extent_buffer *eb;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter,
				 block_group->start >> fs_info->sectorsize_bits) {
		eb = radix_tree_deref_slot(slot);
		if (!eb)
			continue;
		if (radix_tree_deref_retry(eb)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (eb->start < block_group->start)
			continue;
		if (eb->start >= end)
			break;

		slot = radix_tree_iter_resume(slot, &iter);
		rcu_read_unlock();
		wait_on_extent_buffer_writeback(eb);
		rcu_read_lock();
	}
	rcu_read_unlock();
}

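/*
 * Finish the zones backing a block group and deactivate it.
 *
 * With @fully_written == false, the block group is first marked read-only and
 * all in-flight writes (reservations, ordered extents, and for metadata,
 * extent buffer writeback) are drained before REQ_OP_ZONE_FINISH is issued.
 * A caller passing @fully_written == true asserts that the IO for the last
 * usable block has already completed, so no draining is needed.
 */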
static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_written)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_chunk_map *map;
	const bool is_metadata = (block_group->flags &
			(BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM));
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	int ret = 0;
	int i;

	spin_lock(&block_group->lock);
	if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
		spin_unlock(&block_group->lock);
		return 0;
	}

	/* Check if we have unwritten allocated space */
	if (is_metadata &&
	    block_group->start + block_group->alloc_offset > block_group->meta_write_pointer) {
		spin_unlock(&block_group->lock);
		return -EAGAIN;
	}

	/*
	 * If we are sure that the block group is full (= no more room left for
	 * new allocation) and the IO for the last usable block is completed, we
	 * don't need to wait for the other IOs. This holds because we ensure
	 * the sequential IO submissions using the ZONE_APPEND command for data
	 * and block_group->meta_write_pointer for metadata.
	 */
	if (!fully_written) {
		if (test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
			spin_unlock(&block_group->lock);
			return -EAGAIN;
		}
		spin_unlock(&block_group->lock);

		ret = btrfs_inc_block_group_ro(block_group, false);
		if (ret)
			return ret;

		/* Ensure all writes in this block group finish */
		btrfs_wait_block_group_reservations(block_group);
		/* No need to wait for NOCOW writers. Zoned mode does not allow that */
		btrfs_wait_ordered_roots(fs_info, U64_MAX, block_group->start,
					 block_group->length);
		/* Wait for extent buffers to be written. */
		if (is_metadata)
			wait_eb_writebacks(block_group);

		spin_lock(&block_group->lock);

		/*
		 * Bail out if someone already deactivated the block group, or
		 * allocated space is left in the block group.
		 */
		if (!test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
			      &block_group->runtime_flags)) {
			spin_unlock(&block_group->lock);
			btrfs_dec_block_group_ro(block_group);
			return 0;
		}

		if (block_group->reserved ||
		    test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
			     &block_group->runtime_flags)) {
			spin_unlock(&block_group->lock);
			btrfs_dec_block_group_ro(block_group);
			return -EAGAIN;
		}
	}

	clear_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
	block_group->alloc_offset = block_group->zone_capacity;
	if (block_group->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM))
		block_group->meta_write_pointer = block_group->start +
						  block_group->zone_capacity;
	block_group->free_space_ctl->free_space = 0;
	btrfs_clear_treelog_bg(block_group);
	btrfs_clear_data_reloc_bg(block_group);
	spin_unlock(&block_group->lock);

	down_read(&dev_replace->rwsem);
	map = block_group->physical_map;
	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *device = map->stripes[i].dev;
		const u64 physical = map->stripes[i].physical;
		struct btrfs_zoned_device_info *zinfo = device->zone_info;
		unsigned int nofs_flags;

		if (zinfo->max_active_zones == 0)
			continue;

		nofs_flags = memalloc_nofs_save();
		ret = blkdev_zone_mgmt(device->bdev, REQ_OP_ZONE_FINISH,
				       physical >> SECTOR_SHIFT,
				       zinfo->zone_size >> SECTOR_SHIFT);
		memalloc_nofs_restore(nofs_flags);

		if (ret) {
			up_read(&dev_replace->rwsem);
			return ret;
		}

		if (!(block_group->flags & BTRFS_BLOCK_GROUP_DATA))
			zinfo->reserved_active_zones++;
		btrfs_dev_clear_active_zone(device, physical);
	}
	up_read(&dev_replace->rwsem);

	if (!fully_written)
		btrfs_dec_block_group_ro(block_group);

	spin_lock(&fs_info->zone_active_bgs_lock);
	ASSERT(!list_empty(&block_group->active_bg_list));
	list_del_init(&block_group->active_bg_list);
	spin_unlock(&fs_info->zone_active_bgs_lock);

	/* For active_bg_list */
	btrfs_put_block_group(block_group);

	clear_and_wake_up_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);

	return 0;
}

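/*
 * Finish a block group's zones even if it is not fully written yet; a no-op
 * on non-zoned filesystems.
 */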
int btrfs_zone_finish(struct btrfs_block_group *block_group)
{
	if (!btrfs_is_zoned(block_group->fs_info))
		return 0;

	return do_zone_finish(block_group, false);
}

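/*
 * Check whether at least one device still has room to activate a new zone for
 * a block group with the given profile @flags. If not, set
 * BTRFS_FS_NEED_ZONE_FINISH so that a zone gets finished before allocating.
 */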
bool btrfs_can_activate_zone(struct btrfs_fs_devices *fs_devices, u64 flags)
{
	struct btrfs_fs_info *fs_info = fs_devices->fs_info;
	struct btrfs_device *device;
	bool ret = false;

	if (!btrfs_is_zoned(fs_info))
		return true;

	/* Check if there is a device with active zones left */
	mutex_lock(&fs_info->chunk_mutex);
	spin_lock(&fs_info->zone_active_bgs_lock);
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		struct btrfs_zoned_device_info *zinfo = device->zone_info;
		int reserved = 0;

		if (!device->bdev)
			continue;

		if (!zinfo->max_active_zones) {
			ret = true;
			break;
		}

		if (flags & BTRFS_BLOCK_GROUP_DATA)
			reserved = zinfo->reserved_active_zones;

		switch (flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
		case 0: /* single */
			ret = (atomic_read(&zinfo->active_zones_left) >= (1 + reserved));
			break;
		case BTRFS_BLOCK_GROUP_DUP:
			ret = (atomic_read(&zinfo->active_zones_left) >= (2 + reserved));
			break;
		}
		if (ret)
			break;
	}
	spin_unlock(&fs_info->zone_active_bgs_lock);
	mutex_unlock(&fs_info->chunk_mutex);

	if (!ret)
		set_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);

	return ret;
}

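/*
 * Called on write completion: if the written range leaves no room for even
 * one more minimal allocation (sectorsize for data, nodesize for metadata),
 * finish the block group's zone right away to release the active zone.
 */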
void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical, u64 length)
{
	struct btrfs_block_group *block_group;
	u64 min_alloc_bytes;

	if (!btrfs_is_zoned(fs_info))
		return;

	block_group = btrfs_lookup_block_group(fs_info, logical);
	ASSERT(block_group);

	/* No MIXED_BG on zoned btrfs. */
	if (block_group->flags & BTRFS_BLOCK_GROUP_DATA)
		min_alloc_bytes = fs_info->sectorsize;
	else
		min_alloc_bytes = fs_info->nodesize;

	/* Bail out if we can allocate more data from this block group. */
	if (logical + length + min_alloc_bytes <=
	    block_group->start + block_group->zone_capacity)
		goto out;

	do_zone_finish(block_group, true);

out:
	btrfs_put_block_group(block_group);
}

static void btrfs_zone_finish_endio_workfn(struct work_struct *work)
{
	struct btrfs_block_group *bg =
		container_of(work, struct btrfs_block_group, zone_finish_work);

	wait_on_extent_buffer_writeback(bg->last_eb);
	free_extent_buffer(bg->last_eb);
	btrfs_zone_finish_endio(bg->fs_info, bg->start, bg->length);
	btrfs_put_block_group(bg);
}

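/*
 * If an extent buffer is written so close to the end of the zone capacity
 * that not even one more buffer of the same size would fit, schedule a work
 * item that waits for its writeback and then finishes the zone.
 */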
void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
				   struct extent_buffer *eb)
{
	if (!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &bg->runtime_flags) ||
	    eb->start + eb->len * 2 <= bg->start + bg->zone_capacity)
		return;

	if (WARN_ON(bg->zone_finish_work.func == btrfs_zone_finish_endio_workfn)) {
		btrfs_err(bg->fs_info, "double scheduling of bg %llu zone finishing",
			  bg->start);
		return;
	}

	/* For the work */
	btrfs_get_block_group(bg);
	atomic_inc(&eb->refs);
	bg->last_eb = eb;
	INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn);
	queue_work(system_unbound_wq, &bg->zone_finish_work);
}

void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;

	spin_lock(&fs_info->relocation_bg_lock);
	if (fs_info->data_reloc_bg == bg->start)
		fs_info->data_reloc_bg = 0;
	spin_unlock(&fs_info->relocation_bg_lock);
}

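/*
 * Free the per-device zone report caches. They are only useful while zones
 * are repeatedly reported (e.g. while loading block groups at mount time);
 * dropping them releases the vmalloc'ed memory.
 */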
void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;

	if (!btrfs_is_zoned(fs_info))
		return;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->zone_info) {
			vfree(device->zone_info->zone_cache);
			device->zone_info->zone_cache = NULL;
		}
	}
	mutex_unlock(&fs_devices->device_list_mutex);
}

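/*
 * Decide whether background reclaim of zoned block groups should run, based
 * on the percentage of allocated space across all devices. For example,
 * 75 GiB allocated out of 100 GiB total gives a factor of 75, which triggers
 * reclaim for any bg_reclaim_threshold of 75 or less.
 */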
bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	u64 used = 0;
	u64 total = 0;
	u64 factor;

	ASSERT(btrfs_is_zoned(fs_info));

	if (fs_info->bg_reclaim_threshold == 0)
		return false;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;

		total += device->disk_total_bytes;
		used += device->bytes_used;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	factor = div64_u64(used * 100, total);
	return factor >= fs_info->bg_reclaim_threshold;
}

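/*
 * Once all relocation extents up to @logical + @length have been written,
 * clear the ZONED_DATA_RELOC flag so the block group becomes available for
 * regular allocation and zone finishing again.
 */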
void btrfs_zoned_release_data_reloc_bg(struct btrfs_fs_info *fs_info, u64 logical,
				       u64 length)
{
	struct btrfs_block_group *block_group;

	if (!btrfs_is_zoned(fs_info))
		return;

	block_group = btrfs_lookup_block_group(fs_info, logical);
	/* It should be called on a previous data relocation block group. */
	ASSERT(block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA));

	spin_lock(&block_group->lock);
	if (!test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))
		goto out;

	/* All relocation extents are written. */
	if (block_group->start + block_group->alloc_offset == logical + length) {
		/*
		 * Now, release this block group for further allocations and
		 * zone finish.
		 */
		clear_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
			  &block_group->runtime_flags);
	}

out:
	spin_unlock(&block_group->lock);
	btrfs_put_block_group(block_group);
}

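/*
 * Pick the active block group with the least remaining capacity (skipping
 * system, reserved, and relocation block groups) and finish its zone.
 *
 * Return: 1 if a block group was finished, 0 if none was eligible, and a
 * negative errno on failure.
 */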
int btrfs_zone_finish_one_bg(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group *block_group;
	struct btrfs_block_group *min_bg = NULL;
	u64 min_avail = U64_MAX;
	int ret;

	spin_lock(&fs_info->zone_active_bgs_lock);
	list_for_each_entry(block_group, &fs_info->zone_active_bgs,
			    active_bg_list) {
		u64 avail;

		spin_lock(&block_group->lock);
		if (block_group->reserved || block_group->alloc_offset == 0 ||
		    (block_group->flags & BTRFS_BLOCK_GROUP_SYSTEM) ||
		    test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags)) {
			spin_unlock(&block_group->lock);
			continue;
		}

		avail = block_group->zone_capacity - block_group->alloc_offset;
		if (min_avail > avail) {
			if (min_bg)
				btrfs_put_block_group(min_bg);
			min_bg = block_group;
			min_avail = avail;
			btrfs_get_block_group(min_bg);
		}
		spin_unlock(&block_group->lock);
	}
	spin_unlock(&fs_info->zone_active_bgs_lock);

	if (!min_bg)
		return 0;

	ret = btrfs_zone_finish(min_bg);
	btrfs_put_block_group(min_bg);

	return ret < 0 ? ret : 1;
}

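/*
 * Try to activate one existing metadata/system block group of @space_info.
 * With @do_finish == true, keep finishing nearly full block groups to free up
 * active-zone resources and retry until an activation succeeds or nothing is
 * left to finish.
 *
 * Return: 1 if a block group was activated, 0 otherwise, and a negative errno
 * on failure.
 */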
int btrfs_zoned_activate_one_bg(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				bool do_finish)
{
	struct btrfs_block_group *bg;
	int index;

	if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
		return 0;

	for (;;) {
		int ret;
		bool need_finish = false;

		down_read(&space_info->groups_sem);
		for (index = 0; index < BTRFS_NR_RAID_TYPES; index++) {
			list_for_each_entry(bg, &space_info->block_groups[index],
					    list) {
				if (!spin_trylock(&bg->lock))
					continue;
				if (btrfs_zoned_bg_is_full(bg) ||
				    test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
					     &bg->runtime_flags)) {
					spin_unlock(&bg->lock);
					continue;
				}
				spin_unlock(&bg->lock);

				if (btrfs_zone_activate(bg)) {
					up_read(&space_info->groups_sem);
					return 1;
				}

				need_finish = true;
			}
		}
		up_read(&space_info->groups_sem);

		if (!do_finish || !need_finish)
			break;

		ret = btrfs_zone_finish_one_bg(fs_info);
		if (ret == 0)
			break;
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * Reserve zones for one metadata block group, one tree-log block group, and
 * one system block group.
 */
void btrfs_check_active_zone_reservation(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_block_group *block_group;
	struct btrfs_device *device;
	/* Reserve zones for normal SINGLE metadata and tree-log block group. */
	unsigned int metadata_reserve = 2;
	/* Reserve a zone for SINGLE system block group. */
	unsigned int system_reserve = 1;

	if (!test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &fs_info->flags))
		return;

	/*
	 * This function is called from the mount context. So, there is no
	 * parallel process touching the bits. No need for read_seqretry().
	 */
	if (fs_info->avail_metadata_alloc_bits & BTRFS_BLOCK_GROUP_DUP)
		metadata_reserve = 4;
	if (fs_info->avail_system_alloc_bits & BTRFS_BLOCK_GROUP_DUP)
		system_reserve = 2;

	/* Apply the reservation on all the devices. */
	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (!device->bdev)
			continue;

		device->zone_info->reserved_active_zones =
			metadata_reserve + system_reserve;
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	/* Release reservation for currently active block groups. */
	spin_lock(&fs_info->zone_active_bgs_lock);
	list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
		struct btrfs_chunk_map *map = block_group->physical_map;

		if (!(block_group->flags &
		      (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)))
			continue;

		for (int i = 0; i < map->num_stripes; i++)
			map->stripes[i].dev->zone_info->reserved_active_zones--;
	}
	spin_unlock(&fs_info->zone_active_bgs_lock);
}