// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <linux/uuid.h>
#include <linux/semaphore.h>
#include <linux/error-injection.h>
#include <linux/crc32c.h>
#include <linux/sched/mm.h>
#include <asm/unaligned.h>
#include <crypto/hash.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "bio.h"
#include "print-tree.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
#include "compression.h"
#include "tree-checker.h"
#include "ref-verify.h"
#include "block-group.h"
#include "discard.h"
#include "space-info.h"
#include "zoned.h"
#include "subpage.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "defrag.h"
#include "uuid-tree.h"
#include "relocation.h"
#include "scrub.h"
#include "super.h"

#define BTRFS_SUPER_FLAG_SUPP	(BTRFS_HEADER_FLAG_WRITTEN |\
				 BTRFS_HEADER_FLAG_RELOC |\
				 BTRFS_SUPER_FLAG_ERROR |\
				 BTRFS_SUPER_FLAG_SEEDING |\
				 BTRFS_SUPER_FLAG_METADUMP |\
				 BTRFS_SUPER_FLAG_METADUMP_V2)

static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info);
static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info);

static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info)
{
	if (fs_info->csum_shash)
		crypto_free_shash(fs_info->csum_shash);
}

/*
 * Compute the csum of a btree block and store the result to provided buffer.
 */
static void csum_tree_block(struct extent_buffer *buf, u8 *result)
{
	struct btrfs_fs_info *fs_info = buf->fs_info;
	const int num_pages = num_extent_pages(buf);
	const int first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize);
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	char *kaddr;
	int i;

	shash->tfm = fs_info->csum_shash;
	crypto_shash_init(shash);
	kaddr = page_address(buf->pages[0]) + offset_in_page(buf->start);
	crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
			    first_page_part - BTRFS_CSUM_SIZE);

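	/*
	 * Checksum any remaining pages. INLINE_EXTENT_BUFFER_PAGES is a
	 * compile-time constant (BTRFS_MAX_METADATA_BLOCKSIZE / PAGE_SIZE),
	 * so on systems where one page can always hold a whole nodesize
	 * (e.g. 64K pages) the loop below compiles away entirely.
	 */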
	for (i = 1; i < num_pages && INLINE_EXTENT_BUFFER_PAGES > 1; i++) {
		kaddr = page_address(buf->pages[i]);
		crypto_shash_update(shash, kaddr, PAGE_SIZE);
	}
	memset(result, 0, BTRFS_CSUM_SIZE);
	crypto_shash_final(shash, result);
}

/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer. This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
int btrfs_buffer_uptodate(struct extent_buffer *eb, u64 parent_transid, int atomic)
{
	if (!extent_buffer_uptodate(eb))
		return 0;

	if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
		return 1;

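	/*
	 * In atomic context we can't do the failure handling below, so
	 * return -EAGAIN and let the caller redo the check from a context
	 * where blocking is allowed.
	 */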
	if (atomic)
		return -EAGAIN;

	if (!extent_buffer_uptodate(eb) ||
	    btrfs_header_generation(eb) != parent_transid) {
		btrfs_err_rl(eb->fs_info,
"parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
			eb->start, eb->read_mirror,
			parent_transid, btrfs_header_generation(eb));
		clear_extent_buffer_uptodate(eb);
		return 0;
	}
	return 1;
}

static bool btrfs_supported_super_csum(u16 csum_type)
{
	switch (csum_type) {
	case BTRFS_CSUM_TYPE_CRC32:
	case BTRFS_CSUM_TYPE_XXHASH:
	case BTRFS_CSUM_TYPE_SHA256:
	case BTRFS_CSUM_TYPE_BLAKE2:
		return true;
	default:
		return false;
	}
}

/*
 * Return 0 if the superblock checksum type matches the checksum value of that
 * algorithm. Pass the raw disk superblock data.
 */
int btrfs_check_super_csum(struct btrfs_fs_info *fs_info,
			   const struct btrfs_super_block *disk_sb)
{
	char result[BTRFS_CSUM_SIZE];
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);

	shash->tfm = fs_info->csum_shash;

	/*
	 * The super_block structure does not span the whole
	 * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space is
	 * filled with zeros and is included in the checksum.
	 */
	crypto_shash_digest(shash, (const u8 *)disk_sb + BTRFS_CSUM_SIZE,
			    BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result);

	if (memcmp(disk_sb->csum, result, fs_info->csum_size))
		return 1;

	return 0;
}

static int btrfs_repair_eb_io_failure(const struct extent_buffer *eb,
				      int mirror_num)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int i, num_pages = num_extent_pages(eb);
	int ret = 0;

	if (sb_rdonly(fs_info->sb))
		return -EROFS;

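	/*
	 * Write each page of this (good) copy back over the bad mirror,
	 * clamping to the eb range so we don't touch neighbouring tree
	 * blocks that share a page in the subpage case.
	 */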
	for (i = 0; i < num_pages; i++) {
		struct page *p = eb->pages[i];
		u64 start = max_t(u64, eb->start, page_offset(p));
		u64 end = min_t(u64, eb->start + eb->len, page_offset(p) + PAGE_SIZE);
		u32 len = end - start;

		ret = btrfs_repair_io_failure(fs_info, 0, start, len,
					      start, p, offset_in_page(start), mirror_num);
		if (ret)
			break;
	}

	return ret;
}

/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 *
 * @check:	expected tree parentness check, see the comments of the
 *		structure for details.
 */
int btrfs_read_extent_buffer(struct extent_buffer *eb,
			     struct btrfs_tree_parent_check *check)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int failed = 0;
	int ret;
	int num_copies = 0;
	int mirror_num = 0;
	int failed_mirror = 0;

	ASSERT(check);

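	/*
	 * Start with mirror_num == 0, which lets the lower layers pick a
	 * copy; on failure retry explicit mirrors 1..num_copies, skipping
	 * the mirror that failed first.
	 */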
	while (1) {
		clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num, check);
		if (!ret)
			break;

		num_copies = btrfs_num_copies(fs_info,
					      eb->start, eb->len);
		if (num_copies == 1)
			break;

		if (!failed_mirror) {
			failed = 1;
			failed_mirror = eb->read_mirror;
		}

		mirror_num++;
		if (mirror_num == failed_mirror)
			mirror_num++;

		if (mirror_num > num_copies)
			break;
	}

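	/* If a later mirror read fine, rewrite the mirror that failed first. */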
	if (failed && !ret && failed_mirror)
		btrfs_repair_eb_io_failure(eb, failed_mirror);

	return ret;
}

/*
 * Checksum a dirty tree block before IO.
 */
blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio)
{
	struct extent_buffer *eb = bbio->private;
	struct btrfs_fs_info *fs_info = eb->fs_info;
	u64 found_start = btrfs_header_bytenr(eb);
	u64 last_trans;
	u8 result[BTRFS_CSUM_SIZE];
	int ret;

	/* Btree blocks are always contiguous on disk. */
	if (WARN_ON_ONCE(bbio->file_offset != eb->start))
		return BLK_STS_IOERR;
	if (WARN_ON_ONCE(bbio->bio.bi_iter.bi_size != eb->len))
		return BLK_STS_IOERR;

	if (test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags)) {
		WARN_ON_ONCE(found_start != 0);
		return BLK_STS_OK;
	}

	if (WARN_ON_ONCE(found_start != eb->start))
		return BLK_STS_IOERR;
	if (WARN_ON(!btrfs_page_test_uptodate(fs_info, eb->pages[0], eb->start,
					      eb->len)))
		return BLK_STS_IOERR;

	ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid,
				    offsetof(struct btrfs_header, fsid),
				    BTRFS_FSID_SIZE) == 0);
	csum_tree_block(eb, result);

	if (btrfs_header_level(eb))
		ret = btrfs_check_node(eb);
	else
		ret = btrfs_check_leaf(eb);

	if (ret < 0)
		goto error;

	/*
	 * Also check the generation, the eb reached here must be newer than
	 * last committed. Or something seriously wrong happened.
	 */
	last_trans = btrfs_get_last_trans_committed(fs_info);
	if (unlikely(btrfs_header_generation(eb) <= last_trans)) {
		ret = -EUCLEAN;
		btrfs_err(fs_info,
			"block=%llu bad generation, have %llu expect > %llu",
			  eb->start, btrfs_header_generation(eb), last_trans);
		goto error;
	}
	write_extent_buffer(eb, result, 0, fs_info->csum_size);
	return BLK_STS_OK;

error:
	btrfs_print_tree(eb, 0);
	btrfs_err(fs_info, "block=%llu write time tree block corruption detected",
		  eb->start);
	/*
	 * Be noisy if this is an extent buffer from a log tree. We don't abort
	 * a transaction in case there's a bad log tree extent buffer, we just
	 * fallback to a transaction commit. Still we want to know when there is
	 * a bad log tree extent buffer, as that may signal a bug somewhere.
	 */
	WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG) ||
		btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID);
	return errno_to_blk_status(ret);
}

static bool check_tree_block_fsid(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	u8 fsid[BTRFS_FSID_SIZE];

	read_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid),
			   BTRFS_FSID_SIZE);

	/*
	 * alloc_fsid_devices() copies the fsid into fs_devices::metadata_uuid.
	 * This is then overwritten by metadata_uuid if it is present in the
	 * device_list_add(). The same is true for a seed device as well. So
	 * use of fs_devices::metadata_uuid is appropriate here.
	 */
	if (memcmp(fsid, fs_info->fs_devices->metadata_uuid, BTRFS_FSID_SIZE) == 0)
		return false;

	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list)
		if (!memcmp(fsid, seed_devs->fsid, BTRFS_FSID_SIZE))
			return false;

	return true;
}

/* Do basic extent buffer checks at read time */
int btrfs_validate_extent_buffer(struct extent_buffer *eb,
				 struct btrfs_tree_parent_check *check)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	u64 found_start;
	const u32 csum_size = fs_info->csum_size;
	u8 found_level;
	u8 result[BTRFS_CSUM_SIZE];
	const u8 *header_csum;
	int ret = 0;

	ASSERT(check);

	found_start = btrfs_header_bytenr(eb);
	if (found_start != eb->start) {
		btrfs_err_rl(fs_info,
			"bad tree block start, mirror %u want %llu have %llu",
			eb->read_mirror, eb->start, found_start);
		ret = -EIO;
		goto out;
	}
	if (check_tree_block_fsid(eb)) {
		btrfs_err_rl(fs_info, "bad fsid on logical %llu mirror %u",
			     eb->start, eb->read_mirror);
		ret = -EIO;
		goto out;
	}
	found_level = btrfs_header_level(eb);
	if (found_level >= BTRFS_MAX_LEVEL) {
		btrfs_err(fs_info,
			"bad tree block level, mirror %u level %d on logical %llu",
			eb->read_mirror, btrfs_header_level(eb), eb->start);
		ret = -EIO;
		goto out;
	}

	csum_tree_block(eb, result);
	header_csum = page_address(eb->pages[0]) +
		get_eb_offset_in_page(eb, offsetof(struct btrfs_header, csum));

	if (memcmp(result, header_csum, csum_size) != 0) {
		btrfs_warn_rl(fs_info,
"checksum verify failed on logical %llu mirror %u wanted " CSUM_FMT " found " CSUM_FMT " level %d",
			      eb->start, eb->read_mirror,
			      CSUM_FMT_VALUE(csum_size, header_csum),
			      CSUM_FMT_VALUE(csum_size, result),
			      btrfs_header_level(eb));
		ret = -EUCLEAN;
		goto out;
	}

	if (found_level != check->level) {
		btrfs_err(fs_info,
			"level verify failed on logical %llu mirror %u wanted %u found %u",
			eb->start, eb->read_mirror, check->level, found_level);
		ret = -EIO;
		goto out;
	}
	if (unlikely(check->transid &&
		     btrfs_header_generation(eb) != check->transid)) {
		btrfs_err_rl(eb->fs_info,
"parent transid verify failed on logical %llu mirror %u wanted %llu found %llu",
			     eb->start, eb->read_mirror, check->transid,
			     btrfs_header_generation(eb));
		ret = -EIO;
		goto out;
	}
	if (check->has_first_key) {
		struct btrfs_key *expect_key = &check->first_key;
		struct btrfs_key found_key;

		if (found_level)
			btrfs_node_key_to_cpu(eb, &found_key, 0);
		else
			btrfs_item_key_to_cpu(eb, &found_key, 0);
		if (unlikely(btrfs_comp_cpu_keys(expect_key, &found_key))) {
			btrfs_err(fs_info,
"tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)",
				  eb->start, check->transid,
				  expect_key->objectid,
				  expect_key->type, expect_key->offset,
				  found_key.objectid, found_key.type,
				  found_key.offset);
			ret = -EUCLEAN;
			goto out;
		}
	}
	if (check->owner_root) {
		ret = btrfs_check_eb_owner(eb, check->owner_root);
		if (ret < 0)
			goto out;
	}

	/*
	 * If this is a leaf block and it is corrupt, set the corrupt bit so
	 * that we don't try and read the other copies of this block, just
	 * return -EIO.
	 */
	if (found_level == 0 && btrfs_check_leaf(eb)) {
		set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
		ret = -EIO;
	}

	if (found_level > 0 && btrfs_check_node(eb))
		ret = -EIO;

	if (ret)
		btrfs_err(fs_info,
			  "read time tree block corruption detected on logical %llu mirror %u",
			  eb->start, eb->read_mirror);
out:
	return ret;
}

#ifdef CONFIG_MIGRATION
static int btree_migrate_folio(struct address_space *mapping,
		struct folio *dst, struct folio *src, enum migrate_mode mode)
{
	/*
	 * we can't safely write a btree page from here,
	 * we haven't done the locking hook
	 */
	if (folio_test_dirty(src))
		return -EAGAIN;
	/*
	 * Buffers may be managed in a filesystem specific way.
	 * We must have no buffers or drop them.
	 */
	if (folio_get_private(src) &&
	    !filemap_release_folio(src, GFP_KERNEL))
		return -EAGAIN;
	return migrate_folio(mapping, dst, src, mode);
}
#else
#define btree_migrate_folio NULL
#endif

static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info;
	int ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {

		if (wbc->for_kupdate)
			return 0;

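		/*
		 * For background writeback, only start IO once the amount of
		 * dirty metadata crosses BTRFS_DIRTY_METADATA_THRESH, so
		 * small bursts of dirtying get batched into fewer writes.
		 */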
		fs_info = BTRFS_I(mapping->host)->root->fs_info;
		/* this is a bit racy, but that's ok */
		ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
					       BTRFS_DIRTY_METADATA_THRESH,
					       fs_info->dirty_metadata_batch);
		if (ret < 0)
			return 0;
	}
	return btree_write_cache_pages(mapping, wbc);
}

static bool btree_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	if (folio_test_writeback(folio) || folio_test_dirty(folio))
		return false;

	return try_release_extent_buffer(&folio->page);
}

static void btree_invalidate_folio(struct folio *folio, size_t offset,
				   size_t length)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(folio->mapping->host)->io_tree;
	extent_invalidate_folio(tree, folio, offset);
	btree_release_folio(folio, GFP_NOFS);
	if (folio_get_private(folio)) {
		btrfs_warn(BTRFS_I(folio->mapping->host)->root->fs_info,
			   "folio private not zero on folio %llu",
			   (unsigned long long)folio_pos(folio));
		folio_detach_private(folio);
	}
}

#ifdef DEBUG
static bool btree_dirty_folio(struct address_space *mapping,
		struct folio *folio)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(mapping->host->i_sb);
	struct btrfs_subpage_info *spi = fs_info->subpage_info;
	struct btrfs_subpage *subpage;
	struct extent_buffer *eb;
	int cur_bit = 0;
	u64 page_start = folio_pos(folio);

	if (fs_info->sectorsize == PAGE_SIZE) {
		eb = folio_get_private(folio);
		BUG_ON(!eb);
		BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
		BUG_ON(!atomic_read(&eb->refs));
		btrfs_assert_tree_write_locked(eb);
		return filemap_dirty_folio(mapping, folio);
	}

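	/*
	 * Subpage case: one folio can hold several tree blocks, so walk the
	 * dirty bitmap and sanity check every extent buffer that owns a
	 * dirty sector.
	 */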
	ASSERT(spi);
	subpage = folio_get_private(folio);

	for (cur_bit = spi->dirty_offset;
	     cur_bit < spi->dirty_offset + spi->bitmap_nr_bits;
	     cur_bit++) {
		unsigned long flags;
		u64 cur;

		spin_lock_irqsave(&subpage->lock, flags);
		if (!test_bit(cur_bit, subpage->bitmaps)) {
			spin_unlock_irqrestore(&subpage->lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&subpage->lock, flags);
		cur = page_start + cur_bit * fs_info->sectorsize;

		eb = find_extent_buffer(fs_info, cur);
		ASSERT(eb);
		ASSERT(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
		ASSERT(atomic_read(&eb->refs));
		btrfs_assert_tree_write_locked(eb);
		free_extent_buffer(eb);

		cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits) - 1;
	}
	return filemap_dirty_folio(mapping, folio);
}
#else
#define btree_dirty_folio filemap_dirty_folio
#endif

static const struct address_space_operations btree_aops = {
	.writepages	= btree_writepages,
	.release_folio	= btree_release_folio,
	.invalidate_folio = btree_invalidate_folio,
	.migrate_folio	= btree_migrate_folio,
	.dirty_folio	= btree_dirty_folio,
};

struct extent_buffer *btrfs_find_create_tree_block(
						struct btrfs_fs_info *fs_info,
						u64 bytenr, u64 owner_root,
						int level)
{
	if (btrfs_is_testing(fs_info))
		return alloc_test_extent_buffer(fs_info, bytenr);
	return alloc_extent_buffer(fs_info, bytenr, owner_root, level);
}

/*
 * Read tree block at logical address @bytenr and do variant basic but critical
 * verification.
 *
 * @check:	expected tree parentness check, see comments of the
 *		structure for details.
 */
struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr,
				      struct btrfs_tree_parent_check *check)
{
	struct extent_buffer *buf = NULL;
	int ret;

	ASSERT(check);

	buf = btrfs_find_create_tree_block(fs_info, bytenr, check->owner_root,
					   check->level);
	if (IS_ERR(buf))
		return buf;

	ret = btrfs_read_extent_buffer(buf, check);
	if (ret) {
		free_extent_buffer_stale(buf);
		return ERR_PTR(ret);
	}
	if (btrfs_check_eb_owner(buf, check->owner_root)) {
		free_extent_buffer_stale(buf);
		return ERR_PTR(-EUCLEAN);
	}
	return buf;
}

static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
			 u64 objectid)
{
	bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);

	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	root->fs_info = fs_info;
	root->root_key.objectid = objectid;
	root->node = NULL;
	root->commit_root = NULL;
	root->state = 0;
	RB_CLEAR_NODE(&root->rb_node);

	root->last_trans = 0;
	root->free_objectid = 0;
	root->nr_delalloc_inodes = 0;
	root->nr_ordered_extents = 0;
	root->inode_tree = RB_ROOT;
	INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);

	btrfs_init_root_block_rsv(root);

	INIT_LIST_HEAD(&root->dirty_list);
	INIT_LIST_HEAD(&root->root_list);
	INIT_LIST_HEAD(&root->delalloc_inodes);
	INIT_LIST_HEAD(&root->delalloc_root);
	INIT_LIST_HEAD(&root->ordered_extents);
	INIT_LIST_HEAD(&root->ordered_root);
	INIT_LIST_HEAD(&root->reloc_dirty_list);
	INIT_LIST_HEAD(&root->logged_list[0]);
	INIT_LIST_HEAD(&root->logged_list[1]);
	spin_lock_init(&root->inode_lock);
	spin_lock_init(&root->delalloc_lock);
	spin_lock_init(&root->ordered_extent_lock);
	spin_lock_init(&root->accounting_lock);
	spin_lock_init(&root->log_extents_lock[0]);
	spin_lock_init(&root->log_extents_lock[1]);
	spin_lock_init(&root->qgroup_meta_rsv_lock);
	mutex_init(&root->objectid_mutex);
	mutex_init(&root->log_mutex);
	mutex_init(&root->ordered_extent_mutex);
	mutex_init(&root->delalloc_mutex);
	init_waitqueue_head(&root->qgroup_flush_wait);
	init_waitqueue_head(&root->log_writer_wait);
	init_waitqueue_head(&root->log_commit_wait[0]);
	init_waitqueue_head(&root->log_commit_wait[1]);
	INIT_LIST_HEAD(&root->log_ctxs[0]);
	INIT_LIST_HEAD(&root->log_ctxs[1]);
	atomic_set(&root->log_commit[0], 0);
	atomic_set(&root->log_commit[1], 0);
	atomic_set(&root->log_writers, 0);
	atomic_set(&root->log_batch, 0);
	refcount_set(&root->refs, 1);
	atomic_set(&root->snapshot_force_cow, 0);
	atomic_set(&root->nr_swapfiles, 0);
	btrfs_set_root_log_transid(root, 0);
	root->log_transid_committed = -1;
	btrfs_set_root_last_log_commit(root, 0);
	root->anon_dev = 0;
	if (!dummy) {
		extent_io_tree_init(fs_info, &root->dirty_log_pages,
				    IO_TREE_ROOT_DIRTY_LOG_PAGES);
		extent_io_tree_init(fs_info, &root->log_csum_range,
				    IO_TREE_LOG_CSUM_RANGE);
	}

	spin_lock_init(&root->root_item_lock);
	btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks);
#ifdef CONFIG_BTRFS_DEBUG
	INIT_LIST_HEAD(&root->leak_list);
	spin_lock(&fs_info->fs_roots_radix_lock);
	list_add_tail(&root->leak_list, &fs_info->allocated_roots);
	spin_unlock(&fs_info->fs_roots_radix_lock);
#endif
}

static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
					   u64 objectid, gfp_t flags)
{
	struct btrfs_root *root = kzalloc(sizeof(*root), flags);
	if (root)
		__setup_root(root, fs_info, objectid);
	return root;
}

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	if (!fs_info)
		return ERR_PTR(-EINVAL);

	root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	/* We don't use the stripesize in selftest, set it as sectorsize */
	root->alloc_bytenr = 0;

	return root;
}
#endif

static int global_root_cmp(struct rb_node *a_node, const struct rb_node *b_node)
{
	const struct btrfs_root *a = rb_entry(a_node, struct btrfs_root, rb_node);
	const struct btrfs_root *b = rb_entry(b_node, struct btrfs_root, rb_node);

	return btrfs_comp_cpu_keys(&a->root_key, &b->root_key);
}

static int global_root_key_cmp(const void *k, const struct rb_node *node)
{
	const struct btrfs_key *key = k;
	const struct btrfs_root *root = rb_entry(node, struct btrfs_root, rb_node);

	return btrfs_comp_cpu_keys(key, &root->root_key);
}

int btrfs_global_root_insert(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *tmp;
	int ret = 0;

	write_lock(&fs_info->global_root_lock);
	tmp = rb_find_add(&root->rb_node, &fs_info->global_root_tree, global_root_cmp);
	write_unlock(&fs_info->global_root_lock);

	if (tmp) {
		ret = -EEXIST;
		btrfs_warn(fs_info, "global root %llu %llu already exists",
			   root->root_key.objectid, root->root_key.offset);
	}
	return ret;
}

void btrfs_global_root_delete(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	write_lock(&fs_info->global_root_lock);
	rb_erase(&root->rb_node, &fs_info->global_root_tree);
	write_unlock(&fs_info->global_root_lock);
}

struct btrfs_root *btrfs_global_root(struct btrfs_fs_info *fs_info,
				     struct btrfs_key *key)
{
	struct rb_node *node;
	struct btrfs_root *root = NULL;

	read_lock(&fs_info->global_root_lock);
	node = rb_find(key, &fs_info->global_root_tree, global_root_key_cmp);
	if (node)
		root = container_of(node, struct btrfs_root, rb_node);
	read_unlock(&fs_info->global_root_lock);

	return root;
}

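/*
 * Map a logical address to the id of the global root (csum/extent/free space
 * tree copy) that covers it. Without the EXTENT_TREE_V2 feature there is only
 * one copy of each global tree, so the id is always 0; with it, the id comes
 * from the block group that owns @bytenr.
 */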
static u64 btrfs_global_root_id(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *block_group;
	u64 ret;

	if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
		return 0;

	if (bytenr)
		block_group = btrfs_lookup_block_group(fs_info, bytenr);
	else
		block_group = btrfs_lookup_first_block_group(fs_info, bytenr);
	ASSERT(block_group);
	if (!block_group)
		return 0;
	ret = block_group->global_root_id;
	btrfs_put_block_group(block_group);

	return ret;
}

struct btrfs_root *btrfs_csum_root(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_key key = {
		.objectid = BTRFS_CSUM_TREE_OBJECTID,
		.type = BTRFS_ROOT_ITEM_KEY,
		.offset = btrfs_global_root_id(fs_info, bytenr),
	};

	return btrfs_global_root(fs_info, &key);
}

struct btrfs_root *btrfs_extent_root(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_key key = {
		.objectid = BTRFS_EXTENT_TREE_OBJECTID,
		.type = BTRFS_ROOT_ITEM_KEY,
		.offset = btrfs_global_root_id(fs_info, bytenr),
	};

	return btrfs_global_root(fs_info, &key);
}

struct btrfs_root *btrfs_block_group_root(struct btrfs_fs_info *fs_info)
{
	if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE))
		return fs_info->block_group_root;
	return btrfs_extent_root(fs_info, 0);
}

struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
				     u64 objectid)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct extent_buffer *leaf;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root;
	struct btrfs_key key;
	unsigned int nofs_flag;
	int ret = 0;

	/*
	 * We're holding a transaction handle, so use a NOFS memory allocation
	 * context to avoid deadlock if reclaim happens.
	 */
	nofs_flag = memalloc_nofs_save();
	root = btrfs_alloc_root(fs_info, objectid, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!root)
		return ERR_PTR(-ENOMEM);

	root->root_key.objectid = objectid;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = 0;

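	/* Every new tree starts out as a single empty leaf. */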
	leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0,
				      0, BTRFS_NESTING_NORMAL);
	if (IS_ERR(leaf)) {
		ret = PTR_ERR(leaf);
		leaf = NULL;
		goto fail;
	}

	root->node = leaf;
	btrfs_mark_buffer_dirty(trans, leaf);

	root->commit_root = btrfs_root_node(root);
	set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);

	btrfs_set_root_flags(&root->root_item, 0);
	btrfs_set_root_limit(&root->root_item, 0);
	btrfs_set_root_bytenr(&root->root_item, leaf->start);
	btrfs_set_root_generation(&root->root_item, trans->transid);
	btrfs_set_root_level(&root->root_item, 0);
	btrfs_set_root_refs(&root->root_item, 1);
	btrfs_set_root_used(&root->root_item, leaf->len);
	btrfs_set_root_last_snapshot(&root->root_item, 0);
	btrfs_set_root_dirid(&root->root_item, 0);
	if (is_fstree(objectid))
		generate_random_guid(root->root_item.uuid);
	else
		export_guid(root->root_item.uuid, &guid_null);
	btrfs_set_root_drop_level(&root->root_item, 0);

	btrfs_tree_unlock(leaf);

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item);
	if (ret)
		goto fail;

	return root;

fail:
	btrfs_put_root(root);

	return ERR_PTR(ret);
}

static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;

	root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
	root->root_key.type = BTRFS_ROOT_ITEM_KEY;
	root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;

	return root;
}

int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root)
{
	struct extent_buffer *leaf;

	/*
	 * DON'T set SHAREABLE bit for log trees.
	 *
	 * Log trees are not exposed to user space thus can't be snapshotted,
	 * and they go away before a real commit is actually done.
	 *
	 * They do store pointers to file data extents, and those reference
	 * counts still get updated (along with back refs to the log tree).
	 */

	leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID,
			NULL, 0, 0, 0, 0, BTRFS_NESTING_NORMAL);
	if (IS_ERR(leaf))
		return PTR_ERR(leaf);

	root->node = leaf;

	btrfs_mark_buffer_dirty(trans, root->node);
	btrfs_tree_unlock(root->node);

	return 0;
}

int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *log_root;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	if (!btrfs_is_zoned(fs_info)) {
		int ret = btrfs_alloc_log_tree_node(trans, log_root);

		if (ret) {
			btrfs_put_root(log_root);
			return ret;
		}
	}

	WARN_ON(fs_info->log_root_tree);
	fs_info->log_root_tree = log_root;
	return 0;
}

int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *log_root;
	struct btrfs_inode_item *inode_item;
	int ret;

	log_root = alloc_log_tree(trans, fs_info);
	if (IS_ERR(log_root))
		return PTR_ERR(log_root);

	ret = btrfs_alloc_log_tree_node(trans, log_root);
	if (ret) {
		btrfs_put_root(log_root);
		return ret;
	}

	log_root->last_trans = trans->transid;
	log_root->root_key.offset = root->root_key.objectid;

	inode_item = &log_root->root_item.inode;
	btrfs_set_stack_inode_generation(inode_item, 1);
	btrfs_set_stack_inode_size(inode_item, 3);
	btrfs_set_stack_inode_nlink(inode_item, 1);
	btrfs_set_stack_inode_nbytes(inode_item,
				     fs_info->nodesize);
	btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755);

	btrfs_set_root_node(&log_root->root_item, log_root->node);

	WARN_ON(root->log_root);
	root->log_root = log_root;
	btrfs_set_root_log_transid(root, 0);
	root->log_transid_committed = -1;
	btrfs_set_root_last_log_commit(root, 0);
	return 0;
}

static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root,
					      struct btrfs_path *path,
					      struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_tree_parent_check check = { 0 };
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	u64 generation;
	int ret;
	int level;

	root = btrfs_alloc_root(fs_info, key->objectid, GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);

	ret = btrfs_find_root(tree_root, key, path,
			      &root->root_item, &root->root_key);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto fail;
	}

	generation = btrfs_root_generation(&root->root_item);
	level = btrfs_root_level(&root->root_item);
	check.level = level;
	check.transid = generation;
	check.owner_root = key->objectid;
	root->node = read_tree_block(fs_info, btrfs_root_bytenr(&root->root_item),
				     &check);
	if (IS_ERR(root->node)) {
		ret = PTR_ERR(root->node);
		root->node = NULL;
		goto fail;
	}
	if (!btrfs_buffer_uptodate(root->node, generation, 0)) {
		ret = -EIO;
		goto fail;
	}

	/*
	 * For real fs, and not log/reloc trees, root owner must
	 * match its root node owner
	 */
	if (!test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state) &&
	    root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID &&
	    root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	    root->root_key.objectid != btrfs_header_owner(root->node)) {
		btrfs_crit(fs_info,
"root=%llu block=%llu, tree root owner mismatch, have %llu expect %llu",
			   root->root_key.objectid, root->node->start,
			   btrfs_header_owner(root->node),
			   root->root_key.objectid);
		ret = -EUCLEAN;
		goto fail;
	}
	root->commit_root = btrfs_root_node(root);
	return root;
fail:
	btrfs_put_root(root);
	return ERR_PTR(ret);
}

struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
					struct btrfs_key *key)
{
	struct btrfs_root *root;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);
	root = read_tree_root_path(tree_root, path, key);
	btrfs_free_path(path);

	return root;
}

/*
 * Initialize subvolume root in-memory structure
 *
 * @anon_dev:	anonymous device to attach to the root, if zero, allocate new
 */
static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev)
{
	int ret;

	btrfs_drew_lock_init(&root->snapshot_lock);

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID &&
	    !btrfs_is_data_reloc_root(root) &&
	    is_fstree(root->root_key.objectid)) {
		set_bit(BTRFS_ROOT_SHAREABLE, &root->state);
		btrfs_check_and_init_root_item(&root->root_item);
	}

	/*
	 * Don't assign anonymous block device to roots that are not exposed to
	 * userspace, the id pool is limited to 1M
	 */
	if (is_fstree(root->root_key.objectid) &&
	    btrfs_root_refs(&root->root_item) > 0) {
		if (!anon_dev) {
			ret = get_anon_bdev(&root->anon_dev);
			if (ret)
				goto fail;
		} else {
			root->anon_dev = anon_dev;
		}
	}

	mutex_lock(&root->objectid_mutex);
	ret = btrfs_init_root_free_objectid(root);
	if (ret) {
		mutex_unlock(&root->objectid_mutex);
		goto fail;
	}

	ASSERT(root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);

	mutex_unlock(&root->objectid_mutex);

	return 0;
fail:
	/* The caller is responsible to call btrfs_free_fs_root */
	return ret;
}

static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					       u64 root_id)
{
	struct btrfs_root *root;

	spin_lock(&fs_info->fs_roots_radix_lock);
	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_id);
	root = btrfs_grab_root(root);
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return root;
}

static struct btrfs_root *btrfs_get_global_root(struct btrfs_fs_info *fs_info,
						u64 objectid)
{
	struct btrfs_key key = {
		.objectid = objectid,
		.type = BTRFS_ROOT_ITEM_KEY,
		.offset = 0,
	};

	switch (objectid) {
	case BTRFS_ROOT_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->tree_root);
	case BTRFS_EXTENT_TREE_OBJECTID:
		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
	case BTRFS_CHUNK_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->chunk_root);
	case BTRFS_DEV_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->dev_root);
	case BTRFS_CSUM_TREE_OBJECTID:
		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
	case BTRFS_QUOTA_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->quota_root);
	case BTRFS_UUID_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->uuid_root);
	case BTRFS_BLOCK_GROUP_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->block_group_root);
	case BTRFS_FREE_SPACE_TREE_OBJECTID:
		return btrfs_grab_root(btrfs_global_root(fs_info, &key));
	case BTRFS_RAID_STRIPE_TREE_OBJECTID:
		return btrfs_grab_root(fs_info->stripe_root);
	default:
		return NULL;
	}
}

int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
			 struct btrfs_root *root)
{
	int ret;

	ret = radix_tree_preload(GFP_NOFS);
	if (ret)
		return ret;

	spin_lock(&fs_info->fs_roots_radix_lock);
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret == 0) {
		btrfs_grab_root(root);
		set_bit(BTRFS_ROOT_IN_RADIX, &root->state);
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	radix_tree_preload_end();

	return ret;
}

void btrfs_check_leaked_roots(struct btrfs_fs_info *fs_info)
{
#ifdef CONFIG_BTRFS_DEBUG
	struct btrfs_root *root;

	while (!list_empty(&fs_info->allocated_roots)) {
		char buf[BTRFS_ROOT_NAME_BUF_LEN];

		root = list_first_entry(&fs_info->allocated_roots,
					struct btrfs_root, leak_list);
		btrfs_err(fs_info, "leaked root %s refcount %d",
			  btrfs_root_name(&root->root_key, buf),
			  refcount_read(&root->refs));
		while (refcount_read(&root->refs) > 1)
			btrfs_put_root(root);
		btrfs_put_root(root);
	}
#endif
}

static void free_global_roots(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root;
	struct rb_node *node;

	while ((node = rb_first_postorder(&fs_info->global_root_tree)) != NULL) {
		root = rb_entry(node, struct btrfs_root, rb_node);
		rb_erase(&root->rb_node, &fs_info->global_root_tree);
		btrfs_put_root(root);
	}
}

void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
{
	percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
	percpu_counter_destroy(&fs_info->delalloc_bytes);
	percpu_counter_destroy(&fs_info->ordered_bytes);
	percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
	btrfs_free_csum_hash(fs_info);
	btrfs_free_stripe_hash_table(fs_info);
	btrfs_free_ref_cache(fs_info);
	kfree(fs_info->balance_ctl);
	kfree(fs_info->delayed_root);
	free_global_roots(fs_info);
	btrfs_put_root(fs_info->tree_root);
	btrfs_put_root(fs_info->chunk_root);
	btrfs_put_root(fs_info->dev_root);
	btrfs_put_root(fs_info->quota_root);
	btrfs_put_root(fs_info->uuid_root);
	btrfs_put_root(fs_info->fs_root);
	btrfs_put_root(fs_info->data_reloc_root);
	btrfs_put_root(fs_info->block_group_root);
	btrfs_put_root(fs_info->stripe_root);
	btrfs_check_leaked_roots(fs_info);
	btrfs_extent_buffer_leak_debug_check(fs_info);
	kfree(fs_info->super_copy);
	kfree(fs_info->super_for_commit);
	kfree(fs_info->subpage_info);
	kvfree(fs_info);
}

/*
 * Get an in-memory reference of a root structure.
 *
 * For essential trees like root/extent tree, we grab it from fs_info directly.
 * For subvolume trees, we check the cached filesystem roots first. If not
 * found, then read it from disk and add it to cached fs roots.
 *
 * Caller should release the root by calling btrfs_put_root() after the usage.
 *
 * NOTE: Reloc and log trees can't be read by this function as they share the
 *	 same root objectid.
 *
 * @objectid:	root id
 * @anon_dev:	preallocated anonymous block device number for new roots,
 *		pass 0 for new allocation.
 * @check_ref:	whether to check root item references, If true, return -ENOENT
 *		for orphan roots
 */
static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info,
					     u64 objectid, dev_t anon_dev,
					     bool check_ref)
{
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	root = btrfs_get_global_root(fs_info, objectid);
	if (root)
		return root;

	/*
	 * If we're called for non-subvolume trees, and above function didn't
	 * find one, do not try to read it from disk.
	 *
	 * This is namely for free-space-tree and quota tree, which can change
	 * at runtime and should only be grabbed from fs_info.
	 */
	if (!is_fstree(objectid) && objectid != BTRFS_DATA_RELOC_TREE_OBJECTID)
		return ERR_PTR(-ENOENT);
again:
	root = btrfs_lookup_fs_root(fs_info, objectid);
	if (root) {
		/* Shouldn't get preallocated anon_dev for cached roots */
		ASSERT(!anon_dev);
		if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
			btrfs_put_root(root);
			return ERR_PTR(-ENOENT);
		}
		return root;
	}

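	/*
	 * Not cached, read the root item from the tree root. The offset of
	 * (u64)-1 asks btrfs_find_root() for the highest matching root item
	 * for this objectid.
	 */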
	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	root = btrfs_read_tree_root(fs_info->tree_root, &key);
	if (IS_ERR(root))
		return root;

	if (check_ref && btrfs_root_refs(&root->root_item) == 0) {
		ret = -ENOENT;
		goto fail;
	}

	ret = btrfs_init_fs_root(root, anon_dev);
	if (ret)
		goto fail;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}
	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	btrfs_free_path(path);
	if (ret < 0)
		goto fail;
	if (ret == 0)
		set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);

	ret = btrfs_insert_fs_root(fs_info, root);
	if (ret) {
		if (ret == -EEXIST) {
			btrfs_put_root(root);
			goto again;
		}
		goto fail;
	}
	return root;
fail:
	/*
	 * If our caller provided us an anonymous device, then it's his
	 * responsibility to free it in case we fail. So we have to set our
	 * root's anon_dev to 0 to avoid a double free, once by btrfs_put_root()
	 * and once again by our caller.
	 */
	if (anon_dev)
		root->anon_dev = 0;
	btrfs_put_root(root);
	return ERR_PTR(ret);
}

/*
 * Get in-memory reference of a root structure
 *
 * @objectid:	tree objectid
 * @check_ref:	if set, verify that the tree exists and the item has at least
 *		one reference
 */
struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
				     u64 objectid, bool check_ref)
{
	return btrfs_get_root_ref(fs_info, objectid, 0, check_ref);
}

/*
 * Get in-memory reference of a root structure, created as new, optionally pass
 * the anonymous block device id
 *
 * @objectid:	tree objectid
 * @anon_dev:	if zero, allocate a new anonymous block device or use the
 *		parameter value
 */
struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info,
					 u64 objectid, dev_t anon_dev)
{
	return btrfs_get_root_ref(fs_info, objectid, anon_dev, true);
}

/*
 * Return a root for the given objectid.
 *
 * @fs_info:	the fs_info
 * @objectid:	the objectid we need to lookup
 *
 * This is exclusively used for backref walking, and exists specifically because
 * of how qgroups does lookups. Qgroups will do a backref lookup at delayed ref
 * creation time, which means we may have to read the tree_root in order to look
 * up a fs root that is not in memory. If the root is not in memory we will
 * read the tree root commit root and look up the fs root from there. This is a
 * temporary root, it will not be inserted into the radix tree as it doesn't
 * have the most uptodate information, it'll simply be discarded once the
 * backref code is finished using the root.
 */
struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info,
						 struct btrfs_path *path,
						 u64 objectid)
{
	struct btrfs_root *root;
	struct btrfs_key key;

	ASSERT(path->search_commit_root && path->skip_locking);

	/*
	 * This can return -ENOENT if we ask for a root that doesn't exist, but
	 * since this is called via the backref walking code we won't be looking
	 * up a root that doesn't exist, unless there's corruption. So if root
	 * != NULL just return it.
	 */
	root = btrfs_get_global_root(fs_info, objectid);
	if (root)
		return root;

	root = btrfs_lookup_fs_root(fs_info, objectid);
	if (root)
		return root;

	key.objectid = objectid;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;
	root = read_tree_root_path(fs_info->tree_root, path, &key);
	btrfs_release_path(path);

	return root;
}

static int cleaner_kthread(void *arg)
{
	struct btrfs_fs_info *fs_info = arg;
	int again;

	while (1) {
		again = 0;

		set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);

		/* Make the cleaner go to sleep early. */
		if (btrfs_need_cleaner_sleep(fs_info))
			goto sleep;

		/*
		 * Do not do anything if we might cause open_ctree() to block
		 * before we have finished mounting the filesystem.
		 */
		if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
			goto sleep;

		if (!mutex_trylock(&fs_info->cleaner_mutex))
			goto sleep;

		/*
		 * Avoid the problem that we change the status of the fs
		 * during the above check and trylock.
		 */
		if (btrfs_need_cleaner_sleep(fs_info)) {
			mutex_unlock(&fs_info->cleaner_mutex);
			goto sleep;
		}

		if (test_and_clear_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags))
			btrfs_sysfs_feature_update(fs_info);

		btrfs_run_delayed_iputs(fs_info);

		again = btrfs_clean_one_deleted_snapshot(fs_info);
		mutex_unlock(&fs_info->cleaner_mutex);

		/*
		 * The defragger has dealt with the R/O remount and umount,
		 * needn't do anything special here.
		 */
		btrfs_run_defrag_inodes(fs_info);

		/*
		 * Acquires fs_info->reclaim_bgs_lock to avoid racing
		 * with relocation (btrfs_relocate_chunk) and relocation
		 * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group)
		 * after acquiring fs_info->reclaim_bgs_lock. So we
		 * can't hold, nor need to, fs_info->cleaner_mutex when deleting
		 * unused block groups.
		 */
		btrfs_delete_unused_bgs(fs_info);

		/*
		 * Reclaim block groups in the reclaim_bgs list after we deleted
		 * all unused block_groups. This possibly gives us some more free
		 * space.
		 */
		btrfs_reclaim_bgs(fs_info);
sleep:
		clear_and_wake_up_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
		if (kthread_should_park())
			kthread_parkme();
		if (kthread_should_stop())
			return 0;
		if (!again) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			__set_current_state(TASK_RUNNING);
		}
	}
}

static int transaction_kthread(void *arg)
{
	struct btrfs_root *root = arg;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_transaction *cur;
	u64 transid;
	time64_t delta;
	unsigned long delay;
	bool cannot_commit;

	do {
		cannot_commit = false;
		delay = msecs_to_jiffies(fs_info->commit_interval * 1000);
		mutex_lock(&fs_info->transaction_kthread_mutex);

		spin_lock(&fs_info->trans_lock);
		cur = fs_info->running_transaction;
		if (!cur) {
			spin_unlock(&fs_info->trans_lock);
			goto sleep;
		}

		delta = ktime_get_seconds() - cur->start_time;
		if (!test_and_clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags) &&
		    cur->state < TRANS_STATE_COMMIT_PREP &&
		    delta < fs_info->commit_interval) {
			spin_unlock(&fs_info->trans_lock);
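			/*
			 * Sleep only for the remainder of the commit
			 * interval, clamped to one full interval.
			 */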
			delay -= msecs_to_jiffies((delta - 1) * 1000);
			delay = min(delay,
				    msecs_to_jiffies(fs_info->commit_interval * 1000));
			goto sleep;
		}
		transid = cur->transid;
		spin_unlock(&fs_info->trans_lock);

		/* If the file system is aborted, this will always fail. */
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT)
				cannot_commit = true;
			goto sleep;
		}
		if (transid == trans->transid) {
			btrfs_commit_transaction(trans);
		} else {
			btrfs_end_transaction(trans);
		}
sleep:
		wake_up_process(fs_info->cleaner_kthread);
		mutex_unlock(&fs_info->transaction_kthread_mutex);

		if (BTRFS_FS_ERROR(fs_info))
			btrfs_cleanup_transaction(fs_info);
		if (!kthread_should_stop() &&
		    (!btrfs_transaction_blocked(fs_info) ||
		     cannot_commit))
			schedule_timeout_interruptible(delay);
	} while (!kthread_should_stop());
	return 0;
}

/*
 * This will find the highest generation in the array of root backups. The
 * index of the newest backup is returned, or -EINVAL if we can't find
 * anything.
 *
 * We check to make sure the array is valid by comparing the
 * generation of the latest root in the array with the generation
 * in the super block. If they don't match we pitch it.
 */
static int find_newest_super_backup(struct btrfs_fs_info *info)
{
	const u64 newest_gen = btrfs_super_generation(info->super_copy);
	u64 cur;
	struct btrfs_root_backup *root_backup;
	int i;

	for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
		root_backup = info->super_copy->super_roots + i;
		cur = btrfs_backup_tree_root_gen(root_backup);
		if (cur == newest_gen)
			return i;
	}

	return -EINVAL;
}

/*
 * copy all the root pointers into the super backup array.
 * this will bump the backup pointer by one when it is
 * done
 */
static void backup_super_roots(struct btrfs_fs_info *info)
{
	const int next_backup = info->backup_root_index;
	struct btrfs_root_backup *root_backup;

	root_backup = info->super_for_commit->super_roots + next_backup;

	/*
	 * make sure all of our padding and empty slots get zero filled
	 * regardless of which ones we use today
	 */
	memset(root_backup, 0, sizeof(*root_backup));

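	/* Advance the ring so the next commit overwrites the oldest slot. */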
	info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;

	btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
	btrfs_set_backup_tree_root_gen(root_backup,
			       btrfs_header_generation(info->tree_root->node));

	btrfs_set_backup_tree_root_level(root_backup,
			       btrfs_header_level(info->tree_root->node));

	btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
	btrfs_set_backup_chunk_root_gen(root_backup,
			       btrfs_header_generation(info->chunk_root->node));
	btrfs_set_backup_chunk_root_level(root_backup,
			       btrfs_header_level(info->chunk_root->node));

	if (!btrfs_fs_compat_ro(info, BLOCK_GROUP_TREE)) {
		struct btrfs_root *extent_root = btrfs_extent_root(info, 0);
		struct btrfs_root *csum_root = btrfs_csum_root(info, 0);

		btrfs_set_backup_extent_root(root_backup,
					     extent_root->node->start);
		btrfs_set_backup_extent_root_gen(root_backup,
				btrfs_header_generation(extent_root->node));
		btrfs_set_backup_extent_root_level(root_backup,
					btrfs_header_level(extent_root->node));

		btrfs_set_backup_csum_root(root_backup, csum_root->node->start);
		btrfs_set_backup_csum_root_gen(root_backup,
				btrfs_header_generation(csum_root->node));
		btrfs_set_backup_csum_root_level(root_backup,
				btrfs_header_level(csum_root->node));
	}

	/*
	 * we might commit during log recovery, which happens before we set
	 * the fs_root. Make sure it is valid before we fill it in.
	 */
	if (info->fs_root && info->fs_root->node) {
		btrfs_set_backup_fs_root(root_backup,
					 info->fs_root->node->start);
		btrfs_set_backup_fs_root_gen(root_backup,
			       btrfs_header_generation(info->fs_root->node));
		btrfs_set_backup_fs_root_level(root_backup,
			       btrfs_header_level(info->fs_root->node));
	}

	btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
	btrfs_set_backup_dev_root_gen(root_backup,
			       btrfs_header_generation(info->dev_root->node));
	btrfs_set_backup_dev_root_level(root_backup,
			       btrfs_header_level(info->dev_root->node));

	btrfs_set_backup_total_bytes(root_backup,
			     btrfs_super_total_bytes(info->super_copy));
	btrfs_set_backup_bytes_used(root_backup,
			     btrfs_super_bytes_used(info->super_copy));
	btrfs_set_backup_num_devices(root_backup,
			     btrfs_super_num_devices(info->super_copy));

	/*
	 * if we don't copy this out to the super_copy, it won't get remembered
	 * for the next commit
	 */
	memcpy(&info->super_copy->super_roots,
	       &info->super_for_commit->super_roots,
	       sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
}

/*
 * Reads a backup root based on the passed priority. Prio 0 is the newest, prio
 * 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots
 *
 * @fs_info:	filesystem whose backup roots need to be read
 * @priority:	priority of backup root required
 *
 * Returns backup root index on success and -EINVAL otherwise.
 */
static int read_backup_root(struct btrfs_fs_info *fs_info, u8 priority)
{
	int backup_index = find_newest_super_backup(fs_info);
	struct btrfs_super_block *super = fs_info->super_copy;
	struct btrfs_root_backup *root_backup;

	if (priority < BTRFS_NUM_BACKUP_ROOTS && backup_index >= 0) {
		if (priority == 0)
			return backup_index;

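		/*
		 * Walk backwards from the newest slot: adding
		 * BTRFS_NUM_BACKUP_ROOTS before subtracting the priority
		 * keeps the index positive for the modulo below.
		 */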
		backup_index = backup_index + BTRFS_NUM_BACKUP_ROOTS - priority;
		backup_index %= BTRFS_NUM_BACKUP_ROOTS;
	} else {
		return -EINVAL;
	}

	root_backup = super->super_roots + backup_index;

	btrfs_set_super_generation(super,
				   btrfs_backup_tree_root_gen(root_backup));
	btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
	btrfs_set_super_root_level(super,
				   btrfs_backup_tree_root_level(root_backup));
	btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));

	/*
	 * Fixme: the total bytes and num_devices need to match, otherwise we
	 * should trigger a fsck.
	 */
1744 btrfs_set_super_total_bytes(s: super, val: btrfs_backup_total_bytes(s: root_backup));
1745 btrfs_set_super_num_devices(s: super, val: btrfs_backup_num_devices(s: root_backup));
1746
1747 return backup_index;
1748}
1749
1750/* helper to cleanup workers */
1751static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
1752{
1753 btrfs_destroy_workqueue(wq: fs_info->fixup_workers);
1754 btrfs_destroy_workqueue(wq: fs_info->delalloc_workers);
1755 btrfs_destroy_workqueue(wq: fs_info->workers);
1756 if (fs_info->endio_workers)
1757 destroy_workqueue(wq: fs_info->endio_workers);
1758 if (fs_info->rmw_workers)
1759 destroy_workqueue(wq: fs_info->rmw_workers);
1760 if (fs_info->compressed_write_workers)
1761 destroy_workqueue(wq: fs_info->compressed_write_workers);
1762 btrfs_destroy_workqueue(wq: fs_info->endio_write_workers);
1763 btrfs_destroy_workqueue(wq: fs_info->endio_freespace_worker);
1764 btrfs_destroy_workqueue(wq: fs_info->delayed_workers);
1765 btrfs_destroy_workqueue(wq: fs_info->caching_workers);
1766 btrfs_destroy_workqueue(wq: fs_info->flush_workers);
1767 btrfs_destroy_workqueue(wq: fs_info->qgroup_rescan_workers);
1768 if (fs_info->discard_ctl.discard_workers)
1769 destroy_workqueue(wq: fs_info->discard_ctl.discard_workers);
1770 /*
1771 * Now that all other work queues are destroyed, we can safely destroy
1772 * the queues used for metadata I/O, since tasks from those other work
1773 * queues can do metadata I/O operations.
1774 */
1775 if (fs_info->endio_meta_workers)
1776 destroy_workqueue(wq: fs_info->endio_meta_workers);
1777}
1778
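/*
 * Release the node and commit_root extent buffers of a root, if any, and
 * clear the pointers so that repeated calls are no-ops.
 */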
1779static void free_root_extent_buffers(struct btrfs_root *root)
1780{
1781 if (root) {
1782 free_extent_buffer(eb: root->node);
1783 free_extent_buffer(eb: root->commit_root);
1784 root->node = NULL;
1785 root->commit_root = NULL;
1786 }
1787}
1788
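/*
 * Drop the extent buffers of every global root (extent, csum and free
 * space tree roots) tracked in the global root rbtree.
 */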
1789static void free_global_root_pointers(struct btrfs_fs_info *fs_info)
1790{
1791 struct btrfs_root *root, *tmp;
1792
1793 rbtree_postorder_for_each_entry_safe(root, tmp,
1794 &fs_info->global_root_tree,
1795 rb_node)
1796 free_root_extent_buffers(root);
1797}
1798
1799/* Helper to clean up tree roots. */
1800static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root)
1801{
1802 free_root_extent_buffers(root: info->tree_root);
1803
1804 free_global_root_pointers(fs_info: info);
1805 free_root_extent_buffers(root: info->dev_root);
1806 free_root_extent_buffers(root: info->quota_root);
1807 free_root_extent_buffers(root: info->uuid_root);
1808 free_root_extent_buffers(root: info->fs_root);
1809 free_root_extent_buffers(root: info->data_reloc_root);
1810 free_root_extent_buffers(root: info->block_group_root);
1811 free_root_extent_buffers(root: info->stripe_root);
1812 if (free_chunk_root)
1813 free_root_extent_buffers(root: info->chunk_root);
1814}
1815
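/*
 * Drop one reference on a root and free it once the last reference is
 * gone. Safe to call with a NULL root.
 */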
1816void btrfs_put_root(struct btrfs_root *root)
1817{
1818 if (!root)
1819 return;
1820
1821 if (refcount_dec_and_test(r: &root->refs)) {
1822 WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
1823 WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state));
1824 if (root->anon_dev)
1825 free_anon_bdev(root->anon_dev);
1826 free_root_extent_buffers(root);
1827#ifdef CONFIG_BTRFS_DEBUG
1828 spin_lock(lock: &root->fs_info->fs_roots_radix_lock);
1829 list_del_init(entry: &root->leak_list);
1830 spin_unlock(lock: &root->fs_info->fs_roots_radix_lock);
1831#endif
1832 kfree(objp: root);
1833 }
1834}
1835
1836void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
1837{
1838 int ret;
1839 struct btrfs_root *gang[8];
1840 int i;
1841
1842 while (!list_empty(head: &fs_info->dead_roots)) {
1843 gang[0] = list_entry(fs_info->dead_roots.next,
1844 struct btrfs_root, root_list);
1845 list_del(entry: &gang[0]->root_list);
1846
1847 if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state))
1848 btrfs_drop_and_free_fs_root(fs_info, root: gang[0]);
1849 btrfs_put_root(root: gang[0]);
1850 }
1851
1852 while (1) {
1853 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
1854 results: (void **)gang, first_index: 0,
1855 ARRAY_SIZE(gang));
1856 if (!ret)
1857 break;
1858 for (i = 0; i < ret; i++)
1859 btrfs_drop_and_free_fs_root(fs_info, root: gang[i]);
1860 }
1861}
1862
1863static void btrfs_init_scrub(struct btrfs_fs_info *fs_info)
1864{
1865 mutex_init(&fs_info->scrub_lock);
1866 atomic_set(v: &fs_info->scrubs_running, i: 0);
1867 atomic_set(v: &fs_info->scrub_pause_req, i: 0);
1868 atomic_set(v: &fs_info->scrubs_paused, i: 0);
1869 atomic_set(v: &fs_info->scrub_cancel_req, i: 0);
1870 init_waitqueue_head(&fs_info->scrub_pause_wait);
1871 refcount_set(r: &fs_info->scrub_workers_refcnt, n: 0);
1872}
1873
1874static void btrfs_init_balance(struct btrfs_fs_info *fs_info)
1875{
1876 spin_lock_init(&fs_info->balance_lock);
1877 mutex_init(&fs_info->balance_mutex);
1878 atomic_set(v: &fs_info->balance_pause_req, i: 0);
1879 atomic_set(v: &fs_info->balance_cancel_req, i: 0);
1880 fs_info->balance_ctl = NULL;
1881 init_waitqueue_head(&fs_info->balance_wait_q);
1882 atomic_set(v: &fs_info->reloc_cancel_req, i: 0);
1883}
1884
1885static int btrfs_init_btree_inode(struct super_block *sb)
1886{
1887 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1888 unsigned long hash = btrfs_inode_hash(BTRFS_BTREE_INODE_OBJECTID,
1889 root: fs_info->tree_root);
1890 struct inode *inode;
1891
1892 inode = new_inode(sb);
1893 if (!inode)
1894 return -ENOMEM;
1895
1896 inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
1897 set_nlink(inode, nlink: 1);
1898 /*
1899 * We set i_size on the btree inode to the maximum possible value
1900 * (OFFSET_MAX); the real end of the address space is determined by
1901 * all of the devices in the system.
1902 */
1903 inode->i_size = OFFSET_MAX;
1904 inode->i_mapping->a_ops = &btree_aops;
1905 mapping_set_gfp_mask(m: inode->i_mapping, GFP_NOFS);
1906
1907 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
1908 extent_io_tree_init(fs_info, tree: &BTRFS_I(inode)->io_tree,
1909 owner: IO_TREE_BTREE_INODE_IO);
1910 extent_map_tree_init(tree: &BTRFS_I(inode)->extent_tree);
1911
1912 BTRFS_I(inode)->root = btrfs_grab_root(root: fs_info->tree_root);
1913 BTRFS_I(inode)->location.objectid = BTRFS_BTREE_INODE_OBJECTID;
1914 BTRFS_I(inode)->location.type = 0;
1915 BTRFS_I(inode)->location.offset = 0;
1916 set_bit(nr: BTRFS_INODE_DUMMY, addr: &BTRFS_I(inode)->runtime_flags);
1917 __insert_inode_hash(inode, hashval: hash);
1918 fs_info->btree_inode = inode;
1919
1920 return 0;
1921}
1922
1923static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info)
1924{
1925 mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount);
1926 init_rwsem(&fs_info->dev_replace.rwsem);
1927 init_waitqueue_head(&fs_info->dev_replace.replace_wait);
1928}
1929
1930static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info)
1931{
1932 spin_lock_init(&fs_info->qgroup_lock);
1933 mutex_init(&fs_info->qgroup_ioctl_lock);
1934 fs_info->qgroup_tree = RB_ROOT;
1935 INIT_LIST_HEAD(list: &fs_info->dirty_qgroups);
1936 fs_info->qgroup_seq = 1;
1937 fs_info->qgroup_ulist = NULL;
1938 fs_info->qgroup_rescan_running = false;
1939 fs_info->qgroup_drop_subtree_thres = BTRFS_MAX_LEVEL;
1940 mutex_init(&fs_info->qgroup_rescan_lock);
1941}
1942
1943static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
1944{
1945 u32 max_active = fs_info->thread_pool_size;
1946 unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
1947 unsigned int ordered_flags = WQ_MEM_RECLAIM | WQ_FREEZABLE;
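 /*
 * Two flavors of work queue are set up below: btrfs_alloc_workqueue()
 * wraps a kernel workqueue with btrfs's own active-worker limiting (the
 * limit_active/thresh parameters), while the plain alloc_workqueue()
 * calls use stock kernel workqueues directly.
 */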
1948
1949 fs_info->workers =
1950 btrfs_alloc_workqueue(fs_info, name: "worker", flags, limit_active: max_active, thresh: 16);
1951
1952 fs_info->delalloc_workers =
1953 btrfs_alloc_workqueue(fs_info, name: "delalloc",
1954 flags, limit_active: max_active, thresh: 2);
1955
1956 fs_info->flush_workers =
1957 btrfs_alloc_workqueue(fs_info, name: "flush_delalloc",
1958 flags, limit_active: max_active, thresh: 0);
1959
1960 fs_info->caching_workers =
1961 btrfs_alloc_workqueue(fs_info, name: "cache", flags, limit_active: max_active, thresh: 0);
1962
1963 fs_info->fixup_workers =
1964 btrfs_alloc_ordered_workqueue(fs_info, name: "fixup", flags: ordered_flags);
1965
1966 fs_info->endio_workers =
1967 alloc_workqueue(fmt: "btrfs-endio", flags, max_active);
1968 fs_info->endio_meta_workers =
1969 alloc_workqueue(fmt: "btrfs-endio-meta", flags, max_active);
1970 fs_info->rmw_workers = alloc_workqueue(fmt: "btrfs-rmw", flags, max_active);
1971 fs_info->endio_write_workers =
1972 btrfs_alloc_workqueue(fs_info, name: "endio-write", flags,
1973 limit_active: max_active, thresh: 2);
1974 fs_info->compressed_write_workers =
1975 alloc_workqueue(fmt: "btrfs-compressed-write", flags, max_active);
1976 fs_info->endio_freespace_worker =
1977 btrfs_alloc_workqueue(fs_info, name: "freespace-write", flags,
1978 limit_active: max_active, thresh: 0);
1979 fs_info->delayed_workers =
1980 btrfs_alloc_workqueue(fs_info, name: "delayed-meta", flags,
1981 limit_active: max_active, thresh: 0);
1982 fs_info->qgroup_rescan_workers =
1983 btrfs_alloc_ordered_workqueue(fs_info, name: "qgroup-rescan",
1984 flags: ordered_flags);
1985 fs_info->discard_ctl.discard_workers =
1986 alloc_ordered_workqueue("btrfs_discard", WQ_FREEZABLE);
1987
1988 if (!(fs_info->workers &&
1989 fs_info->delalloc_workers && fs_info->flush_workers &&
1990 fs_info->endio_workers && fs_info->endio_meta_workers &&
1991 fs_info->compressed_write_workers &&
1992 fs_info->endio_write_workers &&
1993 fs_info->endio_freespace_worker && fs_info->rmw_workers &&
1994 fs_info->caching_workers && fs_info->fixup_workers &&
1995 fs_info->delayed_workers && fs_info->qgroup_rescan_workers &&
1996 fs_info->discard_ctl.discard_workers)) {
1997 return -ENOMEM;
1998 }
1999
2000 return 0;
2001}
2002
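/*
 * Allocate the crypto shash matching the checksum type from the super
 * block and record whether the implementation is a fast (accelerated)
 * one.
 */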
2003static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)
2004{
2005 struct crypto_shash *csum_shash;
2006 const char *csum_driver = btrfs_super_csum_driver(csum_type);
2007
2008 csum_shash = crypto_alloc_shash(alg_name: csum_driver, type: 0, mask: 0);
2009
2010 if (IS_ERR(ptr: csum_shash)) {
2011 btrfs_err(fs_info, "error allocating %s hash for checksum",
2012 csum_driver);
2013 return PTR_ERR(ptr: csum_shash);
2014 }
2015
2016 fs_info->csum_shash = csum_shash;
2017
2018 /*
2019 * Check if the checksum implementation is a fast accelerated one.
2020 * As-is this is a bit of a hack and should be replaced once the csum
2021 * implementations provide that information themselves.
2022 */
2023 switch (csum_type) {
2024 case BTRFS_CSUM_TYPE_CRC32:
2025 if (!strstr(crypto_shash_driver_name(tfm: csum_shash), "generic"))
2026 set_bit(nr: BTRFS_FS_CSUM_IMPL_FAST, addr: &fs_info->flags);
2027 break;
2028 case BTRFS_CSUM_TYPE_XXHASH:
2029 set_bit(nr: BTRFS_FS_CSUM_IMPL_FAST, addr: &fs_info->flags);
2030 break;
2031 default:
2032 break;
2033 }
2034
2035 btrfs_info(fs_info, "using %s (%s) checksum algorithm",
2036 btrfs_super_csum_name(csum_type),
2037 crypto_shash_driver_name(csum_shash));
2038 return 0;
2039}
2040
2041static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
2042 struct btrfs_fs_devices *fs_devices)
2043{
2044 int ret;
2045 struct btrfs_tree_parent_check check = { 0 };
2046 struct btrfs_root *log_tree_root;
2047 struct btrfs_super_block *disk_super = fs_info->super_copy;
2048 u64 bytenr = btrfs_super_log_root(s: disk_super);
2049 int level = btrfs_super_log_root_level(s: disk_super);
2050
2051 if (fs_devices->rw_devices == 0) {
2052 btrfs_warn(fs_info, "log replay required on RO media");
2053 return -EIO;
2054 }
2055
2056 log_tree_root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID,
2057 GFP_KERNEL);
2058 if (!log_tree_root)
2059 return -ENOMEM;
2060
2061 check.level = level;
2062 check.transid = fs_info->generation + 1;
2063 check.owner_root = BTRFS_TREE_LOG_OBJECTID;
2064 log_tree_root->node = read_tree_block(fs_info, bytenr, check: &check);
2065 if (IS_ERR(ptr: log_tree_root->node)) {
2066 btrfs_warn(fs_info, "failed to read log tree");
2067 ret = PTR_ERR(ptr: log_tree_root->node);
2068 log_tree_root->node = NULL;
2069 btrfs_put_root(root: log_tree_root);
2070 return ret;
2071 }
2072 if (!extent_buffer_uptodate(eb: log_tree_root->node)) {
2073 btrfs_err(fs_info, "failed to read log tree");
2074 btrfs_put_root(root: log_tree_root);
2075 return -EIO;
2076 }
2077
2078 /* returns with log_tree_root freed on success */
2079 ret = btrfs_recover_log_trees(tree_root: log_tree_root);
2080 if (ret) {
2081 btrfs_handle_fs_error(fs_info, ret,
2082 "Failed to recover log tree");
2083 btrfs_put_root(root: log_tree_root);
2084 return ret;
2085 }
2086
2087 if (sb_rdonly(sb: fs_info->sb)) {
2088 ret = btrfs_commit_super(fs_info);
2089 if (ret)
2090 return ret;
2091 }
2092
2093 return 0;
2094}
2095
2096static int load_global_roots_objectid(struct btrfs_root *tree_root,
2097 struct btrfs_path *path, u64 objectid,
2098 const char *name)
2099{
2100 struct btrfs_fs_info *fs_info = tree_root->fs_info;
2101 struct btrfs_root *root;
2102 u64 max_global_id = 0;
2103 int ret;
2104 struct btrfs_key key = {
2105 .objectid = objectid,
2106 .type = BTRFS_ROOT_ITEM_KEY,
2107 .offset = 0,
2108 };
2109 bool found = false;
2110
2111 /* If we have IGNOREDATACSUMS skip loading these roots. */
2112 if (objectid == BTRFS_CSUM_TREE_OBJECTID &&
2113 btrfs_test_opt(fs_info, IGNOREDATACSUMS)) {
2114 set_bit(nr: BTRFS_FS_STATE_NO_CSUMS, addr: &fs_info->fs_state);
2115 return 0;
2116 }
2117
2118 while (1) {
2119 ret = btrfs_search_slot(NULL, root: tree_root, key: &key, p: path, ins_len: 0, cow: 0);
2120 if (ret < 0)
2121 break;
2122
2123 if (path->slots[0] >= btrfs_header_nritems(eb: path->nodes[0])) {
2124 ret = btrfs_next_leaf(root: tree_root, path);
2125 if (ret) {
2126 if (ret > 0)
2127 ret = 0;
2128 break;
2129 }
2130 }
2131 ret = 0;
2132
2133 btrfs_item_key_to_cpu(eb: path->nodes[0], cpu_key: &key, nr: path->slots[0]);
2134 if (key.objectid != objectid)
2135 break;
2136 btrfs_release_path(p: path);
2137
2138 /*
2139 * Just worry about this for the extent tree; it'll be the same for
2140 * everybody.
2141 */
2142 if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
2143 max_global_id = max(max_global_id, key.offset);
2144
2145 found = true;
2146 root = read_tree_root_path(tree_root, path, key: &key);
2147 if (IS_ERR(ptr: root)) {
2148 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
2149 ret = PTR_ERR(ptr: root);
2150 break;
2151 }
2152 set_bit(nr: BTRFS_ROOT_TRACK_DIRTY, addr: &root->state);
2153 ret = btrfs_global_root_insert(root);
2154 if (ret) {
2155 btrfs_put_root(root);
2156 break;
2157 }
2158 key.offset++;
2159 }
2160 btrfs_release_path(p: path);
2161
2162 if (objectid == BTRFS_EXTENT_TREE_OBJECTID)
2163 fs_info->nr_global_roots = max_global_id + 1;
2164
2165 if (!found || ret) {
2166 if (objectid == BTRFS_CSUM_TREE_OBJECTID)
2167 set_bit(nr: BTRFS_FS_STATE_NO_CSUMS, addr: &fs_info->fs_state);
2168
2169 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS))
2170 ret = ret ? ret : -ENOENT;
2171 else
2172 ret = 0;
2173 btrfs_err(fs_info, "failed to load root %s", name);
2174 }
2175 return ret;
2176}
2177
2178static int load_global_roots(struct btrfs_root *tree_root)
2179{
2180 struct btrfs_path *path;
2181 int ret = 0;
2182
2183 path = btrfs_alloc_path();
2184 if (!path)
2185 return -ENOMEM;
2186
2187 ret = load_global_roots_objectid(tree_root, path,
2188 BTRFS_EXTENT_TREE_OBJECTID, name: "extent");
2189 if (ret)
2190 goto out;
2191 ret = load_global_roots_objectid(tree_root, path,
2192 BTRFS_CSUM_TREE_OBJECTID, name: "csum");
2193 if (ret)
2194 goto out;
2195 if (!btrfs_fs_compat_ro(tree_root->fs_info, FREE_SPACE_TREE))
2196 goto out;
2197 ret = load_global_roots_objectid(tree_root, path,
2198 BTRFS_FREE_SPACE_TREE_OBJECTID,
2199 name: "free space");
2200out:
2201 btrfs_free_path(p: path);
2202 return ret;
2203}
2204
2205static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
2206{
2207 struct btrfs_root *tree_root = fs_info->tree_root;
2208 struct btrfs_root *root;
2209 struct btrfs_key location;
2210 int ret;
2211
2212 BUG_ON(!fs_info->tree_root);
2213
2214 ret = load_global_roots(tree_root);
2215 if (ret)
2216 return ret;
2217
2218 location.type = BTRFS_ROOT_ITEM_KEY;
2219 location.offset = 0;
2220
2221 if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) {
2222 location.objectid = BTRFS_BLOCK_GROUP_TREE_OBJECTID;
2223 root = btrfs_read_tree_root(tree_root, key: &location);
2224 if (IS_ERR(ptr: root)) {
2225 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2226 ret = PTR_ERR(ptr: root);
2227 goto out;
2228 }
2229 } else {
2230 set_bit(nr: BTRFS_ROOT_TRACK_DIRTY, addr: &root->state);
2231 fs_info->block_group_root = root;
2232 }
2233 }
2234
2235 location.objectid = BTRFS_DEV_TREE_OBJECTID;
2236 root = btrfs_read_tree_root(tree_root, key: &location);
2237 if (IS_ERR(ptr: root)) {
2238 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2239 ret = PTR_ERR(ptr: root);
2240 goto out;
2241 }
2242 } else {
2243 set_bit(nr: BTRFS_ROOT_TRACK_DIRTY, addr: &root->state);
2244 fs_info->dev_root = root;
2245 }
2246 /* Initialize fs_info for all devices in any case */
2247 ret = btrfs_init_devices_late(fs_info);
2248 if (ret)
2249 goto out;
2250
2251 /*
2252 * This tree can share blocks with some other fs tree during relocation
2253 * and we need a proper setup by btrfs_get_fs_root
2254 */
2255 root = btrfs_get_fs_root(fs_info: tree_root->fs_info,
2256 BTRFS_DATA_RELOC_TREE_OBJECTID, check_ref: true);
2257 if (IS_ERR(ptr: root)) {
2258 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2259 ret = PTR_ERR(ptr: root);
2260 goto out;
2261 }
2262 } else {
2263 set_bit(nr: BTRFS_ROOT_TRACK_DIRTY, addr: &root->state);
2264 fs_info->data_reloc_root = root;
2265 }
2266
2267 location.objectid = BTRFS_QUOTA_TREE_OBJECTID;
2268 root = btrfs_read_tree_root(tree_root, key: &location);
2269 if (!IS_ERR(ptr: root)) {
2270 set_bit(nr: BTRFS_ROOT_TRACK_DIRTY, addr: &root->state);
2271 fs_info->quota_root = root;
2272 }
2273
2274 location.objectid = BTRFS_UUID_TREE_OBJECTID;
2275 root = btrfs_read_tree_root(tree_root, key: &location);
2276 if (IS_ERR(ptr: root)) {
2277 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2278 ret = PTR_ERR(ptr: root);
2279 if (ret != -ENOENT)
2280 goto out;
2281 }
2282 } else {
2283 set_bit(nr: BTRFS_ROOT_TRACK_DIRTY, addr: &root->state);
2284 fs_info->uuid_root = root;
2285 }
2286
2287 if (btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) {
2288 location.objectid = BTRFS_RAID_STRIPE_TREE_OBJECTID;
2289 root = btrfs_read_tree_root(tree_root, key: &location);
2290 if (IS_ERR(ptr: root)) {
2291 if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) {
2292 ret = PTR_ERR(ptr: root);
2293 goto out;
2294 }
2295 } else {
2296 set_bit(nr: BTRFS_ROOT_TRACK_DIRTY, addr: &root->state);
2297 fs_info->stripe_root = root;
2298 }
2299 }
2300
2301 return 0;
2302out:
2303 btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d",
2304 location.objectid, ret);
2305 return ret;
2306}
2307
2308/*
2309 * Real super block validation
2310 * NOTE: super csum type and incompat features will not be checked here.
2311 *
2312 * @sb: super block to check
2313 * @mirror_num: which copy of the super block to check the bytenr of:
2314 * 0 the primary (1st) sb
2315 * 1, 2 the 2nd and 3rd backup copies
2316 * -1 skip bytenr check
2317 */
2318int btrfs_validate_super(struct btrfs_fs_info *fs_info,
2319 struct btrfs_super_block *sb, int mirror_num)
2320{
2321 u64 nodesize = btrfs_super_nodesize(s: sb);
2322 u64 sectorsize = btrfs_super_sectorsize(s: sb);
2323 int ret = 0;
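 /*
 * The checks below record -EINVAL and keep going rather than returning
 * early, so that all detected problems are reported in one pass.
 */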
2324
2325 if (btrfs_super_magic(s: sb) != BTRFS_MAGIC) {
2326 btrfs_err(fs_info, "no valid FS found");
2327 ret = -EINVAL;
2328 }
2329 if (btrfs_super_flags(s: sb) & ~BTRFS_SUPER_FLAG_SUPP) {
2330 btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
2331 btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
2332 ret = -EINVAL;
2333 }
2334 if (btrfs_super_root_level(s: sb) >= BTRFS_MAX_LEVEL) {
2335 btrfs_err(fs_info, "tree_root level too big: %d >= %d",
2336 btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
2337 ret = -EINVAL;
2338 }
2339 if (btrfs_super_chunk_root_level(s: sb) >= BTRFS_MAX_LEVEL) {
2340 btrfs_err(fs_info, "chunk_root level too big: %d >= %d",
2341 btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
2342 ret = -EINVAL;
2343 }
2344 if (btrfs_super_log_root_level(s: sb) >= BTRFS_MAX_LEVEL) {
2345 btrfs_err(fs_info, "log_root level too big: %d >= %d",
2346 btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
2347 ret = -EINVAL;
2348 }
2349
2350 /*
2351 * Check sectorsize and nodesize first, other checks will need them.
2352 * Check all possible sectorsizes (4K, 8K, 16K, 32K, 64K) here.
2353 */
2354 if (!is_power_of_2(n: sectorsize) || sectorsize < 4096 ||
2355 sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2356 btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize);
2357 ret = -EINVAL;
2358 }
2359
2360 /*
2361 * We only support at most two sectorsizes: 4K and PAGE_SIZE.
2362 *
2363 * We can support 16K sectorsize with 64K page size without problem,
2364 * but such a sectorsize/pagesize combination doesn't make much sense.
2365 * 4K will be our future standard; PAGE_SIZE is supported from the very
2366 * beginning.
2367 */
2368 if (sectorsize > PAGE_SIZE || (sectorsize != SZ_4K && sectorsize != PAGE_SIZE)) {
2369 btrfs_err(fs_info,
2370 "sectorsize %llu not yet supported for page size %lu",
2371 sectorsize, PAGE_SIZE);
2372 ret = -EINVAL;
2373 }
2374
2375 if (!is_power_of_2(n: nodesize) || nodesize < sectorsize ||
2376 nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) {
2377 btrfs_err(fs_info, "invalid nodesize %llu", nodesize);
2378 ret = -EINVAL;
2379 }
2380 if (nodesize != le32_to_cpu(sb->__unused_leafsize)) {
2381 btrfs_err(fs_info, "invalid leafsize %u, should be %llu",
2382 le32_to_cpu(sb->__unused_leafsize), nodesize);
2383 ret = -EINVAL;
2384 }
2385
2386 /* Root alignment check */
2387 if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) {
2388 btrfs_warn(fs_info, "tree_root block unaligned: %llu",
2389 btrfs_super_root(sb));
2390 ret = -EINVAL;
2391 }
2392 if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) {
2393 btrfs_warn(fs_info, "chunk_root block unaligned: %llu",
2394 btrfs_super_chunk_root(sb));
2395 ret = -EINVAL;
2396 }
2397 if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) {
2398 btrfs_warn(fs_info, "log_root block unaligned: %llu",
2399 btrfs_super_log_root(sb));
2400 ret = -EINVAL;
2401 }
2402
2403 if (!fs_info->fs_devices->temp_fsid &&
2404 memcmp(p: fs_info->fs_devices->fsid, q: sb->fsid, BTRFS_FSID_SIZE) != 0) {
2405 btrfs_err(fs_info,
2406 "superblock fsid doesn't match fsid of fs_devices: %pU != %pU",
2407 sb->fsid, fs_info->fs_devices->fsid);
2408 ret = -EINVAL;
2409 }
2410
2411 if (memcmp(p: fs_info->fs_devices->metadata_uuid, q: btrfs_sb_fsid_ptr(sb),
2412 BTRFS_FSID_SIZE) != 0) {
2413 btrfs_err(fs_info,
2414"superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU",
2415 btrfs_sb_fsid_ptr(sb), fs_info->fs_devices->metadata_uuid);
2416 ret = -EINVAL;
2417 }
2418
2419 if (memcmp(p: fs_info->fs_devices->metadata_uuid, q: sb->dev_item.fsid,
2420 BTRFS_FSID_SIZE) != 0) {
2421 btrfs_err(fs_info,
2422 "dev_item UUID does not match metadata fsid: %pU != %pU",
2423 fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid);
2424 ret = -EINVAL;
2425 }
2426
2427 /*
2428 * Artificial requirement for block-group-tree to force newer features
2429 * (free-space-tree, no-holes) so the test matrix is smaller.
2430 */
2431 if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) &&
2432 (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID) ||
2433 !btrfs_fs_incompat(fs_info, NO_HOLES))) {
2434 btrfs_err(fs_info,
2435 "block-group-tree feature requires fres-space-tree and no-holes");
2436 ret = -EINVAL;
2437 }
2438
2439 /*
2440 * Hint to catch really bogus numbers, bitflips or so; more exact checks
2441 * are done later.
2442 */
2443 if (btrfs_super_bytes_used(s: sb) < 6 * btrfs_super_nodesize(s: sb)) {
2444 btrfs_err(fs_info, "bytes_used is too small %llu",
2445 btrfs_super_bytes_used(sb));
2446 ret = -EINVAL;
2447 }
2448 if (!is_power_of_2(n: btrfs_super_stripesize(s: sb))) {
2449 btrfs_err(fs_info, "invalid stripesize %u",
2450 btrfs_super_stripesize(sb));
2451 ret = -EINVAL;
2452 }
2453 if (btrfs_super_num_devices(s: sb) > (1UL << 31))
2454 btrfs_warn(fs_info, "suspicious number of devices: %llu",
2455 btrfs_super_num_devices(sb));
2456 if (btrfs_super_num_devices(s: sb) == 0) {
2457 btrfs_err(fs_info, "number of devices is 0");
2458 ret = -EINVAL;
2459 }
2460
2461 if (mirror_num >= 0 &&
2462 btrfs_super_bytenr(s: sb) != btrfs_sb_offset(mirror: mirror_num)) {
2463 btrfs_err(fs_info, "super offset mismatch %llu != %u",
2464 btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
2465 ret = -EINVAL;
2466 }
2467
2468 /*
2469 * Obvious sys_chunk_array corruptions: it must hold at least one key
2470 * and one chunk.
2471 */
2472 if (btrfs_super_sys_array_size(s: sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
2473 btrfs_err(fs_info, "system chunk array too big %u > %u",
2474 btrfs_super_sys_array_size(sb),
2475 BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
2476 ret = -EINVAL;
2477 }
2478 if (btrfs_super_sys_array_size(s: sb) < sizeof(struct btrfs_disk_key)
2479 + sizeof(struct btrfs_chunk)) {
2480 btrfs_err(fs_info, "system chunk array too small %u < %zu",
2481 btrfs_super_sys_array_size(sb),
2482 sizeof(struct btrfs_disk_key)
2483 + sizeof(struct btrfs_chunk));
2484 ret = -EINVAL;
2485 }
2486
2487 /*
2488 * The generation is a global counter; we'll trust it more than the
2489 * others, but it's still possible that it's the one that's wrong.
2490 */
2491 if (btrfs_super_generation(s: sb) < btrfs_super_chunk_root_generation(s: sb))
2492 btrfs_warn(fs_info,
2493 "suspicious: generation < chunk_root_generation: %llu < %llu",
2494 btrfs_super_generation(sb),
2495 btrfs_super_chunk_root_generation(sb));
2496 if (btrfs_super_generation(s: sb) < btrfs_super_cache_generation(s: sb)
2497 && btrfs_super_cache_generation(s: sb) != (u64)-1)
2498 btrfs_warn(fs_info,
2499 "suspicious: generation < cache_generation: %llu < %llu",
2500 btrfs_super_generation(sb),
2501 btrfs_super_cache_generation(sb));
2502
2503 return ret;
2504}
2505
2506/*
2507 * Validation of super block at mount time.
2508 * Some checks already done early at mount time, like csum type and incompat
2509 * flags will be skipped.
2510 */
2511static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info)
2512{
2513 return btrfs_validate_super(fs_info, sb: fs_info->super_copy, mirror_num: 0);
2514}
2515
2516/*
2517 * Validation of super block at write time.
2518 * Some checks like bytenr check will be skipped as their values will be
2519 * overwritten soon.
2520 * Extra checks like csum type and incompat flags will be done here.
2521 */
2522static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info,
2523 struct btrfs_super_block *sb)
2524{
2525 int ret;
2526
2527 ret = btrfs_validate_super(fs_info, sb, mirror_num: -1);
2528 if (ret < 0)
2529 goto out;
2530 if (!btrfs_supported_super_csum(csum_type: btrfs_super_csum_type(s: sb))) {
2531 ret = -EUCLEAN;
2532 btrfs_err(fs_info, "invalid csum type, has %u want %u",
2533 btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32);
2534 goto out;
2535 }
2536 if (btrfs_super_incompat_flags(s: sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
2537 ret = -EUCLEAN;
2538 btrfs_err(fs_info,
2539 "invalid incompat flags, has 0x%llx valid mask 0x%llx",
2540 btrfs_super_incompat_flags(sb),
2541 (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP);
2542 goto out;
2543 }
2544out:
2545 if (ret < 0)
2546 btrfs_err(fs_info,
2547 "super block corruption detected before writing it to disk");
2548 return ret;
2549}
2550
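/*
 * Read a root's node at the given bytenr, verifying the expected
 * generation and level, and hook it up as the root's commit root.
 */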
2551static int load_super_root(struct btrfs_root *root, u64 bytenr, u64 gen, int level)
2552{
2553 struct btrfs_tree_parent_check check = {
2554 .level = level,
2555 .transid = gen,
2556 .owner_root = root->root_key.objectid
2557 };
2558 int ret = 0;
2559
2560 root->node = read_tree_block(fs_info: root->fs_info, bytenr, check: &check);
2561 if (IS_ERR(ptr: root->node)) {
2562 ret = PTR_ERR(ptr: root->node);
2563 root->node = NULL;
2564 return ret;
2565 }
2566 if (!extent_buffer_uptodate(eb: root->node)) {
2567 free_extent_buffer(eb: root->node);
2568 root->node = NULL;
2569 return -EIO;
2570 }
2571
2572 btrfs_set_root_node(item: &root->root_item, node: root->node);
2573 root->commit_root = btrfs_root_node(root);
2574 btrfs_set_root_refs(s: &root->root_item, val: 1);
2575 return ret;
2576}
2577
2578static int load_important_roots(struct btrfs_fs_info *fs_info)
2579{
2580 struct btrfs_super_block *sb = fs_info->super_copy;
2581 u64 gen, bytenr;
2582 int level, ret;
2583
2584 bytenr = btrfs_super_root(s: sb);
2585 gen = btrfs_super_generation(s: sb);
2586 level = btrfs_super_root_level(s: sb);
2587 ret = load_super_root(root: fs_info->tree_root, bytenr, gen, level);
2588 if (ret) {
2589 btrfs_warn(fs_info, "couldn't read tree root");
2590 return ret;
2591 }
2592 return 0;
2593}
2594
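/*
 * Load the tree root and everything hanging off it. On failure, if the
 * usebackuproot mount option is set, retry with each backup root in turn,
 * newest first, until one works or all of them have been tried.
 */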
2595static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
2596{
2597 int backup_index = find_newest_super_backup(info: fs_info);
2598 struct btrfs_super_block *sb = fs_info->super_copy;
2599 struct btrfs_root *tree_root = fs_info->tree_root;
2600 bool handle_error = false;
2601 int ret = 0;
2602 int i;
2603
2604 for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
2605 if (handle_error) {
2606 if (!IS_ERR(ptr: tree_root->node))
2607 free_extent_buffer(eb: tree_root->node);
2608 tree_root->node = NULL;
2609
2610 if (!btrfs_test_opt(fs_info, USEBACKUPROOT))
2611 break;
2612
 free_root_pointers(info: fs_info, free_chunk_root: false);
2614
2615 /*
2616 * Don't use the log in recovery mode; it won't be
2617 * valid.
2618 */
2619 btrfs_set_super_log_root(s: sb, val: 0);
2620
2621 /* We can't trust the free space cache either */
2622 btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);
2623
2624 btrfs_warn(fs_info, "try to load backup roots slot %d", i);
2625 ret = read_backup_root(fs_info, priority: i);
2626 backup_index = ret;
2627 if (ret < 0)
2628 return ret;
2629 }
2630
2631 ret = load_important_roots(fs_info);
2632 if (ret) {
2633 handle_error = true;
2634 continue;
2635 }
2636
2637 /*
2638 * No need to hold btrfs_root::objectid_mutex since the fs
2639 * hasn't been fully initialised and we are the only user
2640 */
2641 ret = btrfs_init_root_free_objectid(root: tree_root);
2642 if (ret < 0) {
2643 handle_error = true;
2644 continue;
2645 }
2646
2647 ASSERT(tree_root->free_objectid <= BTRFS_LAST_FREE_OBJECTID);
2648
2649 ret = btrfs_read_roots(fs_info);
2650 if (ret < 0) {
2651 handle_error = true;
2652 continue;
2653 }
2654
2655 /* All successful */
2656 fs_info->generation = btrfs_header_generation(eb: tree_root->node);
2657 btrfs_set_last_trans_committed(fs_info, gen: fs_info->generation);
2658 fs_info->last_reloc_trans = 0;
2659
2660 /* Always begin writing backup roots after the one being used */
2661 if (backup_index < 0) {
2662 fs_info->backup_root_index = 0;
2663 } else {
2664 fs_info->backup_root_index = backup_index + 1;
2665 fs_info->backup_root_index %= BTRFS_NUM_BACKUP_ROOTS;
2666 }
2667 break;
2668 }
2669
2670 return ret;
2671}
2672
2673void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
2674{
2675 INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
2676 INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
2677 INIT_LIST_HEAD(list: &fs_info->trans_list);
2678 INIT_LIST_HEAD(list: &fs_info->dead_roots);
2679 INIT_LIST_HEAD(list: &fs_info->delayed_iputs);
2680 INIT_LIST_HEAD(list: &fs_info->delalloc_roots);
2681 INIT_LIST_HEAD(list: &fs_info->caching_block_groups);
2682 spin_lock_init(&fs_info->delalloc_root_lock);
2683 spin_lock_init(&fs_info->trans_lock);
2684 spin_lock_init(&fs_info->fs_roots_radix_lock);
2685 spin_lock_init(&fs_info->delayed_iput_lock);
2686 spin_lock_init(&fs_info->defrag_inodes_lock);
2687 spin_lock_init(&fs_info->super_lock);
2688 spin_lock_init(&fs_info->buffer_lock);
2689 spin_lock_init(&fs_info->unused_bgs_lock);
2690 spin_lock_init(&fs_info->treelog_bg_lock);
2691 spin_lock_init(&fs_info->zone_active_bgs_lock);
2692 spin_lock_init(&fs_info->relocation_bg_lock);
2693 rwlock_init(&fs_info->tree_mod_log_lock);
2694 rwlock_init(&fs_info->global_root_lock);
2695 mutex_init(&fs_info->unused_bg_unpin_mutex);
2696 mutex_init(&fs_info->reclaim_bgs_lock);
2697 mutex_init(&fs_info->reloc_mutex);
2698 mutex_init(&fs_info->delalloc_root_mutex);
2699 mutex_init(&fs_info->zoned_meta_io_lock);
2700 mutex_init(&fs_info->zoned_data_reloc_io_lock);
2701 seqlock_init(&fs_info->profiles_lock);
2702
2703 btrfs_lockdep_init_map(fs_info, btrfs_trans_num_writers);
2704 btrfs_lockdep_init_map(fs_info, btrfs_trans_num_extwriters);
2705 btrfs_lockdep_init_map(fs_info, btrfs_trans_pending_ordered);
2706 btrfs_lockdep_init_map(fs_info, btrfs_ordered_extent);
2707 btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_prep,
2708 BTRFS_LOCKDEP_TRANS_COMMIT_PREP);
2709 btrfs_state_lockdep_init_map(fs_info, btrfs_trans_unblocked,
2710 BTRFS_LOCKDEP_TRANS_UNBLOCKED);
2711 btrfs_state_lockdep_init_map(fs_info, btrfs_trans_super_committed,
2712 BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED);
2713 btrfs_state_lockdep_init_map(fs_info, btrfs_trans_completed,
2714 BTRFS_LOCKDEP_TRANS_COMPLETED);
2715
2716 INIT_LIST_HEAD(list: &fs_info->dirty_cowonly_roots);
2717 INIT_LIST_HEAD(list: &fs_info->space_info);
2718 INIT_LIST_HEAD(list: &fs_info->tree_mod_seq_list);
2719 INIT_LIST_HEAD(list: &fs_info->unused_bgs);
2720 INIT_LIST_HEAD(list: &fs_info->reclaim_bgs);
2721 INIT_LIST_HEAD(list: &fs_info->zone_active_bgs);
2722#ifdef CONFIG_BTRFS_DEBUG
2723 INIT_LIST_HEAD(list: &fs_info->allocated_roots);
2724 INIT_LIST_HEAD(list: &fs_info->allocated_ebs);
2725 spin_lock_init(&fs_info->eb_leak_lock);
2726#endif
2727 extent_map_tree_init(tree: &fs_info->mapping_tree);
2728 btrfs_init_block_rsv(rsv: &fs_info->global_block_rsv,
2729 type: BTRFS_BLOCK_RSV_GLOBAL);
2730 btrfs_init_block_rsv(rsv: &fs_info->trans_block_rsv, type: BTRFS_BLOCK_RSV_TRANS);
2731 btrfs_init_block_rsv(rsv: &fs_info->chunk_block_rsv, type: BTRFS_BLOCK_RSV_CHUNK);
2732 btrfs_init_block_rsv(rsv: &fs_info->empty_block_rsv, type: BTRFS_BLOCK_RSV_EMPTY);
2733 btrfs_init_block_rsv(rsv: &fs_info->delayed_block_rsv,
2734 type: BTRFS_BLOCK_RSV_DELOPS);
2735 btrfs_init_block_rsv(rsv: &fs_info->delayed_refs_rsv,
2736 type: BTRFS_BLOCK_RSV_DELREFS);
2737
2738 atomic_set(v: &fs_info->async_delalloc_pages, i: 0);
2739 atomic_set(v: &fs_info->defrag_running, i: 0);
2740 atomic_set(v: &fs_info->nr_delayed_iputs, i: 0);
2741 atomic64_set(v: &fs_info->tree_mod_seq, i: 0);
2742 fs_info->global_root_tree = RB_ROOT;
2743 fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE;
2744 fs_info->metadata_ratio = 0;
2745 fs_info->defrag_inodes = RB_ROOT;
2746 atomic64_set(v: &fs_info->free_chunk_space, i: 0);
2747 fs_info->tree_mod_log = RB_ROOT;
2748 fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL;
2749 btrfs_init_ref_verify(fs_info);
2750
2751 fs_info->thread_pool_size = min_t(unsigned long,
2752 num_online_cpus() + 2, 8);
2753
2754 INIT_LIST_HEAD(list: &fs_info->ordered_roots);
2755 spin_lock_init(&fs_info->ordered_root_lock);
2756
2757 btrfs_init_scrub(fs_info);
2758 btrfs_init_balance(fs_info);
2759 btrfs_init_async_reclaim_work(fs_info);
2760
2761 rwlock_init(&fs_info->block_group_cache_lock);
2762 fs_info->block_group_cache_tree = RB_ROOT_CACHED;
2763
2764 extent_io_tree_init(fs_info, tree: &fs_info->excluded_extents,
2765 owner: IO_TREE_FS_EXCLUDED_EXTENTS);
2766
2767 mutex_init(&fs_info->ordered_operations_mutex);
2768 mutex_init(&fs_info->tree_log_mutex);
2769 mutex_init(&fs_info->chunk_mutex);
2770 mutex_init(&fs_info->transaction_kthread_mutex);
2771 mutex_init(&fs_info->cleaner_mutex);
2772 mutex_init(&fs_info->ro_block_group_mutex);
2773 init_rwsem(&fs_info->commit_root_sem);
2774 init_rwsem(&fs_info->cleanup_work_sem);
2775 init_rwsem(&fs_info->subvol_sem);
2776 sema_init(sem: &fs_info->uuid_tree_rescan_sem, val: 1);
2777
2778 btrfs_init_dev_replace_locks(fs_info);
2779 btrfs_init_qgroup(fs_info);
2780 btrfs_discard_init(fs_info);
2781
2782 btrfs_init_free_cluster(cluster: &fs_info->meta_alloc_cluster);
2783 btrfs_init_free_cluster(cluster: &fs_info->data_alloc_cluster);
2784
2785 init_waitqueue_head(&fs_info->transaction_throttle);
2786 init_waitqueue_head(&fs_info->transaction_wait);
2787 init_waitqueue_head(&fs_info->transaction_blocked_wait);
2788 init_waitqueue_head(&fs_info->async_submit_wait);
2789 init_waitqueue_head(&fs_info->delayed_iputs_wait);
2790
2791 /* Usable values until the real ones are cached from the superblock */
2792 fs_info->nodesize = 4096;
2793 fs_info->sectorsize = 4096;
2794 fs_info->sectorsize_bits = ilog2(4096);
2795 fs_info->stripesize = 4096;
2796
2797 fs_info->max_extent_size = BTRFS_MAX_EXTENT_SIZE;
2798
2799 spin_lock_init(&fs_info->swapfile_pins_lock);
2800 fs_info->swapfile_pins = RB_ROOT;
2801
2802 fs_info->bg_reclaim_threshold = BTRFS_DEFAULT_RECLAIM_THRESH;
2803 INIT_WORK(&fs_info->reclaim_bgs_work, btrfs_reclaim_bgs_work);
2804}
2805
2806static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block *sb)
2807{
2808 int ret;
2809
2810 fs_info->sb = sb;
2811 sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
2812 sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
2813
2814 ret = percpu_counter_init(&fs_info->ordered_bytes, 0, GFP_KERNEL);
2815 if (ret)
2816 return ret;
2817
2818 ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
2819 if (ret)
2820 return ret;
2821
2822 fs_info->dirty_metadata_batch = PAGE_SIZE *
2823 (1 + ilog2(nr_cpu_ids));
2824
2825 ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
2826 if (ret)
2827 return ret;
2828
2829 ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
2830 GFP_KERNEL);
2831 if (ret)
2832 return ret;
2833
2834 fs_info->delayed_root = kmalloc(size: sizeof(struct btrfs_delayed_root),
2835 GFP_KERNEL);
2836 if (!fs_info->delayed_root)
2837 return -ENOMEM;
2838 btrfs_init_delayed_root(delayed_root: fs_info->delayed_root);
2839
2840 if (sb_rdonly(sb))
2841 set_bit(nr: BTRFS_FS_STATE_RO, addr: &fs_info->fs_state);
2842
2843 return btrfs_alloc_stripe_hash_table(info: fs_info);
2844}
2845
2846static int btrfs_uuid_rescan_kthread(void *data)
2847{
2848 struct btrfs_fs_info *fs_info = data;
2849 int ret;
2850
2851 /*
2852 * 1st step is to iterate through the existing UUID tree and
2853 * to delete all entries that contain outdated data.
2854 * 2nd step is to add all missing entries to the UUID tree.
2855 */
2856 ret = btrfs_uuid_tree_iterate(fs_info);
2857 if (ret < 0) {
2858 if (ret != -EINTR)
2859 btrfs_warn(fs_info, "iterating uuid_tree failed %d",
2860 ret);
2861 up(sem: &fs_info->uuid_tree_rescan_sem);
2862 return ret;
2863 }
2864 return btrfs_uuid_scan_kthread(data);
2865}
2866
2867static int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
2868{
2869 struct task_struct *task;
2870
2871 down(sem: &fs_info->uuid_tree_rescan_sem);
2872 task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid");
2873 if (IS_ERR(ptr: task)) {
2874 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */
2875 btrfs_warn(fs_info, "failed to start uuid_rescan task");
2876 up(sem: &fs_info->uuid_tree_rescan_sem);
2877 return PTR_ERR(ptr: task);
2878 }
2879
2880 return 0;
2881}
2882
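/*
 * Run orphan cleanup on every fs root currently in the radix tree,
 * walking it in gangs of 8 and skipping roots already queued on
 * dead_roots.
 */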
2883static int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
2884{
2885 u64 root_objectid = 0;
2886 struct btrfs_root *gang[8];
2887 int i = 0;
2888 int err = 0;
2889 unsigned int ret = 0;
2890
2891 while (1) {
2892 spin_lock(lock: &fs_info->fs_roots_radix_lock);
2893 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
2894 results: (void **)gang, first_index: root_objectid,
2895 ARRAY_SIZE(gang));
2896 if (!ret) {
2897 spin_unlock(lock: &fs_info->fs_roots_radix_lock);
2898 break;
2899 }
2900 root_objectid = gang[ret - 1]->root_key.objectid + 1;
2901
2902 for (i = 0; i < ret; i++) {
2903 /* Avoid grabbing roots in dead_roots. */
2904 if (btrfs_root_refs(s: &gang[i]->root_item) == 0) {
2905 gang[i] = NULL;
2906 continue;
2907 }
2908 /* Grab all of the search results for later use. */
2909 gang[i] = btrfs_grab_root(root: gang[i]);
2910 }
2911 spin_unlock(lock: &fs_info->fs_roots_radix_lock);
2912
2913 for (i = 0; i < ret; i++) {
2914 if (!gang[i])
2915 continue;
2916 root_objectid = gang[i]->root_key.objectid;
2917 err = btrfs_orphan_cleanup(root: gang[i]);
2918 if (err)
2919 goto out;
2920 btrfs_put_root(root: gang[i]);
2921 }
2922 root_objectid++;
2923 }
2924out:
2925 /* Release the uncleaned roots due to error. */
2926 for (; i < ret; i++) {
2927 if (gang[i])
2928 btrfs_put_root(root: gang[i]);
2929 }
2930 return err;
2931}
2932
2933/*
2934 * Some options only have meaning at mount time and shouldn't persist across
2935 * remounts, or be displayed. Clear these at the end of mount and remount
2936 * code paths.
2937 */
2938void btrfs_clear_oneshot_options(struct btrfs_fs_info *fs_info)
2939{
2940 btrfs_clear_opt(fs_info->mount_opt, USEBACKUPROOT);
2941 btrfs_clear_opt(fs_info->mount_opt, CLEAR_CACHE);
2942}
2943
2944/*
2945 * Mounting logic specific to read-write file systems. Shared by open_ctree
2946 * and btrfs_remount when remounting from read-only to read-write.
2947 */
2948int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
2949{
2950 int ret;
2951 const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE);
2952 bool rebuild_free_space_tree = false;
2953
2954 if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
2955 btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
2956 rebuild_free_space_tree = true;
2957 } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
2958 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) {
2959 btrfs_warn(fs_info, "free space tree is invalid");
2960 rebuild_free_space_tree = true;
2961 }
2962
2963 if (rebuild_free_space_tree) {
2964 btrfs_info(fs_info, "rebuilding free space tree");
2965 ret = btrfs_rebuild_free_space_tree(fs_info);
2966 if (ret) {
2967 btrfs_warn(fs_info,
2968 "failed to rebuild free space tree: %d", ret);
2969 goto out;
2970 }
2971 }
2972
2973 if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
2974 !btrfs_test_opt(fs_info, FREE_SPACE_TREE)) {
2975 btrfs_info(fs_info, "disabling free space tree");
2976 ret = btrfs_delete_free_space_tree(fs_info);
2977 if (ret) {
2978 btrfs_warn(fs_info,
2979 "failed to disable free space tree: %d", ret);
2980 goto out;
2981 }
2982 }
2983
2984 /*
2985 * btrfs_find_orphan_roots() is responsible for finding all the dead
2986 * roots (with 0 refs), flag them with BTRFS_ROOT_DEAD_TREE and load
2987 * them into the fs_info->fs_roots_radix tree. This must be done before
2988 * calling btrfs_orphan_cleanup() on the tree root. If we don't do it
2989 * first, then btrfs_orphan_cleanup() will delete a dead root's orphan
2990 * item before the root's tree is deleted - this means that if we unmount
2991 * or crash before the deletion completes, on the next mount we will not
2992 * delete what remains of the tree because the orphan item does not
2993 * exist anymore, which is what tells us we have a pending deletion.
2994 */
2995 ret = btrfs_find_orphan_roots(fs_info);
2996 if (ret)
2997 goto out;
2998
2999 ret = btrfs_cleanup_fs_roots(fs_info);
3000 if (ret)
3001 goto out;
3002
3003 down_read(sem: &fs_info->cleanup_work_sem);
3004 if ((ret = btrfs_orphan_cleanup(root: fs_info->fs_root)) ||
3005 (ret = btrfs_orphan_cleanup(root: fs_info->tree_root))) {
3006 up_read(sem: &fs_info->cleanup_work_sem);
3007 goto out;
3008 }
3009 up_read(sem: &fs_info->cleanup_work_sem);
3010
3011 mutex_lock(&fs_info->cleaner_mutex);
3012 ret = btrfs_recover_relocation(fs_info);
3013 mutex_unlock(lock: &fs_info->cleaner_mutex);
3014 if (ret < 0) {
3015 btrfs_warn(fs_info, "failed to recover relocation: %d", ret);
3016 goto out;
3017 }
3018
3019 if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) &&
3020 !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
3021 btrfs_info(fs_info, "creating free space tree");
3022 ret = btrfs_create_free_space_tree(fs_info);
3023 if (ret) {
3024 btrfs_warn(fs_info,
3025 "failed to create free space tree: %d", ret);
3026 goto out;
3027 }
3028 }
3029
3030 if (cache_opt != btrfs_free_space_cache_v1_active(fs_info)) {
3031 ret = btrfs_set_free_space_cache_v1_active(fs_info, active: cache_opt);
3032 if (ret)
3033 goto out;
3034 }
3035
3036 ret = btrfs_resume_balance_async(fs_info);
3037 if (ret)
3038 goto out;
3039
3040 ret = btrfs_resume_dev_replace_async(fs_info);
3041 if (ret) {
3042 btrfs_warn(fs_info, "failed to resume dev_replace");
3043 goto out;
3044 }
3045
3046 btrfs_qgroup_rescan_resume(fs_info);
3047
3048 if (!fs_info->uuid_root) {
3049 btrfs_info(fs_info, "creating UUID tree");
3050 ret = btrfs_create_uuid_tree(fs_info);
3051 if (ret) {
3052 btrfs_warn(fs_info,
3053 "failed to create the UUID tree %d", ret);
3054 goto out;
3055 }
3056 }
3057
3058out:
3059 return ret;
3060}
3061
3062/*
3063 * Do various sanity and dependency checks of different features.
3064 *
3065 * @is_rw_mount: If the mount is read-write.
3066 *
3067 * This is the place for less strict checks (like for subpage or artificial
3068 * feature dependencies).
3069 *
3070 * For strict checks or possible corruption detection, see
3071 * btrfs_validate_super().
3072 *
3073 * This should be called after btrfs_parse_options(), as some mount options
3074 * (space cache related) can modify on-disk format like free space tree and
3075 * screw up certain feature dependencies.
3076 */
3077int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
3078{
3079 struct btrfs_super_block *disk_super = fs_info->super_copy;
3080 u64 incompat = btrfs_super_incompat_flags(s: disk_super);
3081 const u64 compat_ro = btrfs_super_compat_ro_flags(s: disk_super);
3082 const u64 compat_ro_unsupp = (compat_ro & ~BTRFS_FEATURE_COMPAT_RO_SUPP);
3083
3084 if (incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP) {
3085 btrfs_err(fs_info,
3086 "cannot mount because of unknown incompat features (0x%llx)",
3087 incompat);
3088 return -EINVAL;
3089 }
3090
3091 /* Runtime limitation for mixed block groups. */
3092 if ((incompat & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
3093 (fs_info->sectorsize != fs_info->nodesize)) {
3094 btrfs_err(fs_info,
3095"unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups",
3096 fs_info->nodesize, fs_info->sectorsize);
3097 return -EINVAL;
3098 }
3099
3100 /* Mixed backref is an always-enabled feature. */
3101 incompat |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
3102
3103 /* Set compression related flags just in case. */
3104 if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
3105 incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
3106 else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
3107 incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
3108
3109 /*
3110 * An ancient flag, which should really be marked deprecated.
3111 * Such a runtime limitation doesn't really need an incompat flag.
3112 */
3113 if (btrfs_super_nodesize(s: disk_super) > PAGE_SIZE)
3114 incompat |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
3115
3116 if (compat_ro_unsupp && is_rw_mount) {
3117 btrfs_err(fs_info,
3118 "cannot mount read-write because of unknown compat_ro features (0x%llx)",
3119 compat_ro);
3120 return -EINVAL;
3121 }
3122
3123 /*
3124 * With unsupported RO compat features, even though we are mounted
3125 * read-only we must not cause any metadata writes, including log
3126 * replay, or we could screw up whatever the new feature requires.
3127 */
3128 if (compat_ro_unsupp && btrfs_super_log_root(s: disk_super) &&
3129 !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3130 btrfs_err(fs_info,
3131"cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay",
3132 compat_ro);
3133 return -EINVAL;
3134 }
3135
3136 /*
3137 * Artificial limitations for block group tree, to force
3138 * block-group-tree to rely on no-holes and free-space-tree.
3139 */
3140 if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) &&
3141 (!btrfs_fs_incompat(fs_info, NO_HOLES) ||
3142 !btrfs_test_opt(fs_info, FREE_SPACE_TREE))) {
3143 btrfs_err(fs_info,
3144"block-group-tree feature requires no-holes and free-space-tree features");
3145 return -EINVAL;
3146 }
3147
3148 /*
3149 * Subpage runtime limitation on v1 cache.
3150 *
3151 * V1 space cache still has some hardcoded PAGE_SIZE usage, and since
3152 * we already default to the v2 cache there is no need to bother with
3153 * v1, as it's going to be deprecated anyway.
3154 */
3155 if (fs_info->sectorsize < PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) {
3156 btrfs_warn(fs_info,
3157 "v1 space cache is not supported for page size %lu with sectorsize %u",
3158 PAGE_SIZE, fs_info->sectorsize);
3159 return -EINVAL;
3160 }
3161
3162 /* This can be called by remount, we need to protect the super block. */
3163 spin_lock(lock: &fs_info->super_lock);
3164 btrfs_set_super_incompat_flags(s: disk_super, val: incompat);
3165 spin_unlock(lock: &fs_info->super_lock);
3166
3167 return 0;
3168}
3169
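/*
 * Main mount entry point: read and validate the super block, set up the
 * in-memory trees and work queues, read the chunk and remaining metadata
 * trees, start the cleaner and transaction kthreads and, if needed,
 * replay the log tree.
 */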
3170int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices,
3171 char *options)
3172{
3173 u32 sectorsize;
3174 u32 nodesize;
3175 u32 stripesize;
3176 u64 generation;
3177 u16 csum_type;
3178 struct btrfs_super_block *disk_super;
3179 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
3180 struct btrfs_root *tree_root;
3181 struct btrfs_root *chunk_root;
3182 int ret;
3183 int level;
3184
3185 ret = init_mount_fs_info(fs_info, sb);
3186 if (ret)
3187 goto fail;
3188
3189 /* These need to be init'ed before we start creating inodes and such. */
3190 tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID,
3191 GFP_KERNEL);
3192 fs_info->tree_root = tree_root;
3193 chunk_root = btrfs_alloc_root(fs_info, BTRFS_CHUNK_TREE_OBJECTID,
3194 GFP_KERNEL);
3195 fs_info->chunk_root = chunk_root;
3196 if (!tree_root || !chunk_root) {
3197 ret = -ENOMEM;
3198 goto fail;
3199 }
3200
3201 ret = btrfs_init_btree_inode(sb);
3202 if (ret)
3203 goto fail;
3204
3205 invalidate_bdev(bdev: fs_devices->latest_dev->bdev);
3206
3207 /*
3208 * Read super block and check the signature bytes only
3209 */
3210 disk_super = btrfs_read_dev_super(bdev: fs_devices->latest_dev->bdev);
3211 if (IS_ERR(ptr: disk_super)) {
3212 ret = PTR_ERR(ptr: disk_super);
3213 goto fail_alloc;
3214 }
3215
3216 /*
3217 * Verify the checksum type first; if that or the checksum value is
3218 * corrupted, we'll find out.
3219 */
3220 csum_type = btrfs_super_csum_type(s: disk_super);
3221 if (!btrfs_supported_super_csum(csum_type)) {
3222 btrfs_err(fs_info, "unsupported checksum algorithm: %u",
3223 csum_type);
3224 ret = -EINVAL;
3225 btrfs_release_disk_super(super: disk_super);
3226 goto fail_alloc;
3227 }
3228
3229 fs_info->csum_size = btrfs_super_csum_size(s: disk_super);
3230
3231 ret = btrfs_init_csum_hash(fs_info, csum_type);
3232 if (ret) {
3233 btrfs_release_disk_super(super: disk_super);
3234 goto fail_alloc;
3235 }
3236
3237 /*
3238 * We want to check superblock checksum, the type is stored inside.
3239 * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k).
3240 */
3241 if (btrfs_check_super_csum(fs_info, disk_sb: disk_super)) {
3242 btrfs_err(fs_info, "superblock checksum mismatch");
3243 ret = -EINVAL;
3244 btrfs_release_disk_super(super: disk_super);
3245 goto fail_alloc;
3246 }
3247
3248 /*
3249 * super_copy is zeroed at allocation time and we never touch the
3250 * following bytes up to INFO_SIZE; the checksum is calculated from
3251 * the whole block of INFO_SIZE.
3252 */
3253 memcpy(fs_info->super_copy, disk_super, sizeof(*fs_info->super_copy));
3254 btrfs_release_disk_super(super: disk_super);
3255
3256 disk_super = fs_info->super_copy;
3257
3258 memcpy(fs_info->super_for_commit, fs_info->super_copy,
3259 sizeof(*fs_info->super_for_commit));
3260
3261 ret = btrfs_validate_mount_super(fs_info);
3262 if (ret) {
3263 btrfs_err(fs_info, "superblock contains fatal errors");
3264 ret = -EINVAL;
3265 goto fail_alloc;
3266 }
3267
3268 if (!btrfs_super_root(s: disk_super)) {
3269 btrfs_err(fs_info, "invalid superblock tree root bytenr");
3270 ret = -EINVAL;
3271 goto fail_alloc;
3272 }
3273
3274 /* check FS state, whether FS is broken. */
3275 if (btrfs_super_flags(s: disk_super) & BTRFS_SUPER_FLAG_ERROR)
3276 WRITE_ONCE(fs_info->fs_error, -EUCLEAN);
3277
3278 /*
3279 * In the long term, we'll store the compression type in the super
3280 * block, and it'll be used for per file compression control.
3281 */
3282 fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
3283
3285 /* Set up fs_info before parsing mount options */
3286 nodesize = btrfs_super_nodesize(s: disk_super);
3287 sectorsize = btrfs_super_sectorsize(s: disk_super);
3288 stripesize = sectorsize;
3289 fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
3290 fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
3291
3292 fs_info->nodesize = nodesize;
3293 fs_info->sectorsize = sectorsize;
3294 fs_info->sectorsize_bits = ilog2(sectorsize);
3295 fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(info: fs_info) / fs_info->csum_size;
3296 fs_info->stripesize = stripesize;
3297
3298 ret = btrfs_parse_options(info: fs_info, options, new_flags: sb->s_flags);
3299 if (ret)
3300 goto fail_alloc;
3301
3302 ret = btrfs_check_features(fs_info, is_rw_mount: !sb_rdonly(sb));
3303 if (ret < 0)
3304 goto fail_alloc;
3305
3306 if (sectorsize < PAGE_SIZE) {
3307 struct btrfs_subpage_info *subpage_info;
3308
3309 /*
3310 * V1 space cache has some hardcoded PAGE_SIZE usage, and is
3311 * going to be deprecated.
3312 *
3313 * Force use of the v2 cache for the subpage case.
3314 */
3315 btrfs_clear_opt(fs_info->mount_opt, SPACE_CACHE);
3316 btrfs_set_and_info(fs_info, FREE_SPACE_TREE,
3317 "forcing free space tree for sector size %u with page size %lu",
3318 sectorsize, PAGE_SIZE);
3319
3320 btrfs_warn(fs_info,
3321 "read-write for sector size %u with page size %lu is experimental",
3322 sectorsize, PAGE_SIZE);
3323 subpage_info = kzalloc(size: sizeof(*subpage_info), GFP_KERNEL);
3324 if (!subpage_info) {
3325 ret = -ENOMEM;
3326 goto fail_alloc;
3327 }
3328 btrfs_init_subpage_info(subpage_info, sectorsize);
3329 fs_info->subpage_info = subpage_info;
3330 }
3331
3332 ret = btrfs_init_workqueues(fs_info);
3333 if (ret)
3334 goto fail_sb_buffer;
3335
3336 sb->s_bdi->ra_pages *= btrfs_super_num_devices(s: disk_super);
3337 sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
3338
3339 sb->s_blocksize = sectorsize;
3340 sb->s_blocksize_bits = blksize_bits(size: sectorsize);
3341 memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE);
3342
3343 mutex_lock(&fs_info->chunk_mutex);
3344 ret = btrfs_read_sys_array(fs_info);
3345 mutex_unlock(lock: &fs_info->chunk_mutex);
3346 if (ret) {
3347 btrfs_err(fs_info, "failed to read the system array: %d", ret);
3348 goto fail_sb_buffer;
3349 }
3350
3351 generation = btrfs_super_chunk_root_generation(s: disk_super);
3352 level = btrfs_super_chunk_root_level(s: disk_super);
3353 ret = load_super_root(root: chunk_root, bytenr: btrfs_super_chunk_root(s: disk_super),
3354 gen: generation, level);
3355 if (ret) {
3356 btrfs_err(fs_info, "failed to read chunk root");
3357 goto fail_tree_roots;
3358 }
3359
3360 read_extent_buffer(eb: chunk_root->node, dst: fs_info->chunk_tree_uuid,
3361 offsetof(struct btrfs_header, chunk_tree_uuid),
3362 BTRFS_UUID_SIZE);
3363
3364 ret = btrfs_read_chunk_tree(fs_info);
3365 if (ret) {
3366 btrfs_err(fs_info, "failed to read chunk tree: %d", ret);
3367 goto fail_tree_roots;
3368 }
3369
3370 /*
3371 * At this point we know all the devices that make this filesystem,
3372 * including the seed devices but we don't know yet if the replace
3373 * target is required. So free devices that are not part of this
3374 * filesystem but skip the replace target device which is checked
3375 * below in btrfs_init_dev_replace().
3376 */
3377 btrfs_free_extra_devids(fs_devices);
3378 if (!fs_devices->latest_dev->bdev) {
3379 btrfs_err(fs_info, "failed to read devices");
3380 ret = -EIO;
3381 goto fail_tree_roots;
3382 }
3383
3384 ret = init_tree_roots(fs_info);
3385 if (ret)
3386 goto fail_tree_roots;
3387
3388 /*
3389 * Get zone type information of zoned block devices. This will also
3390 * handle emulation of a zoned filesystem if a regular device has the
3391 * zoned incompat feature flag set.
3392 */
3393 ret = btrfs_get_dev_zone_info_all_devices(fs_info);
3394 if (ret) {
3395 btrfs_err(fs_info,
3396 "zoned: failed to read device zone info: %d", ret);
3397 goto fail_block_groups;
3398 }
3399
3400 /*
3401 * If we have a uuid root and we're not being told to rescan we need to
3402 * check the generation here so we can set the
3403 * BTRFS_FS_UPDATE_UUID_TREE_GEN bit. Otherwise we could commit the
3404 * transaction during a balance or the log replay without updating the
3405 * uuid generation, and then if we crash we would rescan the uuid tree,
3406 * even though it was perfectly fine.
3407 */
3408 if (fs_info->uuid_root && !btrfs_test_opt(fs_info, RESCAN_UUID_TREE) &&
3409 fs_info->generation == btrfs_super_uuid_tree_generation(s: disk_super))
3410 set_bit(nr: BTRFS_FS_UPDATE_UUID_TREE_GEN, addr: &fs_info->flags);
3411
3412 ret = btrfs_verify_dev_extents(fs_info);
3413 if (ret) {
3414 btrfs_err(fs_info,
3415 "failed to verify dev extents against chunks: %d",
3416 ret);
3417 goto fail_block_groups;
3418 }
3419 ret = btrfs_recover_balance(fs_info);
3420 if (ret) {
3421 btrfs_err(fs_info, "failed to recover balance: %d", ret);
3422 goto fail_block_groups;
3423 }
3424
3425 ret = btrfs_init_dev_stats(fs_info);
3426 if (ret) {
3427 btrfs_err(fs_info, "failed to init dev_stats: %d", ret);
3428 goto fail_block_groups;
3429 }
3430
3431 ret = btrfs_init_dev_replace(fs_info);
3432 if (ret) {
3433 btrfs_err(fs_info, "failed to init dev_replace: %d", ret);
3434 goto fail_block_groups;
3435 }
3436
3437 ret = btrfs_check_zoned_mode(fs_info);
3438 if (ret) {
3439 btrfs_err(fs_info, "failed to initialize zoned mode: %d",
3440 ret);
3441 goto fail_block_groups;
3442 }
3443
3444 ret = btrfs_sysfs_add_fsid(fs_devs: fs_devices);
3445 if (ret) {
3446 btrfs_err(fs_info, "failed to init sysfs fsid interface: %d",
3447 ret);
3448 goto fail_block_groups;
3449 }
3450
3451 ret = btrfs_sysfs_add_mounted(fs_info);
3452 if (ret) {
3453 btrfs_err(fs_info, "failed to init sysfs interface: %d", ret);
3454 goto fail_fsdev_sysfs;
3455 }
3456
3457 ret = btrfs_init_space_info(fs_info);
3458 if (ret) {
3459 btrfs_err(fs_info, "failed to initialize space info: %d", ret);
3460 goto fail_sysfs;
3461 }
3462
3463 ret = btrfs_read_block_groups(info: fs_info);
3464 if (ret) {
3465 btrfs_err(fs_info, "failed to read block groups: %d", ret);
3466 goto fail_sysfs;
3467 }
3468
3469 btrfs_free_zone_cache(fs_info);
3470
3471 btrfs_check_active_zone_reservation(fs_info);
3472
3473 if (!sb_rdonly(sb) && fs_info->fs_devices->missing_devices &&
3474 !btrfs_check_rw_degradable(fs_info, NULL)) {
3475 btrfs_warn(fs_info,
3476 "writable mount is not allowed due to too many missing devices");
3477 ret = -EINVAL;
3478 goto fail_sysfs;
3479 }
3480
3481 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, fs_info,
3482 "btrfs-cleaner");
3483 if (IS_ERR(ptr: fs_info->cleaner_kthread)) {
3484 ret = PTR_ERR(ptr: fs_info->cleaner_kthread);
3485 goto fail_sysfs;
3486 }
3487
3488 fs_info->transaction_kthread = kthread_run(transaction_kthread,
3489 tree_root,
3490 "btrfs-transaction");
3491 if (IS_ERR(ptr: fs_info->transaction_kthread)) {
3492 ret = PTR_ERR(ptr: fs_info->transaction_kthread);
3493 goto fail_cleaner;
3494 }
3495
3496 if (!btrfs_test_opt(fs_info, NOSSD) &&
3497 !fs_info->fs_devices->rotating) {
3498 btrfs_set_and_info(fs_info, SSD, "enabling ssd optimizations");
3499 }
3500
3501 /*
3502 * For devices supporting discard turn on discard=async automatically,
3503 * unless it's already set or disabled. This could be turned off by
3504 * nodiscard for the same mount.
3505 *
3506 * The zoned mode piggybacks on the discard functionality for
3507 * resetting a zone. There is no reason to delay the zone reset as it is
3508 * fast enough. So, do not enable async discard for zoned mode.
3509 */
3510 if (!(btrfs_test_opt(fs_info, DISCARD_SYNC) ||
3511 btrfs_test_opt(fs_info, DISCARD_ASYNC) ||
3512 btrfs_test_opt(fs_info, NODISCARD)) &&
3513 fs_info->fs_devices->discardable &&
3514 !btrfs_is_zoned(fs_info)) {
3515 btrfs_set_and_info(fs_info, DISCARD_ASYNC,
3516 "auto enabling async discard");
3517 }
3518
3519 ret = btrfs_read_qgroup_config(fs_info);
3520 if (ret)
3521 goto fail_trans_kthread;
3522
3523 if (btrfs_build_ref_tree(fs_info))
3524 btrfs_err(fs_info, "couldn't build ref tree");
3525
3526 /* Do not make disk changes in a broken FS or when nologreplay is given. */
3527 if (btrfs_super_log_root(s: disk_super) != 0 &&
3528 !btrfs_test_opt(fs_info, NOLOGREPLAY)) {
3529 btrfs_info(fs_info, "start tree-log replay");
3530 ret = btrfs_replay_log(fs_info, fs_devices);
3531 if (ret)
3532 goto fail_qgroup;
3533 }
3534
3535 fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, check_ref: true);
3536 if (IS_ERR(ptr: fs_info->fs_root)) {
3537 ret = PTR_ERR(ptr: fs_info->fs_root);
3538 btrfs_warn(fs_info, "failed to read fs tree: %d", ret);
3539 fs_info->fs_root = NULL;
3540 goto fail_qgroup;
3541 }
3542
3543 if (sb_rdonly(sb))
3544 goto clear_oneshot;
3545
3546 ret = btrfs_start_pre_rw_mount(fs_info);
3547 if (ret) {
3548 close_ctree(fs_info);
3549 return ret;
3550 }
3551 btrfs_discard_resume(fs_info);
3552
3553 if (fs_info->uuid_root &&
3554 (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) ||
3555 fs_info->generation != btrfs_super_uuid_tree_generation(s: disk_super))) {
3556 btrfs_info(fs_info, "checking UUID tree");
3557 ret = btrfs_check_uuid_tree(fs_info);
3558 if (ret) {
3559 btrfs_warn(fs_info,
3560 "failed to check the UUID tree: %d", ret);
3561 close_ctree(fs_info);
3562 return ret;
3563 }
3564 }
3565
3566 set_bit(nr: BTRFS_FS_OPEN, addr: &fs_info->flags);
3567
3568 /* Kick the cleaner thread so it'll start deleting snapshots. */
3569 if (test_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags))
3570 wake_up_process(tsk: fs_info->cleaner_kthread);
3571
3572clear_oneshot:
3573 btrfs_clear_oneshot_options(fs_info);
3574 return 0;
3575
3576fail_qgroup:
3577 btrfs_free_qgroup_config(fs_info);
3578fail_trans_kthread:
3579 kthread_stop(k: fs_info->transaction_kthread);
3580 btrfs_cleanup_transaction(fs_info);
3581 btrfs_free_fs_roots(fs_info);
3582fail_cleaner:
3583 kthread_stop(k: fs_info->cleaner_kthread);
3584
3585 /*
3586 * make sure we're done with the btree inode before we stop our
3587 * kthreads
3588 */
3589 filemap_write_and_wait(mapping: fs_info->btree_inode->i_mapping);
3590
3591fail_sysfs:
3592 btrfs_sysfs_remove_mounted(fs_info);
3593
3594fail_fsdev_sysfs:
3595 btrfs_sysfs_remove_fsid(fs_devs: fs_info->fs_devices);
3596
3597fail_block_groups:
3598 btrfs_put_block_group_cache(info: fs_info);
3599
3600fail_tree_roots:
3601 if (fs_info->data_reloc_root)
3602 btrfs_drop_and_free_fs_root(fs_info, root: fs_info->data_reloc_root);
3603 free_root_pointers(info: fs_info, free_chunk_root: true);
3604 invalidate_inode_pages2(mapping: fs_info->btree_inode->i_mapping);
3605
3606fail_sb_buffer:
3607 btrfs_stop_all_workers(fs_info);
3608 btrfs_free_block_groups(info: fs_info);
3609fail_alloc:
3610 btrfs_mapping_tree_free(tree: &fs_info->mapping_tree);
3611
3612 iput(fs_info->btree_inode);
3613fail:
3614 btrfs_close_devices(fs_devices: fs_info->fs_devices);
3615 ASSERT(ret < 0);
3616 return ret;
3617}
3618ALLOW_ERROR_INJECTION(open_ctree, ERRNO);
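
/*
 * A hedged aside: open_ctree() is tagged above as an error-injection point
 * of class ERRNO.  On kernels built with CONFIG_FUNCTION_ERROR_INJECTION and
 * CONFIG_FAIL_FUNCTION, a mount failure can be forced from userspace through
 * the fail_function debugfs interface, roughly as in the sketch below (see
 * Documentation/fault-injection/fault-injection.rst for the authoritative
 * knobs):
 *
 *	echo open_ctree > /sys/kernel/debug/fail_function/inject
 *	echo -12 > /sys/kernel/debug/fail_function/open_ctree/retval
 *	echo 100 > /sys/kernel/debug/fail_function/probability
 *	echo 1 > /sys/kernel/debug/fail_function/times
 */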
3619
3620static void btrfs_end_super_write(struct bio *bio)
3621{
3622 struct btrfs_device *device = bio->bi_private;
3623 struct bio_vec *bvec;
3624 struct bvec_iter_all iter_all;
3625 struct page *page;
3626
3627 bio_for_each_segment_all(bvec, bio, iter_all) {
3628 page = bvec->bv_page;
3629
3630 if (bio->bi_status) {
3631 btrfs_warn_rl_in_rcu(device->fs_info,
3632 "lost page write due to IO error on %s (%d)",
3633 btrfs_dev_name(device),
3634 blk_status_to_errno(bio->bi_status));
3635 ClearPageUptodate(page);
3636 SetPageError(page);
3637 btrfs_dev_stat_inc_and_print(dev: device,
3638 index: BTRFS_DEV_STAT_WRITE_ERRS);
3639 } else {
3640 SetPageUptodate(page);
3641 }
3642
3643 put_page(page);
3644 unlock_page(page);
3645 }
3646
3647 bio_put(bio);
3648}
3649
3650struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
3651 int copy_num, bool drop_cache)
3652{
3653 struct btrfs_super_block *super;
3654 struct page *page;
3655 u64 bytenr, bytenr_orig;
3656 struct address_space *mapping = bdev->bd_inode->i_mapping;
3657 int ret;
3658
3659 bytenr_orig = btrfs_sb_offset(mirror: copy_num);
3660 ret = btrfs_sb_log_location_bdev(bdev, mirror: copy_num, READ, bytenr_ret: &bytenr);
3661 if (ret == -ENOENT)
3662 return ERR_PTR(error: -EINVAL);
3663 else if (ret)
3664 return ERR_PTR(error: ret);
3665
3666 if (bytenr + BTRFS_SUPER_INFO_SIZE >= bdev_nr_bytes(bdev))
3667 return ERR_PTR(error: -EINVAL);
3668
3669 if (drop_cache) {
3670 /* This should only be called with the primary sb. */
3671 ASSERT(copy_num == 0);
3672
3673 /*
3674 * Drop the page of the primary superblock, so later read will
3675 * always read from the device.
3676 */
3677 invalidate_inode_pages2_range(mapping,
3678 start: bytenr >> PAGE_SHIFT,
3679 end: (bytenr + BTRFS_SUPER_INFO_SIZE) >> PAGE_SHIFT);
3680 }
3681
3682 page = read_cache_page_gfp(mapping, index: bytenr >> PAGE_SHIFT, GFP_NOFS);
3683 if (IS_ERR(ptr: page))
3684 return ERR_CAST(ptr: page);
3685
3686 super = page_address(page);
3687 if (btrfs_super_magic(s: super) != BTRFS_MAGIC) {
3688 btrfs_release_disk_super(super);
3689 return ERR_PTR(error: -ENODATA);
3690 }
3691
3692 if (btrfs_super_bytenr(s: super) != bytenr_orig) {
3693 btrfs_release_disk_super(super);
3694 return ERR_PTR(error: -EINVAL);
3695 }
3696
3697 return super;
3698}
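
/*
 * Example caller (a sketch, not taken from this file): read the primary
 * superblock copy from an already-opened block device and release it when
 * done.
 *
 *	struct btrfs_super_block *sb;
 *
 *	sb = btrfs_read_dev_one_super(bdev, 0, false);
 *	if (IS_ERR(sb))
 *		return PTR_ERR(sb);
 *	... inspect sb, e.g. btrfs_super_generation(sb) ...
 *	btrfs_release_disk_super(sb);
 */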
3699
3701struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev)
3702{
3703 struct btrfs_super_block *super, *latest = NULL;
3704 int i;
3705 u64 transid = 0;
3706
3707 /* We would like to check all the supers, but that would make
3708 * a btrfs mount succeed after a mkfs from a different FS.
3709 * So, we need to add a special mount option to scan for
3710 * later supers, using BTRFS_SUPER_MIRROR_MAX instead.
3711 */
3712 for (i = 0; i < 1; i++) {
3713 super = btrfs_read_dev_one_super(bdev, copy_num: i, drop_cache: false);
3714 if (IS_ERR(ptr: super))
3715 continue;
3716
3717 if (!latest || btrfs_super_generation(s: super) > transid) {
3718 if (latest)
3719 btrfs_release_disk_super(super: latest);
3720
3721 latest = super;
3722 transid = btrfs_super_generation(s: super);
3723 }
3724 }
3725
3726 return super;
3727}
3728
3729/*
3730 * Write superblock @sb to the @device. Do not wait for completion; all the
3731 * pages we use for writing are locked.
3732 *
3733 * Write @max_mirrors copies of the superblock, where 0 means the default of
3734 * all copies that fit the expected device size at commit time. Note that
3735 * @max_mirrors must be the same for the write and wait phases.
3736 *
3737 * Return 0 if at least one copy was written successfully, -1 otherwise.
3738 */
3739static int write_dev_supers(struct btrfs_device *device,
3740 struct btrfs_super_block *sb, int max_mirrors)
3741{
3742 struct btrfs_fs_info *fs_info = device->fs_info;
3743 struct address_space *mapping = device->bdev->bd_inode->i_mapping;
3744 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
3745 int i;
3746 int errors = 0;
3747 int ret;
3748 u64 bytenr, bytenr_orig;
3749
3750 if (max_mirrors == 0)
3751 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3752
3753 shash->tfm = fs_info->csum_shash;
3754
3755 for (i = 0; i < max_mirrors; i++) {
3756 struct page *page;
3757 struct bio *bio;
3758 struct btrfs_super_block *disk_super;
3759
3760 bytenr_orig = btrfs_sb_offset(mirror: i);
3761 ret = btrfs_sb_log_location(device, mirror: i, WRITE, bytenr_ret: &bytenr);
3762 if (ret == -ENOENT) {
3763 continue;
3764 } else if (ret < 0) {
3765 btrfs_err(device->fs_info,
3766 "couldn't get super block location for mirror %d",
3767 i);
3768 errors++;
3769 continue;
3770 }
3771 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3772 device->commit_total_bytes)
3773 break;
3774
3775 btrfs_set_super_bytenr(s: sb, val: bytenr_orig);
3776
3777 crypto_shash_digest(desc: shash, data: (const char *)sb + BTRFS_CSUM_SIZE,
3778 BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE,
3779 out: sb->csum);
3780
3781 page = find_or_create_page(mapping, index: bytenr >> PAGE_SHIFT,
3782 GFP_NOFS);
3783 if (!page) {
3784 btrfs_err(device->fs_info,
3785 "couldn't get super block page for bytenr %llu",
3786 bytenr);
3787 errors++;
3788 continue;
3789 }
3790
3791 /* Bump the refcount for wait_dev_supers() */
3792 get_page(page);
3793
3794 disk_super = page_address(page);
3795 memcpy(disk_super, sb, BTRFS_SUPER_INFO_SIZE);
3796
3797 /*
3798 * Directly use bios here instead of relying on the page cache
3799 * to do I/O, so we don't lose the ability to do integrity
3800 * checking.
3801 */
3802 bio = bio_alloc(bdev: device->bdev, nr_vecs: 1,
3803 opf: REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO,
3804 GFP_NOFS);
3805 bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT;
3806 bio->bi_private = device;
3807 bio->bi_end_io = btrfs_end_super_write;
3808 __bio_add_page(bio, page, BTRFS_SUPER_INFO_SIZE,
3809 offset_in_page(bytenr));
3810
3811 /*
3812 * We FUA only the first super block. The others we allow to
3813 * go down lazily, and there's a short window where the on-disk
3814 * copies might still contain the older version.
3815 */
3816 if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER))
3817 bio->bi_opf |= REQ_FUA;
3818 submit_bio(bio);
3819
3820 if (btrfs_advance_sb_log(device, mirror: i))
3821 errors++;
3822 }
3823 return errors < i ? 0 : -1;
3824}
3825
3826/*
3827 * Wait for write completion of the superblocks submitted by
3828 * write_dev_supers(); @max_mirrors must match the value used for the
3829 * write phase.
3830 *
3831 * Return -1 if the primary copy failed or no copy completed, 0 otherwise.
3832 */
3833static int wait_dev_supers(struct btrfs_device *device, int max_mirrors)
3834{
3835 int i;
3836 int errors = 0;
3837 bool primary_failed = false;
3838 int ret;
3839 u64 bytenr;
3840
3841 if (max_mirrors == 0)
3842 max_mirrors = BTRFS_SUPER_MIRROR_MAX;
3843
3844 for (i = 0; i < max_mirrors; i++) {
3845 struct page *page;
3846
3847 ret = btrfs_sb_log_location(device, mirror: i, READ, bytenr_ret: &bytenr);
3848 if (ret == -ENOENT) {
3849 break;
3850 } else if (ret < 0) {
3851 errors++;
3852 if (i == 0)
3853 primary_failed = true;
3854 continue;
3855 }
3856 if (bytenr + BTRFS_SUPER_INFO_SIZE >=
3857 device->commit_total_bytes)
3858 break;
3859
3860 page = find_get_page(mapping: device->bdev->bd_inode->i_mapping,
3861 offset: bytenr >> PAGE_SHIFT);
3862 if (!page) {
3863 errors++;
3864 if (i == 0)
3865 primary_failed = true;
3866 continue;
3867 }
3868 /* Page is submitted locked and unlocked once the IO completes */
3869 wait_on_page_locked(page);
3870 if (PageError(page)) {
3871 errors++;
3872 if (i == 0)
3873 primary_failed = true;
3874 }
3875
3876 /* Drop our reference */
3877 put_page(page);
3878
3879 /* Drop the reference from the writing run */
3880 put_page(page);
3881 }
3882
3883 /* log error, force error return */
3884 if (primary_failed) {
3885 btrfs_err(device->fs_info, "error writing primary super block to device %llu",
3886 device->devid);
3887 return -1;
3888 }
3889
3890 return errors < i ? 0 : -1;
3891}
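
/*
 * write_dev_supers() and wait_dev_supers() form a two-phase protocol; the
 * caller (write_all_supers() below) submits to every device first and only
 * then waits, e.g.:
 *
 *	ret = write_dev_supers(dev, sb, max_mirrors);
 *	...
 *	ret = wait_dev_supers(dev, max_mirrors);
 *
 * The same max_mirrors value must be passed to both phases so that both
 * sides iterate over the same set of superblock locations.
 */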
3892
3893/*
3894 * Endio for write_dev_flush; this wakes anyone waiting for the barrier
3895 * when it is done.
3896 */
3897static void btrfs_end_empty_barrier(struct bio *bio)
3898{
3899 bio_uninit(bio);
3900 complete(bio->bi_private);
3901}
3902
3903/*
3904 * Submit a flush request to the device if it supports it. Error handling is
3905 * done in the waiting counterpart.
3906 */
3907static void write_dev_flush(struct btrfs_device *device)
3908{
3909 struct bio *bio = &device->flush_bio;
3910
3911 device->last_flush_error = BLK_STS_OK;
3912
3913 bio_init(bio, bdev: device->bdev, NULL, max_vecs: 0,
3914 opf: REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH);
3915 bio->bi_end_io = btrfs_end_empty_barrier;
3916 init_completion(x: &device->flush_wait);
3917 bio->bi_private = &device->flush_wait;
3918 submit_bio(bio);
3919 set_bit(BTRFS_DEV_STATE_FLUSH_SENT, addr: &device->dev_state);
3920}
3921
3922/*
3923 * If the flush bio has been submitted by write_dev_flush, wait for it.
3924 * Return true for any error, and false otherwise.
3925 */
3926static bool wait_dev_flush(struct btrfs_device *device)
3927{
3928 struct bio *bio = &device->flush_bio;
3929
3930 if (!test_and_clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, addr: &device->dev_state))
3931 return false;
3932
3933 wait_for_completion_io(&device->flush_wait);
3934
3935 if (bio->bi_status) {
3936 device->last_flush_error = bio->bi_status;
3937 btrfs_dev_stat_inc_and_print(dev: device, index: BTRFS_DEV_STAT_FLUSH_ERRS);
3938 return true;
3939 }
3940
3941 return false;
3942}
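
/*
 * The flush side follows the same submit-then-wait split.  A minimal
 * single-device pairing would look like the sketch below (the error
 * handling helper is hypothetical; barrier_all_devices() below shows the
 * real multi-device usage):
 *
 *	write_dev_flush(dev);
 *	if (wait_dev_flush(dev))
 *		handle_flush_error(dev->last_flush_error);
 */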
3943
3944/*
3945 * send an empty flush down to each device in parallel,
3946 * then wait for them
3947 */
3948static int barrier_all_devices(struct btrfs_fs_info *info)
3949{
3950 struct list_head *head;
3951 struct btrfs_device *dev;
3952 int errors_wait = 0;
3953
3954 lockdep_assert_held(&info->fs_devices->device_list_mutex);
3955 /* send down all the barriers */
3956 head = &info->fs_devices->devices;
3957 list_for_each_entry(dev, head, dev_list) {
3958 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3959 continue;
3960 if (!dev->bdev)
3961 continue;
3962 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3963 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3964 continue;
3965
3966 write_dev_flush(device: dev);
3967 }
3968
3969 /* wait for all the barriers */
3970 list_for_each_entry(dev, head, dev_list) {
3971 if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
3972 continue;
3973 if (!dev->bdev) {
3974 errors_wait++;
3975 continue;
3976 }
3977 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
3978 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
3979 continue;
3980
3981 if (wait_dev_flush(device: dev))
3982 errors_wait++;
3983 }
3984
3985 /*
3986 * Checks last_flush_error of disks in order to determine the device
3987 * state.
3988 */
3989 if (errors_wait && !btrfs_check_rw_degradable(fs_info: info, NULL))
3990 return -EIO;
3991
3992 return 0;
3993}
3994
3995int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags)
3996{
3997 int raid_type;
3998 int min_tolerated = INT_MAX;
3999
4000 if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 ||
4001 (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE))
4002 min_tolerated = min_t(int, min_tolerated,
4003 btrfs_raid_array[BTRFS_RAID_SINGLE].
4004 tolerated_failures);
4005
4006 for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
4007 if (raid_type == BTRFS_RAID_SINGLE)
4008 continue;
4009 if (!(flags & btrfs_raid_array[raid_type].bg_flag))
4010 continue;
4011 min_tolerated = min_t(int, min_tolerated,
4012 btrfs_raid_array[raid_type].
4013 tolerated_failures);
4014 }
4015
4016 if (min_tolerated == INT_MAX) {
4017 pr_warn("BTRFS: unknown raid flag: %llu", flags);
4018 min_tolerated = 0;
4019 }
4020
4021 return min_tolerated;
4022}
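
/*
 * Worked example (sketch): for flags with the RAID1 profile bit set, only
 * the BTRFS_RAID_RAID1 entry of btrfs_raid_array matches, so the result is
 * that profile's tolerated_failures (one device).  For SINGLE, or when no
 * profile bit is set at all, the result is 0: no barrier failure can be
 * tolerated.
 */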
4023
4024int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors)
4025{
4026 struct list_head *head;
4027 struct btrfs_device *dev;
4028 struct btrfs_super_block *sb;
4029 struct btrfs_dev_item *dev_item;
4030 int ret;
4031 int do_barriers;
4032 int max_errors;
4033 int total_errors = 0;
4034 u64 flags;
4035
4036 do_barriers = !btrfs_test_opt(fs_info, NOBARRIER);
4037
4038 /*
4039 * max_mirrors == 0 indicates we're called from commit_transaction,
4040 * not from fsync, where the tree roots in fs_info may not yet be
4041 * consistent on disk.
4042 */
4043 if (max_mirrors == 0)
4044 backup_super_roots(info: fs_info);
4045
4046 sb = fs_info->super_for_commit;
4047 dev_item = &sb->dev_item;
4048
4049 mutex_lock(&fs_info->fs_devices->device_list_mutex);
4050 head = &fs_info->fs_devices->devices;
4051 max_errors = btrfs_super_num_devices(s: fs_info->super_copy) - 1;
4052
4053 if (do_barriers) {
4054 ret = barrier_all_devices(info: fs_info);
4055 if (ret) {
4056 mutex_unlock(
4057 lock: &fs_info->fs_devices->device_list_mutex);
4058 btrfs_handle_fs_error(fs_info, ret,
4059 "errors while submitting device barriers.");
4060 return ret;
4061 }
4062 }
4063
4064 list_for_each_entry(dev, head, dev_list) {
4065 if (!dev->bdev) {
4066 total_errors++;
4067 continue;
4068 }
4069 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
4070 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
4071 continue;
4072
4073 btrfs_set_stack_device_generation(s: dev_item, val: 0);
4074 btrfs_set_stack_device_type(s: dev_item, val: dev->type);
4075 btrfs_set_stack_device_id(s: dev_item, val: dev->devid);
4076 btrfs_set_stack_device_total_bytes(s: dev_item,
4077 val: dev->commit_total_bytes);
4078 btrfs_set_stack_device_bytes_used(s: dev_item,
4079 val: dev->commit_bytes_used);
4080 btrfs_set_stack_device_io_align(s: dev_item, val: dev->io_align);
4081 btrfs_set_stack_device_io_width(s: dev_item, val: dev->io_width);
4082 btrfs_set_stack_device_sector_size(s: dev_item, val: dev->sector_size);
4083 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
4084 memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid,
4085 BTRFS_FSID_SIZE);
4086
4087 flags = btrfs_super_flags(s: sb);
4088 btrfs_set_super_flags(s: sb, val: flags | BTRFS_HEADER_FLAG_WRITTEN);
4089
4090 ret = btrfs_validate_write_super(fs_info, sb);
4091 if (ret < 0) {
4092 mutex_unlock(lock: &fs_info->fs_devices->device_list_mutex);
4093 btrfs_handle_fs_error(fs_info, -EUCLEAN,
4094 "unexpected superblock corruption detected");
4095 return -EUCLEAN;
4096 }
4097
4098 ret = write_dev_supers(device: dev, sb, max_mirrors);
4099 if (ret)
4100 total_errors++;
4101 }
4102 if (total_errors > max_errors) {
4103 btrfs_err(fs_info, "%d errors while writing supers",
4104 total_errors);
4105 mutex_unlock(lock: &fs_info->fs_devices->device_list_mutex);
4106
4107 /* FUA is masked off if unsupported and can't be the reason */
4108 btrfs_handle_fs_error(fs_info, -EIO,
4109 "%d errors while writing supers",
4110 total_errors);
4111 return -EIO;
4112 }
4113
4114 total_errors = 0;
4115 list_for_each_entry(dev, head, dev_list) {
4116 if (!dev->bdev)
4117 continue;
4118 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) ||
4119 !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))
4120 continue;
4121
4122 ret = wait_dev_supers(device: dev, max_mirrors);
4123 if (ret)
4124 total_errors++;
4125 }
4126 mutex_unlock(lock: &fs_info->fs_devices->device_list_mutex);
4127 if (total_errors > max_errors) {
4128 btrfs_handle_fs_error(fs_info, -EIO,
4129 "%d errors while writing supers",
4130 total_errors);
4131 return -EIO;
4132 }
4133 return 0;
4134}
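
/*
 * To summarize the commit-time superblock protocol above (a sketch of the
 * flow, not additional behavior): flush every writeable device via
 * barrier_all_devices(), write all superblock copies on each device, then
 * wait for the writes; in each phase up to num_devices - 1 failing devices
 * are tolerated before the filesystem is forced into an error state.
 */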
4135
4136/* Drop a fs root from the radix tree and free it. */
4137void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
4138 struct btrfs_root *root)
4139{
4140 bool drop_ref = false;
4141
4142 spin_lock(lock: &fs_info->fs_roots_radix_lock);
4143 radix_tree_delete(&fs_info->fs_roots_radix,
4144 (unsigned long)root->root_key.objectid);
4145 if (test_and_clear_bit(nr: BTRFS_ROOT_IN_RADIX, addr: &root->state))
4146 drop_ref = true;
4147 spin_unlock(lock: &fs_info->fs_roots_radix_lock);
4148
4149 if (BTRFS_FS_ERROR(fs_info)) {
4150 ASSERT(root->log_root == NULL);
4151 if (root->reloc_root) {
4152 btrfs_put_root(root: root->reloc_root);
4153 root->reloc_root = NULL;
4154 }
4155 }
4156
4157 if (drop_ref)
4158 btrfs_put_root(root);
4159}
4160
4161int btrfs_commit_super(struct btrfs_fs_info *fs_info)
4162{
4163 struct btrfs_root *root = fs_info->tree_root;
4164 struct btrfs_trans_handle *trans;
4165
4166 mutex_lock(&fs_info->cleaner_mutex);
4167 btrfs_run_delayed_iputs(fs_info);
4168 mutex_unlock(lock: &fs_info->cleaner_mutex);
4169 wake_up_process(tsk: fs_info->cleaner_kthread);
4170
4171 /* wait until ongoing cleanup work is done */
4172 down_write(sem: &fs_info->cleanup_work_sem);
4173 up_write(sem: &fs_info->cleanup_work_sem);
4174
4175 trans = btrfs_join_transaction(root);
4176 if (IS_ERR(ptr: trans))
4177 return PTR_ERR(ptr: trans);
4178 return btrfs_commit_transaction(trans);
4179}
4180
4181static void warn_about_uncommitted_trans(struct btrfs_fs_info *fs_info)
4182{
4183 struct btrfs_transaction *trans;
4184 struct btrfs_transaction *tmp;
4185 bool found = false;
4186
4187 if (list_empty(head: &fs_info->trans_list))
4188 return;
4189
4190 /*
4191 * This function is only called at the very end of close_ctree(),
4192 * so there is no other running transaction and no need to take trans_lock.
4193 */
4194 ASSERT(test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags));
4195 list_for_each_entry_safe(trans, tmp, &fs_info->trans_list, list) {
4196 struct extent_state *cached = NULL;
4197 u64 dirty_bytes = 0;
4198 u64 cur = 0;
4199 u64 found_start;
4200 u64 found_end;
4201
4202 found = true;
4203 while (find_first_extent_bit(tree: &trans->dirty_pages, start: cur,
4204 start_ret: &found_start, end_ret: &found_end, bits: EXTENT_DIRTY, cached_state: &cached)) {
4205 dirty_bytes += found_end + 1 - found_start;
4206 cur = found_end + 1;
4207 }
4208 btrfs_warn(fs_info,
4209 "transaction %llu (with %llu dirty metadata bytes) is not committed",
4210 trans->transid, dirty_bytes);
4211 btrfs_cleanup_one_transaction(trans, fs_info);
4212
4213 if (trans == fs_info->running_transaction)
4214 fs_info->running_transaction = NULL;
4215 list_del_init(entry: &trans->list);
4216
4217 btrfs_put_transaction(transaction: trans);
4218 trace_btrfs_transaction_commit(fs_info);
4219 }
4220 ASSERT(!found);
4221}
4222
4223void __cold close_ctree(struct btrfs_fs_info *fs_info)
4224{
4225 int ret;
4226
4227 set_bit(nr: BTRFS_FS_CLOSING_START, addr: &fs_info->flags);
4228
4229 /*
4230 * If we had UNFINISHED_DROPS we could still be processing them, so
4231 * clear that bit and wake up relocation so it can stop.
4232 * We must do this before stopping the block group reclaim task, because
4233 * at btrfs_relocate_block_group() we wait for this bit, and after the
4234 * wait we stop with -EINTR if btrfs_fs_closing() returns non-zero - we
4235 * have just set BTRFS_FS_CLOSING_START, so btrfs_fs_closing() will
4236 * return 1.
4237 */
4238 btrfs_wake_unfinished_drop(fs_info);
4239
4240 /*
4241 * We may have the reclaim task running and relocating a data block group,
4242 * in which case it may create delayed iputs. So stop it before we park
4243 * the cleaner kthread, otherwise we can get new delayed iputs after
4244 * parking the cleaner, and that can make the async reclaim task hang
4245 * if it's waiting for delayed iputs to complete, since the cleaner is
4246 * parked and cannot run delayed iputs - this will make us hang when
4247 * trying to stop the async reclaim task.
4248 */
4249 cancel_work_sync(work: &fs_info->reclaim_bgs_work);
4250 /*
4251 * We don't want the cleaner to start new transactions, add more delayed
4252 * iputs, etc. while we're closing. We can't use kthread_stop() yet
4253 * because that frees the task_struct, and the transaction kthread might
4254 * still try to wake up the cleaner.
4255 */
4256 kthread_park(k: fs_info->cleaner_kthread);
4257
4258 /* wait for the qgroup rescan worker to stop */
4259 btrfs_qgroup_wait_for_completion(fs_info, interruptible: false);
4260
4261 /* wait for the uuid_scan task to finish */
4262 down(sem: &fs_info->uuid_tree_rescan_sem);
4263 /* avoid complaints from lockdep et al., set sem back to initial state */
4264 up(sem: &fs_info->uuid_tree_rescan_sem);
4265
4266 /* pause restriper - we want to resume on mount */
4267 btrfs_pause_balance(fs_info);
4268
4269 btrfs_dev_replace_suspend_for_unmount(fs_info);
4270
4271 btrfs_scrub_cancel(info: fs_info);
4272
4273 /* wait for any defraggers to finish */
4274 wait_event(fs_info->transaction_wait,
4275 (atomic_read(&fs_info->defrag_running) == 0));
4276
4277 /* clear out the rbtree of defraggable inodes */
4278 btrfs_cleanup_defrag_inodes(fs_info);
4279
4280 /*
4281 * After we parked the cleaner kthread, ordered extents may have
4282 * completed and created new delayed iputs. If one of the async reclaim
4283 * tasks is running and in the RUN_DELAYED_IPUTS flush state, then we
4284 * can hang forever trying to stop it, because if a delayed iput is
4285 * added after it ran btrfs_run_delayed_iputs() and before it called
4286 * btrfs_wait_on_delayed_iputs(), it will hang forever since there is
4287 * no one else to run iputs.
4288 *
4289 * So wait for all ongoing ordered extents to complete and then run
4290 * delayed iputs. This works because once we reach this point no one
4291 * can either create new ordered extents nor create delayed iputs
4292 * through some other means.
4293 *
4294 * Also note that btrfs_wait_ordered_roots() is not safe here, because
4295 * it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent,
4296 * but the delayed iput for the respective inode is made only when doing
4297 * the final btrfs_put_ordered_extent() (which must happen at
4298 * btrfs_finish_ordered_io() when we are unmounting).
4299 */
4300 btrfs_flush_workqueue(wq: fs_info->endio_write_workers);
4301 /* Ordered extents for free space inodes. */
4302 btrfs_flush_workqueue(wq: fs_info->endio_freespace_worker);
4303 btrfs_run_delayed_iputs(fs_info);
4304
4305 cancel_work_sync(work: &fs_info->async_reclaim_work);
4306 cancel_work_sync(work: &fs_info->async_data_reclaim_work);
4307 cancel_work_sync(work: &fs_info->preempt_reclaim_work);
4308
4309 /* Cancel or finish ongoing discard work */
4310 btrfs_discard_cleanup(fs_info);
4311
4312 if (!sb_rdonly(sb: fs_info->sb)) {
4313 /*
4314 * The cleaner kthread is stopped, so do one final pass over
4315 * unused block groups.
4316 */
4317 btrfs_delete_unused_bgs(fs_info);
4318
4319 /*
4320 * There might be existing delayed inode workers still running
4321 * and holding an empty delayed inode item. We must wait for
4322 * them to complete first because they can create a transaction.
4323 * This happens when someone calls btrfs_balance_delayed_items()
4324 * and then a transaction commit runs the same delayed nodes
4325 * before any delayed worker has done something with the nodes.
4326 * We must wait for any worker here and not at transaction
4327 * commit time since that could cause a deadlock.
4328 * This is a very rare case.
4329 */
4330 btrfs_flush_workqueue(wq: fs_info->delayed_workers);
4331
4332 ret = btrfs_commit_super(fs_info);
4333 if (ret)
4334 btrfs_err(fs_info, "commit super ret %d", ret);
4335 }
4336
4337 if (BTRFS_FS_ERROR(fs_info))
4338 btrfs_error_commit_super(fs_info);
4339
4340 kthread_stop(k: fs_info->transaction_kthread);
4341 kthread_stop(k: fs_info->cleaner_kthread);
4342
4343 ASSERT(list_empty(&fs_info->delayed_iputs));
4344 set_bit(nr: BTRFS_FS_CLOSING_DONE, addr: &fs_info->flags);
4345
4346 if (btrfs_check_quota_leak(fs_info)) {
4347 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
4348 btrfs_err(fs_info, "qgroup reserved space leaked");
4349 }
4350
4351 btrfs_free_qgroup_config(fs_info);
4352 ASSERT(list_empty(&fs_info->delalloc_roots));
4353
4354 if (percpu_counter_sum(fbc: &fs_info->delalloc_bytes)) {
4355 btrfs_info(fs_info, "at unmount delalloc count %lld",
4356 percpu_counter_sum(&fs_info->delalloc_bytes));
4357 }
4358
4359 if (percpu_counter_sum(fbc: &fs_info->ordered_bytes))
4360 btrfs_info(fs_info, "at unmount dio bytes count %lld",
4361 percpu_counter_sum(&fs_info->ordered_bytes));
4362
4363 btrfs_sysfs_remove_mounted(fs_info);
4364 btrfs_sysfs_remove_fsid(fs_devs: fs_info->fs_devices);
4365
4366 btrfs_put_block_group_cache(info: fs_info);
4367
4368 /*
4369 * We must make sure there is no read request left to
4370 * submit after we stop all workers.
4371 */
4372 invalidate_inode_pages2(mapping: fs_info->btree_inode->i_mapping);
4373 btrfs_stop_all_workers(fs_info);
4374
4375 /* We shouldn't have any transaction open at this point */
4376 warn_about_uncommitted_trans(fs_info);
4377
4378 clear_bit(nr: BTRFS_FS_OPEN, addr: &fs_info->flags);
4379 free_root_pointers(info: fs_info, free_chunk_root: true);
4380 btrfs_free_fs_roots(fs_info);
4381
4382 /*
4383 * We must free the block groups after dropping the fs_roots as we could
4384 * have had an IO error and have left over tree log blocks that aren't
4385 * cleaned up until the fs roots are freed. This makes the block group
4386 * accounting appear to be wrong because there's pending reserved bytes,
4387 * so make sure we do the block group cleanup afterwards.
4388 */
4389 btrfs_free_block_groups(info: fs_info);
4390
4391 iput(fs_info->btree_inode);
4392
4393 btrfs_mapping_tree_free(tree: &fs_info->mapping_tree);
4394 btrfs_close_devices(fs_devices: fs_info->fs_devices);
4395}
4396
4397void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans,
4398 struct extent_buffer *buf)
4399{
4400 struct btrfs_fs_info *fs_info = buf->fs_info;
4401 u64 transid = btrfs_header_generation(eb: buf);
4402
4403#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4404 /*
4405 * This is a fast path so only do this check if we have sanity tests
4406 * enabled. Normal people shouldn't be using unmapped buffers as dirty
4407 * outside of the sanity tests.
4408 */
4409 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))
4410 return;
4411#endif
4412 /* This is an active transaction (its state < TRANS_STATE_UNBLOCKED). */
4413 ASSERT(trans->transid == fs_info->generation);
4414 btrfs_assert_tree_write_locked(eb: buf);
4415 if (unlikely(transid != fs_info->generation)) {
4416 btrfs_abort_transaction(trans, -EUCLEAN);
4417 btrfs_crit(fs_info,
4418"dirty buffer transid mismatch, logical %llu found transid %llu running transid %llu",
4419 buf->start, transid, fs_info->generation);
4420 }
4421 set_extent_buffer_dirty(buf);
4422}
4423
4424static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info,
4425 int flush_delayed)
4426{
4427 /*
4428 * It looks as though older kernels can get into trouble with
4429 * this code; they end up stuck in balance_dirty_pages forever.
4430 */
4431 int ret;
4432
4433 if (current->flags & PF_MEMALLOC)
4434 return;
4435
4436 if (flush_delayed)
4437 btrfs_balance_delayed_items(fs_info);
4438
4439 ret = __percpu_counter_compare(fbc: &fs_info->dirty_metadata_bytes,
4440 BTRFS_DIRTY_METADATA_THRESH,
4441 batch: fs_info->dirty_metadata_batch);
4442 if (ret > 0) {
4443 balance_dirty_pages_ratelimited(mapping: fs_info->btree_inode->i_mapping);
4444 }
4445}
4446
4447void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info)
4448{
4449 __btrfs_btree_balance_dirty(fs_info, flush_delayed: 1);
4450}
4451
4452void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info)
4453{
4454 __btrfs_btree_balance_dirty(fs_info, flush_delayed: 0);
4455}
4456
4457static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info)
4458{
4459 /* cleanup FS via transaction */
4460 btrfs_cleanup_transaction(fs_info);
4461
4462 mutex_lock(&fs_info->cleaner_mutex);
4463 btrfs_run_delayed_iputs(fs_info);
4464 mutex_unlock(lock: &fs_info->cleaner_mutex);
4465
4466 down_write(sem: &fs_info->cleanup_work_sem);
4467 up_write(sem: &fs_info->cleanup_work_sem);
4468}
4469
4470static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info)
4471{
4472 struct btrfs_root *gang[8];
4473 u64 root_objectid = 0;
4474 int ret;
4475
4476 spin_lock(lock: &fs_info->fs_roots_radix_lock);
4477 while ((ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
4478 results: (void **)gang, first_index: root_objectid,
4479 ARRAY_SIZE(gang))) != 0) {
4480 int i;
4481
4482 for (i = 0; i < ret; i++)
4483 gang[i] = btrfs_grab_root(root: gang[i]);
4484 spin_unlock(lock: &fs_info->fs_roots_radix_lock);
4485
4486 for (i = 0; i < ret; i++) {
4487 if (!gang[i])
4488 continue;
4489 root_objectid = gang[i]->root_key.objectid;
4490 btrfs_free_log(NULL, root: gang[i]);
4491 btrfs_put_root(root: gang[i]);
4492 }
4493 root_objectid++;
4494 spin_lock(lock: &fs_info->fs_roots_radix_lock);
4495 }
4496 spin_unlock(lock: &fs_info->fs_roots_radix_lock);
4497 btrfs_free_log_root_tree(NULL, fs_info);
4498}
4499
4500static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
4501{
4502 struct btrfs_ordered_extent *ordered;
4503
4504 spin_lock(lock: &root->ordered_extent_lock);
4505 /*
4506 * This will just short-circuit the ordered completion path, which will
4507 * make sure the ordered extent gets properly cleaned up.
4508 */
4509 list_for_each_entry(ordered, &root->ordered_extents,
4510 root_extent_list)
4511 set_bit(nr: BTRFS_ORDERED_IOERR, addr: &ordered->flags);
4512 spin_unlock(lock: &root->ordered_extent_lock);
4513}
4514
4515static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
4516{
4517 struct btrfs_root *root;
4518 LIST_HEAD(splice);
4519
4520 spin_lock(lock: &fs_info->ordered_root_lock);
4521 list_splice_init(list: &fs_info->ordered_roots, head: &splice);
4522 while (!list_empty(head: &splice)) {
4523 root = list_first_entry(&splice, struct btrfs_root,
4524 ordered_root);
4525 list_move_tail(list: &root->ordered_root,
4526 head: &fs_info->ordered_roots);
4527
4528 spin_unlock(lock: &fs_info->ordered_root_lock);
4529 btrfs_destroy_ordered_extents(root);
4530
4531 cond_resched();
4532 spin_lock(lock: &fs_info->ordered_root_lock);
4533 }
4534 spin_unlock(lock: &fs_info->ordered_root_lock);
4535
4536 /*
4537 * We need this here because if we've been flipped read-only we won't
4538 * get sync() from the umount, so we need to make sure any ordered
4539 * extents that haven't had their dirty pages IO start writeout yet
4540 * actually get run and error out properly.
4541 */
4542 btrfs_wait_ordered_roots(fs_info, U64_MAX, range_start: 0, range_len: (u64)-1);
4543}
4544
4545static void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
4546 struct btrfs_fs_info *fs_info)
4547{
4548 struct rb_node *node;
4549 struct btrfs_delayed_ref_root *delayed_refs;
4550 struct btrfs_delayed_ref_node *ref;
4551
4552 delayed_refs = &trans->delayed_refs;
4553
4554 spin_lock(lock: &delayed_refs->lock);
4555 if (atomic_read(v: &delayed_refs->num_entries) == 0) {
4556 spin_unlock(lock: &delayed_refs->lock);
4557 btrfs_debug(fs_info, "delayed_refs has NO entry");
4558 return;
4559 }
4560
4561 while ((node = rb_first_cached(&delayed_refs->href_root)) != NULL) {
4562 struct btrfs_delayed_ref_head *head;
4563 struct rb_node *n;
4564 bool pin_bytes = false;
4565
4566 head = rb_entry(node, struct btrfs_delayed_ref_head,
4567 href_node);
4568 if (btrfs_delayed_ref_lock(delayed_refs, head))
4569 continue;
4570
4571 spin_lock(lock: &head->lock);
4572 while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
4573 ref = rb_entry(n, struct btrfs_delayed_ref_node,
4574 ref_node);
4575 rb_erase_cached(node: &ref->ref_node, root: &head->ref_tree);
4576 RB_CLEAR_NODE(&ref->ref_node);
4577 if (!list_empty(head: &ref->add_list))
4578 list_del(entry: &ref->add_list);
4579 atomic_dec(v: &delayed_refs->num_entries);
4580 btrfs_put_delayed_ref(ref);
4581 btrfs_delayed_refs_rsv_release(fs_info, nr_refs: 1, nr_csums: 0);
4582 }
4583 if (head->must_insert_reserved)
4584 pin_bytes = true;
4585 btrfs_free_delayed_extent_op(op: head->extent_op);
4586 btrfs_delete_ref_head(delayed_refs, head);
4587 spin_unlock(lock: &head->lock);
4588 spin_unlock(lock: &delayed_refs->lock);
4589 mutex_unlock(lock: &head->mutex);
4590
4591 if (pin_bytes) {
4592 struct btrfs_block_group *cache;
4593
4594 cache = btrfs_lookup_block_group(info: fs_info, bytenr: head->bytenr);
4595 BUG_ON(!cache);
4596
4597 spin_lock(lock: &cache->space_info->lock);
4598 spin_lock(lock: &cache->lock);
4599 cache->pinned += head->num_bytes;
4600 btrfs_space_info_update_bytes_pinned(fs_info,
4601 sinfo: cache->space_info, bytes: head->num_bytes);
4602 cache->reserved -= head->num_bytes;
4603 cache->space_info->bytes_reserved -= head->num_bytes;
4604 spin_unlock(lock: &cache->lock);
4605 spin_unlock(lock: &cache->space_info->lock);
4606
4607 btrfs_put_block_group(cache);
4608
4609 btrfs_error_unpin_extent_range(fs_info, start: head->bytenr,
4610 end: head->bytenr + head->num_bytes - 1);
4611 }
4612 btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
4613 btrfs_put_delayed_ref_head(head);
4614 cond_resched();
4615 spin_lock(lock: &delayed_refs->lock);
4616 }
4617 btrfs_qgroup_destroy_extent_records(trans);
4618
4619 spin_unlock(lock: &delayed_refs->lock);
4620}
4621
4622static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
4623{
4624 struct btrfs_inode *btrfs_inode;
4625 LIST_HEAD(splice);
4626
4627 spin_lock(lock: &root->delalloc_lock);
4628 list_splice_init(list: &root->delalloc_inodes, head: &splice);
4629
4630 while (!list_empty(head: &splice)) {
4631 struct inode *inode = NULL;
4632 btrfs_inode = list_first_entry(&splice, struct btrfs_inode,
4633 delalloc_inodes);
4634 __btrfs_del_delalloc_inode(root, inode: btrfs_inode);
4635 spin_unlock(lock: &root->delalloc_lock);
4636
4637 /*
4638 * Make sure we get a live inode and that it'll not disappear
4639 * meanwhile.
4640 */
4641 inode = igrab(&btrfs_inode->vfs_inode);
4642 if (inode) {
4643 unsigned int nofs_flag;
4644
4645 nofs_flag = memalloc_nofs_save();
4646 invalidate_inode_pages2(mapping: inode->i_mapping);
4647 memalloc_nofs_restore(flags: nofs_flag);
4648 iput(inode);
4649 }
4650 spin_lock(lock: &root->delalloc_lock);
4651 }
4652 spin_unlock(lock: &root->delalloc_lock);
4653}
4654
4655static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info)
4656{
4657 struct btrfs_root *root;
4658 LIST_HEAD(splice);
4659
4660 spin_lock(lock: &fs_info->delalloc_root_lock);
4661 list_splice_init(list: &fs_info->delalloc_roots, head: &splice);
4662 while (!list_empty(head: &splice)) {
4663 root = list_first_entry(&splice, struct btrfs_root,
4664 delalloc_root);
4665 root = btrfs_grab_root(root);
4666 BUG_ON(!root);
4667 spin_unlock(lock: &fs_info->delalloc_root_lock);
4668
4669 btrfs_destroy_delalloc_inodes(root);
4670 btrfs_put_root(root);
4671
4672 spin_lock(lock: &fs_info->delalloc_root_lock);
4673 }
4674 spin_unlock(lock: &fs_info->delalloc_root_lock);
4675}
4676
4677static void btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info,
4678 struct extent_io_tree *dirty_pages,
4679 int mark)
4680{
4681 struct extent_buffer *eb;
4682 u64 start = 0;
4683 u64 end;
4684
4685 while (find_first_extent_bit(tree: dirty_pages, start, start_ret: &start, end_ret: &end,
4686 bits: mark, NULL)) {
4687 clear_extent_bits(tree: dirty_pages, start, end, bits: mark);
4688 while (start <= end) {
4689 eb = find_extent_buffer(fs_info, start);
4690 start += fs_info->nodesize;
4691 if (!eb)
4692 continue;
4693
4694 btrfs_tree_lock(eb);
4695 wait_on_extent_buffer_writeback(eb);
4696 btrfs_clear_buffer_dirty(NULL, buf: eb);
4697 btrfs_tree_unlock(eb);
4698
4699 free_extent_buffer_stale(eb);
4700 }
4701 }
4702}
4703
4704static void btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
4705 struct extent_io_tree *unpin)
4706{
4707 u64 start;
4708 u64 end;
4709
4710 while (1) {
4711 struct extent_state *cached_state = NULL;
4712
4713 /*
4714 * btrfs_finish_extent_commit() may get the same range as ours
4715 * between find_first_extent_bit() and clear_extent_dirty().
4716 * Hence, hold the unused_bg_unpin_mutex to avoid double unpinning
4717 * the same extent range.
4718 */
4719 mutex_lock(&fs_info->unused_bg_unpin_mutex);
4720 if (!find_first_extent_bit(tree: unpin, start: 0, start_ret: &start, end_ret: &end,
4721 bits: EXTENT_DIRTY, cached_state: &cached_state)) {
4722 mutex_unlock(lock: &fs_info->unused_bg_unpin_mutex);
4723 break;
4724 }
4725
4726 clear_extent_dirty(tree: unpin, start, end, cached: &cached_state);
4727 free_extent_state(state: cached_state);
4728 btrfs_error_unpin_extent_range(fs_info, start, end);
4729 mutex_unlock(lock: &fs_info->unused_bg_unpin_mutex);
4730 cond_resched();
4731 }
4732}
4733
4734static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache)
4735{
4736 struct inode *inode;
4737
4738 inode = cache->io_ctl.inode;
4739 if (inode) {
4740 unsigned int nofs_flag;
4741
4742 nofs_flag = memalloc_nofs_save();
4743 invalidate_inode_pages2(mapping: inode->i_mapping);
4744 memalloc_nofs_restore(flags: nofs_flag);
4745
4746 BTRFS_I(inode)->generation = 0;
4747 cache->io_ctl.inode = NULL;
4748 iput(inode);
4749 }
4750 ASSERT(cache->io_ctl.pages == NULL);
4751 btrfs_put_block_group(cache);
4752}
4753
4754void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans,
4755 struct btrfs_fs_info *fs_info)
4756{
4757 struct btrfs_block_group *cache;
4758
4759 spin_lock(lock: &cur_trans->dirty_bgs_lock);
4760 while (!list_empty(head: &cur_trans->dirty_bgs)) {
4761 cache = list_first_entry(&cur_trans->dirty_bgs,
4762 struct btrfs_block_group,
4763 dirty_list);
4764
4765 if (!list_empty(head: &cache->io_list)) {
4766 spin_unlock(lock: &cur_trans->dirty_bgs_lock);
4767 list_del_init(entry: &cache->io_list);
4768 btrfs_cleanup_bg_io(cache);
4769 spin_lock(lock: &cur_trans->dirty_bgs_lock);
4770 }
4771
4772 list_del_init(entry: &cache->dirty_list);
4773 spin_lock(lock: &cache->lock);
4774 cache->disk_cache_state = BTRFS_DC_ERROR;
4775 spin_unlock(lock: &cache->lock);
4776
4777 spin_unlock(lock: &cur_trans->dirty_bgs_lock);
4778 btrfs_put_block_group(cache);
4779 btrfs_dec_delayed_refs_rsv_bg_updates(fs_info);
4780 spin_lock(lock: &cur_trans->dirty_bgs_lock);
4781 }
4782 spin_unlock(lock: &cur_trans->dirty_bgs_lock);
4783
4784 /*
4785 * Refer to the definition of the io_bgs member for details on why it's
4786 * safe to use it without any locking.
4787 */
4788 while (!list_empty(head: &cur_trans->io_bgs)) {
4789 cache = list_first_entry(&cur_trans->io_bgs,
4790 struct btrfs_block_group,
4791 io_list);
4792
4793 list_del_init(entry: &cache->io_list);
4794 spin_lock(lock: &cache->lock);
4795 cache->disk_cache_state = BTRFS_DC_ERROR;
4796 spin_unlock(lock: &cache->lock);
4797 btrfs_cleanup_bg_io(cache);
4798 }
4799}
4800
4801void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
4802 struct btrfs_fs_info *fs_info)
4803{
4804 struct btrfs_device *dev, *tmp;
4805
4806 btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
4807 ASSERT(list_empty(&cur_trans->dirty_bgs));
4808 ASSERT(list_empty(&cur_trans->io_bgs));
4809
4810 list_for_each_entry_safe(dev, tmp, &cur_trans->dev_update_list,
4811 post_commit_list) {
4812 list_del_init(entry: &dev->post_commit_list);
4813 }
4814
4815 btrfs_destroy_delayed_refs(trans: cur_trans, fs_info);
4816
4817 cur_trans->state = TRANS_STATE_COMMIT_START;
4818 wake_up(&fs_info->transaction_blocked_wait);
4819
4820 cur_trans->state = TRANS_STATE_UNBLOCKED;
4821 wake_up(&fs_info->transaction_wait);
4822
4823 btrfs_destroy_delayed_inodes(fs_info);
4824
4825 btrfs_destroy_marked_extents(fs_info, dirty_pages: &cur_trans->dirty_pages,
4826 mark: EXTENT_DIRTY);
4827 btrfs_destroy_pinned_extent(fs_info, unpin: &cur_trans->pinned_extents);
4828
4829 cur_trans->state = TRANS_STATE_COMPLETED;
4830 wake_up(&cur_trans->commit_wait);
4831}
4832
4833static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info)
4834{
4835 struct btrfs_transaction *t;
4836
4837 mutex_lock(&fs_info->transaction_kthread_mutex);
4838
4839 spin_lock(lock: &fs_info->trans_lock);
4840 while (!list_empty(head: &fs_info->trans_list)) {
4841 t = list_first_entry(&fs_info->trans_list,
4842 struct btrfs_transaction, list);
4843 if (t->state >= TRANS_STATE_COMMIT_PREP) {
4844 refcount_inc(r: &t->use_count);
4845 spin_unlock(lock: &fs_info->trans_lock);
4846 btrfs_wait_for_commit(fs_info, transid: t->transid);
4847 btrfs_put_transaction(transaction: t);
4848 spin_lock(lock: &fs_info->trans_lock);
4849 continue;
4850 }
4851 if (t == fs_info->running_transaction) {
4852 t->state = TRANS_STATE_COMMIT_DOING;
4853 spin_unlock(lock: &fs_info->trans_lock);
4854 /*
4855 * We wait for 0 num_writers since we don't hold a trans
4856 * handle open currently for this transaction.
4857 */
4858 wait_event(t->writer_wait,
4859 atomic_read(&t->num_writers) == 0);
4860 } else {
4861 spin_unlock(lock: &fs_info->trans_lock);
4862 }
4863 btrfs_cleanup_one_transaction(cur_trans: t, fs_info);
4864
4865 spin_lock(lock: &fs_info->trans_lock);
4866 if (t == fs_info->running_transaction)
4867 fs_info->running_transaction = NULL;
4868 list_del_init(entry: &t->list);
4869 spin_unlock(lock: &fs_info->trans_lock);
4870
4871 btrfs_put_transaction(transaction: t);
4872 trace_btrfs_transaction_commit(fs_info);
4873 spin_lock(lock: &fs_info->trans_lock);
4874 }
4875 spin_unlock(lock: &fs_info->trans_lock);
4876 btrfs_destroy_all_ordered_extents(fs_info);
4877 btrfs_destroy_delayed_inodes(fs_info);
4878 btrfs_assert_delayed_root_empty(fs_info);
4879 btrfs_destroy_all_delalloc_inodes(fs_info);
4880 btrfs_drop_all_logs(fs_info);
4881 mutex_unlock(lock: &fs_info->transaction_kthread_mutex);
4882
4883 return 0;
4884}
4885
4886int btrfs_init_root_free_objectid(struct btrfs_root *root)
4887{
4888 struct btrfs_path *path;
4889 int ret;
4890 struct extent_buffer *l;
4891 struct btrfs_key search_key;
4892 struct btrfs_key found_key;
4893 int slot;
4894
4895 path = btrfs_alloc_path();
4896 if (!path)
4897 return -ENOMEM;
4898
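 /*
  * Position the path at the largest possible key: no valid item can have
  * this exact key (see the BUG_ON below), so btrfs_search_slot() returns
  * the slot where such a key would be inserted, and the item in the
  * previous slot carries the highest objectid currently in use.
  */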
4899 search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
4900 search_key.type = -1;
4901 search_key.offset = (u64)-1;
4902 ret = btrfs_search_slot(NULL, root, key: &search_key, p: path, ins_len: 0, cow: 0);
4903 if (ret < 0)
4904 goto error;
4905 BUG_ON(ret == 0); /* Corruption */
4906 if (path->slots[0] > 0) {
4907 slot = path->slots[0] - 1;
4908 l = path->nodes[0];
4909 btrfs_item_key_to_cpu(eb: l, cpu_key: &found_key, nr: slot);
4910 root->free_objectid = max_t(u64, found_key.objectid + 1,
4911 BTRFS_FIRST_FREE_OBJECTID);
4912 } else {
4913 root->free_objectid = BTRFS_FIRST_FREE_OBJECTID;
4914 }
4915 ret = 0;
4916error:
4917 btrfs_free_path(p: path);
4918 return ret;
4919}
4920
4921int btrfs_get_free_objectid(struct btrfs_root *root, u64 *objectid)
4922{
4923 int ret;

4924 mutex_lock(&root->objectid_mutex);
4925
4926 if (unlikely(root->free_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
4927 btrfs_warn(root->fs_info,
4928 "the objectid of root %llu reaches its highest value",
4929 root->root_key.objectid);
4930 ret = -ENOSPC;
4931 goto out;
4932 }
4933
4934 *objectid = root->free_objectid++;
4935 ret = 0;
4936out:
4937 mutex_unlock(lock: &root->objectid_mutex);
4938 return ret;
4939}
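
/*
 * Example caller (sketch, hypothetical): reserve an objectid for a new
 * inode before inserting its item into the subvolume tree.
 *
 *	u64 objectid;
 *	int ret;
 *
 *	ret = btrfs_get_free_objectid(root, &objectid);
 *	if (ret)
 *		return ret;
 *	// objectid is now unique within @root and can be used as the key
 *	// objectid of the new inode item.
 */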
4940
