// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include <linux/lockdep.h>
#include <linux/crc32c.h>
#include "ctree.h"
#include "extent-tree.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "sysfs.h"
#include "qgroup.h"
#include "ref-verify.h"
#include "space-info.h"
#include "block-rsv.h"
#include "delalloc-space.h"
#include "discard.h"
#include "rcu-string.h"
#include "zoned.h"
#include "dev-replace.h"
#include "fs.h"
#include "accessors.h"
#include "root-tree.h"
#include "file-item.h"
#include "orphan.h"
#include "tree-checker.h"
#include "raid-stripe-tree.h"

#undef SCRAMBLE_DELAYED_REFS

static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                               struct btrfs_delayed_ref_head *href,
                               struct btrfs_delayed_ref_node *node, u64 parent,
                               u64 root_objectid, u64 owner_objectid,
                               u64 owner_offset,
                               struct btrfs_delayed_extent_op *);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod, u64 oref_root);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_delayed_ref_node *node,
                                     struct btrfs_delayed_extent_op *extent_op);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);

static int block_group_bits(struct btrfs_block_group *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_fs_info *fs_info, u64 start, u64 len)
{
        struct btrfs_root *root = btrfs_extent_root(fs_info, start);
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = start;
        key.offset = len;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        btrfs_free_path(path);
        return ret;
}
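
/*
 * Usage sketch (illustrative, hypothetical values): the return follows the
 * btrfs_search_slot() convention, i.e. 0 means an EXTENT_ITEM with the
 * exact (start, len) key exists, > 0 means it was not found, < 0 is an
 * error from the search itself:
 *
 *	ret = btrfs_lookup_data_extent(fs_info, 12582912, 4096);
 *	if (ret < 0)
 *		return ret;		// search failed
 *	found = (ret == 0);		// exact key match in the extent tree
 */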

/*
 * Helper function to look up the reference count and flags of a tree block.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. The head node may
 * also store the extent flags to set. This way you can check to see what
 * the reference count and extent flags would be if all of the delayed refs
 * are not processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info, u64 bytenr,
                             u64 offset, int metadata, u64 *refs, u64 *flags)
{
        struct btrfs_root *extent_root;
        struct btrfs_delayed_ref_head *head;
        struct btrfs_delayed_ref_root *delayed_refs;
        struct btrfs_path *path;
        struct btrfs_extent_item *ei;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u32 item_size;
        u64 num_refs;
        u64 extent_flags;
        int ret;

        /*
         * If we don't have skinny metadata, don't bother doing anything
         * different
         */
        if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
                offset = fs_info->nodesize;
                metadata = 0;
        }

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (!trans) {
                path->skip_locking = 1;
                path->search_commit_root = 1;
        }

search_again:
        key.objectid = bytenr;
        key.offset = offset;
        if (metadata)
                key.type = BTRFS_METADATA_ITEM_KEY;
        else
                key.type = BTRFS_EXTENT_ITEM_KEY;

        extent_root = btrfs_extent_root(fs_info, bytenr);
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto out_free;

        if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
                if (path->slots[0]) {
                        path->slots[0]--;
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0]);
                        if (key.objectid == bytenr &&
                            key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == fs_info->nodesize)
                                ret = 0;
                }
        }

        if (ret == 0) {
                leaf = path->nodes[0];
                item_size = btrfs_item_size(leaf, path->slots[0]);
                if (item_size >= sizeof(*ei)) {
                        ei = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_extent_item);
                        num_refs = btrfs_extent_refs(leaf, ei);
                        extent_flags = btrfs_extent_flags(leaf, ei);
                } else {
                        ret = -EUCLEAN;
                        btrfs_err(fs_info,
                        "unexpected extent item size, has %u expect >= %zu",
                                  item_size, sizeof(*ei));
                        if (trans)
                                btrfs_abort_transaction(trans, ret);
                        else
                                btrfs_handle_fs_error(fs_info, ret, NULL);

                        goto out_free;
                }

                BUG_ON(num_refs == 0);
        } else {
                num_refs = 0;
                extent_flags = 0;
                ret = 0;
        }

        if (!trans)
                goto out;

        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
        head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
        if (head) {
                if (!mutex_trylock(&head->mutex)) {
                        refcount_inc(&head->refs);
                        spin_unlock(&delayed_refs->lock);

                        btrfs_release_path(path);

                        /*
                         * Mutex was contended, block until it's released and try
                         * again
                         */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref_head(head);
                        goto search_again;
                }
                spin_lock(&head->lock);
                if (head->extent_op && head->extent_op->update_flags)
                        extent_flags |= head->extent_op->flags_to_set;
                else
                        BUG_ON(num_refs == 0);

                num_refs += head->ref_mod;
                spin_unlock(&head->lock);
                mutex_unlock(&head->mutex);
        }
        spin_unlock(&delayed_refs->lock);
out:
        WARN_ON(num_refs == 0);
        if (refs)
                *refs = num_refs;
        if (flags)
                *flags = extent_flags;
out_free:
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules. Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance. This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Actually full back refs are generic, and can
 * be used in all cases where implicit back refs are used. The major
 * shortcoming of full back refs is their overhead. Every time a tree
 * block gets COWed, we have to update the back refs entry for all pointers
 * in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are entailed to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is the
 * objectid of the block's owner tree. The key offset for the full back
 * refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block are required. This information is stored in
 * the tree block info structure.
 */
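
/*
 * Illustrative example (hypothetical values, not from any real image):
 * assume a 4096 byte data extent at bytenr 12582912, referenced by inode
 * 257 at file offset 0 in subvolume 5. With implicit back refs the extent
 * tree holds:
 *
 *     (12582912 EXTENT_ITEM 4096)
 *     (12582912 EXTENT_DATA_REF hash(5, 257, 0))
 *
 * If the leaf holding that file extent item is later shared (e.g. via a
 * snapshot) and converted to full back refs, the second key instead
 * becomes:
 *
 *     (12582912 SHARED_DATA_REF <bytenr of the parent leaf>)
 */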

/*
 * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
 * is_data == BTRFS_REF_TYPE_DATA, data type is required,
 * is_data == BTRFS_REF_TYPE_ANY, either type is OK.
 */
int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
                                     struct btrfs_extent_inline_ref *iref,
                                     enum btrfs_inline_ref_type is_data)
{
        struct btrfs_fs_info *fs_info = eb->fs_info;
        int type = btrfs_extent_inline_ref_type(eb, iref);
        u64 offset = btrfs_extent_inline_ref_offset(eb, iref);

        if (type == BTRFS_EXTENT_OWNER_REF_KEY) {
                ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
                return type;
        }

        if (type == BTRFS_TREE_BLOCK_REF_KEY ||
            type == BTRFS_SHARED_BLOCK_REF_KEY ||
            type == BTRFS_SHARED_DATA_REF_KEY ||
            type == BTRFS_EXTENT_DATA_REF_KEY) {
                if (is_data == BTRFS_REF_TYPE_BLOCK) {
                        if (type == BTRFS_TREE_BLOCK_REF_KEY)
                                return type;
                        if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
                                ASSERT(fs_info);
                                /*
                                 * Every shared one has a parent tree block,
                                 * which must be aligned to sector size.
                                 */
                                if (offset && IS_ALIGNED(offset, fs_info->sectorsize))
                                        return type;
                        }
                } else if (is_data == BTRFS_REF_TYPE_DATA) {
                        if (type == BTRFS_EXTENT_DATA_REF_KEY)
                                return type;
                        if (type == BTRFS_SHARED_DATA_REF_KEY) {
                                ASSERT(fs_info);
                                /*
                                 * Every shared one has a parent tree block,
                                 * which must be aligned to sector size.
                                 */
                                if (offset &&
                                    IS_ALIGNED(offset, fs_info->sectorsize))
                                        return type;
                        }
                } else {
                        ASSERT(is_data == BTRFS_REF_TYPE_ANY);
                        return type;
                }
        }

        WARN_ON(1);
        btrfs_print_leaf(eb);
        btrfs_err(fs_info,
                  "eb %llu iref 0x%lx invalid extent inline ref type %d",
                  eb->start, (unsigned long)iref, type);

        return BTRFS_REF_TYPE_INVALID;
}

u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}
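
/*
 * Note on the hash composition above: the root objectid is folded into the
 * high half and (inode objectid, file offset) into the low half, then the
 * halves are combined with a 31-bit (not 32-bit) shift, presumably a
 * historical quirk that is now fixed in stone. The result is stored on disk
 * as the key offset of EXTENT_DATA_REF items, so the exact computation must
 * never change.
 */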

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(trans, leaf);
        ret = 0;
fail:
        btrfs_release_path(path);
        return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           int refs_to_drop)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref1 = NULL;
        struct btrfs_shared_data_ref *ref2 = NULL;
        struct extent_buffer *leaf;
        u32 num_refs = 0;
        int ret = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
        } else {
                btrfs_err(trans->fs_info,
                          "unrecognized backref key (%llu %u %llu)",
                          key.objectid, key.type, key.offset);
                btrfs_abort_transaction(trans, -EUCLEAN);
                return -EUCLEAN;
        }

        BUG_ON(num_refs < refs_to_drop);
        num_refs -= refs_to_drop;

        if (num_refs == 0) {
                ret = btrfs_del_item(trans, root, path);
        } else {
                if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
                else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
                        btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
                btrfs_mark_buffer_dirty(trans, leaf);
        }
        return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_path *path,
                                          struct btrfs_extent_inline_ref *iref)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_data_ref *ref1;
        struct btrfs_shared_data_ref *ref2;
        u32 num_refs = 0;
        int type;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (iref) {
                /*
                 * If type is invalid, we should have bailed out earlier than
                 * this call.
                 */
                type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
                ASSERT(type != BTRFS_REF_TYPE_INVALID);
                if (type == BTRFS_EXTENT_DATA_REF_KEY) {
                        ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
                        num_refs = btrfs_extent_data_ref_count(leaf, ref1);
                } else {
                        ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
                        num_refs = btrfs_shared_data_ref_count(leaf, ref2);
                }
        } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
        } else {
                WARN_ON(1);
        }
        return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -ENOENT;
        return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_root *root = btrfs_extent_root(trans->fs_info, bytenr);
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
        btrfs_release_path(path);
        return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
        int type;
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                if (parent > 0)
                        type = BTRFS_SHARED_BLOCK_REF_KEY;
                else
                        type = BTRFS_TREE_BLOCK_REF_KEY;
        } else {
                if (parent > 0)
                        type = BTRFS_SHARED_DATA_REF_KEY;
                else
                        type = BTRFS_EXTENT_DATA_REF_KEY;
        }
        return type;
}
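
/*
 * For example (hypothetical values): for a tree block, owner holds the
 * block's level (always < BTRFS_FIRST_FREE_OBJECTID), so parent == 0
 * yields BTRFS_TREE_BLOCK_REF_KEY and a nonzero parent bytenr yields
 * BTRFS_SHARED_BLOCK_REF_KEY. For a data extent, owner is the inode
 * number (say 257, >= BTRFS_FIRST_FREE_OBJECTID), giving
 * BTRFS_EXTENT_DATA_REF_KEY, or BTRFS_SHARED_DATA_REF_KEY when shared
 * (parent != 0).
 */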

static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key)

{
        for (; level < BTRFS_MAX_LEVEL; level++) {
                if (!path->nodes[level])
                        break;
                if (path->slots[level] + 1 >=
                    btrfs_header_nritems(path->nodes[level]))
                        continue;
                if (level == 0)
                        btrfs_item_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                else
                        btrfs_node_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                return 0;
        }
        return 1;
}

/*
 * Look for an inline back ref. If the back ref is found, *ref_ret is set
 * to the address of the inline back ref, and 0 is returned.
 *
 * If the back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * If insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 * items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_path *path,
                                 struct btrfs_extent_inline_ref **ref_ret,
                                 u64 bytenr, u64 num_bytes,
                                 u64 parent, u64 root_objectid,
                                 u64 owner, u64 offset, int insert)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        struct btrfs_root *root = btrfs_extent_root(fs_info, bytenr);
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_item *ei;
        struct btrfs_extent_inline_ref *iref;
        u64 flags;
        u64 item_size;
        unsigned long ptr;
        unsigned long end;
        int extra_size;
        int type;
        int want;
        int ret;
        bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA);
        int needed;

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = num_bytes;

        want = extent_ref_type(parent, owner);
        if (insert) {
                extra_size = btrfs_extent_inline_ref_size(want);
                path->search_for_extension = 1;
                path->keep_locks = 1;
        } else
                extra_size = -1;

        /*
         * Owner is our level, so we can just add one to get the level for the
         * block we are interested in.
         */
        if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
                key.type = BTRFS_METADATA_ITEM_KEY;
                key.offset = owner;
        }

again:
        ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
        if (ret < 0)
                goto out;

        /*
         * We may be a newly converted file system which still has the old fat
         * extent entries for metadata, so try and see if we have one of those.
         */
        if (ret > 0 && skinny_metadata) {
                skinny_metadata = false;
                if (path->slots[0]) {
                        path->slots[0]--;
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0]);
                        if (key.objectid == bytenr &&
                            key.type == BTRFS_EXTENT_ITEM_KEY &&
                            key.offset == num_bytes)
                                ret = 0;
                }
                if (ret) {
                        key.objectid = bytenr;
                        key.type = BTRFS_EXTENT_ITEM_KEY;
                        key.offset = num_bytes;
                        btrfs_release_path(path);
                        goto again;
                }
        }

        if (ret && !insert) {
                ret = -ENOENT;
                goto out;
        } else if (WARN_ON(ret)) {
                btrfs_print_leaf(path->nodes[0]);
                btrfs_err(fs_info,
"extent item not found for insert, bytenr %llu num_bytes %llu parent %llu root_objectid %llu owner %llu offset %llu",
                          bytenr, num_bytes, parent, root_objectid, owner,
                          offset);
                ret = -EUCLEAN;
                goto out;
        }

        leaf = path->nodes[0];
        item_size = btrfs_item_size(leaf, path->slots[0]);
        if (unlikely(item_size < sizeof(*ei))) {
                ret = -EUCLEAN;
                btrfs_err(fs_info,
                          "unexpected extent item size, has %llu expect >= %zu",
                          item_size, sizeof(*ei));
                btrfs_abort_transaction(trans, ret);
                goto out;
        }

        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        flags = btrfs_extent_flags(leaf, ei);

        ptr = (unsigned long)(ei + 1);
        end = (unsigned long)ei + item_size;

        if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
                ptr += sizeof(struct btrfs_tree_block_info);
                BUG_ON(ptr > end);
        }

        if (owner >= BTRFS_FIRST_FREE_OBJECTID)
                needed = BTRFS_REF_TYPE_DATA;
        else
                needed = BTRFS_REF_TYPE_BLOCK;

        ret = -ENOENT;
        while (ptr < end) {
                iref = (struct btrfs_extent_inline_ref *)ptr;
                type = btrfs_get_extent_inline_ref_type(leaf, iref, needed);
                if (type == BTRFS_EXTENT_OWNER_REF_KEY) {
                        ASSERT(btrfs_fs_incompat(fs_info, SIMPLE_QUOTA));
                        ptr += btrfs_extent_inline_ref_size(type);
                        continue;
                }
                if (type == BTRFS_REF_TYPE_INVALID) {
                        ret = -EUCLEAN;
                        goto out;
                }

                if (want < type)
                        break;
                if (want > type) {
                        ptr += btrfs_extent_inline_ref_size(type);
                        continue;
                }

                if (type == BTRFS_EXTENT_DATA_REF_KEY) {
                        struct btrfs_extent_data_ref *dref;
                        dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                        if (match_extent_data_ref(leaf, dref, root_objectid,
                                                  owner, offset)) {
                                ret = 0;
                                break;
                        }
                        if (hash_extent_data_ref_item(leaf, dref) <
                            hash_extent_data_ref(root_objectid, owner, offset))
                                break;
                } else {
                        u64 ref_offset;
                        ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
                        if (parent > 0) {
                                if (parent == ref_offset) {
                                        ret = 0;
                                        break;
                                }
                                if (ref_offset < parent)
                                        break;
                        } else {
                                if (root_objectid == ref_offset) {
                                        ret = 0;
                                        break;
                                }
                                if (ref_offset < root_objectid)
                                        break;
                        }
                }
                ptr += btrfs_extent_inline_ref_size(type);
        }

        if (unlikely(ptr > end)) {
                ret = -EUCLEAN;
                btrfs_print_leaf(path->nodes[0]);
                btrfs_crit(fs_info,
"overrun extent record at slot %d while looking for inline extent for root %llu owner %llu offset %llu parent %llu",
                           path->slots[0], root_objectid, owner, offset, parent);
                goto out;
        }

        if (ret == -ENOENT && insert) {
                if (item_size + extra_size >=
                    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
                        ret = -EAGAIN;
                        goto out;
                }
                /*
                 * To add a new inline back ref, we have to make sure
                 * there is no corresponding back ref item.
                 * For simplicity, we just do not add a new inline back
                 * ref if there is any kind of item for this block.
                 */
                if (find_next_key(path, 0, &key) == 0 &&
                    key.objectid == bytenr &&
                    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
                        ret = -EAGAIN;
                        goto out;
                }
        }
        *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
        if (insert) {
                path->keep_locks = 0;
                path->search_for_extension = 0;
                btrfs_unlock_up_safe(path, 1);
        }
        return ret;
}

/*
 * helper to add a new inline back ref
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_path *path,
                                 struct btrfs_extent_inline_ref *iref,
                                 u64 parent, u64 root_objectid,
                                 u64 owner, u64 offset, int refs_to_add,
                                 struct btrfs_delayed_extent_op *extent_op)
{
        struct extent_buffer *leaf;
        struct btrfs_extent_item *ei;
        unsigned long ptr;
        unsigned long end;
        unsigned long item_offset;
        u64 refs;
        int size;
        int type;

        leaf = path->nodes[0];
        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        item_offset = (unsigned long)iref - (unsigned long)ei;

        type = extent_ref_type(parent, owner);
        size = btrfs_extent_inline_ref_size(type);

        btrfs_extend_item(trans, path, size);

        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        refs = btrfs_extent_refs(leaf, ei);
        refs += refs_to_add;
        btrfs_set_extent_refs(leaf, ei, refs);
        if (extent_op)
                __run_delayed_extent_op(extent_op, leaf, ei);

        ptr = (unsigned long)ei + item_offset;
        end = (unsigned long)ei + btrfs_item_size(leaf, path->slots[0]);
        if (ptr < end - size)
                memmove_extent_buffer(leaf, ptr + size, ptr,
                                      end - size - ptr);

        iref = (struct btrfs_extent_inline_ref *)ptr;
        btrfs_set_extent_inline_ref_type(leaf, iref, type);
        if (type == BTRFS_EXTENT_DATA_REF_KEY) {
                struct btrfs_extent_data_ref *dref;
                dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
                btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
                btrfs_set_extent_data_ref_offset(leaf, dref, offset);
                btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
        } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
                struct btrfs_shared_data_ref *sref;
                sref = (struct btrfs_shared_data_ref *)(iref + 1);
                btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
                btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
        } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
                btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
        } else {
                btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
        }
        btrfs_mark_buffer_dirty(trans, leaf);
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_path *path,
                                 struct btrfs_extent_inline_ref **ref_ret,
                                 u64 bytenr, u64 num_bytes, u64 parent,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        int ret;

        ret = lookup_inline_extent_backref(trans, path, ref_ret, bytenr,
                                           num_bytes, parent, root_objectid,
                                           owner, offset, 0);
        if (ret != -ENOENT)
                return ret;

        btrfs_release_path(path);
        *ref_ret = NULL;

        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                ret = lookup_tree_block_ref(trans, path, bytenr, parent,
                                            root_objectid);
        } else {
                ret = lookup_extent_data_ref(trans, path, bytenr, parent,
                                             root_objectid, owner, offset);
        }
        return ret;
}

/*
 * helper to update/remove an inline back ref
 */
static noinline_for_stack int update_inline_extent_backref(
                                  struct btrfs_trans_handle *trans,
                                  struct btrfs_path *path,
                                  struct btrfs_extent_inline_ref *iref,
                                  int refs_to_mod,
                                  struct btrfs_delayed_extent_op *extent_op)
{
        struct extent_buffer *leaf = path->nodes[0];
        struct btrfs_fs_info *fs_info = leaf->fs_info;
        struct btrfs_extent_item *ei;
        struct btrfs_extent_data_ref *dref = NULL;
        struct btrfs_shared_data_ref *sref = NULL;
        unsigned long ptr;
        unsigned long end;
        u32 item_size;
        int size;
        int type;
        u64 refs;

        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        refs = btrfs_extent_refs(leaf, ei);
        if (unlikely(refs_to_mod < 0 && refs + refs_to_mod <= 0)) {
                struct btrfs_key key;
                u32 extent_size;

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.type == BTRFS_METADATA_ITEM_KEY)
                        extent_size = fs_info->nodesize;
                else
                        extent_size = key.offset;
                btrfs_print_leaf(leaf);
                btrfs_err(fs_info,
        "invalid refs_to_mod for extent %llu num_bytes %u, has %d expect >= -%llu",
                          key.objectid, extent_size, refs_to_mod, refs);
                return -EUCLEAN;
        }
        refs += refs_to_mod;
        btrfs_set_extent_refs(leaf, ei, refs);
        if (extent_op)
                __run_delayed_extent_op(extent_op, leaf, ei);

        type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
        /*
         * Function btrfs_get_extent_inline_ref_type() has already printed
         * error messages.
         */
        if (unlikely(type == BTRFS_REF_TYPE_INVALID))
                return -EUCLEAN;

        if (type == BTRFS_EXTENT_DATA_REF_KEY) {
                dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                refs = btrfs_extent_data_ref_count(leaf, dref);
        } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
                sref = (struct btrfs_shared_data_ref *)(iref + 1);
                refs = btrfs_shared_data_ref_count(leaf, sref);
        } else {
                refs = 1;
                /*
                 * For a tree block we can only drop one ref, and tree blocks
                 * should not have refs > 1.
                 *
                 * Furthermore if we're inserting a new inline backref, we
                 * won't reach this path either. That would be
                 * setup_inline_extent_backref().
                 */
                if (unlikely(refs_to_mod != -1)) {
                        struct btrfs_key key;

                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

                        btrfs_print_leaf(leaf);
                        btrfs_err(fs_info,
                        "invalid refs_to_mod for tree block %llu, has %d expect -1",
                                  key.objectid, refs_to_mod);
                        return -EUCLEAN;
                }
        }

        if (unlikely(refs_to_mod < 0 && refs < -refs_to_mod)) {
                struct btrfs_key key;
                u32 extent_size;

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.type == BTRFS_METADATA_ITEM_KEY)
                        extent_size = fs_info->nodesize;
                else
                        extent_size = key.offset;
                btrfs_print_leaf(leaf);
                btrfs_err(fs_info,
"invalid refs_to_mod for backref entry, iref %lu extent %llu num_bytes %u, has %d expect >= -%llu",
                          (unsigned long)iref, key.objectid, extent_size,
                          refs_to_mod, refs);
                return -EUCLEAN;
        }
        refs += refs_to_mod;

        if (refs > 0) {
                if (type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, dref, refs);
                else
                        btrfs_set_shared_data_ref_count(leaf, sref, refs);
        } else {
                size = btrfs_extent_inline_ref_size(type);
                item_size = btrfs_item_size(leaf, path->slots[0]);
                ptr = (unsigned long)iref;
                end = (unsigned long)ei + item_size;
                if (ptr + size < end)
                        memmove_extent_buffer(leaf, ptr, ptr + size,
                                              end - ptr - size);
                item_size -= size;
                btrfs_truncate_item(trans, path, item_size, 1);
        }
        btrfs_mark_buffer_dirty(trans, leaf);
        return 0;
}

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_path *path,
                                 u64 bytenr, u64 num_bytes, u64 parent,
                                 u64 root_objectid, u64 owner,
                                 u64 offset, int refs_to_add,
                                 struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_extent_inline_ref *iref;
        int ret;

        ret = lookup_inline_extent_backref(trans, path, &iref, bytenr,
                                           num_bytes, parent, root_objectid,
                                           owner, offset, 1);
        if (ret == 0) {
                /*
                 * We're adding refs to a tree block we already own, this
                 * should not happen at all.
                 */
                if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                        btrfs_print_leaf(path->nodes[0]);
                        btrfs_crit(trans->fs_info,
"adding refs to an existing tree ref, bytenr %llu num_bytes %llu root_objectid %llu slot %u",
                                   bytenr, num_bytes, root_objectid,
                                   path->slots[0]);
                        return -EUCLEAN;
                }
                ret = update_inline_extent_backref(trans, path, iref,
                                                   refs_to_add, extent_op);
        } else if (ret == -ENOENT) {
                setup_inline_extent_backref(trans, path, iref, parent,
                                            root_objectid, owner, offset,
                                            refs_to_add, extent_op);
                ret = 0;
        }
        return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_extent_inline_ref *iref,
                                 int refs_to_drop, int is_data)
{
        int ret = 0;

        BUG_ON(!is_data && refs_to_drop != 1);
        if (iref)
                ret = update_inline_extent_backref(trans, path, iref,
                                                   -refs_to_drop, NULL);
        else if (is_data)
                ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
        else
                ret = btrfs_del_item(trans, root, path);
        return ret;
}

static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
                               u64 *discarded_bytes)
{
        int j, ret = 0;
        u64 bytes_left, end;
        u64 aligned_start = ALIGN(start, 1 << SECTOR_SHIFT);

        if (WARN_ON(start != aligned_start)) {
                len -= aligned_start - start;
                len = round_down(len, 1 << SECTOR_SHIFT);
                start = aligned_start;
        }

        *discarded_bytes = 0;

        if (!len)
                return 0;

        end = start + len;
        bytes_left = len;

        /* Skip any superblocks on this device. */
        for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
                u64 sb_start = btrfs_sb_offset(j);
                u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
                u64 size = sb_start - start;

                if (!in_range(sb_start, start, bytes_left) &&
                    !in_range(sb_end, start, bytes_left) &&
                    !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
                        continue;

                /*
                 * Superblock spans beginning of range. Adjust start and
                 * try again.
                 */
                if (sb_start <= start) {
                        start += sb_end - start;
                        if (start > end) {
                                bytes_left = 0;
                                break;
                        }
                        bytes_left = end - start;
                        continue;
                }

                if (size) {
                        ret = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
                                                   size >> SECTOR_SHIFT,
                                                   GFP_NOFS);
                        if (!ret)
                                *discarded_bytes += size;
                        else if (ret != -EOPNOTSUPP)
                                return ret;
                }

                start = sb_end;
                if (start > end) {
                        bytes_left = 0;
                        break;
                }
                bytes_left = end - start;
        }

        if (bytes_left) {
                ret = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
                                           bytes_left >> SECTOR_SHIFT,
                                           GFP_NOFS);
                if (!ret)
                        *discarded_bytes += bytes_left;
        }
        return ret;
}
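
/*
 * Illustrative example (hypothetical request): the primary superblock lives
 * at 64K on every device, so a discard of [0, 1M) is split by the loop
 * above into [0, 64K) and [64K + BTRFS_SUPER_INFO_SIZE, 1M); the superblock
 * copy itself is never discarded.
 */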

static int do_discard_extent(struct btrfs_discard_stripe *stripe, u64 *bytes)
{
        struct btrfs_device *dev = stripe->dev;
        struct btrfs_fs_info *fs_info = dev->fs_info;
        struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
        u64 phys = stripe->physical;
        u64 len = stripe->length;
        u64 discarded = 0;
        int ret = 0;

        /* Zone reset on a zoned filesystem */
        if (btrfs_can_zone_reset(dev, phys, len)) {
                u64 src_disc;

                ret = btrfs_reset_device_zone(dev, phys, len, &discarded);
                if (ret)
                        goto out;

                if (!btrfs_dev_replace_is_ongoing(dev_replace) ||
                    dev != dev_replace->srcdev)
                        goto out;

                src_disc = discarded;

                /* Send to replace target as well */
                ret = btrfs_reset_device_zone(dev_replace->tgtdev, phys, len,
                                              &discarded);
                discarded += src_disc;
        } else if (bdev_max_discard_sectors(stripe->dev->bdev)) {
                ret = btrfs_issue_discard(dev->bdev, phys, len, &discarded);
        } else {
                ret = 0;
                *bytes = 0;
        }

out:
        *bytes = discarded;
        return ret;
}

int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
                         u64 num_bytes, u64 *actual_bytes)
{
        int ret = 0;
        u64 discarded_bytes = 0;
        u64 end = bytenr + num_bytes;
        u64 cur = bytenr;

        /*
         * Avoid races with device replace and make sure the devices in the
         * stripes don't go away while we are discarding.
         */
        btrfs_bio_counter_inc_blocked(fs_info);
        while (cur < end) {
                struct btrfs_discard_stripe *stripes;
                unsigned int num_stripes;
                int i;

                num_bytes = end - cur;
                stripes = btrfs_map_discard(fs_info, cur, &num_bytes, &num_stripes);
                if (IS_ERR(stripes)) {
                        ret = PTR_ERR(stripes);
                        if (ret == -EOPNOTSUPP)
                                ret = 0;
                        break;
                }

                for (i = 0; i < num_stripes; i++) {
                        struct btrfs_discard_stripe *stripe = stripes + i;
                        u64 bytes;

                        if (!stripe->dev->bdev) {
                                ASSERT(btrfs_test_opt(fs_info, DEGRADED));
                                continue;
                        }

                        if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
                                      &stripe->dev->dev_state))
                                continue;

                        ret = do_discard_extent(stripe, &bytes);
                        if (ret) {
                                /*
                                 * Keep going if discard is not supported by the
                                 * device.
                                 */
                                if (ret != -EOPNOTSUPP)
                                        break;
                                ret = 0;
                        } else {
                                discarded_bytes += bytes;
                        }
                }
                kfree(stripes);
                if (ret)
                        break;
                cur += num_bytes;
        }
        btrfs_bio_counter_dec(fs_info);
        if (actual_bytes)
                *actual_bytes = discarded_bytes;
        return ret;
}

/* Can return -ENOMEM */
int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                         struct btrfs_ref *generic_ref)
{
        struct btrfs_fs_info *fs_info = trans->fs_info;
        int ret;

        ASSERT(generic_ref->type != BTRFS_REF_NOT_SET &&
               generic_ref->action);
        BUG_ON(generic_ref->type == BTRFS_REF_METADATA &&
               generic_ref->tree_ref.ref_root == BTRFS_TREE_LOG_OBJECTID);

        if (generic_ref->type == BTRFS_REF_METADATA)
                ret = btrfs_add_delayed_tree_ref(trans, generic_ref, NULL);
        else
                ret = btrfs_add_delayed_data_ref(trans, generic_ref, 0);

        btrfs_ref_tree_mod(fs_info, generic_ref);

        return ret;
}
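
/*
 * Usage sketch (a hedged illustration, not copied from a real caller):
 * callers fill a struct btrfs_ref describing the action and the extent,
 * e.g. via btrfs_init_generic_ref() plus btrfs_init_tree_ref() or
 * btrfs_init_data_ref(), then call btrfs_inc_extent_ref(trans, &ref).
 * This only queues a delayed ref; the extent tree itself is updated later
 * when the delayed refs are run.
 */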

/*
 * Insert a backreference for a given extent.
 *
 * The counterpart is in __btrfs_free_extent(), with examples and more details
 * on how it works.
 *
 * @trans:         Handle of transaction
 *
 * @node:          The delayed ref node used to get the bytenr/length for the
 *                 extent whose references are incremented.
 *
 * @parent:        If this is a shared extent (BTRFS_SHARED_DATA_REF_KEY/
 *                 BTRFS_SHARED_BLOCK_REF_KEY) then it holds the logical
 *                 bytenr of the parent block. Since new extents are always
 *                 created with indirect references, this will only be the
 *                 case when relocating a shared extent. In that case,
 *                 root_objectid will be BTRFS_TREE_RELOC_OBJECTID.
 *                 Otherwise, parent must be 0.
 *
 * @root_objectid: The id of the root where this modification has originated,
 *                 this can be either one of the well-known metadata trees or
 *                 the subvolume id which references this extent.
 *
 * @owner:         For data extents it is the inode number of the owning file.
 *                 For metadata extents this parameter holds the level in the
 *                 tree of the extent.
 *
 * @offset:        For metadata extents the offset is ignored and is currently
 *                 always passed as 0. For data extents it is the file offset
 *                 this extent belongs to.
 *
 * @extent_op:     Pointer to a structure, holding information necessary when
 *                 updating a tree block's flags.
 */
static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                                  struct btrfs_delayed_ref_node *node,
                                  u64 parent, u64 root_objectid,
                                  u64 owner, u64 offset,
                                  struct btrfs_delayed_extent_op *extent_op)
{
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_extent_item *item;
        struct btrfs_key key;
        u64 bytenr = node->bytenr;
        u64 num_bytes = node->num_bytes;
        u64 refs;
        int refs_to_add = node->ref_mod;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        /* This will set up the path even if it fails to insert the back ref. */
        ret = insert_inline_extent_backref(trans, path, bytenr, num_bytes,
                                           parent, root_objectid, owner,
                                           offset, refs_to_add, extent_op);
        if ((ret < 0 && ret != -EAGAIN) || !ret)
                goto out;

        /*
         * Ok we had -EAGAIN which means we didn't have space to insert an
         * inline extent ref, so just update the reference count and add a
         * normal backref.
         */
        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        refs = btrfs_extent_refs(leaf, item);
        btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
        if (extent_op)
                __run_delayed_extent_op(extent_op, leaf, item);

        btrfs_mark_buffer_dirty(trans, leaf);
        btrfs_release_path(path);

        /* Now insert the actual backref. */
        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                ret = insert_tree_block_ref(trans, path, bytenr, parent,
                                            root_objectid);
        else
                ret = insert_extent_data_ref(trans, path, bytenr, parent,
                                             root_objectid, owner, offset,
                                             refs_to_add);

        if (ret)
                btrfs_abort_transaction(trans, ret);
out:
        btrfs_free_path(path);
        return ret;
}
1543 | |
1544 | static int run_delayed_data_ref(struct btrfs_trans_handle *trans, |
1545 | struct btrfs_delayed_ref_head *href, |
1546 | struct btrfs_delayed_ref_node *node, |
1547 | struct btrfs_delayed_extent_op *extent_op, |
1548 | bool insert_reserved) |
1549 | { |
1550 | int ret = 0; |
1551 | struct btrfs_delayed_data_ref *ref; |
1552 | u64 parent = 0; |
1553 | u64 flags = 0; |
1554 | |
1555 | ref = btrfs_delayed_node_to_data_ref(node); |
1556 | trace_run_delayed_data_ref(fs_info: trans->fs_info, ref: node, full_ref: ref, action: node->action); |
1557 | |
1558 | if (node->type == BTRFS_SHARED_DATA_REF_KEY) |
1559 | parent = ref->parent; |
1560 | |
1561 | if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) { |
1562 | struct btrfs_key key; |
1563 | struct btrfs_squota_delta delta = { |
1564 | .root = href->owning_root, |
1565 | .num_bytes = node->num_bytes, |
1566 | .rsv_bytes = href->reserved_bytes, |
1567 | .is_data = true, |
1568 | .is_inc = true, |
1569 | .generation = trans->transid, |
1570 | }; |
1571 | |
1572 | if (extent_op) |
1573 | flags |= extent_op->flags_to_set; |
1574 | |
1575 | key.objectid = node->bytenr; |
1576 | key.type = BTRFS_EXTENT_ITEM_KEY; |
1577 | key.offset = node->num_bytes; |
1578 | |
1579 | ret = alloc_reserved_file_extent(trans, parent, root_objectid: ref->root, |
1580 | flags, owner: ref->objectid, |
1581 | offset: ref->offset, ins: &key, |
1582 | ref_mod: node->ref_mod, oref_root: href->owning_root); |
1583 | if (!ret) |
1584 | ret = btrfs_record_squota_delta(fs_info: trans->fs_info, delta: &delta); |
1585 | else |
1586 | btrfs_qgroup_free_refroot(fs_info: trans->fs_info, ref_root: delta.root, |
1587 | num_bytes: delta.rsv_bytes, type: BTRFS_QGROUP_RSV_DATA); |
1588 | } else if (node->action == BTRFS_ADD_DELAYED_REF) { |
1589 | ret = __btrfs_inc_extent_ref(trans, node, parent, root_objectid: ref->root, |
1590 | owner: ref->objectid, offset: ref->offset, |
1591 | extent_op); |
1592 | } else if (node->action == BTRFS_DROP_DELAYED_REF) { |
1593 | ret = __btrfs_free_extent(trans, href, node, parent, |
1594 | root_objectid: ref->root, owner_objectid: ref->objectid, |
1595 | owner_offset: ref->offset, extra_op: extent_op); |
1596 | } else { |
1597 | BUG(); |
1598 | } |
1599 | return ret; |
1600 | } |
1601 | |
1602 | static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op, |
1603 | struct extent_buffer *leaf, |
1604 | struct btrfs_extent_item *ei) |
1605 | { |
	u64 flags = btrfs_extent_flags(leaf, ei);
	if (extent_op->update_flags) {
		flags |= extent_op->flags_to_set;
		btrfs_set_extent_flags(leaf, ei, flags);
1610 | } |
1611 | |
1612 | if (extent_op->update_key) { |
1613 | struct btrfs_tree_block_info *bi; |
1614 | BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)); |
1615 | bi = (struct btrfs_tree_block_info *)(ei + 1); |
		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
1617 | } |
1618 | } |
1619 | |
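/*
 * Locate the extent item for a delayed ref head and apply its pending
 * extent op.  With skinny metadata we first look for a METADATA_ITEM and
 * fall back to the old style EXTENT_ITEM if that is not found.
 */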
1620 | static int run_delayed_extent_op(struct btrfs_trans_handle *trans, |
1621 | struct btrfs_delayed_ref_head *head, |
1622 | struct btrfs_delayed_extent_op *extent_op) |
1623 | { |
1624 | struct btrfs_fs_info *fs_info = trans->fs_info; |
1625 | struct btrfs_root *root; |
1626 | struct btrfs_key key; |
1627 | struct btrfs_path *path; |
1628 | struct btrfs_extent_item *ei; |
1629 | struct extent_buffer *leaf; |
1630 | u32 item_size; |
1631 | int ret; |
1632 | int metadata = 1; |
1633 | |
1634 | if (TRANS_ABORTED(trans)) |
1635 | return 0; |
1636 | |
1637 | if (!btrfs_fs_incompat(fs_info, SKINNY_METADATA)) |
1638 | metadata = 0; |
1639 | |
1640 | path = btrfs_alloc_path(); |
1641 | if (!path) |
1642 | return -ENOMEM; |
1643 | |
1644 | key.objectid = head->bytenr; |
1645 | |
1646 | if (metadata) { |
1647 | key.type = BTRFS_METADATA_ITEM_KEY; |
1648 | key.offset = extent_op->level; |
1649 | } else { |
1650 | key.type = BTRFS_EXTENT_ITEM_KEY; |
1651 | key.offset = head->num_bytes; |
1652 | } |
1653 | |
	root = btrfs_extent_root(fs_info, key.objectid);
again:
	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1657 | if (ret < 0) { |
1658 | goto out; |
1659 | } else if (ret > 0) { |
1660 | if (metadata) { |
1661 | if (path->slots[0] > 0) { |
1662 | path->slots[0]--; |
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
1665 | if (key.objectid == head->bytenr && |
1666 | key.type == BTRFS_EXTENT_ITEM_KEY && |
1667 | key.offset == head->num_bytes) |
1668 | ret = 0; |
1669 | } |
1670 | if (ret > 0) { |
				btrfs_release_path(path);
1672 | metadata = 0; |
1673 | |
1674 | key.objectid = head->bytenr; |
1675 | key.offset = head->num_bytes; |
1676 | key.type = BTRFS_EXTENT_ITEM_KEY; |
1677 | goto again; |
1678 | } |
1679 | } else { |
1680 | ret = -EUCLEAN; |
1681 | btrfs_err(fs_info, |
1682 | "missing extent item for extent %llu num_bytes %llu level %d" , |
1683 | head->bytenr, head->num_bytes, extent_op->level); |
1684 | goto out; |
1685 | } |
1686 | } |
1687 | |
1688 | leaf = path->nodes[0]; |
	item_size = btrfs_item_size(leaf, path->slots[0]);
1690 | |
1691 | if (unlikely(item_size < sizeof(*ei))) { |
1692 | ret = -EUCLEAN; |
1693 | btrfs_err(fs_info, |
1694 | "unexpected extent item size, has %u expect >= %zu" , |
1695 | item_size, sizeof(*ei)); |
1696 | btrfs_abort_transaction(trans, ret); |
1697 | goto out; |
1698 | } |
1699 | |
1700 | ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); |
1701 | __run_delayed_extent_op(extent_op, leaf, ei); |
1702 | |
	btrfs_mark_buffer_dirty(trans, leaf);
out:
	btrfs_free_path(path);
1706 | return ret; |
1707 | } |
1708 | |
1709 | static int run_delayed_tree_ref(struct btrfs_trans_handle *trans, |
1710 | struct btrfs_delayed_ref_head *href, |
1711 | struct btrfs_delayed_ref_node *node, |
1712 | struct btrfs_delayed_extent_op *extent_op, |
1713 | bool insert_reserved) |
1714 | { |
1715 | int ret = 0; |
1716 | struct btrfs_fs_info *fs_info = trans->fs_info; |
1717 | struct btrfs_delayed_tree_ref *ref; |
1718 | u64 parent = 0; |
1719 | u64 ref_root = 0; |
1720 | |
1721 | ref = btrfs_delayed_node_to_tree_ref(node); |
	trace_run_delayed_tree_ref(trans->fs_info, node, ref, node->action);
1723 | |
1724 | if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) |
1725 | parent = ref->parent; |
1726 | ref_root = ref->root; |
1727 | |
1728 | if (unlikely(node->ref_mod != 1)) { |
1729 | btrfs_err(trans->fs_info, |
1730 | "btree block %llu has %d references rather than 1: action %d ref_root %llu parent %llu" , |
1731 | node->bytenr, node->ref_mod, node->action, ref_root, |
1732 | parent); |
1733 | return -EUCLEAN; |
1734 | } |
1735 | if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) { |
1736 | struct btrfs_squota_delta delta = { |
1737 | .root = href->owning_root, |
1738 | .num_bytes = fs_info->nodesize, |
1739 | .rsv_bytes = 0, |
1740 | .is_data = false, |
1741 | .is_inc = true, |
1742 | .generation = trans->transid, |
1743 | }; |
1744 | |
1745 | BUG_ON(!extent_op || !extent_op->update_flags); |
1746 | ret = alloc_reserved_tree_block(trans, node, extent_op); |
1747 | if (!ret) |
			btrfs_record_squota_delta(fs_info, &delta);
1749 | } else if (node->action == BTRFS_ADD_DELAYED_REF) { |
		ret = __btrfs_inc_extent_ref(trans, node, parent, ref_root,
					     ref->level, 0, extent_op);
	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
		ret = __btrfs_free_extent(trans, href, node, parent, ref_root,
					  ref->level, 0, extent_op);
1755 | } else { |
1756 | BUG(); |
1757 | } |
1758 | return ret; |
1759 | } |
1760 | |
1761 | /* helper function to actually process a single delayed ref entry */ |
1762 | static int run_one_delayed_ref(struct btrfs_trans_handle *trans, |
1763 | struct btrfs_delayed_ref_head *href, |
1764 | struct btrfs_delayed_ref_node *node, |
1765 | struct btrfs_delayed_extent_op *extent_op, |
1766 | bool insert_reserved) |
1767 | { |
1768 | int ret = 0; |
1769 | |
1770 | if (TRANS_ABORTED(trans)) { |
1771 | if (insert_reserved) |
			btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
1773 | return 0; |
1774 | } |
1775 | |
1776 | if (node->type == BTRFS_TREE_BLOCK_REF_KEY || |
1777 | node->type == BTRFS_SHARED_BLOCK_REF_KEY) |
1778 | ret = run_delayed_tree_ref(trans, href, node, extent_op, |
1779 | insert_reserved); |
1780 | else if (node->type == BTRFS_EXTENT_DATA_REF_KEY || |
1781 | node->type == BTRFS_SHARED_DATA_REF_KEY) |
1782 | ret = run_delayed_data_ref(trans, href, node, extent_op, |
1783 | insert_reserved); |
1784 | else if (node->type == BTRFS_EXTENT_OWNER_REF_KEY) |
1785 | ret = 0; |
1786 | else |
1787 | BUG(); |
1788 | if (ret && insert_reserved) |
		btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
1790 | if (ret < 0) |
1791 | btrfs_err(trans->fs_info, |
1792 | "failed to run delayed ref for logical %llu num_bytes %llu type %u action %u ref_mod %d: %d" , |
1793 | node->bytenr, node->num_bytes, node->type, |
1794 | node->action, node->ref_mod, ret); |
1795 | return ret; |
1796 | } |
1797 | |
1798 | static inline struct btrfs_delayed_ref_node * |
1799 | select_delayed_ref(struct btrfs_delayed_ref_head *head) |
1800 | { |
1801 | struct btrfs_delayed_ref_node *ref; |
1802 | |
1803 | if (RB_EMPTY_ROOT(&head->ref_tree.rb_root)) |
1804 | return NULL; |
1805 | |
1806 | /* |
1807 | * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first. |
1808 | * This is to prevent a ref count from going down to zero, which deletes |
1809 | * the extent item from the extent tree, when there still are references |
1810 | * to add, which would fail because they would not find the extent item. |
1811 | */ |
	if (!list_empty(&head->ref_add_list))
1813 | return list_first_entry(&head->ref_add_list, |
1814 | struct btrfs_delayed_ref_node, add_list); |
1815 | |
1816 | ref = rb_entry(rb_first_cached(&head->ref_tree), |
1817 | struct btrfs_delayed_ref_node, ref_node); |
1818 | ASSERT(list_empty(&ref->add_list)); |
1819 | return ref; |
1820 | } |
1821 | |
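/*
 * Put a ref head back so another task can pick it up: clear the
 * processing flag, bump num_heads_ready and drop the head's mutex.
 */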
1822 | static void unselect_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, |
1823 | struct btrfs_delayed_ref_head *head) |
1824 | { |
	spin_lock(&delayed_refs->lock);
	head->processing = false;
	delayed_refs->num_heads_ready++;
	spin_unlock(&delayed_refs->lock);
1829 | btrfs_delayed_ref_unlock(head); |
1830 | } |
1831 | |
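/*
 * Detach and return the head's pending extent op, or NULL if there is
 * none.  If the head must insert a reserved extent the op is freed here
 * rather than returned, since it is not run separately in that case.
 */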
1832 | static struct btrfs_delayed_extent_op *cleanup_extent_op( |
1833 | struct btrfs_delayed_ref_head *head) |
1834 | { |
1835 | struct btrfs_delayed_extent_op *extent_op = head->extent_op; |
1836 | |
1837 | if (!extent_op) |
1838 | return NULL; |
1839 | |
1840 | if (head->must_insert_reserved) { |
1841 | head->extent_op = NULL; |
		btrfs_free_delayed_extent_op(extent_op);
1843 | return NULL; |
1844 | } |
1845 | return extent_op; |
1846 | } |
1847 | |
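/*
 * Run the head's pending extent op, if any.  Returns 1 if an op was run
 * (head->lock was dropped in the process), 0 if there was nothing to do,
 * or a negative errno on failure.
 */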
1848 | static int run_and_cleanup_extent_op(struct btrfs_trans_handle *trans, |
1849 | struct btrfs_delayed_ref_head *head) |
1850 | { |
1851 | struct btrfs_delayed_extent_op *extent_op; |
1852 | int ret; |
1853 | |
1854 | extent_op = cleanup_extent_op(head); |
1855 | if (!extent_op) |
1856 | return 0; |
1857 | head->extent_op = NULL; |
	spin_unlock(&head->lock);
	ret = run_delayed_extent_op(trans, head, extent_op);
	btrfs_free_delayed_extent_op(extent_op);
1861 | return ret ? ret : 1; |
1862 | } |
1863 | |
1864 | u64 btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info, |
1865 | struct btrfs_delayed_ref_root *delayed_refs, |
1866 | struct btrfs_delayed_ref_head *head) |
1867 | { |
1868 | /* |
1869 | * We had csum deletions accounted for in our delayed refs rsv, we need |
1870 | * to drop the csum leaves for this update from our delayed_refs_rsv. |
1871 | */ |
1872 | if (head->total_ref_mod < 0 && head->is_data) { |
1873 | int nr_csums; |
1874 | |
		spin_lock(&delayed_refs->lock);
		delayed_refs->pending_csums -= head->num_bytes;
		spin_unlock(&delayed_refs->lock);
		nr_csums = btrfs_csum_bytes_to_leaves(fs_info, head->num_bytes);

		btrfs_delayed_refs_rsv_release(fs_info, 0, nr_csums);

		return btrfs_calc_delayed_ref_csum_bytes(fs_info, nr_csums);
1883 | } |
1884 | if (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE && |
1885 | head->must_insert_reserved && head->is_data) |
		btrfs_qgroup_free_refroot(fs_info, head->owning_root,
					  head->reserved_bytes, BTRFS_QGROUP_RSV_DATA);
1888 | |
1889 | return 0; |
1890 | } |
1891 | |
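/*
 * Final cleanup once every ref queued on a head has been run: delete the
 * head from the delayed ref tree, pin the extent (and, for data, delete
 * its csums) if we still owe the reserved insertion, and release the ref
 * head accounting.  Returns 1 if new refs or an extent op were added
 * while the locks were dropped and the head must be processed again.
 */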
1892 | static int cleanup_ref_head(struct btrfs_trans_handle *trans, |
1893 | struct btrfs_delayed_ref_head *head, |
1894 | u64 *bytes_released) |
1895 | { |
1896 | |
1897 | struct btrfs_fs_info *fs_info = trans->fs_info; |
1898 | struct btrfs_delayed_ref_root *delayed_refs; |
1899 | int ret; |
1900 | |
1901 | delayed_refs = &trans->transaction->delayed_refs; |
1902 | |
1903 | ret = run_and_cleanup_extent_op(trans, head); |
1904 | if (ret < 0) { |
1905 | unselect_delayed_ref_head(delayed_refs, head); |
		btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
1907 | return ret; |
1908 | } else if (ret) { |
1909 | return ret; |
1910 | } |
1911 | |
1912 | /* |
1913 | * Need to drop our head ref lock and re-acquire the delayed ref lock |
1914 | * and then re-check to make sure nobody got added. |
1915 | */ |
	spin_unlock(&head->lock);
	spin_lock(&delayed_refs->lock);
	spin_lock(&head->lock);
1919 | if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root) || head->extent_op) { |
		spin_unlock(&head->lock);
		spin_unlock(&delayed_refs->lock);
1922 | return 1; |
1923 | } |
1924 | btrfs_delete_ref_head(delayed_refs, head); |
	spin_unlock(&head->lock);
	spin_unlock(&delayed_refs->lock);
1927 | |
1928 | if (head->must_insert_reserved) { |
		btrfs_pin_extent(trans, head->bytenr, head->num_bytes, 1);
		if (head->is_data) {
			struct btrfs_root *csum_root;

			csum_root = btrfs_csum_root(fs_info, head->bytenr);
			ret = btrfs_del_csums(trans, csum_root, head->bytenr,
					      head->num_bytes);
1936 | } |
1937 | } |
1938 | |
1939 | *bytes_released += btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head); |
1940 | |
	trace_run_delayed_ref_head(fs_info, head, 0);
1942 | btrfs_delayed_ref_unlock(head); |
1943 | btrfs_put_delayed_ref_head(head); |
1944 | return ret; |
1945 | } |
1946 | |
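/*
 * Select the next ref head to process and take its mutex.  Returns NULL
 * when no head is ready, or ERR_PTR(-EAGAIN) if the head was freed while
 * we waited for the mutex and the caller should pick another one.
 */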
1947 | static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head( |
1948 | struct btrfs_trans_handle *trans) |
1949 | { |
1950 | struct btrfs_delayed_ref_root *delayed_refs = |
1951 | &trans->transaction->delayed_refs; |
1952 | struct btrfs_delayed_ref_head *head = NULL; |
1953 | int ret; |
1954 | |
	spin_lock(&delayed_refs->lock);
	head = btrfs_select_ref_head(delayed_refs);
	if (!head) {
		spin_unlock(&delayed_refs->lock);
1959 | return head; |
1960 | } |
1961 | |
1962 | /* |
1963 | * Grab the lock that says we are going to process all the refs for |
1964 | * this head |
1965 | */ |
1966 | ret = btrfs_delayed_ref_lock(delayed_refs, head); |
	spin_unlock(&delayed_refs->lock);
1968 | |
1969 | /* |
1970 | * We may have dropped the spin lock to get the head mutex lock, and |
1971 | * that might have given someone else time to free the head. If that's |
1972 | * true, it has been removed from our list and we can move on. |
1973 | */ |
1974 | if (ret == -EAGAIN) |
		head = ERR_PTR(-EAGAIN);
1976 | |
1977 | return head; |
1978 | } |
1979 | |
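/*
 * Run every delayed ref queued on @locked_ref, merging add/drop pairs as
 * we go.  Returns 0 once the head is drained, -EAGAIN if a ref is still
 * blocked by a tree mod log sequence number, or a negative errno on
 * failure; in the latter two cases the head has already been unselected.
 */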
1980 | static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans, |
1981 | struct btrfs_delayed_ref_head *locked_ref, |
1982 | u64 *bytes_released) |
1983 | { |
1984 | struct btrfs_fs_info *fs_info = trans->fs_info; |
1985 | struct btrfs_delayed_ref_root *delayed_refs; |
1986 | struct btrfs_delayed_extent_op *extent_op; |
1987 | struct btrfs_delayed_ref_node *ref; |
1988 | bool must_insert_reserved; |
1989 | int ret; |
1990 | |
1991 | delayed_refs = &trans->transaction->delayed_refs; |
1992 | |
1993 | lockdep_assert_held(&locked_ref->mutex); |
1994 | lockdep_assert_held(&locked_ref->lock); |
1995 | |
	while ((ref = select_delayed_ref(locked_ref))) {
		if (ref->seq &&
		    btrfs_check_delayed_seq(fs_info, ref->seq)) {
			spin_unlock(&locked_ref->lock);
			unselect_delayed_ref_head(delayed_refs, locked_ref);
			return -EAGAIN;
		}

		rb_erase_cached(&ref->ref_node, &locked_ref->ref_tree);
		RB_CLEAR_NODE(&ref->ref_node);
		if (!list_empty(&ref->add_list))
			list_del(&ref->add_list);
2008 | /* |
2009 | * When we play the delayed ref, also correct the ref_mod on |
2010 | * head |
2011 | */ |
2012 | switch (ref->action) { |
2013 | case BTRFS_ADD_DELAYED_REF: |
2014 | case BTRFS_ADD_DELAYED_EXTENT: |
2015 | locked_ref->ref_mod -= ref->ref_mod; |
2016 | break; |
2017 | case BTRFS_DROP_DELAYED_REF: |
2018 | locked_ref->ref_mod += ref->ref_mod; |
2019 | break; |
2020 | default: |
2021 | WARN_ON(1); |
2022 | } |
		atomic_dec(&delayed_refs->num_entries);
2024 | |
2025 | /* |
2026 | * Record the must_insert_reserved flag before we drop the |
2027 | * spin lock. |
2028 | */ |
2029 | must_insert_reserved = locked_ref->must_insert_reserved; |
2030 | locked_ref->must_insert_reserved = false; |
2031 | |
2032 | extent_op = locked_ref->extent_op; |
2033 | locked_ref->extent_op = NULL; |
		spin_unlock(&locked_ref->lock);

		ret = run_one_delayed_ref(trans, locked_ref, ref, extent_op,
					  must_insert_reserved);
		btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
		*bytes_released += btrfs_calc_delayed_ref_bytes(fs_info, 1);

		btrfs_free_delayed_extent_op(extent_op);
		if (ret) {
			unselect_delayed_ref_head(delayed_refs, locked_ref);
2044 | btrfs_put_delayed_ref(ref); |
2045 | return ret; |
2046 | } |
2047 | |
2048 | btrfs_put_delayed_ref(ref); |
2049 | cond_resched(); |
2050 | |
		spin_lock(&locked_ref->lock);
		btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref);
2053 | } |
2054 | |
2055 | return 0; |
2056 | } |
2057 | |
2058 | /* |
2059 | * Returns 0 on success or if called with an already aborted transaction. |
2060 | * Returns -ENOMEM or -EIO on failure and will abort the transaction. |
2061 | */ |
2062 | static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, |
2063 | u64 min_bytes) |
2064 | { |
2065 | struct btrfs_fs_info *fs_info = trans->fs_info; |
2066 | struct btrfs_delayed_ref_root *delayed_refs; |
2067 | struct btrfs_delayed_ref_head *locked_ref = NULL; |
2068 | int ret; |
2069 | unsigned long count = 0; |
2070 | unsigned long max_count = 0; |
2071 | u64 bytes_processed = 0; |
2072 | |
2073 | delayed_refs = &trans->transaction->delayed_refs; |
2074 | if (min_bytes == 0) { |
2075 | max_count = delayed_refs->num_heads_ready; |
2076 | min_bytes = U64_MAX; |
2077 | } |
2078 | |
2079 | do { |
2080 | if (!locked_ref) { |
2081 | locked_ref = btrfs_obtain_ref_head(trans); |
			if (IS_ERR_OR_NULL(locked_ref)) {
				if (PTR_ERR(locked_ref) == -EAGAIN) {
2084 | continue; |
2085 | } else { |
2086 | break; |
2087 | } |
2088 | } |
2089 | count++; |
2090 | } |
2091 | /* |
2092 | * We need to try and merge add/drops of the same ref since we |
2093 | * can run into issues with relocate dropping the implicit ref |
2094 | * and then it being added back again before the drop can |
2095 | * finish. If we merged anything we need to re-loop so we can |
2096 | * get a good ref. |
2097 | * Or we can get node references of the same type that weren't |
2098 | * merged when created due to bumps in the tree mod seq, and |
2099 | * we need to merge them to prevent adding an inline extent |
2100 | * backref before dropping it (triggering a BUG_ON at |
2101 | * insert_inline_extent_backref()). |
2102 | */ |
		spin_lock(&locked_ref->lock);
		btrfs_merge_delayed_refs(fs_info, delayed_refs, locked_ref);
2105 | |
		ret = btrfs_run_delayed_refs_for_head(trans, locked_ref, &bytes_processed);
2107 | if (ret < 0 && ret != -EAGAIN) { |
2108 | /* |
2109 | * Error, btrfs_run_delayed_refs_for_head already |
2110 | * unlocked everything so just bail out |
2111 | */ |
2112 | return ret; |
2113 | } else if (!ret) { |
2114 | /* |
2115 | * Success, perform the usual cleanup of a processed |
2116 | * head |
2117 | */ |
			ret = cleanup_ref_head(trans, locked_ref, &bytes_processed);
			if (ret > 0) {
2120 | /* We dropped our lock, we need to loop. */ |
2121 | ret = 0; |
2122 | continue; |
2123 | } else if (ret) { |
2124 | return ret; |
2125 | } |
2126 | } |
2127 | |
2128 | /* |
2129 | * Either success case or btrfs_run_delayed_refs_for_head |
2130 | * returned -EAGAIN, meaning we need to select another head |
2131 | */ |
2132 | |
2133 | locked_ref = NULL; |
2134 | cond_resched(); |
2135 | } while ((min_bytes != U64_MAX && bytes_processed < min_bytes) || |
2136 | (max_count > 0 && count < max_count) || |
2137 | locked_ref); |
2138 | |
2139 | return 0; |
2140 | } |
2141 | |
2142 | #ifdef SCRAMBLE_DELAYED_REFS |
2143 | /* |
2144 | * Normally delayed refs get processed in ascending bytenr order. This |
2145 | * correlates in most cases to the order added. To expose dependencies on this |
2146 | * order, we start to process the tree in the middle instead of the beginning |
2147 | */ |
2148 | static u64 find_middle(struct rb_root *root) |
2149 | { |
2150 | struct rb_node *n = root->rb_node; |
2151 | struct btrfs_delayed_ref_node *entry; |
2152 | int alt = 1; |
2153 | u64 middle; |
2154 | u64 first = 0, last = 0; |
2155 | |
2156 | n = rb_first(root); |
2157 | if (n) { |
2158 | entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); |
2159 | first = entry->bytenr; |
2160 | } |
2161 | n = rb_last(root); |
2162 | if (n) { |
2163 | entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); |
2164 | last = entry->bytenr; |
2165 | } |
2166 | n = root->rb_node; |
2167 | |
2168 | while (n) { |
2169 | entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node); |
2170 | WARN_ON(!entry->in_tree); |
2171 | |
2172 | middle = entry->bytenr; |
2173 | |
2174 | if (alt) |
2175 | n = n->rb_left; |
2176 | else |
2177 | n = n->rb_right; |
2178 | |
2179 | alt = 1 - alt; |
2180 | } |
2181 | return middle; |
2182 | } |
2183 | #endif |
2184 | |
2185 | /* |
2186 | * Start processing the delayed reference count updates and extent insertions |
2187 | * we have queued up so far. |
2188 | * |
2189 | * @trans: Transaction handle. |
2190 | * @min_bytes: How many bytes of delayed references to process. After this |
2191 | * many bytes we stop processing delayed references if there are |
2192 | * any more. If 0 it means to run all existing delayed references, |
2193 | * but not new ones added after running all existing ones. |
2194 | * Use (u64)-1 (U64_MAX) to run all existing delayed references |
2195 | * plus any new ones that are added. |
2196 | * |
2197 | * Returns 0 on success or if called with an aborted transaction |
2198 | * Returns <0 on error and aborts the transaction |
2199 | */ |
2200 | int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, u64 min_bytes) |
2201 | { |
2202 | struct btrfs_fs_info *fs_info = trans->fs_info; |
2203 | struct btrfs_delayed_ref_root *delayed_refs; |
2204 | int ret; |
2205 | |
2206 | /* We'll clean this up in btrfs_cleanup_transaction */ |
2207 | if (TRANS_ABORTED(trans)) |
2208 | return 0; |
2209 | |
2210 | if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags)) |
2211 | return 0; |
2212 | |
2213 | delayed_refs = &trans->transaction->delayed_refs; |
2214 | again: |
2215 | #ifdef SCRAMBLE_DELAYED_REFS |
2216 | delayed_refs->run_delayed_start = find_middle(&delayed_refs->root); |
2217 | #endif |
2218 | ret = __btrfs_run_delayed_refs(trans, min_bytes); |
2219 | if (ret < 0) { |
2220 | btrfs_abort_transaction(trans, ret); |
2221 | return ret; |
2222 | } |
2223 | |
2224 | if (min_bytes == U64_MAX) { |
2225 | btrfs_create_pending_block_groups(trans); |
2226 | |
		spin_lock(&delayed_refs->lock);
		if (RB_EMPTY_ROOT(&delayed_refs->href_root.rb_root)) {
			spin_unlock(&delayed_refs->lock);
			return 0;
		}
		spin_unlock(&delayed_refs->lock);
2233 | |
2234 | cond_resched(); |
2235 | goto again; |
2236 | } |
2237 | |
2238 | return 0; |
2239 | } |
2240 | |
2241 | int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, |
2242 | struct extent_buffer *eb, u64 flags) |
2243 | { |
2244 | struct btrfs_delayed_extent_op *extent_op; |
2245 | int level = btrfs_header_level(eb); |
2246 | int ret; |
2247 | |
2248 | extent_op = btrfs_alloc_delayed_extent_op(); |
2249 | if (!extent_op) |
2250 | return -ENOMEM; |
2251 | |
2252 | extent_op->flags_to_set = flags; |
2253 | extent_op->update_flags = true; |
2254 | extent_op->update_key = false; |
2255 | extent_op->level = level; |
2256 | |
	ret = btrfs_add_delayed_extent_op(trans, eb->start, eb->len, extent_op);
	if (ret)
		btrfs_free_delayed_extent_op(extent_op);
2260 | return ret; |
2261 | } |
2262 | |
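/*
 * Check the current transaction's delayed refs for a reference to @bytenr
 * from anywhere other than the given root/objectid/offset.  Returns 1 if
 * such a cross reference exists, 0 if not, and -EAGAIN if the head's
 * mutex is contended and the search must be restarted.
 */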
2263 | static noinline int check_delayed_ref(struct btrfs_root *root, |
2264 | struct btrfs_path *path, |
2265 | u64 objectid, u64 offset, u64 bytenr) |
2266 | { |
2267 | struct btrfs_delayed_ref_head *head; |
2268 | struct btrfs_delayed_ref_node *ref; |
2269 | struct btrfs_delayed_data_ref *data_ref; |
2270 | struct btrfs_delayed_ref_root *delayed_refs; |
2271 | struct btrfs_transaction *cur_trans; |
2272 | struct rb_node *node; |
2273 | int ret = 0; |
2274 | |
	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans)
		refcount_inc(&cur_trans->use_count);
	spin_unlock(&root->fs_info->trans_lock);
2280 | if (!cur_trans) |
2281 | return 0; |
2282 | |
2283 | delayed_refs = &cur_trans->delayed_refs; |
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
	if (!head) {
		spin_unlock(&delayed_refs->lock);
		btrfs_put_transaction(cur_trans);
2289 | return 0; |
2290 | } |
2291 | |
	if (!mutex_trylock(&head->mutex)) {
		if (path->nowait) {
			spin_unlock(&delayed_refs->lock);
			btrfs_put_transaction(cur_trans);
			return -EAGAIN;
		}

		refcount_inc(&head->refs);
		spin_unlock(&delayed_refs->lock);

		btrfs_release_path(path);

		/*
		 * Mutex was contended, block until it's released and let
		 * caller try again
		 */
		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		btrfs_put_transaction(cur_trans);
		return -EAGAIN;
2313 | } |
	spin_unlock(&delayed_refs->lock);

	spin_lock(&head->lock);
2317 | /* |
2318 | * XXX: We should replace this with a proper search function in the |
2319 | * future. |
2320 | */ |
2321 | for (node = rb_first_cached(&head->ref_tree); node; |
2322 | node = rb_next(node)) { |
2323 | ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node); |
2324 | /* If it's a shared ref we know a cross reference exists */ |
2325 | if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) { |
2326 | ret = 1; |
2327 | break; |
2328 | } |
2329 | |
		data_ref = btrfs_delayed_node_to_data_ref(ref);
2331 | |
2332 | /* |
2333 | * If our ref doesn't match the one we're currently looking at |
2334 | * then we have a cross reference. |
2335 | */ |
2336 | if (data_ref->root != root->root_key.objectid || |
2337 | data_ref->objectid != objectid || |
2338 | data_ref->offset != offset) { |
2339 | ret = 1; |
2340 | break; |
2341 | } |
2342 | } |
	spin_unlock(&head->lock);
	mutex_unlock(&head->mutex);
	btrfs_put_transaction(cur_trans);
2346 | return ret; |
2347 | } |
2348 | |
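/*
 * Same check against the committed extent tree.  Returns 0 when the
 * extent is referenced only by the given root/objectid/offset, 1 when it
 * is (or may be) shared, and -ENOENT when no matching extent item was
 * found, in which case the caller falls back to the delayed ref check.
 */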
2349 | static noinline int check_committed_ref(struct btrfs_root *root, |
2350 | struct btrfs_path *path, |
2351 | u64 objectid, u64 offset, u64 bytenr, |
2352 | bool strict) |
2353 | { |
2354 | struct btrfs_fs_info *fs_info = root->fs_info; |
2355 | struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr); |
2356 | struct extent_buffer *leaf; |
2357 | struct btrfs_extent_data_ref *ref; |
2358 | struct btrfs_extent_inline_ref *iref; |
2359 | struct btrfs_extent_item *ei; |
2360 | struct btrfs_key key; |
2361 | u32 item_size; |
2362 | u32 expected_size; |
2363 | int type; |
2364 | int ret; |
2365 | |
2366 | key.objectid = bytenr; |
2367 | key.offset = (u64)-1; |
2368 | key.type = BTRFS_EXTENT_ITEM_KEY; |
2369 | |
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2371 | if (ret < 0) |
2372 | goto out; |
2373 | BUG_ON(ret == 0); /* Corruption */ |
2374 | |
2375 | ret = -ENOENT; |
2376 | if (path->slots[0] == 0) |
2377 | goto out; |
2378 | |
2379 | path->slots[0]--; |
2380 | leaf = path->nodes[0]; |
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2382 | |
2383 | if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY) |
2384 | goto out; |
2385 | |
2386 | ret = 1; |
	item_size = btrfs_item_size(leaf, path->slots[0]);
2388 | ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); |
2389 | expected_size = sizeof(*ei) + btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY); |
2390 | |
2391 | /* No inline refs; we need to bail before checking for owner ref. */ |
2392 | if (item_size == sizeof(*ei)) |
2393 | goto out; |
2394 | |
2395 | /* Check for an owner ref; skip over it to the real inline refs. */ |
2396 | iref = (struct btrfs_extent_inline_ref *)(ei + 1); |
	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
2398 | if (btrfs_fs_incompat(fs_info, SIMPLE_QUOTA) && type == BTRFS_EXTENT_OWNER_REF_KEY) { |
2399 | expected_size += btrfs_extent_inline_ref_size(BTRFS_EXTENT_OWNER_REF_KEY); |
2400 | iref = (struct btrfs_extent_inline_ref *)(iref + 1); |
2401 | } |
2402 | |
2403 | /* If extent item has more than 1 inline ref then it's shared */ |
2404 | if (item_size != expected_size) |
2405 | goto out; |
2406 | |
2407 | /* |
2408 | * If extent created before last snapshot => it's shared unless the |
2409 | * snapshot has been deleted. Use the heuristic if strict is false. |
2410 | */ |
2411 | if (!strict && |
	    (btrfs_extent_generation(leaf, ei) <=
	     btrfs_root_last_snapshot(&root->root_item)))
2414 | goto out; |
2415 | |
2416 | /* If this extent has SHARED_DATA_REF then it's shared */ |
	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_DATA);
2418 | if (type != BTRFS_EXTENT_DATA_REF_KEY) |
2419 | goto out; |
2420 | |
2421 | ref = (struct btrfs_extent_data_ref *)(&iref->offset); |
	if (btrfs_extent_refs(leaf, ei) !=
	    btrfs_extent_data_ref_count(leaf, ref) ||
	    btrfs_extent_data_ref_root(leaf, ref) !=
	    root->root_key.objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
2428 | goto out; |
2429 | |
2430 | ret = 0; |
2431 | out: |
2432 | return ret; |
2433 | } |
2434 | |
2435 | int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset, |
2436 | u64 bytenr, bool strict, struct btrfs_path *path) |
2437 | { |
2438 | int ret; |
2439 | |
2440 | do { |
2441 | ret = check_committed_ref(root, path, objectid, |
2442 | offset, bytenr, strict); |
2443 | if (ret && ret != -ENOENT) |
2444 | goto out; |
2445 | |
2446 | ret = check_delayed_ref(root, path, objectid, offset, bytenr); |
2447 | } while (ret == -EAGAIN); |
2448 | |
2449 | out: |
	btrfs_release_path(path);
2451 | if (btrfs_is_data_reloc_root(root)) |
2452 | WARN_ON(ret > 0); |
2453 | return ret; |
2454 | } |
2455 | |
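/*
 * Add (inc=1) or drop (inc=0) one reference for every data extent and
 * child tree block referenced by @buf.  With @full_backref set the refs
 * are shared ones keyed on the block's own bytenr rather than on the
 * owning root.
 */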
2456 | static int __btrfs_mod_ref(struct btrfs_trans_handle *trans, |
2457 | struct btrfs_root *root, |
2458 | struct extent_buffer *buf, |
2459 | int full_backref, int inc) |
2460 | { |
2461 | struct btrfs_fs_info *fs_info = root->fs_info; |
2462 | u64 bytenr; |
2463 | u64 num_bytes; |
2464 | u64 parent; |
2465 | u64 ref_root; |
2466 | u32 nritems; |
2467 | struct btrfs_key key; |
2468 | struct btrfs_file_extent_item *fi; |
2469 | struct btrfs_ref generic_ref = { 0 }; |
	bool for_reloc = btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC);
2471 | int i; |
2472 | int action; |
2473 | int level; |
2474 | int ret = 0; |
2475 | |
2476 | if (btrfs_is_testing(fs_info)) |
2477 | return 0; |
2478 | |
	ref_root = btrfs_header_owner(buf);
	nritems = btrfs_header_nritems(buf);
	level = btrfs_header_level(buf);
2482 | |
2483 | if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state) && level == 0) |
2484 | return 0; |
2485 | |
2486 | if (full_backref) |
2487 | parent = buf->start; |
2488 | else |
2489 | parent = 0; |
2490 | if (inc) |
2491 | action = BTRFS_ADD_DELAYED_REF; |
2492 | else |
2493 | action = BTRFS_DROP_DELAYED_REF; |
2494 | |
2495 | for (i = 0; i < nritems; i++) { |
2496 | if (level == 0) { |
			btrfs_item_key_to_cpu(buf, &key, i);
			if (key.type != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (bytenr == 0)
				continue;

			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
			key.offset -= btrfs_file_extent_offset(buf, fi);
			btrfs_init_generic_ref(&generic_ref, action, bytenr,
					       num_bytes, parent, ref_root);
			btrfs_init_data_ref(&generic_ref, ref_root, key.objectid,
					    key.offset, root->root_key.objectid,
					    for_reloc);
			if (inc)
				ret = btrfs_inc_extent_ref(trans, &generic_ref);
			else
				ret = btrfs_free_extent(trans, &generic_ref);
2520 | if (ret) |
2521 | goto fail; |
2522 | } else { |
			bytenr = btrfs_node_blockptr(buf, i);
			num_bytes = fs_info->nodesize;
			/* We don't know the owning_root, use 0. */
			btrfs_init_generic_ref(&generic_ref, action, bytenr,
					       num_bytes, parent, 0);
			btrfs_init_tree_ref(&generic_ref, level - 1, ref_root,
					    root->root_key.objectid, for_reloc);
			if (inc)
				ret = btrfs_inc_extent_ref(trans, &generic_ref);
			else
				ret = btrfs_free_extent(trans, &generic_ref);
2534 | if (ret) |
2535 | goto fail; |
2536 | } |
2537 | } |
2538 | return 0; |
2539 | fail: |
2540 | return ret; |
2541 | } |
2542 | |
2543 | int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, |
2544 | struct extent_buffer *buf, int full_backref) |
2545 | { |
	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2547 | } |
2548 | |
2549 | int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, |
2550 | struct extent_buffer *buf, int full_backref) |
2551 | { |
	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
2553 | } |
2554 | |
2555 | static u64 get_alloc_profile_by_root(struct btrfs_root *root, int data) |
2556 | { |
2557 | struct btrfs_fs_info *fs_info = root->fs_info; |
2558 | u64 flags; |
2559 | u64 ret; |
2560 | |
2561 | if (data) |
2562 | flags = BTRFS_BLOCK_GROUP_DATA; |
2563 | else if (root == fs_info->chunk_root) |
2564 | flags = BTRFS_BLOCK_GROUP_SYSTEM; |
2565 | else |
2566 | flags = BTRFS_BLOCK_GROUP_METADATA; |
2567 | |
	ret = btrfs_get_alloc_profile(fs_info, flags);
2569 | return ret; |
2570 | } |
2571 | |
2572 | static u64 first_logical_byte(struct btrfs_fs_info *fs_info) |
2573 | { |
2574 | struct rb_node *leftmost; |
2575 | u64 bytenr = 0; |
2576 | |
2577 | read_lock(&fs_info->block_group_cache_lock); |
2578 | /* Get the block group with the lowest logical start address. */ |
2579 | leftmost = rb_first_cached(&fs_info->block_group_cache_tree); |
2580 | if (leftmost) { |
2581 | struct btrfs_block_group *bg; |
2582 | |
2583 | bg = rb_entry(leftmost, struct btrfs_block_group, cache_node); |
2584 | bytenr = bg->start; |
2585 | } |
2586 | read_unlock(&fs_info->block_group_cache_lock); |
2587 | |
2588 | return bytenr; |
2589 | } |
2590 | |
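/*
 * Move @num_bytes of the block group into the pinned counters (dropping
 * it from the reserved counters if @reserved) and mark the range dirty in
 * the transaction's pinned_extents tree so it is unpinned at commit time.
 */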
2591 | static int pin_down_extent(struct btrfs_trans_handle *trans, |
2592 | struct btrfs_block_group *cache, |
2593 | u64 bytenr, u64 num_bytes, int reserved) |
2594 | { |
2595 | struct btrfs_fs_info *fs_info = cache->fs_info; |
2596 | |
	spin_lock(&cache->space_info->lock);
	spin_lock(&cache->lock);
	cache->pinned += num_bytes;
	btrfs_space_info_update_bytes_pinned(fs_info, cache->space_info,
					     num_bytes);
	if (reserved) {
		cache->reserved -= num_bytes;
		cache->space_info->bytes_reserved -= num_bytes;
	}
	spin_unlock(&cache->lock);
	spin_unlock(&cache->space_info->lock);

	set_extent_bit(&trans->transaction->pinned_extents, bytenr,
		       bytenr + num_bytes - 1, EXTENT_DIRTY, NULL);
2611 | return 0; |
2612 | } |
2613 | |
2614 | int btrfs_pin_extent(struct btrfs_trans_handle *trans, |
2615 | u64 bytenr, u64 num_bytes, int reserved) |
2616 | { |
2617 | struct btrfs_block_group *cache; |
2618 | |
	cache = btrfs_lookup_block_group(trans->fs_info, bytenr);
2620 | BUG_ON(!cache); /* Logic error */ |
2621 | |
2622 | pin_down_extent(trans, cache, bytenr, num_bytes, reserved); |
2623 | |
2624 | btrfs_put_block_group(cache); |
2625 | return 0; |
2626 | } |
2627 | |
2628 | int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans, |
2629 | const struct extent_buffer *eb) |
2630 | { |
2631 | struct btrfs_block_group *cache; |
2632 | int ret; |
2633 | |
	cache = btrfs_lookup_block_group(trans->fs_info, eb->start);
	if (!cache)
		return -EINVAL;

	/*
	 * Fully cache the free space first so that our pin removes the free space
	 * from the cache.
	 */
	ret = btrfs_cache_block_group(cache, true);
	if (ret)
		goto out;

	pin_down_extent(trans, cache, eb->start, eb->len, 0);

	/* remove us from the free space cache (if we're there at all) */
	ret = btrfs_remove_free_space(cache, eb->start, eb->len);
2650 | out: |
2651 | btrfs_put_block_group(cache); |
2652 | return ret; |
2653 | } |
2654 | |
2655 | static int __exclude_logged_extent(struct btrfs_fs_info *fs_info, |
2656 | u64 start, u64 num_bytes) |
2657 | { |
2658 | int ret; |
2659 | struct btrfs_block_group *block_group; |
2660 | |
	block_group = btrfs_lookup_block_group(fs_info, start);
	if (!block_group)
		return -EINVAL;

	ret = btrfs_cache_block_group(block_group, true);
	if (ret)
		goto out;

	ret = btrfs_remove_free_space(block_group, start, num_bytes);
out:
	btrfs_put_block_group(block_group);
2672 | return ret; |
2673 | } |
2674 | |
2675 | int btrfs_exclude_logged_extents(struct extent_buffer *eb) |
2676 | { |
2677 | struct btrfs_fs_info *fs_info = eb->fs_info; |
2678 | struct btrfs_file_extent_item *item; |
2679 | struct btrfs_key key; |
2680 | int found_type; |
2681 | int i; |
2682 | int ret = 0; |
2683 | |
2684 | if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) |
2685 | return 0; |
2686 | |
2687 | for (i = 0; i < btrfs_header_nritems(eb); i++) { |
		btrfs_item_key_to_cpu(eb, &key, i);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
		found_type = btrfs_file_extent_type(eb, item);
		if (found_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			continue;
		key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ret = __exclude_logged_extent(fs_info, key.objectid, key.offset);
2700 | if (ret) |
2701 | break; |
2702 | } |
2703 | |
2704 | return ret; |
2705 | } |
2706 | |
2707 | static void |
2708 | btrfs_inc_block_group_reservations(struct btrfs_block_group *bg) |
2709 | { |
	atomic_inc(&bg->reservations);
2711 | } |
2712 | |
2713 | /* |
2714 | * Returns the free cluster for the given space info and sets empty_cluster to |
2715 | * what it should be based on the mount options. |
2716 | */ |
2717 | static struct btrfs_free_cluster * |
2718 | fetch_cluster_info(struct btrfs_fs_info *fs_info, |
2719 | struct btrfs_space_info *space_info, u64 *empty_cluster) |
2720 | { |
2721 | struct btrfs_free_cluster *ret = NULL; |
2722 | |
2723 | *empty_cluster = 0; |
2724 | if (btrfs_mixed_space_info(space_info)) |
2725 | return ret; |
2726 | |
2727 | if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) { |
2728 | ret = &fs_info->meta_alloc_cluster; |
2729 | if (btrfs_test_opt(fs_info, SSD)) |
2730 | *empty_cluster = SZ_2M; |
2731 | else |
2732 | *empty_cluster = SZ_64K; |
2733 | } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && |
2734 | btrfs_test_opt(fs_info, SSD_SPREAD)) { |
2735 | *empty_cluster = SZ_2M; |
2736 | ret = &fs_info->data_alloc_cluster; |
2737 | } |
2738 | |
2739 | return ret; |
2740 | } |
2741 | |
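/*
 * Walk [start, end] of previously pinned space, handing it back to the
 * free space cache (when @return_free_space), refilling the global block
 * reserve and waking any space tickets that can now be satisfied.
 */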
2742 | static int unpin_extent_range(struct btrfs_fs_info *fs_info, |
2743 | u64 start, u64 end, |
2744 | const bool return_free_space) |
2745 | { |
2746 | struct btrfs_block_group *cache = NULL; |
2747 | struct btrfs_space_info *space_info; |
2748 | struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; |
2749 | struct btrfs_free_cluster *cluster = NULL; |
2750 | u64 len; |
2751 | u64 total_unpinned = 0; |
2752 | u64 empty_cluster = 0; |
2753 | bool readonly; |
2754 | |
2755 | while (start <= end) { |
2756 | readonly = false; |
2757 | if (!cache || |
2758 | start >= cache->start + cache->length) { |
2759 | if (cache) |
2760 | btrfs_put_block_group(cache); |
2761 | total_unpinned = 0; |
			cache = btrfs_lookup_block_group(fs_info, start);
			BUG_ON(!cache); /* Logic error */

			cluster = fetch_cluster_info(fs_info,
						     cache->space_info,
						     &empty_cluster);
2768 | empty_cluster <<= 1; |
2769 | } |
2770 | |
2771 | len = cache->start + cache->length - start; |
2772 | len = min(len, end + 1 - start); |
2773 | |
2774 | if (return_free_space) |
			btrfs_add_free_space(cache, start, len);
2776 | |
2777 | start += len; |
2778 | total_unpinned += len; |
2779 | space_info = cache->space_info; |
2780 | |
2781 | /* |
2782 | * If this space cluster has been marked as fragmented and we've |
2783 | * unpinned enough in this block group to potentially allow a |
2784 | * cluster to be created inside of it go ahead and clear the |
2785 | * fragmented check. |
2786 | */ |
2787 | if (cluster && cluster->fragmented && |
2788 | total_unpinned > empty_cluster) { |
			spin_lock(&cluster->lock);
			cluster->fragmented = 0;
			spin_unlock(&cluster->lock);
2792 | } |
2793 | |
		spin_lock(&space_info->lock);
		spin_lock(&cache->lock);
		cache->pinned -= len;
		btrfs_space_info_update_bytes_pinned(fs_info, space_info, -len);
2798 | space_info->max_extent_size = 0; |
2799 | if (cache->ro) { |
2800 | space_info->bytes_readonly += len; |
2801 | readonly = true; |
2802 | } else if (btrfs_is_zoned(fs_info)) { |
2803 | /* Need reset before reusing in a zoned block group */ |
2804 | space_info->bytes_zone_unusable += len; |
2805 | readonly = true; |
2806 | } |
		spin_unlock(&cache->lock);
2808 | if (!readonly && return_free_space && |
2809 | global_rsv->space_info == space_info) { |
			spin_lock(&global_rsv->lock);
2811 | if (!global_rsv->full) { |
2812 | u64 to_add = min(len, global_rsv->size - |
2813 | global_rsv->reserved); |
2814 | |
2815 | global_rsv->reserved += to_add; |
				btrfs_space_info_update_bytes_may_use(fs_info,
						space_info, to_add);
2818 | if (global_rsv->reserved >= global_rsv->size) |
2819 | global_rsv->full = 1; |
2820 | len -= to_add; |
2821 | } |
			spin_unlock(&global_rsv->lock);
2823 | } |
2824 | /* Add to any tickets we may have */ |
2825 | if (!readonly && return_free_space && len) |
2826 | btrfs_try_granting_tickets(fs_info, space_info); |
		spin_unlock(&space_info->lock);
2828 | } |
2829 | |
2830 | if (cache) |
2831 | btrfs_put_block_group(cache); |
2832 | return 0; |
2833 | } |
2834 | |
2835 | int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans) |
2836 | { |
2837 | struct btrfs_fs_info *fs_info = trans->fs_info; |
2838 | struct btrfs_block_group *block_group, *tmp; |
2839 | struct list_head *deleted_bgs; |
2840 | struct extent_io_tree *unpin; |
2841 | u64 start; |
2842 | u64 end; |
2843 | int ret; |
2844 | |
2845 | unpin = &trans->transaction->pinned_extents; |
2846 | |
2847 | while (!TRANS_ABORTED(trans)) { |
2848 | struct extent_state *cached_state = NULL; |
2849 | |
2850 | mutex_lock(&fs_info->unused_bg_unpin_mutex); |
		if (!find_first_extent_bit(unpin, 0, &start, &end,
					   EXTENT_DIRTY, &cached_state)) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
2854 | break; |
2855 | } |
2856 | |
2857 | if (btrfs_test_opt(fs_info, DISCARD_SYNC)) |
			ret = btrfs_discard_extent(fs_info, start,
						   end + 1 - start, NULL);

		clear_extent_dirty(unpin, start, end, &cached_state);
		unpin_extent_range(fs_info, start, end, true);
		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
		free_extent_state(cached_state);
2865 | cond_resched(); |
2866 | } |
2867 | |
2868 | if (btrfs_test_opt(fs_info, DISCARD_ASYNC)) { |
		btrfs_discard_calc_delay(&fs_info->discard_ctl);
		btrfs_discard_schedule_work(&fs_info->discard_ctl, true);
2871 | } |
2872 | |
2873 | /* |
2874 | * Transaction is finished. We don't need the lock anymore. We |
2875 | * do need to clean up the block groups in case of a transaction |
2876 | * abort. |
2877 | */ |
2878 | deleted_bgs = &trans->transaction->deleted_bgs; |
2879 | list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) { |
2880 | u64 trimmed = 0; |
2881 | |
2882 | ret = -EROFS; |
2883 | if (!TRANS_ABORTED(trans)) |
			ret = btrfs_discard_extent(fs_info,
						   block_group->start,
						   block_group->length,
						   &trimmed);
2888 | |
		list_del_init(&block_group->bg_list);
		btrfs_unfreeze_block_group(block_group);
		btrfs_put_block_group(block_group);
2892 | |
2893 | if (ret) { |
			const char *errstr = btrfs_decode_error(ret);
			btrfs_warn(fs_info,
			   "discard failed while removing blockgroup: errno=%d %s",
				   ret, errstr);
2898 | } |
2899 | } |
2900 | |
2901 | return 0; |
2902 | } |
2903 | |
2904 | /* |
2905 | * Parse an extent item's inline extents looking for a simple quotas owner ref. |
2906 | * |
2907 | * @fs_info: the btrfs_fs_info for this mount |
2908 | * @leaf: a leaf in the extent tree containing the extent item |
2909 | * @slot: the slot in the leaf where the extent item is found |
2910 | * |
2911 | * Returns the objectid of the root that originally allocated the extent item |
2912 | * if the inline owner ref is expected and present, otherwise 0. |
2913 | * |
2914 | * If an extent item has an owner ref item, it will be the first inline ref |
2915 | * item. Therefore the logic is to check whether there are any inline ref |
2916 | * items, then check the type of the first one. |
2917 | */ |
2918 | u64 btrfs_get_extent_owner_root(struct btrfs_fs_info *fs_info, |
2919 | struct extent_buffer *leaf, int slot) |
2920 | { |
2921 | struct btrfs_extent_item *ei; |
2922 | struct btrfs_extent_inline_ref *iref; |
2923 | struct btrfs_extent_owner_ref *oref; |
2924 | unsigned long ptr; |
2925 | unsigned long end; |
2926 | int type; |
2927 | |
2928 | if (!btrfs_fs_incompat(fs_info, SIMPLE_QUOTA)) |
2929 | return 0; |
2930 | |
2931 | ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item); |
2932 | ptr = (unsigned long)(ei + 1); |
	end = (unsigned long)ei + btrfs_item_size(leaf, slot);
2934 | |
2935 | /* No inline ref items of any kind, can't check type. */ |
2936 | if (ptr == end) |
2937 | return 0; |
2938 | |
2939 | iref = (struct btrfs_extent_inline_ref *)ptr; |
	type = btrfs_get_extent_inline_ref_type(leaf, iref, BTRFS_REF_TYPE_ANY);
2941 | |
2942 | /* We found an owner ref, get the root out of it. */ |
2943 | if (type == BTRFS_EXTENT_OWNER_REF_KEY) { |
2944 | oref = (struct btrfs_extent_owner_ref *)(&iref->offset); |
		return btrfs_extent_owner_ref_root_id(leaf, oref);
2946 | } |
2947 | |
2948 | /* We have inline refs, but not an owner ref. */ |
2949 | return 0; |
2950 | } |
2951 | |
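/*
 * Accounting done when the last reference to an extent goes away: for
 * data, delete its csum and raid-stripe items, then record the squota
 * delta, return the range to the free space tree and update the block
 * group counters.
 */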
2952 | static int do_free_extent_accounting(struct btrfs_trans_handle *trans, |
2953 | u64 bytenr, struct btrfs_squota_delta *delta) |
2954 | { |
2955 | int ret; |
2956 | u64 num_bytes = delta->num_bytes; |
2957 | |
2958 | if (delta->is_data) { |
2959 | struct btrfs_root *csum_root; |
2960 | |
		csum_root = btrfs_csum_root(trans->fs_info, bytenr);
		ret = btrfs_del_csums(trans, csum_root, bytenr, num_bytes);
2963 | if (ret) { |
2964 | btrfs_abort_transaction(trans, ret); |
2965 | return ret; |
2966 | } |
2967 | |
		ret = btrfs_delete_raid_extent(trans, bytenr, num_bytes);
2969 | if (ret) { |
2970 | btrfs_abort_transaction(trans, ret); |
2971 | return ret; |
2972 | } |
2973 | } |
2974 | |
	ret = btrfs_record_squota_delta(trans->fs_info, delta);
2976 | if (ret) { |
2977 | btrfs_abort_transaction(trans, ret); |
2978 | return ret; |
2979 | } |
2980 | |
	ret = add_to_free_space_tree(trans, bytenr, num_bytes);
2982 | if (ret) { |
2983 | btrfs_abort_transaction(trans, ret); |
2984 | return ret; |
2985 | } |
2986 | |
	ret = btrfs_update_block_group(trans, bytenr, num_bytes, false);
2988 | if (ret) |
2989 | btrfs_abort_transaction(trans, ret); |
2990 | |
2991 | return ret; |
2992 | } |
2993 | |
2994 | #define abort_and_dump(trans, path, fmt, args...) \ |
2995 | ({ \ |
2996 | btrfs_abort_transaction(trans, -EUCLEAN); \ |
2997 | btrfs_print_leaf(path->nodes[0]); \ |
2998 | btrfs_crit(trans->fs_info, fmt, ##args); \ |
2999 | }) |
3000 | |
3001 | /* |
3002 | * Drop one or more refs of @node. |
3003 | * |
3004 | * 1. Locate the extent refs. |
3005 | * It's either inline in EXTENT/METADATA_ITEM or in keyed SHARED_* item. |
3006 | * Locate it, then reduce the refs number or remove the ref line completely. |
3007 | * |
3008 | * 2. Update the refs count in EXTENT/METADATA_ITEM |
3009 | * |
3010 | * Inline backref case: |
3011 | * |
3012 | * in extent tree we have: |
3013 | * |
3014 | * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 82 |
3015 | * refs 2 gen 6 flags DATA |
3016 | * extent data backref root FS_TREE objectid 258 offset 0 count 1 |
3017 | * extent data backref root FS_TREE objectid 257 offset 0 count 1 |
3018 | * |
3019 | * This function gets called with: |
3020 | * |
3021 | * node->bytenr = 13631488 |
3022 | * node->num_bytes = 1048576 |
3023 | * root_objectid = FS_TREE |
3024 | * owner_objectid = 257 |
3025 | * owner_offset = 0 |
3026 | * refs_to_drop = 1 |
3027 | * |
 * Then we should get something like:
3029 | * |
3030 | * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 16201 itemsize 82 |
3031 | * refs 1 gen 6 flags DATA |
3032 | * extent data backref root FS_TREE objectid 258 offset 0 count 1 |
3033 | * |
3034 | * Keyed backref case: |
3035 | * |
3036 | * in extent tree we have: |
3037 | * |
3038 | * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 3971 itemsize 24 |
3039 | * refs 754 gen 6 flags DATA |
3040 | * [...] |
3041 | * item 2 key (13631488 EXTENT_DATA_REF <HASH>) itemoff 3915 itemsize 28 |
3042 | * extent data backref root FS_TREE objectid 866 offset 0 count 1 |
3043 | * |
 * This function gets called with:
3045 | * |
3046 | * node->bytenr = 13631488 |
3047 | * node->num_bytes = 1048576 |
3048 | * root_objectid = FS_TREE |
3049 | * owner_objectid = 866 |
3050 | * owner_offset = 0 |
3051 | * refs_to_drop = 1 |
3052 | * |
 * Then we should get something like:
3054 | * |
3055 | * item 0 key (13631488 EXTENT_ITEM 1048576) itemoff 3971 itemsize 24 |
3056 | * refs 753 gen 6 flags DATA |
3057 | * |
3058 | * And that (13631488 EXTENT_DATA_REF <HASH>) gets removed. |
3059 | */ |
3060 | static int __btrfs_free_extent(struct btrfs_trans_handle *trans, |
3061 | struct btrfs_delayed_ref_head *href, |
3062 | struct btrfs_delayed_ref_node *node, u64 parent, |
3063 | u64 root_objectid, u64 owner_objectid, |
3064 | u64 owner_offset, |
3065 | struct btrfs_delayed_extent_op *extent_op) |
3066 | { |
3067 | struct btrfs_fs_info *info = trans->fs_info; |
3068 | struct btrfs_key key; |
3069 | struct btrfs_path *path; |
3070 | struct btrfs_root *extent_root; |
3071 | struct extent_buffer *leaf; |
3072 | struct btrfs_extent_item *ei; |
3073 | struct btrfs_extent_inline_ref *iref; |
3074 | int ret; |
3075 | int is_data; |
3076 | int extent_slot = 0; |
3077 | int found_extent = 0; |
3078 | int num_to_del = 1; |
3079 | int refs_to_drop = node->ref_mod; |
3080 | u32 item_size; |
3081 | u64 refs; |
3082 | u64 bytenr = node->bytenr; |
3083 | u64 num_bytes = node->num_bytes; |
3084 | bool skinny_metadata = btrfs_fs_incompat(info, SKINNY_METADATA); |
3085 | u64 delayed_ref_root = href->owning_root; |
3086 | |
	extent_root = btrfs_extent_root(info, bytenr);
3088 | ASSERT(extent_root); |
3089 | |
3090 | path = btrfs_alloc_path(); |
3091 | if (!path) |
3092 | return -ENOMEM; |
3093 | |
3094 | is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID; |
3095 | |
3096 | if (!is_data && refs_to_drop != 1) { |
3097 | btrfs_crit(info, |
3098 | "invalid refs_to_drop, dropping more than 1 refs for tree block %llu refs_to_drop %u" , |
3099 | node->bytenr, refs_to_drop); |
3100 | ret = -EINVAL; |
3101 | btrfs_abort_transaction(trans, ret); |
3102 | goto out; |
3103 | } |
3104 | |
3105 | if (is_data) |
3106 | skinny_metadata = false; |
3107 | |
	ret = lookup_extent_backref(trans, path, &iref, bytenr, num_bytes,
				    parent, root_objectid, owner_objectid,
				    owner_offset);
3111 | if (ret == 0) { |
3112 | /* |
3113 | * Either the inline backref or the SHARED_DATA_REF/ |
3114 | * SHARED_BLOCK_REF is found |
3115 | * |
3116 | * Here is a quick path to locate EXTENT/METADATA_ITEM. |
3117 | * It's possible the EXTENT/METADATA_ITEM is near current slot. |
3118 | */ |
3119 | extent_slot = path->slots[0]; |
3120 | while (extent_slot >= 0) { |
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      extent_slot);
3123 | if (key.objectid != bytenr) |
3124 | break; |
3125 | if (key.type == BTRFS_EXTENT_ITEM_KEY && |
3126 | key.offset == num_bytes) { |
3127 | found_extent = 1; |
3128 | break; |
3129 | } |
3130 | if (key.type == BTRFS_METADATA_ITEM_KEY && |
3131 | key.offset == owner_objectid) { |
3132 | found_extent = 1; |
3133 | break; |
3134 | } |
3135 | |
			/* Quick path didn't find the EXTENT/METADATA_ITEM */
3137 | if (path->slots[0] - extent_slot > 5) |
3138 | break; |
3139 | extent_slot--; |
3140 | } |
3141 | |
3142 | if (!found_extent) { |
3143 | if (iref) { |
3144 | abort_and_dump(trans, path, |
3145 | "invalid iref slot %u, no EXTENT/METADATA_ITEM found but has inline extent ref" , |
3146 | path->slots[0]); |
3147 | ret = -EUCLEAN; |
3148 | goto out; |
3149 | } |
3150 | /* Must be SHARED_* item, remove the backref first */ |
			ret = remove_extent_backref(trans, extent_root, path,
						    NULL, refs_to_drop, is_data);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}
			btrfs_release_path(path);
3158 | |
3159 | /* Slow path to locate EXTENT/METADATA_ITEM */ |
3160 | key.objectid = bytenr; |
3161 | key.type = BTRFS_EXTENT_ITEM_KEY; |
3162 | key.offset = num_bytes; |
3163 | |
3164 | if (!is_data && skinny_metadata) { |
3165 | key.type = BTRFS_METADATA_ITEM_KEY; |
3166 | key.offset = owner_objectid; |
3167 | } |
3168 | |
			ret = btrfs_search_slot(trans, extent_root,
						&key, path, -1, 1);
3171 | if (ret > 0 && skinny_metadata && path->slots[0]) { |
3172 | /* |
3173 | * Couldn't find our skinny metadata item, |
3174 | * see if we have ye olde extent item. |
3175 | */ |
3176 | path->slots[0]--; |
				btrfs_item_key_to_cpu(path->nodes[0], &key,
						      path->slots[0]);
3179 | if (key.objectid == bytenr && |
3180 | key.type == BTRFS_EXTENT_ITEM_KEY && |
3181 | key.offset == num_bytes) |
3182 | ret = 0; |
3183 | } |
3184 | |
3185 | if (ret > 0 && skinny_metadata) { |
3186 | skinny_metadata = false; |
3187 | key.objectid = bytenr; |
3188 | key.type = BTRFS_EXTENT_ITEM_KEY; |
3189 | key.offset = num_bytes; |
				btrfs_release_path(path);
				ret = btrfs_search_slot(trans, extent_root,
							&key, path, -1, 1);
3193 | } |
3194 | |
3195 | if (ret) { |
3196 | if (ret > 0) |
					btrfs_print_leaf(path->nodes[0]);
				btrfs_err(info,
			"umm, got %d back from search, was looking for %llu, slot %d",
3200 | ret, bytenr, path->slots[0]); |
3201 | } |
3202 | if (ret < 0) { |
3203 | btrfs_abort_transaction(trans, ret); |
3204 | goto out; |
3205 | } |
3206 | extent_slot = path->slots[0]; |
3207 | } |
3208 | } else if (WARN_ON(ret == -ENOENT)) { |
3209 | abort_and_dump(trans, path, |
3210 | "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu slot %d" , |
3211 | bytenr, parent, root_objectid, owner_objectid, |
3212 | owner_offset, path->slots[0]); |
3213 | goto out; |
3214 | } else { |
3215 | btrfs_abort_transaction(trans, ret); |
3216 | goto out; |
3217 | } |
3218 | |
3219 | leaf = path->nodes[0]; |
3220 | item_size = btrfs_item_size(eb: leaf, slot: extent_slot); |
3221 | if (unlikely(item_size < sizeof(*ei))) { |
3222 | ret = -EUCLEAN; |
3223 | btrfs_err(trans->fs_info, |
3224 | "unexpected extent item size, has %u expect >= %zu" , |
3225 | item_size, sizeof(*ei)); |
3226 | btrfs_abort_transaction(trans, ret); |
3227 | goto out; |
3228 | } |
3229 | ei = btrfs_item_ptr(leaf, extent_slot, |
3230 | struct btrfs_extent_item); |
3231 | if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID && |
3232 | key.type == BTRFS_EXTENT_ITEM_KEY) { |
3233 | struct btrfs_tree_block_info *bi; |
3234 | |
3235 | if (item_size < sizeof(*ei) + sizeof(*bi)) { |
3236 | abort_and_dump(trans, path, |
3237 | "invalid extent item size for key (%llu, %u, %llu) slot %u owner %llu, has %u expect >= %zu" , |
3238 | key.objectid, key.type, key.offset, |
3239 | path->slots[0], owner_objectid, item_size, |
3240 | sizeof(*ei) + sizeof(*bi)); |
3241 | ret = -EUCLEAN; |
3242 | goto out; |
3243 | } |
3244 | bi = (struct btrfs_tree_block_info *)(ei + 1); |
3245 | WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi)); |
3246 | } |
3247 | |
3248 | refs = btrfs_extent_refs(eb: leaf, s: ei); |
3249 | if (refs < refs_to_drop) { |
3250 | abort_and_dump(trans, path, |
3251 | "trying to drop %d refs but we only have %llu for bytenr %llu slot %u" , |
3252 | refs_to_drop, refs, bytenr, path->slots[0]); |
3253 | ret = -EUCLEAN; |
3254 | goto out; |
3255 | } |
3256 | refs -= refs_to_drop; |
3257 | |
3258 | if (refs > 0) { |
3259 | if (extent_op) |
3260 | __run_delayed_extent_op(extent_op, leaf, ei); |
3261 | /* |
3262 | * In the case of inline back ref, reference count will |
3263 | * be updated by remove_extent_backref |
3264 | */ |
3265 | if (iref) { |
3266 | if (!found_extent) { |
3267 | abort_and_dump(trans, path, |
3268 | "invalid iref, got inlined extent ref but no EXTENT/METADATA_ITEM found, slot %u" , |
3269 | path->slots[0]); |
3270 | ret = -EUCLEAN; |
3271 | goto out; |
3272 | } |
3273 | } else { |
3274 | btrfs_set_extent_refs(eb: leaf, s: ei, val: refs); |
3275 | btrfs_mark_buffer_dirty(trans, buf: leaf); |
3276 | } |
3277 | if (found_extent) { |
3278 | ret = remove_extent_backref(trans, root: extent_root, path, |
3279 | iref, refs_to_drop, is_data); |
3280 | if (ret) { |
3281 | btrfs_abort_transaction(trans, ret); |
3282 | goto out; |
3283 | } |
3284 | } |
3285 | } else { |
3286 | struct btrfs_squota_delta delta = { |
3287 | .root = delayed_ref_root, |
3288 | .num_bytes = num_bytes, |
3289 | .rsv_bytes = 0, |
3290 | .is_data = is_data, |
3291 | .is_inc = false, |
3292 | .generation = btrfs_extent_generation(eb: leaf, s: ei), |
3293 | }; |
3294 | |
3295 | /* In this branch refs == 1 */ |
3296 | if (found_extent) { |
3297 | if (is_data && refs_to_drop != |
3298 | extent_data_ref_count(path, iref)) { |
3299 | abort_and_dump(trans, path, |
3300 | "invalid refs_to_drop, current refs %u refs_to_drop %u slot %u" , |
3301 | extent_data_ref_count(path, iref), |
3302 | refs_to_drop, path->slots[0]); |
3303 | ret = -EUCLEAN; |
3304 | goto out; |
3305 | } |
3306 | if (iref) { |
3307 | if (path->slots[0] != extent_slot) { |
3308 | abort_and_dump(trans, path, |
3309 | "invalid iref, extent item key (%llu %u %llu) slot %u doesn't have wanted iref" , |
3310 | key.objectid, key.type, |
3311 | key.offset, path->slots[0]); |
3312 | ret = -EUCLEAN; |
3313 | goto out; |
3314 | } |
3315 | } else { |
3316 | /* |
* No inline ref, so we must be at a SHARED_* item, and
* since it's a single ref the layout must be:
3319 | * | extent_slot ||extent_slot + 1| |
3320 | * [ EXTENT/METADATA_ITEM ][ SHARED_* ITEM ] |
3321 | */ |
3322 | if (path->slots[0] != extent_slot + 1) { |
3323 | abort_and_dump(trans, path, |
3324 | "invalid SHARED_* item slot %u, previous item is not EXTENT/METADATA_ITEM" , |
3325 | path->slots[0]); |
3326 | ret = -EUCLEAN; |
3327 | goto out; |
3328 | } |
3329 | path->slots[0] = extent_slot; |
3330 | num_to_del = 2; |
3331 | } |
3332 | } |
3333 | /* |
3334 | * We can't infer the data owner from the delayed ref, so we need |
3335 | * to try to get it from the owning ref item. |
3336 | * |
3337 | * If it is not present, then that extent was not written under |
3338 | * simple quotas mode, so we don't need to account for its deletion. |
3339 | */ |
3340 | if (is_data) |
3341 | delta.root = btrfs_get_extent_owner_root(fs_info: trans->fs_info, |
3342 | leaf, slot: extent_slot); |
3343 | |
3344 | ret = btrfs_del_items(trans, root: extent_root, path, slot: path->slots[0], |
3345 | nr: num_to_del); |
3346 | if (ret) { |
3347 | btrfs_abort_transaction(trans, ret); |
3348 | goto out; |
3349 | } |
3350 | btrfs_release_path(p: path); |
3351 | |
3352 | ret = do_free_extent_accounting(trans, bytenr, delta: &delta); |
3353 | } |
3354 | btrfs_release_path(p: path); |
3355 | |
3356 | out: |
3357 | btrfs_free_path(p: path); |
3358 | return ret; |
3359 | } |
3360 | |
3361 | /* |
* when we free a block, it is possible (and likely) that we free the last
3363 | * delayed ref for that extent as well. This searches the delayed ref tree for |
3364 | * a given extent, and if there are no other delayed refs to be processed, it |
3365 | * removes it from the tree. |
3366 | */ |
3367 | static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans, |
3368 | u64 bytenr) |
3369 | { |
3370 | struct btrfs_delayed_ref_head *head; |
3371 | struct btrfs_delayed_ref_root *delayed_refs; |
3372 | int ret = 0; |
3373 | |
3374 | delayed_refs = &trans->transaction->delayed_refs; |
3375 | spin_lock(lock: &delayed_refs->lock); |
3376 | head = btrfs_find_delayed_ref_head(delayed_refs, bytenr); |
3377 | if (!head) |
3378 | goto out_delayed_unlock; |
3379 | |
3380 | spin_lock(lock: &head->lock); |
3381 | if (!RB_EMPTY_ROOT(&head->ref_tree.rb_root)) |
3382 | goto out; |
3383 | |
3384 | if (cleanup_extent_op(head) != NULL) |
3385 | goto out; |
3386 | |
3387 | /* |
* Waiting for the lock here would deadlock. If someone else has it
* locked, they are already in the process of dropping it anyway.
3390 | */ |
3391 | if (!mutex_trylock(lock: &head->mutex)) |
3392 | goto out; |
3393 | |
3394 | btrfs_delete_ref_head(delayed_refs, head); |
3395 | head->processing = false; |
3396 | |
3397 | spin_unlock(lock: &head->lock); |
3398 | spin_unlock(lock: &delayed_refs->lock); |
3399 | |
3400 | BUG_ON(head->extent_op); |
3401 | if (head->must_insert_reserved) |
3402 | ret = 1; |
3403 | |
3404 | btrfs_cleanup_ref_head_accounting(fs_info: trans->fs_info, delayed_refs, head); |
3405 | mutex_unlock(lock: &head->mutex); |
3406 | btrfs_put_delayed_ref_head(head); |
3407 | return ret; |
3408 | out: |
3409 | spin_unlock(lock: &head->lock); |
3410 | |
3411 | out_delayed_unlock: |
3412 | spin_unlock(lock: &delayed_refs->lock); |
3413 | return 0; |
3414 | } |
3415 | |
3416 | void btrfs_free_tree_block(struct btrfs_trans_handle *trans, |
3417 | u64 root_id, |
3418 | struct extent_buffer *buf, |
3419 | u64 parent, int last_ref) |
3420 | { |
3421 | struct btrfs_fs_info *fs_info = trans->fs_info; |
3422 | struct btrfs_ref generic_ref = { 0 }; |
3423 | int ret; |
3424 | |
3425 | btrfs_init_generic_ref(generic_ref: &generic_ref, action: BTRFS_DROP_DELAYED_REF, |
3426 | bytenr: buf->start, len: buf->len, parent, owning_root: btrfs_header_owner(eb: buf)); |
3427 | btrfs_init_tree_ref(generic_ref: &generic_ref, level: btrfs_header_level(eb: buf), |
3428 | root: root_id, mod_root: 0, skip_qgroup: false); |
3429 | |
3430 | if (root_id != BTRFS_TREE_LOG_OBJECTID) { |
3431 | btrfs_ref_tree_mod(fs_info, generic_ref: &generic_ref); |
3432 | ret = btrfs_add_delayed_tree_ref(trans, generic_ref: &generic_ref, NULL); |
3433 | BUG_ON(ret); /* -ENOMEM */ |
3434 | } |
3435 | |
3436 | if (last_ref && btrfs_header_generation(eb: buf) == trans->transid) { |
3437 | struct btrfs_block_group *cache; |
3438 | bool must_pin = false; |
3439 | |
3440 | if (root_id != BTRFS_TREE_LOG_OBJECTID) { |
3441 | ret = check_ref_cleanup(trans, bytenr: buf->start); |
3442 | if (!ret) { |
3443 | btrfs_redirty_list_add(trans: trans->transaction, eb: buf); |
3444 | goto out; |
3445 | } |
3446 | } |
3447 | |
3448 | cache = btrfs_lookup_block_group(info: fs_info, bytenr: buf->start); |
3449 | |
3450 | if (btrfs_header_flag(eb: buf, BTRFS_HEADER_FLAG_WRITTEN)) { |
3451 | pin_down_extent(trans, cache, bytenr: buf->start, num_bytes: buf->len, reserved: 1); |
3452 | btrfs_put_block_group(cache); |
3453 | goto out; |
3454 | } |
3455 | |
3456 | /* |
3457 | * If there are tree mod log users we may have recorded mod log |
3458 | * operations for this node. If we re-allocate this node we |
3459 | * could replay operations on this node that happened when it |
3460 | * existed in a completely different root. For example if it |
3461 | * was part of root A, then was reallocated to root B, and we |
* are doing a btrfs_old_search_slot(root B), we could replay
3463 | * operations that happened when the block was part of root A, |
3464 | * giving us an inconsistent view of the btree. |
3465 | * |
3466 | * We are safe from races here because at this point no other |
3467 | * node or root points to this extent buffer, so if after this |
3468 | * check a new tree mod log user joins we will not have an |
3469 | * existing log of operations on this node that we have to |
3470 | * contend with. |
3471 | */ |
3472 | if (test_bit(BTRFS_FS_TREE_MOD_LOG_USERS, &fs_info->flags)) |
3473 | must_pin = true; |
3474 | |
3475 | if (must_pin || btrfs_is_zoned(fs_info)) { |
3476 | btrfs_redirty_list_add(trans: trans->transaction, eb: buf); |
3477 | pin_down_extent(trans, cache, bytenr: buf->start, num_bytes: buf->len, reserved: 1); |
3478 | btrfs_put_block_group(cache); |
3479 | goto out; |
3480 | } |
3481 | |
3482 | WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)); |
3483 | |
3484 | btrfs_add_free_space(block_group: cache, bytenr: buf->start, size: buf->len); |
3485 | btrfs_free_reserved_bytes(cache, num_bytes: buf->len, delalloc: 0); |
3486 | btrfs_put_block_group(cache); |
3487 | trace_btrfs_reserved_extent_free(fs_info, start: buf->start, len: buf->len); |
3488 | } |
3489 | out: |
3490 | if (last_ref) { |
3491 | /* |
3492 | * Deleting the buffer, clear the corrupt flag since it doesn't |
3493 | * matter anymore. |
3494 | */ |
3495 | clear_bit(nr: EXTENT_BUFFER_CORRUPT, addr: &buf->bflags); |
3496 | } |
3497 | } |
3498 | |
3499 | /* Can return -ENOMEM */ |
3500 | int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref) |
3501 | { |
3502 | struct btrfs_fs_info *fs_info = trans->fs_info; |
3503 | int ret; |
3504 | |
3505 | if (btrfs_is_testing(fs_info)) |
3506 | return 0; |
3507 | |
3508 | /* |
3509 | * tree log blocks never actually go into the extent allocation |
3510 | * tree, just update pinning info and exit early. |
3511 | */ |
3512 | if ((ref->type == BTRFS_REF_METADATA && |
3513 | ref->tree_ref.ref_root == BTRFS_TREE_LOG_OBJECTID) || |
3514 | (ref->type == BTRFS_REF_DATA && |
3515 | ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)) { |
3516 | btrfs_pin_extent(trans, bytenr: ref->bytenr, num_bytes: ref->len, reserved: 1); |
3517 | ret = 0; |
3518 | } else if (ref->type == BTRFS_REF_METADATA) { |
3519 | ret = btrfs_add_delayed_tree_ref(trans, generic_ref: ref, NULL); |
3520 | } else { |
3521 | ret = btrfs_add_delayed_data_ref(trans, generic_ref: ref, reserved: 0); |
3522 | } |
3523 | |
3524 | if (!((ref->type == BTRFS_REF_METADATA && |
3525 | ref->tree_ref.ref_root == BTRFS_TREE_LOG_OBJECTID) || |
3526 | (ref->type == BTRFS_REF_DATA && |
3527 | ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID))) |
3528 | btrfs_ref_tree_mod(fs_info, generic_ref: ref); |
3529 | |
3530 | return ret; |
3531 | } |
3532 | |
3533 | enum btrfs_loop_type { |
3534 | /* |
3535 | * Start caching block groups but do not wait for progress or for them |
3536 | * to be done. |
3537 | */ |
3538 | LOOP_CACHING_NOWAIT, |
3539 | |
3540 | /* |
* If the block group isn't cached, wait until its free_space is at
* least the amount of space we're looking for.
3543 | */ |
3544 | LOOP_CACHING_WAIT, |
3545 | |
3546 | /* |
3547 | * Allow allocations to happen from block groups that do not yet have a |
3548 | * size classification. |
3549 | */ |
3550 | LOOP_UNSET_SIZE_CLASS, |
3551 | |
3552 | /* |
3553 | * Allocate a chunk and then retry the allocation. |
3554 | */ |
3555 | LOOP_ALLOC_CHUNK, |
3556 | |
3557 | /* |
3558 | * Ignore the size class restrictions for this allocation. |
3559 | */ |
3560 | LOOP_WRONG_SIZE_CLASS, |
3561 | |
3562 | /* |
3563 | * Ignore the empty size, only try to allocate the number of bytes |
3564 | * needed for this allocation. |
3565 | */ |
3566 | LOOP_NO_EMPTY_SIZE, |
3567 | }; |
3568 | |
3569 | static inline void |
3570 | btrfs_lock_block_group(struct btrfs_block_group *cache, |
3571 | int delalloc) |
3572 | { |
3573 | if (delalloc) |
3574 | down_read(sem: &cache->data_rwsem); |
3575 | } |
3576 | |
3577 | static inline void btrfs_grab_block_group(struct btrfs_block_group *cache, |
3578 | int delalloc) |
3579 | { |
3580 | btrfs_get_block_group(cache); |
3581 | if (delalloc) |
3582 | down_read(sem: &cache->data_rwsem); |
3583 | } |
3584 | |
3585 | static struct btrfs_block_group *btrfs_lock_cluster( |
3586 | struct btrfs_block_group *block_group, |
3587 | struct btrfs_free_cluster *cluster, |
3588 | int delalloc) |
3589 | __acquires(&cluster->refill_lock) |
3590 | { |
3591 | struct btrfs_block_group *used_bg = NULL; |
3592 | |
3593 | spin_lock(lock: &cluster->refill_lock); |
3594 | while (1) { |
3595 | used_bg = cluster->block_group; |
3596 | if (!used_bg) |
3597 | return NULL; |
3598 | |
3599 | if (used_bg == block_group) |
3600 | return used_bg; |
3601 | |
3602 | btrfs_get_block_group(cache: used_bg); |
3603 | |
3604 | if (!delalloc) |
3605 | return used_bg; |
3606 | |
3607 | if (down_read_trylock(sem: &used_bg->data_rwsem)) |
3608 | return used_bg; |
3609 | |
3610 | spin_unlock(lock: &cluster->refill_lock); |
3611 | |
/* We should only ever nest one level deep. */
3613 | down_read_nested(sem: &used_bg->data_rwsem, SINGLE_DEPTH_NESTING); |
3614 | |
3615 | spin_lock(lock: &cluster->refill_lock); |
3616 | if (used_bg == cluster->block_group) |
3617 | return used_bg; |
3618 | |
3619 | up_read(sem: &used_bg->data_rwsem); |
3620 | btrfs_put_block_group(cache: used_bg); |
3621 | } |
3622 | } |
3623 | |
3624 | static inline void |
3625 | btrfs_release_block_group(struct btrfs_block_group *cache, |
3626 | int delalloc) |
3627 | { |
3628 | if (delalloc) |
3629 | up_read(sem: &cache->data_rwsem); |
3630 | btrfs_put_block_group(cache); |
3631 | } |
3632 | |
3633 | /* |
3634 | * Helper function for find_free_extent(). |
3635 | * |
* Return -ENOENT to inform the caller that it needs to fall back to unclustered mode.
* Return >0 to inform the caller that we found nothing.
* Return 0 when we have found a location and set ffe_ctl->found_offset.
3639 | */ |
3640 | static int find_free_extent_clustered(struct btrfs_block_group *bg, |
3641 | struct find_free_extent_ctl *ffe_ctl, |
3642 | struct btrfs_block_group **cluster_bg_ret) |
3643 | { |
3644 | struct btrfs_block_group *cluster_bg; |
3645 | struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; |
3646 | u64 aligned_cluster; |
3647 | u64 offset; |
3648 | int ret; |
3649 | |
3650 | cluster_bg = btrfs_lock_cluster(block_group: bg, cluster: last_ptr, delalloc: ffe_ctl->delalloc); |
3651 | if (!cluster_bg) |
3652 | goto refill_cluster; |
3653 | if (cluster_bg != bg && (cluster_bg->ro || |
3654 | !block_group_bits(cache: cluster_bg, bits: ffe_ctl->flags))) |
3655 | goto release_cluster; |
3656 | |
3657 | offset = btrfs_alloc_from_cluster(block_group: cluster_bg, cluster: last_ptr, |
3658 | bytes: ffe_ctl->num_bytes, min_start: cluster_bg->start, |
3659 | max_extent_size: &ffe_ctl->max_extent_size); |
3660 | if (offset) { |
3661 | /* We have a block, we're done */ |
3662 | spin_unlock(lock: &last_ptr->refill_lock); |
3663 | trace_btrfs_reserve_extent_cluster(block_group: cluster_bg, ffe_ctl); |
3664 | *cluster_bg_ret = cluster_bg; |
3665 | ffe_ctl->found_offset = offset; |
3666 | return 0; |
3667 | } |
3668 | WARN_ON(last_ptr->block_group != cluster_bg); |
3669 | |
3670 | release_cluster: |
3671 | /* |
* If we are on LOOP_NO_EMPTY_SIZE, we can't set up a new cluster, so
* let's just skip it and let the allocator find whatever block it can
3674 | * find. If we reach this point, we will have tried the cluster |
3675 | * allocator plenty of times and not have found anything, so we are |
3676 | * likely way too fragmented for the clustering stuff to find anything. |
3677 | * |
3678 | * However, if the cluster is taken from the current block group, |
3679 | * release the cluster first, so that we stand a better chance of |
3680 | * succeeding in the unclustered allocation. |
3681 | */ |
3682 | if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE && cluster_bg != bg) { |
3683 | spin_unlock(lock: &last_ptr->refill_lock); |
3684 | btrfs_release_block_group(cache: cluster_bg, delalloc: ffe_ctl->delalloc); |
3685 | return -ENOENT; |
3686 | } |
3687 | |
3688 | /* This cluster didn't work out, free it and start over */ |
3689 | btrfs_return_cluster_to_free_space(NULL, cluster: last_ptr); |
3690 | |
3691 | if (cluster_bg != bg) |
3692 | btrfs_release_block_group(cache: cluster_bg, delalloc: ffe_ctl->delalloc); |
3693 | |
3694 | refill_cluster: |
3695 | if (ffe_ctl->loop >= LOOP_NO_EMPTY_SIZE) { |
3696 | spin_unlock(lock: &last_ptr->refill_lock); |
3697 | return -ENOENT; |
3698 | } |
3699 | |
3700 | aligned_cluster = max_t(u64, |
3701 | ffe_ctl->empty_cluster + ffe_ctl->empty_size, |
3702 | bg->full_stripe_len); |
3703 | ret = btrfs_find_space_cluster(block_group: bg, cluster: last_ptr, offset: ffe_ctl->search_start, |
3704 | bytes: ffe_ctl->num_bytes, empty_size: aligned_cluster); |
3705 | if (ret == 0) { |
3706 | /* Now pull our allocation out of this cluster */ |
3707 | offset = btrfs_alloc_from_cluster(block_group: bg, cluster: last_ptr, |
3708 | bytes: ffe_ctl->num_bytes, min_start: ffe_ctl->search_start, |
3709 | max_extent_size: &ffe_ctl->max_extent_size); |
3710 | if (offset) { |
3711 | /* We found one, proceed */ |
3712 | spin_unlock(lock: &last_ptr->refill_lock); |
3713 | ffe_ctl->found_offset = offset; |
3714 | trace_btrfs_reserve_extent_cluster(block_group: bg, ffe_ctl); |
3715 | return 0; |
3716 | } |
3717 | } |
3718 | /* |
3719 | * At this point we either didn't find a cluster or we weren't able to |
3720 | * allocate a block from our cluster. Free the cluster we've been |
3721 | * trying to use, and go to the next block group. |
3722 | */ |
3723 | btrfs_return_cluster_to_free_space(NULL, cluster: last_ptr); |
3724 | spin_unlock(lock: &last_ptr->refill_lock); |
3725 | return 1; |
3726 | } |
3727 | |
3728 | /* |
* Return >0 to inform the caller that we found nothing.
* Return 0 when we found a free extent and set ffe_ctl->found_offset.
3731 | */ |
3732 | static int find_free_extent_unclustered(struct btrfs_block_group *bg, |
3733 | struct find_free_extent_ctl *ffe_ctl) |
3734 | { |
3735 | struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; |
3736 | u64 offset; |
3737 | |
3738 | /* |
3739 | * We are doing an unclustered allocation, set the fragmented flag so |
* we don't bother trying to set up a cluster again until we get more
3741 | * space. |
3742 | */ |
3743 | if (unlikely(last_ptr)) { |
3744 | spin_lock(lock: &last_ptr->lock); |
3745 | last_ptr->fragmented = 1; |
3746 | spin_unlock(lock: &last_ptr->lock); |
3747 | } |
3748 | if (ffe_ctl->cached) { |
3749 | struct btrfs_free_space_ctl *free_space_ctl; |
3750 | |
3751 | free_space_ctl = bg->free_space_ctl; |
3752 | spin_lock(lock: &free_space_ctl->tree_lock); |
3753 | if (free_space_ctl->free_space < |
3754 | ffe_ctl->num_bytes + ffe_ctl->empty_cluster + |
3755 | ffe_ctl->empty_size) { |
3756 | ffe_ctl->total_free_space = max_t(u64, |
3757 | ffe_ctl->total_free_space, |
3758 | free_space_ctl->free_space); |
3759 | spin_unlock(lock: &free_space_ctl->tree_lock); |
3760 | return 1; |
3761 | } |
3762 | spin_unlock(lock: &free_space_ctl->tree_lock); |
3763 | } |
3764 | |
3765 | offset = btrfs_find_space_for_alloc(block_group: bg, offset: ffe_ctl->search_start, |
3766 | bytes: ffe_ctl->num_bytes, empty_size: ffe_ctl->empty_size, |
3767 | max_extent_size: &ffe_ctl->max_extent_size); |
3768 | if (!offset) |
3769 | return 1; |
3770 | ffe_ctl->found_offset = offset; |
3771 | return 0; |
3772 | } |
3773 | |
3774 | static int do_allocation_clustered(struct btrfs_block_group *block_group, |
3775 | struct find_free_extent_ctl *ffe_ctl, |
3776 | struct btrfs_block_group **bg_ret) |
3777 | { |
3778 | int ret; |
3779 | |
/* We want to try and use the cluster allocator, so let's look there */
3781 | if (ffe_ctl->last_ptr && ffe_ctl->use_cluster) { |
3782 | ret = find_free_extent_clustered(bg: block_group, ffe_ctl, cluster_bg_ret: bg_ret); |
3783 | if (ret >= 0) |
3784 | return ret; |
3785 | /* ret == -ENOENT case falls through */ |
3786 | } |
3787 | |
3788 | return find_free_extent_unclustered(bg: block_group, ffe_ctl); |
3789 | } |
3790 | |
3791 | /* |
3792 | * Tree-log block group locking |
3793 | * ============================ |
3794 | * |
3795 | * fs_info::treelog_bg_lock protects the fs_info::treelog_bg which |
3796 | * indicates the starting address of a block group, which is reserved only |
3797 | * for tree-log metadata. |
3798 | * |
3799 | * Lock nesting |
3800 | * ============ |
3801 | * |
3802 | * space_info::lock |
3803 | * block_group::lock |
3804 | * fs_info::treelog_bg_lock |
3805 | */ |
3806 | |
3807 | /* |
* Simple allocator for a sequential-only block group. It only allows
* sequential allocation, so there is no need to play with trees. This
* function also reserves the bytes as in btrfs_add_reserved_bytes.
3811 | */ |
3812 | static int do_allocation_zoned(struct btrfs_block_group *block_group, |
3813 | struct find_free_extent_ctl *ffe_ctl, |
3814 | struct btrfs_block_group **bg_ret) |
3815 | { |
3816 | struct btrfs_fs_info *fs_info = block_group->fs_info; |
3817 | struct btrfs_space_info *space_info = block_group->space_info; |
3818 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
3819 | u64 start = block_group->start; |
3820 | u64 num_bytes = ffe_ctl->num_bytes; |
3821 | u64 avail; |
3822 | u64 bytenr = block_group->start; |
3823 | u64 log_bytenr; |
3824 | u64 data_reloc_bytenr; |
3825 | int ret = 0; |
3826 | bool skip = false; |
3827 | |
3828 | ASSERT(btrfs_is_zoned(block_group->fs_info)); |
3829 | |
3830 | /* |
3831 | * Do not allow non-tree-log blocks in the dedicated tree-log block |
3832 | * group, and vice versa. |
3833 | */ |
3834 | spin_lock(lock: &fs_info->treelog_bg_lock); |
3835 | log_bytenr = fs_info->treelog_bg; |
3836 | if (log_bytenr && ((ffe_ctl->for_treelog && bytenr != log_bytenr) || |
3837 | (!ffe_ctl->for_treelog && bytenr == log_bytenr))) |
3838 | skip = true; |
3839 | spin_unlock(lock: &fs_info->treelog_bg_lock); |
3840 | if (skip) |
3841 | return 1; |
3842 | |
3843 | /* |
3844 | * Do not allow non-relocation blocks in the dedicated relocation block |
3845 | * group, and vice versa. |
3846 | */ |
3847 | spin_lock(lock: &fs_info->relocation_bg_lock); |
3848 | data_reloc_bytenr = fs_info->data_reloc_bg; |
3849 | if (data_reloc_bytenr && |
3850 | ((ffe_ctl->for_data_reloc && bytenr != data_reloc_bytenr) || |
3851 | (!ffe_ctl->for_data_reloc && bytenr == data_reloc_bytenr))) |
3852 | skip = true; |
3853 | spin_unlock(lock: &fs_info->relocation_bg_lock); |
3854 | if (skip) |
3855 | return 1; |
3856 | |
3857 | /* Check RO and no space case before trying to activate it */ |
3858 | spin_lock(lock: &block_group->lock); |
3859 | if (block_group->ro || btrfs_zoned_bg_is_full(bg: block_group)) { |
3860 | ret = 1; |
3861 | /* |
3862 | * May need to clear fs_info->{treelog,data_reloc}_bg. |
3863 | * Return the error after taking the locks. |
3864 | */ |
3865 | } |
3866 | spin_unlock(lock: &block_group->lock); |
3867 | |
3868 | /* Metadata block group is activated at write time. */ |
3869 | if (!ret && (block_group->flags & BTRFS_BLOCK_GROUP_DATA) && |
3870 | !btrfs_zone_activate(block_group)) { |
3871 | ret = 1; |
3872 | /* |
3873 | * May need to clear fs_info->{treelog,data_reloc}_bg. |
3874 | * Return the error after taking the locks. |
3875 | */ |
3876 | } |
3877 | |
3878 | spin_lock(lock: &space_info->lock); |
3879 | spin_lock(lock: &block_group->lock); |
3880 | spin_lock(lock: &fs_info->treelog_bg_lock); |
3881 | spin_lock(lock: &fs_info->relocation_bg_lock); |
3882 | |
3883 | if (ret) |
3884 | goto out; |
3885 | |
3886 | ASSERT(!ffe_ctl->for_treelog || |
3887 | block_group->start == fs_info->treelog_bg || |
3888 | fs_info->treelog_bg == 0); |
3889 | ASSERT(!ffe_ctl->for_data_reloc || |
3890 | block_group->start == fs_info->data_reloc_bg || |
3891 | fs_info->data_reloc_bg == 0); |
3892 | |
3893 | if (block_group->ro || |
3894 | (!ffe_ctl->for_data_reloc && |
3895 | test_bit(BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, &block_group->runtime_flags))) { |
3896 | ret = 1; |
3897 | goto out; |
3898 | } |
3899 | |
3900 | /* |
* Do not allow a block group that is currently in use to become the
* dedicated tree-log block group.
3903 | */ |
3904 | if (ffe_ctl->for_treelog && !fs_info->treelog_bg && |
3905 | (block_group->used || block_group->reserved)) { |
3906 | ret = 1; |
3907 | goto out; |
3908 | } |
3909 | |
3910 | /* |
* Do not allow a block group that is currently in use to become the
* dedicated data relocation block group.
3913 | */ |
3914 | if (ffe_ctl->for_data_reloc && !fs_info->data_reloc_bg && |
3915 | (block_group->used || block_group->reserved)) { |
3916 | ret = 1; |
3917 | goto out; |
3918 | } |
3919 | |
3920 | WARN_ON_ONCE(block_group->alloc_offset > block_group->zone_capacity); |
3921 | avail = block_group->zone_capacity - block_group->alloc_offset; |
3922 | if (avail < num_bytes) { |
3923 | if (ffe_ctl->max_extent_size < avail) { |
3924 | /* |
* With the sequential allocator, free space is always
* contiguous.
3927 | */ |
3928 | ffe_ctl->max_extent_size = avail; |
3929 | ffe_ctl->total_free_space = avail; |
3930 | } |
3931 | ret = 1; |
3932 | goto out; |
3933 | } |
3934 | |
3935 | if (ffe_ctl->for_treelog && !fs_info->treelog_bg) |
3936 | fs_info->treelog_bg = block_group->start; |
3937 | |
3938 | if (ffe_ctl->for_data_reloc) { |
3939 | if (!fs_info->data_reloc_bg) |
3940 | fs_info->data_reloc_bg = block_group->start; |
3941 | /* |
3942 | * Do not allow allocations from this block group, unless it is |
3943 | * for data relocation. Compared to increasing the ->ro, setting |
3944 | * the ->zoned_data_reloc_ongoing flag still allows nocow |
3945 | * writers to come in. See btrfs_inc_nocow_writers(). |
3946 | * |
* We need to disable allocations here to avoid allocating a
* regular (non-relocation data) extent. With a mix of relocation
* extents and regular extents, we can dispatch WRITE commands
* (for relocation extents) and ZONE APPEND commands (for
* regular extents) at the same time to the same zone, which
* easily breaks the write pointer.
3953 | * |
* Also, this flag prevents this block group from being zone finished.
3955 | */ |
3956 | set_bit(nr: BLOCK_GROUP_FLAG_ZONED_DATA_RELOC, addr: &block_group->runtime_flags); |
3957 | } |
3958 | |
3959 | ffe_ctl->found_offset = start + block_group->alloc_offset; |
3960 | block_group->alloc_offset += num_bytes; |
3961 | spin_lock(lock: &ctl->tree_lock); |
3962 | ctl->free_space -= num_bytes; |
3963 | spin_unlock(lock: &ctl->tree_lock); |
3964 | |
3965 | /* |
3966 | * We do not check if found_offset is aligned to stripesize. The |
3967 | * address is anyway rewritten when using zone append writing. |
3968 | */ |
3969 | |
3970 | ffe_ctl->search_start = ffe_ctl->found_offset; |
3971 | |
3972 | out: |
3973 | if (ret && ffe_ctl->for_treelog) |
3974 | fs_info->treelog_bg = 0; |
3975 | if (ret && ffe_ctl->for_data_reloc) |
3976 | fs_info->data_reloc_bg = 0; |
3977 | spin_unlock(lock: &fs_info->relocation_bg_lock); |
3978 | spin_unlock(lock: &fs_info->treelog_bg_lock); |
3979 | spin_unlock(lock: &block_group->lock); |
3980 | spin_unlock(lock: &space_info->lock); |
3981 | return ret; |
3982 | } |
3983 | |
3984 | static int do_allocation(struct btrfs_block_group *block_group, |
3985 | struct find_free_extent_ctl *ffe_ctl, |
3986 | struct btrfs_block_group **bg_ret) |
3987 | { |
3988 | switch (ffe_ctl->policy) { |
3989 | case BTRFS_EXTENT_ALLOC_CLUSTERED: |
3990 | return do_allocation_clustered(block_group, ffe_ctl, bg_ret); |
3991 | case BTRFS_EXTENT_ALLOC_ZONED: |
3992 | return do_allocation_zoned(block_group, ffe_ctl, bg_ret); |
3993 | default: |
3994 | BUG(); |
3995 | } |
3996 | } |
3997 | |
3998 | static void release_block_group(struct btrfs_block_group *block_group, |
3999 | struct find_free_extent_ctl *ffe_ctl, |
4000 | int delalloc) |
4001 | { |
4002 | switch (ffe_ctl->policy) { |
4003 | case BTRFS_EXTENT_ALLOC_CLUSTERED: |
4004 | ffe_ctl->retry_uncached = false; |
4005 | break; |
4006 | case BTRFS_EXTENT_ALLOC_ZONED: |
4007 | /* Nothing to do */ |
4008 | break; |
4009 | default: |
4010 | BUG(); |
4011 | } |
4012 | |
4013 | BUG_ON(btrfs_bg_flags_to_raid_index(block_group->flags) != |
4014 | ffe_ctl->index); |
4015 | btrfs_release_block_group(cache: block_group, delalloc); |
4016 | } |
4017 | |
4018 | static void found_extent_clustered(struct find_free_extent_ctl *ffe_ctl, |
4019 | struct btrfs_key *ins) |
4020 | { |
4021 | struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; |
4022 | |
4023 | if (!ffe_ctl->use_cluster && last_ptr) { |
4024 | spin_lock(lock: &last_ptr->lock); |
4025 | last_ptr->window_start = ins->objectid; |
4026 | spin_unlock(lock: &last_ptr->lock); |
4027 | } |
4028 | } |
4029 | |
4030 | static void found_extent(struct find_free_extent_ctl *ffe_ctl, |
4031 | struct btrfs_key *ins) |
4032 | { |
4033 | switch (ffe_ctl->policy) { |
4034 | case BTRFS_EXTENT_ALLOC_CLUSTERED: |
4035 | found_extent_clustered(ffe_ctl, ins); |
4036 | break; |
4037 | case BTRFS_EXTENT_ALLOC_ZONED: |
4038 | /* Nothing to do */ |
4039 | break; |
4040 | default: |
4041 | BUG(); |
4042 | } |
4043 | } |
4044 | |
4045 | static int can_allocate_chunk_zoned(struct btrfs_fs_info *fs_info, |
4046 | struct find_free_extent_ctl *ffe_ctl) |
4047 | { |
4048 | /* Block group's activeness is not a requirement for METADATA block groups. */ |
4049 | if (!(ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA)) |
4050 | return 0; |
4051 | |
4052 | /* If we can activate new zone, just allocate a chunk and use it */ |
4053 | if (btrfs_can_activate_zone(fs_devices: fs_info->fs_devices, flags: ffe_ctl->flags)) |
4054 | return 0; |
4055 | |
4056 | /* |
4057 | * We already reached the max active zones. Try to finish one block |
* group to make room for a new block group. This is only possible
4059 | * for a data block group because btrfs_zone_finish() may need to wait |
4060 | * for a running transaction which can cause a deadlock for metadata |
4061 | * allocation. |
4062 | */ |
4063 | if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) { |
4064 | int ret = btrfs_zone_finish_one_bg(fs_info); |
4065 | |
4066 | if (ret == 1) |
4067 | return 0; |
4068 | else if (ret < 0) |
4069 | return ret; |
4070 | } |
4071 | |
4072 | /* |
4073 | * If we have enough free space left in an already active block group |
4074 | * and we can't activate any other zone now, do not allow allocating a |
4075 | * new chunk and let find_free_extent() retry with a smaller size. |
4076 | */ |
4077 | if (ffe_ctl->max_extent_size >= ffe_ctl->min_alloc_size) |
4078 | return -ENOSPC; |
4079 | |
4080 | /* |
4081 | * Even min_alloc_size is not left in any block groups. Since we cannot |
* activate a new block group, allocating one may not help. Let's tell
* the caller to try again, hoping it makes progress by writing some
* parts of the region. That is only possible for data block groups,
4085 | * where a part of the region can be written. |
4086 | */ |
4087 | if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) |
4088 | return -EAGAIN; |
4089 | |
4090 | /* |
* We cannot activate a new block group and there is not enough space
* left in any block group, so allocating a new block group may not
* help. But there is nothing else to do anyway, so let's go with it.
4094 | */ |
4095 | return 0; |
4096 | } |
4097 | |
4098 | static int can_allocate_chunk(struct btrfs_fs_info *fs_info, |
4099 | struct find_free_extent_ctl *ffe_ctl) |
4100 | { |
4101 | switch (ffe_ctl->policy) { |
4102 | case BTRFS_EXTENT_ALLOC_CLUSTERED: |
4103 | return 0; |
4104 | case BTRFS_EXTENT_ALLOC_ZONED: |
4105 | return can_allocate_chunk_zoned(fs_info, ffe_ctl); |
4106 | default: |
4107 | BUG(); |
4108 | } |
4109 | } |
4110 | |
4111 | /* |
* Return >0 means the caller needs to re-search for a free extent.
4113 | * Return 0 means we have the needed free extent. |
4114 | * Return <0 means we failed to locate any free extent. |
4115 | */ |
4116 | static int find_free_extent_update_loop(struct btrfs_fs_info *fs_info, |
4117 | struct btrfs_key *ins, |
4118 | struct find_free_extent_ctl *ffe_ctl, |
4119 | bool full_search) |
4120 | { |
4121 | struct btrfs_root *root = fs_info->chunk_root; |
4122 | int ret; |
4123 | |
4124 | if ((ffe_ctl->loop == LOOP_CACHING_NOWAIT) && |
4125 | ffe_ctl->have_caching_bg && !ffe_ctl->orig_have_caching_bg) |
4126 | ffe_ctl->orig_have_caching_bg = true; |
4127 | |
4128 | if (ins->objectid) { |
4129 | found_extent(ffe_ctl, ins); |
4130 | return 0; |
4131 | } |
4132 | |
4133 | if (ffe_ctl->loop >= LOOP_CACHING_WAIT && ffe_ctl->have_caching_bg) |
4134 | return 1; |
4135 | |
4136 | ffe_ctl->index++; |
4137 | if (ffe_ctl->index < BTRFS_NR_RAID_TYPES) |
4138 | return 1; |
4139 | |
4140 | /* See the comments for btrfs_loop_type for an explanation of the phases. */ |
4141 | if (ffe_ctl->loop < LOOP_NO_EMPTY_SIZE) { |
4142 | ffe_ctl->index = 0; |
4143 | /* |
4144 | * We want to skip the LOOP_CACHING_WAIT step if we don't have |
4145 | * any uncached bgs and we've already done a full search |
4146 | * through. |
4147 | */ |
4148 | if (ffe_ctl->loop == LOOP_CACHING_NOWAIT && |
4149 | (!ffe_ctl->orig_have_caching_bg && full_search)) |
4150 | ffe_ctl->loop++; |
4151 | ffe_ctl->loop++; |
4152 | |
4153 | if (ffe_ctl->loop == LOOP_ALLOC_CHUNK) { |
4154 | struct btrfs_trans_handle *trans; |
4155 | int exist = 0; |
4156 | |
4157 | /* Check if allocation policy allows to create a new chunk */ |
4158 | ret = can_allocate_chunk(fs_info, ffe_ctl); |
4159 | if (ret) |
4160 | return ret; |
4161 | |
4162 | trans = current->journal_info; |
4163 | if (trans) |
4164 | exist = 1; |
4165 | else |
4166 | trans = btrfs_join_transaction(root); |
4167 | |
4168 | if (IS_ERR(ptr: trans)) { |
4169 | ret = PTR_ERR(ptr: trans); |
4170 | return ret; |
4171 | } |
4172 | |
4173 | ret = btrfs_chunk_alloc(trans, flags: ffe_ctl->flags, |
4174 | force: CHUNK_ALLOC_FORCE_FOR_EXTENT); |
4175 | |
4176 | /* Do not bail out on ENOSPC since we can do more. */ |
4177 | if (ret == -ENOSPC) { |
4178 | ret = 0; |
4179 | ffe_ctl->loop++; |
4180 | } |
4181 | else if (ret < 0) |
4182 | btrfs_abort_transaction(trans, ret); |
4183 | else |
4184 | ret = 0; |
4185 | if (!exist) |
4186 | btrfs_end_transaction(trans); |
4187 | if (ret) |
4188 | return ret; |
4189 | } |
4190 | |
4191 | if (ffe_ctl->loop == LOOP_NO_EMPTY_SIZE) { |
4192 | if (ffe_ctl->policy != BTRFS_EXTENT_ALLOC_CLUSTERED) |
4193 | return -ENOSPC; |
4194 | |
4195 | /* |
4196 | * Don't loop again if we already have no empty_size and |
4197 | * no empty_cluster. |
4198 | */ |
4199 | if (ffe_ctl->empty_size == 0 && |
4200 | ffe_ctl->empty_cluster == 0) |
4201 | return -ENOSPC; |
4202 | ffe_ctl->empty_size = 0; |
4203 | ffe_ctl->empty_cluster = 0; |
4204 | } |
4205 | return 1; |
4206 | } |
4207 | return -ENOSPC; |
4208 | } |
4209 | |
4210 | static bool find_free_extent_check_size_class(struct find_free_extent_ctl *ffe_ctl, |
4211 | struct btrfs_block_group *bg) |
4212 | { |
4213 | if (ffe_ctl->policy == BTRFS_EXTENT_ALLOC_ZONED) |
4214 | return true; |
4215 | if (!btrfs_block_group_should_use_size_class(bg)) |
4216 | return true; |
4217 | if (ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS) |
4218 | return true; |
4219 | if (ffe_ctl->loop >= LOOP_UNSET_SIZE_CLASS && |
4220 | bg->size_class == BTRFS_BG_SZ_NONE) |
4221 | return true; |
4222 | return ffe_ctl->size_class == bg->size_class; |
4223 | } |
4224 | |
4225 | static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info, |
4226 | struct find_free_extent_ctl *ffe_ctl, |
4227 | struct btrfs_space_info *space_info, |
4228 | struct btrfs_key *ins) |
4229 | { |
4230 | /* |
4231 | * If our free space is heavily fragmented we may not be able to make |
4232 | * big contiguous allocations, so instead of doing the expensive search |
4233 | * for free space, simply return ENOSPC with our max_extent_size so we |
4234 | * can go ahead and search for a more manageable chunk. |
4235 | * |
4236 | * If our max_extent_size is large enough for our allocation simply |
4237 | * disable clustering since we will likely not be able to find enough |
4238 | * space to create a cluster and induce latency trying. |
4239 | */ |
4240 | if (space_info->max_extent_size) { |
4241 | spin_lock(lock: &space_info->lock); |
4242 | if (space_info->max_extent_size && |
4243 | ffe_ctl->num_bytes > space_info->max_extent_size) { |
4244 | ins->offset = space_info->max_extent_size; |
4245 | spin_unlock(lock: &space_info->lock); |
4246 | return -ENOSPC; |
4247 | } else if (space_info->max_extent_size) { |
4248 | ffe_ctl->use_cluster = false; |
4249 | } |
4250 | spin_unlock(lock: &space_info->lock); |
4251 | } |
4252 | |
4253 | ffe_ctl->last_ptr = fetch_cluster_info(fs_info, space_info, |
4254 | empty_cluster: &ffe_ctl->empty_cluster); |
4255 | if (ffe_ctl->last_ptr) { |
4256 | struct btrfs_free_cluster *last_ptr = ffe_ctl->last_ptr; |
4257 | |
4258 | spin_lock(lock: &last_ptr->lock); |
4259 | if (last_ptr->block_group) |
4260 | ffe_ctl->hint_byte = last_ptr->window_start; |
4261 | if (last_ptr->fragmented) { |
4262 | /* |
4263 | * We still set window_start so we can keep track of the |
4264 | * last place we found an allocation to try and save |
4265 | * some time. |
4266 | */ |
4267 | ffe_ctl->hint_byte = last_ptr->window_start; |
4268 | ffe_ctl->use_cluster = false; |
4269 | } |
4270 | spin_unlock(lock: &last_ptr->lock); |
4271 | } |
4272 | |
4273 | return 0; |
4274 | } |
4275 | |
4276 | static int prepare_allocation(struct btrfs_fs_info *fs_info, |
4277 | struct find_free_extent_ctl *ffe_ctl, |
4278 | struct btrfs_space_info *space_info, |
4279 | struct btrfs_key *ins) |
4280 | { |
4281 | switch (ffe_ctl->policy) { |
4282 | case BTRFS_EXTENT_ALLOC_CLUSTERED: |
4283 | return prepare_allocation_clustered(fs_info, ffe_ctl, |
4284 | space_info, ins); |
4285 | case BTRFS_EXTENT_ALLOC_ZONED: |
4286 | if (ffe_ctl->for_treelog) { |
4287 | spin_lock(lock: &fs_info->treelog_bg_lock); |
4288 | if (fs_info->treelog_bg) |
4289 | ffe_ctl->hint_byte = fs_info->treelog_bg; |
4290 | spin_unlock(lock: &fs_info->treelog_bg_lock); |
4291 | } |
4292 | if (ffe_ctl->for_data_reloc) { |
4293 | spin_lock(lock: &fs_info->relocation_bg_lock); |
4294 | if (fs_info->data_reloc_bg) |
4295 | ffe_ctl->hint_byte = fs_info->data_reloc_bg; |
4296 | spin_unlock(lock: &fs_info->relocation_bg_lock); |
4297 | } |
4298 | return 0; |
4299 | default: |
4300 | BUG(); |
4301 | } |
4302 | } |
4303 | |
4304 | /* |
* walks the btree of allocated extents and finds a hole of a given size.
* The key ins is changed to record the hole:
* ins->objectid == start position
* ins->type == BTRFS_EXTENT_ITEM_KEY
* ins->offset == the size of the hole.
4310 | * Any available blocks before search_start are skipped. |
4311 | * |
4312 | * If there is no suitable free space, we will record the max size of |
4313 | * the free space extent currently. |
4314 | * |
4315 | * The overall logic and call chain: |
4316 | * |
4317 | * find_free_extent() |
4318 | * |- Iterate through all block groups |
4319 | * | |- Get a valid block group |
4320 | * | |- Try to do clustered allocation in that block group |
4321 | * | |- Try to do unclustered allocation in that block group |
4322 | * | |- Check if the result is valid |
4323 | * | | |- If valid, then exit |
4324 | * | |- Jump to next block group |
4325 | * | |
4326 | * |- Push harder to find free extents |
4327 | * |- If not found, re-iterate all block groups |
4328 | */ |
4329 | static noinline int find_free_extent(struct btrfs_root *root, |
4330 | struct btrfs_key *ins, |
4331 | struct find_free_extent_ctl *ffe_ctl) |
4332 | { |
4333 | struct btrfs_fs_info *fs_info = root->fs_info; |
4334 | int ret = 0; |
4335 | int cache_block_group_error = 0; |
4336 | struct btrfs_block_group *block_group = NULL; |
4337 | struct btrfs_space_info *space_info; |
4338 | bool full_search = false; |
4339 | |
4340 | WARN_ON(ffe_ctl->num_bytes < fs_info->sectorsize); |
4341 | |
4342 | ffe_ctl->search_start = 0; |
4343 | /* For clustered allocation */ |
4344 | ffe_ctl->empty_cluster = 0; |
4345 | ffe_ctl->last_ptr = NULL; |
4346 | ffe_ctl->use_cluster = true; |
4347 | ffe_ctl->have_caching_bg = false; |
4348 | ffe_ctl->orig_have_caching_bg = false; |
4349 | ffe_ctl->index = btrfs_bg_flags_to_raid_index(flags: ffe_ctl->flags); |
4350 | ffe_ctl->loop = 0; |
4351 | ffe_ctl->retry_uncached = false; |
4352 | ffe_ctl->cached = 0; |
4353 | ffe_ctl->max_extent_size = 0; |
4354 | ffe_ctl->total_free_space = 0; |
4355 | ffe_ctl->found_offset = 0; |
4356 | ffe_ctl->policy = BTRFS_EXTENT_ALLOC_CLUSTERED; |
4357 | ffe_ctl->size_class = btrfs_calc_block_group_size_class(size: ffe_ctl->num_bytes); |
4358 | |
4359 | if (btrfs_is_zoned(fs_info)) |
4360 | ffe_ctl->policy = BTRFS_EXTENT_ALLOC_ZONED; |
4361 | |
4362 | ins->type = BTRFS_EXTENT_ITEM_KEY; |
4363 | ins->objectid = 0; |
4364 | ins->offset = 0; |
4365 | |
4366 | trace_find_free_extent(root, ffe_ctl); |
4367 | |
4368 | space_info = btrfs_find_space_info(info: fs_info, flags: ffe_ctl->flags); |
4369 | if (!space_info) { |
4370 | btrfs_err(fs_info, "No space info for %llu" , ffe_ctl->flags); |
4371 | return -ENOSPC; |
4372 | } |
4373 | |
4374 | ret = prepare_allocation(fs_info, ffe_ctl, space_info, ins); |
4375 | if (ret < 0) |
4376 | return ret; |
4377 | |
4378 | ffe_ctl->search_start = max(ffe_ctl->search_start, |
4379 | first_logical_byte(fs_info)); |
4380 | ffe_ctl->search_start = max(ffe_ctl->search_start, ffe_ctl->hint_byte); |
4381 | if (ffe_ctl->search_start == ffe_ctl->hint_byte) { |
4382 | block_group = btrfs_lookup_block_group(info: fs_info, |
4383 | bytenr: ffe_ctl->search_start); |
4384 | /* |
* we don't want to use the block group if it doesn't match our
* allocation bits, or if it's not cached.
4387 | * |
4388 | * However if we are re-searching with an ideal block group |
4389 | * picked out then we don't care that the block group is cached. |
4390 | */ |
4391 | if (block_group && block_group_bits(cache: block_group, bits: ffe_ctl->flags) && |
4392 | block_group->cached != BTRFS_CACHE_NO) { |
4393 | down_read(sem: &space_info->groups_sem); |
4394 | if (list_empty(head: &block_group->list) || |
4395 | block_group->ro) { |
4396 | /* |
4397 | * someone is removing this block group, |
4398 | * we can't jump into the have_block_group |
4399 | * target because our list pointers are not |
4400 | * valid |
4401 | */ |
4402 | btrfs_put_block_group(cache: block_group); |
4403 | up_read(sem: &space_info->groups_sem); |
4404 | } else { |
4405 | ffe_ctl->index = btrfs_bg_flags_to_raid_index( |
4406 | flags: block_group->flags); |
4407 | btrfs_lock_block_group(cache: block_group, |
4408 | delalloc: ffe_ctl->delalloc); |
4409 | ffe_ctl->hinted = true; |
4410 | goto have_block_group; |
4411 | } |
4412 | } else if (block_group) { |
4413 | btrfs_put_block_group(cache: block_group); |
4414 | } |
4415 | } |
4416 | search: |
4417 | trace_find_free_extent_search_loop(root, ffe_ctl); |
4418 | ffe_ctl->have_caching_bg = false; |
4419 | if (ffe_ctl->index == btrfs_bg_flags_to_raid_index(flags: ffe_ctl->flags) || |
4420 | ffe_ctl->index == 0) |
4421 | full_search = true; |
4422 | down_read(sem: &space_info->groups_sem); |
4423 | list_for_each_entry(block_group, |
4424 | &space_info->block_groups[ffe_ctl->index], list) { |
4425 | struct btrfs_block_group *bg_ret; |
4426 | |
4427 | ffe_ctl->hinted = false; |
4428 | /* If the block group is read-only, we can skip it entirely. */ |
4429 | if (unlikely(block_group->ro)) { |
4430 | if (ffe_ctl->for_treelog) |
4431 | btrfs_clear_treelog_bg(bg: block_group); |
4432 | if (ffe_ctl->for_data_reloc) |
4433 | btrfs_clear_data_reloc_bg(bg: block_group); |
4434 | continue; |
4435 | } |
4436 | |
4437 | btrfs_grab_block_group(cache: block_group, delalloc: ffe_ctl->delalloc); |
4438 | ffe_ctl->search_start = block_group->start; |
4439 | |
4440 | /* |
4441 | * this can happen if we end up cycling through all the |
4442 | * raid types, but we want to make sure we only allocate |
4443 | * for the proper type. |
4444 | */ |
4445 | if (!block_group_bits(cache: block_group, bits: ffe_ctl->flags)) { |
u64 extra = BTRFS_BLOCK_GROUP_DUP |
4447 | BTRFS_BLOCK_GROUP_RAID1_MASK | |
4448 | BTRFS_BLOCK_GROUP_RAID56_MASK | |
4449 | BTRFS_BLOCK_GROUP_RAID10; |
4450 | |
4451 | /* |
4452 | * if they asked for extra copies and this block group |
4453 | * doesn't provide them, bail. This does allow us to |
4454 | * fill raid0 from raid1. |
4455 | */ |
4456 | if ((ffe_ctl->flags & extra) && !(block_group->flags & extra)) |
4457 | goto loop; |
4458 | |
4459 | /* |
4460 | * This block group has different flags than we want. |
4461 | * It's possible that we have MIXED_GROUP flag but no |
4462 | * block group is mixed. Just skip such block group. |
4463 | */ |
4464 | btrfs_release_block_group(cache: block_group, delalloc: ffe_ctl->delalloc); |
4465 | continue; |
4466 | } |
4467 | |
4468 | have_block_group: |
4469 | trace_find_free_extent_have_block_group(root, ffe_ctl, block_group); |
4470 | ffe_ctl->cached = btrfs_block_group_done(cache: block_group); |
4471 | if (unlikely(!ffe_ctl->cached)) { |
4472 | ffe_ctl->have_caching_bg = true; |
4473 | ret = btrfs_cache_block_group(cache: block_group, wait: false); |
4474 | |
4475 | /* |
4476 | * If we get ENOMEM here or something else we want to |
4477 | * try other block groups, because it may not be fatal. |
4478 | * However if we can't find anything else we need to |
4479 | * save our return here so that we return the actual |
4480 | * error that caused problems, not ENOSPC. |
4481 | */ |
4482 | if (ret < 0) { |
4483 | if (!cache_block_group_error) |
4484 | cache_block_group_error = ret; |
4485 | ret = 0; |
4486 | goto loop; |
4487 | } |
4488 | ret = 0; |
4489 | } |
4490 | |
4491 | if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) { |
4492 | if (!cache_block_group_error) |
4493 | cache_block_group_error = -EIO; |
4494 | goto loop; |
4495 | } |
4496 | |
4497 | if (!find_free_extent_check_size_class(ffe_ctl, bg: block_group)) |
4498 | goto loop; |
4499 | |
4500 | bg_ret = NULL; |
4501 | ret = do_allocation(block_group, ffe_ctl, bg_ret: &bg_ret); |
4502 | if (ret > 0) |
4503 | goto loop; |
4504 | |
4505 | if (bg_ret && bg_ret != block_group) { |
4506 | btrfs_release_block_group(cache: block_group, delalloc: ffe_ctl->delalloc); |
4507 | block_group = bg_ret; |
4508 | } |
4509 | |
4510 | /* Checks */ |
4511 | ffe_ctl->search_start = round_up(ffe_ctl->found_offset, |
4512 | fs_info->stripesize); |
4513 | |
4514 | /* move on to the next group */ |
4515 | if (ffe_ctl->search_start + ffe_ctl->num_bytes > |
4516 | block_group->start + block_group->length) { |
4517 | btrfs_add_free_space_unused(block_group, |
4518 | bytenr: ffe_ctl->found_offset, |
4519 | size: ffe_ctl->num_bytes); |
4520 | goto loop; |
4521 | } |
4522 | |
4523 | if (ffe_ctl->found_offset < ffe_ctl->search_start) |
4524 | btrfs_add_free_space_unused(block_group, |
4525 | bytenr: ffe_ctl->found_offset, |
4526 | size: ffe_ctl->search_start - ffe_ctl->found_offset); |
4527 | |
4528 | ret = btrfs_add_reserved_bytes(cache: block_group, ram_bytes: ffe_ctl->ram_bytes, |
4529 | num_bytes: ffe_ctl->num_bytes, |
4530 | delalloc: ffe_ctl->delalloc, |
4531 | force_wrong_size_class: ffe_ctl->loop >= LOOP_WRONG_SIZE_CLASS); |
4532 | if (ret == -EAGAIN) { |
4533 | btrfs_add_free_space_unused(block_group, |
4534 | bytenr: ffe_ctl->found_offset, |
4535 | size: ffe_ctl->num_bytes); |
4536 | goto loop; |
4537 | } |
4538 | btrfs_inc_block_group_reservations(bg: block_group); |
4539 | |
/* we are all good, let's return */
4541 | ins->objectid = ffe_ctl->search_start; |
4542 | ins->offset = ffe_ctl->num_bytes; |
4543 | |
4544 | trace_btrfs_reserve_extent(block_group, ffe_ctl); |
4545 | btrfs_release_block_group(cache: block_group, delalloc: ffe_ctl->delalloc); |
4546 | break; |
4547 | loop: |
4548 | if (!ffe_ctl->cached && ffe_ctl->loop > LOOP_CACHING_NOWAIT && |
4549 | !ffe_ctl->retry_uncached) { |
4550 | ffe_ctl->retry_uncached = true; |
4551 | btrfs_wait_block_group_cache_progress(cache: block_group, |
4552 | num_bytes: ffe_ctl->num_bytes + |
4553 | ffe_ctl->empty_cluster + |
4554 | ffe_ctl->empty_size); |
4555 | goto have_block_group; |
4556 | } |
4557 | release_block_group(block_group, ffe_ctl, delalloc: ffe_ctl->delalloc); |
4558 | cond_resched(); |
4559 | } |
4560 | up_read(sem: &space_info->groups_sem); |
4561 | |
4562 | ret = find_free_extent_update_loop(fs_info, ins, ffe_ctl, full_search); |
4563 | if (ret > 0) |
4564 | goto search; |
4565 | |
4566 | if (ret == -ENOSPC && !cache_block_group_error) { |
4567 | /* |
4568 | * Use ffe_ctl->total_free_space as fallback if we can't find |
4569 | * any contiguous hole. |
4570 | */ |
4571 | if (!ffe_ctl->max_extent_size) |
4572 | ffe_ctl->max_extent_size = ffe_ctl->total_free_space; |
4573 | spin_lock(lock: &space_info->lock); |
4574 | space_info->max_extent_size = ffe_ctl->max_extent_size; |
4575 | spin_unlock(lock: &space_info->lock); |
4576 | ins->offset = ffe_ctl->max_extent_size; |
4577 | } else if (ret == -ENOSPC) { |
4578 | ret = cache_block_group_error; |
4579 | } |
4580 | return ret; |
4581 | } |
4582 | |
4583 | /* |
4584 | * Entry point to the extent allocator. Tries to find a hole that is at least |
4585 | * as big as @num_bytes. |
4586 | * |
4587 | * @root - The root that will contain this extent |
4588 | * |
* @ram_bytes - The amount of space in RAM that @num_bytes takes. This
4590 | * is used for accounting purposes. This value differs |
4591 | * from @num_bytes only in the case of compressed extents. |
4592 | * |
4593 | * @num_bytes - Number of bytes to allocate on-disk. |
4594 | * |
4595 | * @min_alloc_size - Indicates the minimum amount of space that the |
4596 | * allocator should try to satisfy. In some cases |
4597 | * @num_bytes may be larger than what is required and if |
4598 | * the filesystem is fragmented then allocation fails. |
4599 | * However, the presence of @min_alloc_size gives a |
4600 | * chance to try and satisfy the smaller allocation. |
4601 | * |
4602 | * @empty_size - A hint that you plan on doing more COW. This is the |
4603 | * size in bytes the allocator should try to find free |
4604 | * next to the block it returns. This is just a hint and |
4605 | * may be ignored by the allocator. |
4606 | * |
4607 | * @hint_byte - Hint to the allocator to start searching above the byte |
4608 | * address passed. It might be ignored. |
4609 | * |
4610 | * @ins - This key is modified to record the found hole. It will |
4611 | * have the following values: |
4612 | * ins->objectid == start position |
* ins->type == BTRFS_EXTENT_ITEM_KEY
4614 | * ins->offset == the size of the hole. |
4615 | * |
4616 | * @is_data - Boolean flag indicating whether an extent is |
4617 | * allocated for data (true) or metadata (false) |
4618 | * |
4619 | * @delalloc - Boolean flag indicating whether this allocation is for |
* delalloc or not. If 'true', the data_rwsem of block groups
4621 | * is going to be acquired. |
4622 | * |
4623 | * |
4624 | * Returns 0 when an allocation succeeded or < 0 when an error occurred. In |
4625 | * case -ENOSPC is returned then @ins->offset will contain the size of the |
4626 | * largest available hole the allocator managed to find. |
4627 | */ |
4628 | int btrfs_reserve_extent(struct btrfs_root *root, u64 ram_bytes, |
4629 | u64 num_bytes, u64 min_alloc_size, |
4630 | u64 empty_size, u64 hint_byte, |
4631 | struct btrfs_key *ins, int is_data, int delalloc) |
4632 | { |
4633 | struct btrfs_fs_info *fs_info = root->fs_info; |
4634 | struct find_free_extent_ctl ffe_ctl = {}; |
4635 | bool final_tried = num_bytes == min_alloc_size; |
4636 | u64 flags; |
4637 | int ret; |
4638 | bool for_treelog = (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID); |
4639 | bool for_data_reloc = (btrfs_is_data_reloc_root(root) && is_data); |
4640 | |
4641 | flags = get_alloc_profile_by_root(root, data: is_data); |
4642 | again: |
4643 | WARN_ON(num_bytes < fs_info->sectorsize); |
4644 | |
4645 | ffe_ctl.ram_bytes = ram_bytes; |
4646 | ffe_ctl.num_bytes = num_bytes; |
4647 | ffe_ctl.min_alloc_size = min_alloc_size; |
4648 | ffe_ctl.empty_size = empty_size; |
4649 | ffe_ctl.flags = flags; |
4650 | ffe_ctl.delalloc = delalloc; |
4651 | ffe_ctl.hint_byte = hint_byte; |
4652 | ffe_ctl.for_treelog = for_treelog; |
4653 | ffe_ctl.for_data_reloc = for_data_reloc; |
4654 | |
4655 | ret = find_free_extent(root, ins, ffe_ctl: &ffe_ctl); |
4656 | if (!ret && !is_data) { |
4657 | btrfs_dec_block_group_reservations(fs_info, start: ins->objectid); |
4658 | } else if (ret == -ENOSPC) { |
4659 | if (!final_tried && ins->offset) { |
4660 | num_bytes = min(num_bytes >> 1, ins->offset); |
4661 | num_bytes = round_down(num_bytes, |
4662 | fs_info->sectorsize); |
4663 | num_bytes = max(num_bytes, min_alloc_size); |
4664 | ram_bytes = num_bytes; |
4665 | if (num_bytes == min_alloc_size) |
4666 | final_tried = true; |
4667 | goto again; |
4668 | } else if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) { |
4669 | struct btrfs_space_info *sinfo; |
4670 | |
4671 | sinfo = btrfs_find_space_info(info: fs_info, flags); |
4672 | btrfs_err(fs_info, |
4673 | "allocation failed flags %llu, wanted %llu tree-log %d, relocation: %d" , |
4674 | flags, num_bytes, for_treelog, for_data_reloc); |
4675 | if (sinfo) |
4676 | btrfs_dump_space_info(fs_info, info: sinfo, |
4677 | bytes: num_bytes, dump_block_groups: 1); |
4678 | } |
4679 | } |
4680 | |
4681 | return ret; |
4682 | } |
4683 | |
4684 | int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info, |
4685 | u64 start, u64 len, int delalloc) |
4686 | { |
4687 | struct btrfs_block_group *cache; |
4688 | |
4689 | cache = btrfs_lookup_block_group(info: fs_info, bytenr: start); |
4690 | if (!cache) { |
4691 | btrfs_err(fs_info, "Unable to find block group for %llu" , |
4692 | start); |
4693 | return -ENOSPC; |
4694 | } |
4695 | |
4696 | btrfs_add_free_space(block_group: cache, bytenr: start, size: len); |
4697 | btrfs_free_reserved_bytes(cache, num_bytes: len, delalloc); |
4698 | trace_btrfs_reserved_extent_free(fs_info, start, len); |
4699 | |
4700 | btrfs_put_block_group(cache); |
4701 | return 0; |
4702 | } |
4703 | |
4704 | int btrfs_pin_reserved_extent(struct btrfs_trans_handle *trans, |
4705 | const struct extent_buffer *eb) |
4706 | { |
4707 | struct btrfs_block_group *cache; |
4708 | int ret = 0; |
4709 | |
4710 | cache = btrfs_lookup_block_group(info: trans->fs_info, bytenr: eb->start); |
4711 | if (!cache) { |
4712 | btrfs_err(trans->fs_info, "unable to find block group for %llu" , |
4713 | eb->start); |
4714 | return -ENOSPC; |
4715 | } |
4716 | |
4717 | ret = pin_down_extent(trans, cache, bytenr: eb->start, num_bytes: eb->len, reserved: 1); |
4718 | btrfs_put_block_group(cache); |
4719 | return ret; |
4720 | } |
4721 | |
4722 | static int alloc_reserved_extent(struct btrfs_trans_handle *trans, u64 bytenr, |
4723 | u64 num_bytes) |
4724 | { |
4725 | struct btrfs_fs_info *fs_info = trans->fs_info; |
4726 | int ret; |
4727 | |
4728 | ret = remove_from_free_space_tree(trans, start: bytenr, size: num_bytes); |
4729 | if (ret) |
4730 | return ret; |
4731 | |
4732 | ret = btrfs_update_block_group(trans, bytenr, num_bytes, alloc: true); |
4733 | if (ret) { |
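/* A failure here is a logic error; the ASSERT below screams in debug builds. */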
4734 | ASSERT(!ret); |
4735 | btrfs_err(fs_info, "update block group failed for %llu %llu",
4736 | bytenr, num_bytes); |
4737 | return ret; |
4738 | } |
4739 | |
4740 | trace_btrfs_reserved_extent_alloc(fs_info, start: bytenr, len: num_bytes); |
4741 | return 0; |
4742 | } |
4743 | |
4744 | static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, |
4745 | u64 parent, u64 root_objectid, |
4746 | u64 flags, u64 owner, u64 offset, |
4747 | struct btrfs_key *ins, int ref_mod, u64 oref_root) |
4748 | { |
4749 | struct btrfs_fs_info *fs_info = trans->fs_info; |
4750 | struct btrfs_root *extent_root; |
4751 | int ret; |
4752 | struct btrfs_extent_item *extent_item; |
4753 | struct btrfs_extent_owner_ref *oref; |
4754 | struct btrfs_extent_inline_ref *iref; |
4755 | struct btrfs_path *path; |
4756 | struct extent_buffer *leaf; |
4757 | int type; |
4758 | u32 size; |
4759 | const bool simple_quota = (btrfs_qgroup_mode(fs_info) == BTRFS_QGROUP_MODE_SIMPLE); |
4760 | |
4761 | if (parent > 0) |
4762 | type = BTRFS_SHARED_DATA_REF_KEY; |
4763 | else |
4764 | type = BTRFS_EXTENT_DATA_REF_KEY; |
4765 | |
4766 | size = sizeof(*extent_item); |
4767 | if (simple_quota) |
4768 | size += btrfs_extent_inline_ref_size(BTRFS_EXTENT_OWNER_REF_KEY); |
4769 | size += btrfs_extent_inline_ref_size(type); |
4770 | |
4771 | path = btrfs_alloc_path(); |
4772 | if (!path) |
4773 | return -ENOMEM; |
4774 | |
4775 | extent_root = btrfs_extent_root(fs_info, bytenr: ins->objectid); |
4776 | ret = btrfs_insert_empty_item(trans, root: extent_root, path, key: ins, data_size: size); |
4777 | if (ret) { |
4778 | btrfs_free_path(p: path); |
4779 | return ret; |
4780 | } |
4781 | |
4782 | leaf = path->nodes[0]; |
4783 | extent_item = btrfs_item_ptr(leaf, path->slots[0], |
4784 | struct btrfs_extent_item); |
4785 | btrfs_set_extent_refs(eb: leaf, s: extent_item, val: ref_mod); |
4786 | btrfs_set_extent_generation(eb: leaf, s: extent_item, val: trans->transid); |
4787 | btrfs_set_extent_flags(eb: leaf, s: extent_item, |
4788 | val: flags | BTRFS_EXTENT_FLAG_DATA); |
4789 | |
4790 | iref = (struct btrfs_extent_inline_ref *)(extent_item + 1); |
4791 | if (simple_quota) { |
4792 | btrfs_set_extent_inline_ref_type(eb: leaf, s: iref, BTRFS_EXTENT_OWNER_REF_KEY); |
4793 | oref = (struct btrfs_extent_owner_ref *)(&iref->offset); |
4794 | btrfs_set_extent_owner_ref_root_id(eb: leaf, s: oref, val: oref_root); |
4795 | iref = (struct btrfs_extent_inline_ref *)(oref + 1); |
4796 | } |
4797 | btrfs_set_extent_inline_ref_type(eb: leaf, s: iref, val: type); |
4798 | |
4799 | if (parent > 0) { |
4800 | struct btrfs_shared_data_ref *ref; |
4801 | ref = (struct btrfs_shared_data_ref *)(iref + 1); |
4802 | btrfs_set_extent_inline_ref_offset(eb: leaf, s: iref, val: parent); |
4803 | btrfs_set_shared_data_ref_count(eb: leaf, s: ref, val: ref_mod); |
4804 | } else { |
4805 | struct btrfs_extent_data_ref *ref; |
4806 | ref = (struct btrfs_extent_data_ref *)(&iref->offset); |
4807 | btrfs_set_extent_data_ref_root(eb: leaf, s: ref, val: root_objectid); |
4808 | btrfs_set_extent_data_ref_objectid(eb: leaf, s: ref, val: owner); |
4809 | btrfs_set_extent_data_ref_offset(eb: leaf, s: ref, val: offset); |
4810 | btrfs_set_extent_data_ref_count(eb: leaf, s: ref, val: ref_mod); |
4811 | } |
4812 | |
4813 | btrfs_mark_buffer_dirty(trans, buf: path->nodes[0]); |
4814 | btrfs_free_path(p: path); |
4815 | |
4816 | return alloc_reserved_extent(trans, bytenr: ins->objectid, num_bytes: ins->offset); |
4817 | } |
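
/*
 * Item layout written above (a sketch of field order, not exact byte
 * offsets):
 *
 *	btrfs_extent_item
 *	[inline BTRFS_EXTENT_OWNER_REF_KEY ref, simple quotas only]
 *	inline ref of 'type', where
 *	  parent > 0:  offset = parent, followed by btrfs_shared_data_ref
 *	  parent == 0: btrfs_extent_data_ref with root/owner/offset/count
 */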
4818 | |
4819 | static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, |
4820 | struct btrfs_delayed_ref_node *node, |
4821 | struct btrfs_delayed_extent_op *extent_op) |
4822 | { |
4823 | struct btrfs_fs_info *fs_info = trans->fs_info; |
4824 | struct btrfs_root *extent_root; |
4825 | int ret; |
4826 | struct btrfs_extent_item *extent_item; |
4827 | struct btrfs_key extent_key; |
4828 | struct btrfs_tree_block_info *block_info; |
4829 | struct btrfs_extent_inline_ref *iref; |
4830 | struct btrfs_path *path; |
4831 | struct extent_buffer *leaf; |
4832 | struct btrfs_delayed_tree_ref *ref; |
4833 | u32 size = sizeof(*extent_item) + sizeof(*iref); |
4834 | u64 flags = extent_op->flags_to_set; |
4835 | bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA); |
4836 | |
4837 | ref = btrfs_delayed_node_to_tree_ref(node); |
4838 | |
4839 | extent_key.objectid = node->bytenr; |
4840 | if (skinny_metadata) { |
4841 | extent_key.offset = ref->level; |
4842 | extent_key.type = BTRFS_METADATA_ITEM_KEY; |
4843 | } else { |
4844 | extent_key.offset = node->num_bytes; |
4845 | extent_key.type = BTRFS_EXTENT_ITEM_KEY; |
4846 | size += sizeof(*block_info); |
4847 | } |
4848 | |
4849 | path = btrfs_alloc_path(); |
4850 | if (!path) |
4851 | return -ENOMEM; |
4852 | |
4853 | extent_root = btrfs_extent_root(fs_info, bytenr: extent_key.objectid); |
4854 | ret = btrfs_insert_empty_item(trans, root: extent_root, path, key: &extent_key, |
4855 | data_size: size); |
4856 | if (ret) { |
4857 | btrfs_free_path(p: path); |
4858 | return ret; |
4859 | } |
4860 | |
4861 | leaf = path->nodes[0]; |
4862 | extent_item = btrfs_item_ptr(leaf, path->slots[0], |
4863 | struct btrfs_extent_item); |
4864 | btrfs_set_extent_refs(eb: leaf, s: extent_item, val: 1); |
4865 | btrfs_set_extent_generation(eb: leaf, s: extent_item, val: trans->transid); |
4866 | btrfs_set_extent_flags(eb: leaf, s: extent_item, |
4867 | val: flags | BTRFS_EXTENT_FLAG_TREE_BLOCK); |
4868 | |
4869 | if (skinny_metadata) { |
4870 | iref = (struct btrfs_extent_inline_ref *)(extent_item + 1); |
4871 | } else { |
4872 | block_info = (struct btrfs_tree_block_info *)(extent_item + 1); |
4873 | btrfs_set_tree_block_key(eb: leaf, item: block_info, key: &extent_op->key); |
4874 | btrfs_set_tree_block_level(eb: leaf, s: block_info, val: ref->level); |
4875 | iref = (struct btrfs_extent_inline_ref *)(block_info + 1); |
4876 | } |
4877 | |
4878 | if (node->type == BTRFS_SHARED_BLOCK_REF_KEY) { |
4879 | btrfs_set_extent_inline_ref_type(eb: leaf, s: iref, |
4880 | BTRFS_SHARED_BLOCK_REF_KEY); |
4881 | btrfs_set_extent_inline_ref_offset(eb: leaf, s: iref, val: ref->parent); |
4882 | } else { |
4883 | btrfs_set_extent_inline_ref_type(eb: leaf, s: iref, |
4884 | BTRFS_TREE_BLOCK_REF_KEY); |
4885 | btrfs_set_extent_inline_ref_offset(eb: leaf, s: iref, val: ref->root); |
4886 | } |
4887 | |
4888 | btrfs_mark_buffer_dirty(trans, buf: leaf); |
4889 | btrfs_free_path(p: path); |
4890 | |
4891 | return alloc_reserved_extent(trans, bytenr: node->bytenr, num_bytes: fs_info->nodesize); |
4892 | } |
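
/*
 * Resulting keys (sketch): with SKINNY_METADATA the item is
 * (bytenr, BTRFS_METADATA_ITEM_KEY, level) and stores no
 * btrfs_tree_block_info; without it, the item is
 * (bytenr, BTRFS_EXTENT_ITEM_KEY, nodesize) followed by a
 * btrfs_tree_block_info carrying the first key and the level. The inline
 * ref then names either the parent block (shared) or the owning root.
 */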
4893 | |
4894 | int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans, |
4895 | struct btrfs_root *root, u64 owner, |
4896 | u64 offset, u64 ram_bytes, |
4897 | struct btrfs_key *ins) |
4898 | { |
4899 | struct btrfs_ref generic_ref = { 0 }; |
4900 | u64 root_objectid = root->root_key.objectid; |
4901 | u64 owning_root = root_objectid; |
4902 | |
4903 | BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID); |
4904 | |
4905 | if (btrfs_is_data_reloc_root(root) && is_fstree(rootid: root->relocation_src_root)) |
4906 | owning_root = root->relocation_src_root; |
4907 | |
4908 | btrfs_init_generic_ref(generic_ref: &generic_ref, action: BTRFS_ADD_DELAYED_EXTENT, |
4909 | bytenr: ins->objectid, len: ins->offset, parent: 0, owning_root); |
4910 | btrfs_init_data_ref(generic_ref: &generic_ref, ref_root: root_objectid, ino: owner, |
4911 | offset, mod_root: 0, skip_qgroup: false); |
4912 | btrfs_ref_tree_mod(fs_info: root->fs_info, generic_ref: &generic_ref); |
4913 | |
4914 | return btrfs_add_delayed_data_ref(trans, generic_ref: &generic_ref, reserved: ram_bytes); |
4915 | } |
4916 | |
4917 | /* |
4918 | * this is used by the tree logging recovery code. It records that |
4919 | * an extent has been allocated and makes sure to clear the free |
4920 | * space cache bits as well |
4921 | */ |
4922 | int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, |
4923 | u64 root_objectid, u64 owner, u64 offset, |
4924 | struct btrfs_key *ins) |
4925 | { |
4926 | struct btrfs_fs_info *fs_info = trans->fs_info; |
4927 | int ret; |
4928 | struct btrfs_block_group *block_group; |
4929 | struct btrfs_space_info *space_info; |
4930 | struct btrfs_squota_delta delta = { |
4931 | .root = root_objectid, |
4932 | .num_bytes = ins->offset, |
4933 | .generation = trans->transid, |
4934 | .rsv_bytes = 0, |
4935 | .is_data = true, |
4936 | .is_inc = true, |
4937 | }; |
4938 | |
4939 | /* |
4940 | * Mixed block groups will exclude before processing the log so we only |
4941 | * need to do the exclude dance if this fs isn't mixed. |
4942 | */ |
4943 | if (!btrfs_fs_incompat(fs_info, MIXED_GROUPS)) { |
4944 | ret = __exclude_logged_extent(fs_info, start: ins->objectid, |
4945 | num_bytes: ins->offset); |
4946 | if (ret) |
4947 | return ret; |
4948 | } |
4949 | |
4950 | block_group = btrfs_lookup_block_group(info: fs_info, bytenr: ins->objectid); |
4951 | if (!block_group) |
4952 | return -EINVAL; |
4953 | |
4954 | space_info = block_group->space_info; |
4955 | spin_lock(lock: &space_info->lock); |
4956 | spin_lock(lock: &block_group->lock); |
4957 | space_info->bytes_reserved += ins->offset; |
4958 | block_group->reserved += ins->offset; |
4959 | spin_unlock(lock: &block_group->lock); |
4960 | spin_unlock(lock: &space_info->lock); |
4961 | |
4962 | ret = alloc_reserved_file_extent(trans, parent: 0, root_objectid, flags: 0, owner, |
4963 | offset, ins, ref_mod: 1, oref_root: root_objectid); |
4964 | if (ret) |
4965 | btrfs_pin_extent(trans, bytenr: ins->objectid, num_bytes: ins->offset, reserved: 1); |
4966 | ret = btrfs_record_squota_delta(fs_info, delta: &delta); |
4967 | btrfs_put_block_group(cache: block_group); |
4968 | return ret; |
4969 | } |
4970 | |
4971 | #ifdef CONFIG_BTRFS_DEBUG |
4972 | /* |
4973 | * Extra safety check in case the extent tree is corrupted and the extent
4974 | * allocator chooses to use a tree block which is already used and locked.
4975 | */ |
4976 | static bool check_eb_lock_owner(const struct extent_buffer *eb) |
4977 | { |
4978 | if (eb->lock_owner == current->pid) { |
4979 | btrfs_err_rl(eb->fs_info, |
4980 | "tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected" , |
4981 | eb->start, btrfs_header_owner(eb), current->pid); |
4982 | return true; |
4983 | } |
4984 | return false; |
4985 | } |
4986 | #else |
4987 | static bool check_eb_lock_owner(const struct extent_buffer *eb)
4988 | { |
4989 | return false; |
4990 | } |
4991 | #endif |
4992 | |
4993 | static struct extent_buffer * |
4994 | btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, |
4995 | u64 bytenr, int level, u64 owner, |
4996 | enum btrfs_lock_nesting nest) |
4997 | { |
4998 | struct btrfs_fs_info *fs_info = root->fs_info; |
4999 | struct extent_buffer *buf; |
5000 | u64 lockdep_owner = owner; |
5001 | |
5002 | buf = btrfs_find_create_tree_block(fs_info, bytenr, owner_root: owner, level); |
5003 | if (IS_ERR(ptr: buf)) |
5004 | return buf; |
5005 | |
5006 | if (check_eb_lock_owner(eb: buf)) { |
5007 | free_extent_buffer(eb: buf); |
5008 | return ERR_PTR(error: -EUCLEAN); |
5009 | } |
5010 | |
5011 | /* |
5012 | * The reloc trees are just snapshots, so we need them to appear to be |
5013 | * just like any other fs tree WRT lockdep. |
5014 | * |
5015 | * The exception however is in replace_path() in relocation, where we |
5016 | * hold the lock on the original fs root and then search for the reloc |
5017 | * root. At that point we need to make sure any reloc root buffers are |
5018 | * set to the BTRFS_TREE_RELOC_OBJECTID lockdep class in order to make |
5019 | * lockdep happy. |
5020 | */ |
5021 | if (lockdep_owner == BTRFS_TREE_RELOC_OBJECTID && |
5022 | !test_bit(BTRFS_ROOT_RESET_LOCKDEP_CLASS, &root->state)) |
5023 | lockdep_owner = BTRFS_FS_TREE_OBJECTID; |
5024 | |
5025 | /* btrfs_clear_buffer_dirty() accesses generation field. */ |
5026 | btrfs_set_header_generation(eb: buf, val: trans->transid); |
5027 | |
5028 | /* |
5029 | * This needs to stay, because we could allocate a freed block from an |
5030 | * old tree into a new tree, so we need to make sure this new block is |
5031 | * set to the appropriate level and owner. |
5032 | */ |
5033 | btrfs_set_buffer_lockdep_class(objectid: lockdep_owner, eb: buf, level); |
5034 | |
5035 | __btrfs_tree_lock(eb: buf, nest); |
5036 | btrfs_clear_buffer_dirty(trans, buf); |
5037 | clear_bit(nr: EXTENT_BUFFER_STALE, addr: &buf->bflags); |
5038 | clear_bit(nr: EXTENT_BUFFER_NO_CHECK, addr: &buf->bflags); |
5039 | |
5040 | set_extent_buffer_uptodate(buf); |
5041 | |
5042 | memzero_extent_buffer(eb: buf, start: 0, len: sizeof(struct btrfs_header)); |
5043 | btrfs_set_header_level(eb: buf, val: level); |
5044 | btrfs_set_header_bytenr(eb: buf, val: buf->start); |
5045 | btrfs_set_header_generation(eb: buf, val: trans->transid); |
5046 | btrfs_set_header_backref_rev(eb: buf, BTRFS_MIXED_BACKREF_REV); |
5047 | btrfs_set_header_owner(eb: buf, val: owner); |
5048 | write_extent_buffer_fsid(eb: buf, fsid: fs_info->fs_devices->metadata_uuid); |
5049 | write_extent_buffer_chunk_tree_uuid(eb: buf, chunk_tree_uuid: fs_info->chunk_tree_uuid); |
5050 | if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { |
5051 | buf->log_index = root->log_transid % 2; |
5052 | /* |
5053 | * we allow two log transactions at a time, so use different
5054 | * EXTENT bits to differentiate dirty pages.
5055 | */ |
5056 | if (buf->log_index == 0) |
5057 | set_extent_bit(tree: &root->dirty_log_pages, start: buf->start, |
5058 | end: buf->start + buf->len - 1, |
5059 | bits: EXTENT_DIRTY, NULL); |
5060 | else |
5061 | set_extent_bit(tree: &root->dirty_log_pages, start: buf->start, |
5062 | end: buf->start + buf->len - 1, |
5063 | bits: EXTENT_NEW, NULL); |
5064 | } else { |
5065 | buf->log_index = -1; |
5066 | set_extent_bit(tree: &trans->transaction->dirty_pages, start: buf->start, |
5067 | end: buf->start + buf->len - 1, bits: EXTENT_DIRTY, NULL); |
5068 | } |
5069 | /* this returns a buffer locked for blocking */ |
5070 | return buf; |
5071 | } |
5072 | |
5073 | /* |
5074 | * finds a free extent and does all the dirty work required for allocation.
5075 | * returns the tree buffer or an ERR_PTR on error. |
5076 | */ |
5077 | struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, |
5078 | struct btrfs_root *root, |
5079 | u64 parent, u64 root_objectid, |
5080 | const struct btrfs_disk_key *key, |
5081 | int level, u64 hint, |
5082 | u64 empty_size, |
5083 | u64 reloc_src_root, |
5084 | enum btrfs_lock_nesting nest) |
5085 | { |
5086 | struct btrfs_fs_info *fs_info = root->fs_info; |
5087 | struct btrfs_key ins; |
5088 | struct btrfs_block_rsv *block_rsv; |
5089 | struct extent_buffer *buf; |
5090 | struct btrfs_delayed_extent_op *extent_op; |
5091 | struct btrfs_ref generic_ref = { 0 }; |
5092 | u64 flags = 0; |
5093 | int ret; |
5094 | u32 blocksize = fs_info->nodesize; |
5095 | bool skinny_metadata = btrfs_fs_incompat(fs_info, SKINNY_METADATA); |
5096 | u64 owning_root; |
5097 | |
5098 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS |
5099 | if (btrfs_is_testing(fs_info)) { |
5100 | buf = btrfs_init_new_buffer(trans, root, bytenr: root->alloc_bytenr, |
5101 | level, owner: root_objectid, nest); |
5102 | if (!IS_ERR(ptr: buf)) |
5103 | root->alloc_bytenr += blocksize; |
5104 | return buf; |
5105 | } |
5106 | #endif |
5107 | |
5108 | block_rsv = btrfs_use_block_rsv(trans, root, blocksize); |
5109 | if (IS_ERR(ptr: block_rsv)) |
5110 | return ERR_CAST(ptr: block_rsv); |
5111 | |
5112 | ret = btrfs_reserve_extent(root, ram_bytes: blocksize, num_bytes: blocksize, min_alloc_size: blocksize, |
5113 | empty_size, hint_byte: hint, ins: &ins, is_data: 0, delalloc: 0); |
5114 | if (ret) |
5115 | goto out_unuse; |
5116 | |
5117 | buf = btrfs_init_new_buffer(trans, root, bytenr: ins.objectid, level, |
5118 | owner: root_objectid, nest); |
5119 | if (IS_ERR(ptr: buf)) { |
5120 | ret = PTR_ERR(ptr: buf); |
5121 | goto out_free_reserved; |
5122 | } |
5123 | owning_root = btrfs_header_owner(eb: buf); |
5124 | |
5125 | if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { |
5126 | if (parent == 0) |
5127 | parent = ins.objectid; |
5128 | flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF; |
5129 | owning_root = reloc_src_root; |
5130 | } else |
5131 | BUG_ON(parent > 0); |
5132 | |
5133 | if (root_objectid != BTRFS_TREE_LOG_OBJECTID) { |
5134 | extent_op = btrfs_alloc_delayed_extent_op(); |
5135 | if (!extent_op) { |
5136 | ret = -ENOMEM; |
5137 | goto out_free_buf; |
5138 | } |
5139 | if (key) |
5140 | memcpy(&extent_op->key, key, sizeof(extent_op->key)); |
5141 | else |
5142 | memset(&extent_op->key, 0, sizeof(extent_op->key)); |
5143 | extent_op->flags_to_set = flags; |
5144 | extent_op->update_key = skinny_metadata ? false : true; |
5145 | extent_op->update_flags = true; |
5146 | extent_op->level = level; |
5147 | |
5148 | btrfs_init_generic_ref(generic_ref: &generic_ref, action: BTRFS_ADD_DELAYED_EXTENT, |
5149 | bytenr: ins.objectid, len: ins.offset, parent, owning_root); |
5150 | btrfs_init_tree_ref(generic_ref: &generic_ref, level, root: root_objectid, |
5151 | mod_root: root->root_key.objectid, skip_qgroup: false); |
5152 | btrfs_ref_tree_mod(fs_info, generic_ref: &generic_ref); |
5153 | ret = btrfs_add_delayed_tree_ref(trans, generic_ref: &generic_ref, extent_op); |
5154 | if (ret) |
5155 | goto out_free_delayed; |
5156 | } |
5157 | return buf; |
5158 | |
5159 | out_free_delayed: |
5160 | btrfs_free_delayed_extent_op(op: extent_op); |
5161 | out_free_buf: |
5162 | btrfs_tree_unlock(eb: buf); |
5163 | free_extent_buffer(eb: buf); |
5164 | out_free_reserved: |
5165 | btrfs_free_reserved_extent(fs_info, start: ins.objectid, len: ins.offset, delalloc: 0); |
5166 | out_unuse: |
5167 | btrfs_unuse_block_rsv(fs_info, block_rsv, blocksize); |
5168 | return ERR_PTR(error: ret); |
5169 | } |
5170 | |
5171 | struct walk_control { |
5172 | u64 refs[BTRFS_MAX_LEVEL]; |
5173 | u64 flags[BTRFS_MAX_LEVEL]; |
5174 | struct btrfs_key update_progress; |
5175 | struct btrfs_key drop_progress; |
5176 | int drop_level; |
5177 | int stage; |
5178 | int level; |
5179 | int shared_level; |
5180 | int update_ref; |
5181 | int keep_locks; |
5182 | int reada_slot; |
5183 | int reada_count; |
5184 | int restarted; |
5185 | }; |
5186 | |
5187 | #define DROP_REFERENCE 1 |
5188 | #define UPDATE_BACKREF 2 |
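
/*
 * Stage machine sketch: the walk starts in DROP_REFERENCE, dropping refs
 * on blocks this root owns exclusively. If do_walk_down() hits a shared
 * block newer than the snapshot while update_ref is set, it switches to
 * UPDATE_BACKREF and converts that subtree to full backrefs;
 * walk_up_proc() flips back to DROP_REFERENCE once it climbs past
 * wc->shared_level.
 */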
5189 | |
5190 | static noinline void reada_walk_down(struct btrfs_trans_handle *trans, |
5191 | struct btrfs_root *root, |
5192 | struct walk_control *wc, |
5193 | struct btrfs_path *path) |
5194 | { |
5195 | struct btrfs_fs_info *fs_info = root->fs_info; |
5196 | u64 bytenr; |
5197 | u64 generation; |
5198 | u64 refs; |
5199 | u64 flags; |
5200 | u32 nritems; |
5201 | struct btrfs_key key; |
5202 | struct extent_buffer *eb; |
5203 | int ret; |
5204 | int slot; |
5205 | int nread = 0; |
5206 | |
5207 | if (path->slots[wc->level] < wc->reada_slot) { |
5208 | wc->reada_count = wc->reada_count * 2 / 3; |
5209 | wc->reada_count = max(wc->reada_count, 2); |
5210 | } else { |
5211 | wc->reada_count = wc->reada_count * 3 / 2; |
5212 | wc->reada_count = min_t(int, wc->reada_count, |
5213 | BTRFS_NODEPTRS_PER_BLOCK(fs_info)); |
5214 | } |
5215 | |
5216 | eb = path->nodes[wc->level]; |
5217 | nritems = btrfs_header_nritems(eb); |
5218 | |
5219 | for (slot = path->slots[wc->level]; slot < nritems; slot++) { |
5220 | if (nread >= wc->reada_count) |
5221 | break; |
5222 | |
5223 | cond_resched(); |
5224 | bytenr = btrfs_node_blockptr(eb, nr: slot); |
5225 | generation = btrfs_node_ptr_generation(eb, nr: slot); |
5226 | |
5227 | if (slot == path->slots[wc->level]) |
5228 | goto reada; |
5229 | |
5230 | if (wc->stage == UPDATE_BACKREF && |
5231 | generation <= root->root_key.offset) |
5232 | continue; |
5233 | |
5234 | /* We don't lock the tree block, it's OK to be racy here */ |
5235 | ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, |
5236 | offset: wc->level - 1, metadata: 1, refs: &refs, |
5237 | flags: &flags); |
5238 | /* We don't care about errors in readahead. */ |
5239 | if (ret < 0) |
5240 | continue; |
5241 | BUG_ON(refs == 0); |
5242 | |
5243 | if (wc->stage == DROP_REFERENCE) { |
5244 | if (refs == 1) |
5245 | goto reada; |
5246 | |
5247 | if (wc->level == 1 && |
5248 | (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) |
5249 | continue; |
5250 | if (!wc->update_ref || |
5251 | generation <= root->root_key.offset) |
5252 | continue; |
5253 | btrfs_node_key_to_cpu(eb, cpu_key: &key, nr: slot); |
5254 | ret = btrfs_comp_cpu_keys(k1: &key, |
5255 | k2: &wc->update_progress); |
5256 | if (ret < 0) |
5257 | continue; |
5258 | } else { |
5259 | if (wc->level == 1 && |
5260 | (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) |
5261 | continue; |
5262 | } |
5263 | reada: |
5264 | btrfs_readahead_node_child(node: eb, slot); |
5265 | nread++; |
5266 | } |
5267 | wc->reada_slot = slot; |
5268 | } |
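
/*
 * Window adaptation worked through (assuming reada_count starts at 12):
 * walking backwards shrinks it 12 -> 8 -> 5 -> 3 -> 2, clamped at 2;
 * walking forwards grows it 12 -> 18 -> 27 -> ..., capped at
 * BTRFS_NODEPTRS_PER_BLOCK(fs_info).
 */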
5269 | |
5270 | /* |
5271 | * helper to process tree block while walking down the tree. |
5272 | * |
5273 | * when wc->stage == UPDATE_BACKREF, this function updates |
5274 | * back refs for pointers in the block. |
5275 | * |
5276 | * NOTE: return value 1 means we should stop walking down. |
5277 | */ |
5278 | static noinline int walk_down_proc(struct btrfs_trans_handle *trans, |
5279 | struct btrfs_root *root, |
5280 | struct btrfs_path *path, |
5281 | struct walk_control *wc, int lookup_info) |
5282 | { |
5283 | struct btrfs_fs_info *fs_info = root->fs_info; |
5284 | int level = wc->level; |
5285 | struct extent_buffer *eb = path->nodes[level]; |
5286 | u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF; |
5287 | int ret; |
5288 | |
5289 | if (wc->stage == UPDATE_BACKREF && |
5290 | btrfs_header_owner(eb) != root->root_key.objectid) |
5291 | return 1; |
5292 | |
5293 | /* |
5294 | * when the reference count of a tree block is 1, it won't increase
5295 | * again. once the full backref flag is set, we never clear it.
5296 | */ |
5297 | if (lookup_info && |
5298 | ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) || |
5299 | (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) { |
5300 | BUG_ON(!path->locks[level]); |
5301 | ret = btrfs_lookup_extent_info(trans, fs_info, |
5302 | bytenr: eb->start, offset: level, metadata: 1, |
5303 | refs: &wc->refs[level], |
5304 | flags: &wc->flags[level]); |
5305 | BUG_ON(ret == -ENOMEM); |
5306 | if (ret) |
5307 | return ret; |
5308 | BUG_ON(wc->refs[level] == 0); |
5309 | } |
5310 | |
5311 | if (wc->stage == DROP_REFERENCE) { |
5312 | if (wc->refs[level] > 1) |
5313 | return 1; |
5314 | |
5315 | if (path->locks[level] && !wc->keep_locks) { |
5316 | btrfs_tree_unlock_rw(eb, rw: path->locks[level]); |
5317 | path->locks[level] = 0; |
5318 | } |
5319 | return 0; |
5320 | } |
5321 | |
5322 | /* wc->stage == UPDATE_BACKREF */ |
5323 | if (!(wc->flags[level] & flag)) { |
5324 | BUG_ON(!path->locks[level]); |
5325 | ret = btrfs_inc_ref(trans, root, buf: eb, full_backref: 1); |
5326 | BUG_ON(ret); /* -ENOMEM */ |
5327 | ret = btrfs_dec_ref(trans, root, buf: eb, full_backref: 0); |
5328 | BUG_ON(ret); /* -ENOMEM */ |
5329 | ret = btrfs_set_disk_extent_flags(trans, eb, flags: flag); |
5330 | BUG_ON(ret); /* -ENOMEM */ |
5331 | wc->flags[level] |= flag; |
5332 | } |
5333 | |
5334 | /* |
5335 | * the block is shared by multiple trees, so it's not good to |
5336 | * keep the tree lock |
5337 | */ |
5338 | if (path->locks[level] && level > 0) { |
5339 | btrfs_tree_unlock_rw(eb, rw: path->locks[level]); |
5340 | path->locks[level] = 0; |
5341 | } |
5342 | return 0; |
5343 | } |
5344 | |
5345 | /* |
5346 | * This is used to verify a ref exists for this root to deal with a bug where we |
5347 | * would have a drop_progress key that hadn't been updated properly. |
5348 | */ |
5349 | static int check_ref_exists(struct btrfs_trans_handle *trans, |
5350 | struct btrfs_root *root, u64 bytenr, u64 parent, |
5351 | int level) |
5352 | { |
5353 | struct btrfs_path *path; |
5354 | struct btrfs_extent_inline_ref *iref; |
5355 | int ret; |
5356 | |
5357 | path = btrfs_alloc_path(); |
5358 | if (!path) |
5359 | return -ENOMEM; |
5360 | |
5361 | ret = lookup_extent_backref(trans, path, ref_ret: &iref, bytenr, |
5362 | num_bytes: root->fs_info->nodesize, parent, |
5363 | root_objectid: root->root_key.objectid, owner: level, offset: 0); |
5364 | btrfs_free_path(p: path); |
5365 | if (ret == -ENOENT) |
5366 | return 0; |
5367 | if (ret < 0) |
5368 | return ret; |
5369 | return 1; |
5370 | } |
5371 | |
5372 | /* |
5373 | * helper to process tree block pointer. |
5374 | * |
5375 | * when wc->stage == DROP_REFERENCE, this function checks |
5376 | * reference count of the block pointed to. if the block |
5377 | * is shared and we need update back refs for the subtree |
5378 | * rooted at the block, this function changes wc->stage to |
5379 | * UPDATE_BACKREF. if the block is shared and there is no |
5380 | * need to update backrefs, this function drops the reference
5381 | * to the block. |
5382 | * |
5383 | * NOTE: return value 1 means we should stop walking down. |
5384 | */ |
5385 | static noinline int do_walk_down(struct btrfs_trans_handle *trans, |
5386 | struct btrfs_root *root, |
5387 | struct btrfs_path *path, |
5388 | struct walk_control *wc, int *lookup_info) |
5389 | { |
5390 | struct btrfs_fs_info *fs_info = root->fs_info; |
5391 | u64 bytenr; |
5392 | u64 generation; |
5393 | u64 parent; |
5394 | struct btrfs_tree_parent_check check = { 0 }; |
5395 | struct btrfs_key key; |
5396 | struct btrfs_ref ref = { 0 }; |
5397 | struct extent_buffer *next; |
5398 | int level = wc->level; |
5399 | int reada = 0; |
5400 | int ret = 0; |
5401 | bool need_account = false; |
5402 | |
5403 | generation = btrfs_node_ptr_generation(eb: path->nodes[level], |
5404 | nr: path->slots[level]); |
5405 | /* |
5406 | * if the lower level block was created before the snapshot |
5407 | * was created, we know there is no need to update back refs |
5408 | * for the subtree |
5409 | */ |
5410 | if (wc->stage == UPDATE_BACKREF && |
5411 | generation <= root->root_key.offset) { |
5412 | *lookup_info = 1; |
5413 | return 1; |
5414 | } |
5415 | |
5416 | bytenr = btrfs_node_blockptr(eb: path->nodes[level], nr: path->slots[level]); |
5417 | |
5418 | check.level = level - 1; |
5419 | check.transid = generation; |
5420 | check.owner_root = root->root_key.objectid; |
5421 | check.has_first_key = true; |
5422 | btrfs_node_key_to_cpu(eb: path->nodes[level], cpu_key: &check.first_key, |
5423 | nr: path->slots[level]); |
5424 | |
5425 | next = find_extent_buffer(fs_info, start: bytenr); |
5426 | if (!next) { |
5427 | next = btrfs_find_create_tree_block(fs_info, bytenr, |
5428 | owner_root: root->root_key.objectid, level: level - 1); |
5429 | if (IS_ERR(ptr: next)) |
5430 | return PTR_ERR(ptr: next); |
5431 | reada = 1; |
5432 | } |
5433 | btrfs_tree_lock(eb: next); |
5434 | |
5435 | ret = btrfs_lookup_extent_info(trans, fs_info, bytenr, offset: level - 1, metadata: 1, |
5436 | refs: &wc->refs[level - 1], |
5437 | flags: &wc->flags[level - 1]); |
5438 | if (ret < 0) |
5439 | goto out_unlock; |
5440 | |
5441 | if (unlikely(wc->refs[level - 1] == 0)) { |
5442 | btrfs_err(fs_info, "Missing references." ); |
5443 | ret = -EIO; |
5444 | goto out_unlock; |
5445 | } |
5446 | *lookup_info = 0; |
5447 | |
5448 | if (wc->stage == DROP_REFERENCE) { |
5449 | if (wc->refs[level - 1] > 1) { |
5450 | need_account = true; |
5451 | if (level == 1 && |
5452 | (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) |
5453 | goto skip; |
5454 | |
5455 | if (!wc->update_ref || |
5456 | generation <= root->root_key.offset) |
5457 | goto skip; |
5458 | |
5459 | btrfs_node_key_to_cpu(eb: path->nodes[level], cpu_key: &key, |
5460 | nr: path->slots[level]); |
5461 | ret = btrfs_comp_cpu_keys(k1: &key, k2: &wc->update_progress); |
5462 | if (ret < 0) |
5463 | goto skip; |
5464 | |
5465 | wc->stage = UPDATE_BACKREF; |
5466 | wc->shared_level = level - 1; |
5467 | } |
5468 | } else { |
5469 | if (level == 1 && |
5470 | (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF)) |
5471 | goto skip; |
5472 | } |
5473 | |
5474 | if (!btrfs_buffer_uptodate(buf: next, parent_transid: generation, atomic: 0)) { |
5475 | btrfs_tree_unlock(eb: next); |
5476 | free_extent_buffer(eb: next); |
5477 | next = NULL; |
5478 | *lookup_info = 1; |
5479 | } |
5480 | |
5481 | if (!next) { |
5482 | if (reada && level == 1) |
5483 | reada_walk_down(trans, root, wc, path); |
5484 | next = read_tree_block(fs_info, bytenr, check: &check); |
5485 | if (IS_ERR(ptr: next)) { |
5486 | return PTR_ERR(ptr: next); |
5487 | } else if (!extent_buffer_uptodate(eb: next)) { |
5488 | free_extent_buffer(eb: next); |
5489 | return -EIO; |
5490 | } |
5491 | btrfs_tree_lock(eb: next); |
5492 | } |
5493 | |
5494 | level--; |
5495 | ASSERT(level == btrfs_header_level(next)); |
5496 | if (level != btrfs_header_level(eb: next)) { |
5497 | btrfs_err(root->fs_info, "mismatched level");
5498 | ret = -EIO; |
5499 | goto out_unlock; |
5500 | } |
5501 | path->nodes[level] = next; |
5502 | path->slots[level] = 0; |
5503 | path->locks[level] = BTRFS_WRITE_LOCK; |
5504 | wc->level = level; |
5505 | if (wc->level == 1) |
5506 | wc->reada_slot = 0; |
5507 | return 0; |
5508 | skip: |
5509 | wc->refs[level - 1] = 0; |
5510 | wc->flags[level - 1] = 0; |
5511 | if (wc->stage == DROP_REFERENCE) { |
5512 | if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) { |
5513 | parent = path->nodes[level]->start; |
5514 | } else { |
5515 | ASSERT(root->root_key.objectid == |
5516 | btrfs_header_owner(path->nodes[level])); |
5517 | if (root->root_key.objectid != |
5518 | btrfs_header_owner(eb: path->nodes[level])) { |
5519 | btrfs_err(root->fs_info, |
5520 | "mismatched block owner" ); |
5521 | ret = -EIO; |
5522 | goto out_unlock; |
5523 | } |
5524 | parent = 0; |
5525 | } |
5526 | |
5527 | /* |
5528 | * If we had a drop_progress we need to verify the refs are set |
5529 | * as expected. If we find our ref then we know that from here |
5530 | * on out everything should be correct, and we can clear the |
5531 | * ->restarted flag. |
5532 | */ |
5533 | if (wc->restarted) { |
5534 | ret = check_ref_exists(trans, root, bytenr, parent, |
5535 | level: level - 1); |
5536 | if (ret < 0) |
5537 | goto out_unlock; |
5538 | if (ret == 0) |
5539 | goto no_delete; |
5540 | ret = 0; |
5541 | wc->restarted = 0; |
5542 | } |
5543 | |
5544 | /* |
5545 | * Reloc tree doesn't contribute to qgroup numbers, and we have |
5546 | * already accounted them at merge time (replace_path), |
5547 | * thus we could skip expensive subtree trace here. |
5548 | */ |
5549 | if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID && |
5550 | need_account) { |
5551 | ret = btrfs_qgroup_trace_subtree(trans, root_eb: next, |
5552 | root_gen: generation, root_level: level - 1); |
5553 | if (ret) { |
5554 | btrfs_err_rl(fs_info, |
5555 | "Error %d accounting shared subtree. Quota is out of sync, rescan required." , |
5556 | ret); |
5557 | } |
5558 | } |
5559 | |
5560 | /* |
5561 | * We need to update the next key in our walk control so we can |
5562 | * update the drop_progress key accordingly. We don't care if |
5563 | * find_next_key doesn't find a key because that means we're at |
5564 | * the end and are going to clean up now. |
5565 | */ |
5566 | wc->drop_level = level; |
5567 | find_next_key(path, level, key: &wc->drop_progress); |
5568 | |
5569 | btrfs_init_generic_ref(generic_ref: &ref, action: BTRFS_DROP_DELAYED_REF, bytenr, |
5570 | len: fs_info->nodesize, parent, |
5571 | owning_root: btrfs_header_owner(eb: next)); |
5572 | btrfs_init_tree_ref(generic_ref: &ref, level: level - 1, root: root->root_key.objectid, |
5573 | mod_root: 0, skip_qgroup: false); |
5574 | ret = btrfs_free_extent(trans, ref: &ref); |
5575 | if (ret) |
5576 | goto out_unlock; |
5577 | } |
5578 | no_delete: |
5579 | *lookup_info = 1; |
5580 | ret = 1; |
5581 | |
5582 | out_unlock: |
5583 | btrfs_tree_unlock(eb: next); |
5584 | free_extent_buffer(eb: next); |
5585 | |
5586 | return ret; |
5587 | } |
5588 | |
5589 | /* |
5590 | * helper to process tree block while walking up the tree. |
5591 | * |
5592 | * when wc->stage == DROP_REFERENCE, this function drops |
5593 | * reference count on the block. |
5594 | * |
5595 | * when wc->stage == UPDATE_BACKREF, this function changes |
5596 | * wc->stage back to DROP_REFERENCE if we changed wc->stage |
5597 | * to UPDATE_BACKREF previously while processing the block. |
5598 | * |
5599 | * NOTE: return value 1 means we should stop walking up. |
5600 | */ |
5601 | static noinline int walk_up_proc(struct btrfs_trans_handle *trans, |
5602 | struct btrfs_root *root, |
5603 | struct btrfs_path *path, |
5604 | struct walk_control *wc) |
5605 | { |
5606 | struct btrfs_fs_info *fs_info = root->fs_info; |
5607 | int ret; |
5608 | int level = wc->level; |
5609 | struct extent_buffer *eb = path->nodes[level]; |
5610 | u64 parent = 0; |
5611 | |
5612 | if (wc->stage == UPDATE_BACKREF) { |
5613 | BUG_ON(wc->shared_level < level); |
5614 | if (level < wc->shared_level) |
5615 | goto out; |
5616 | |
5617 | ret = find_next_key(path, level: level + 1, key: &wc->update_progress); |
5618 | if (ret > 0) |
5619 | wc->update_ref = 0; |
5620 | |
5621 | wc->stage = DROP_REFERENCE; |
5622 | wc->shared_level = -1; |
5623 | path->slots[level] = 0; |
5624 | |
5625 | /* |
5626 | * check reference count again if the block isn't locked. |
5627 | * we should start walking down the tree again if reference |
5628 | * count is one. |
5629 | */ |
5630 | if (!path->locks[level]) { |
5631 | BUG_ON(level == 0); |
5632 | btrfs_tree_lock(eb); |
5633 | path->locks[level] = BTRFS_WRITE_LOCK; |
5634 | |
5635 | ret = btrfs_lookup_extent_info(trans, fs_info, |
5636 | bytenr: eb->start, offset: level, metadata: 1, |
5637 | refs: &wc->refs[level], |
5638 | flags: &wc->flags[level]); |
5639 | if (ret < 0) { |
5640 | btrfs_tree_unlock_rw(eb, rw: path->locks[level]); |
5641 | path->locks[level] = 0; |
5642 | return ret; |
5643 | } |
5644 | BUG_ON(wc->refs[level] == 0); |
5645 | if (wc->refs[level] == 1) { |
5646 | btrfs_tree_unlock_rw(eb, rw: path->locks[level]); |
5647 | path->locks[level] = 0; |
5648 | return 1; |
5649 | } |
5650 | } |
5651 | } |
5652 | |
5653 | /* wc->stage == DROP_REFERENCE */ |
5654 | BUG_ON(wc->refs[level] > 1 && !path->locks[level]); |
5655 | |
5656 | if (wc->refs[level] == 1) { |
5657 | if (level == 0) { |
5658 | if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) |
5659 | ret = btrfs_dec_ref(trans, root, buf: eb, full_backref: 1); |
5660 | else |
5661 | ret = btrfs_dec_ref(trans, root, buf: eb, full_backref: 0); |
5662 | BUG_ON(ret); /* -ENOMEM */ |
5663 | if (is_fstree(rootid: root->root_key.objectid)) { |
5664 | ret = btrfs_qgroup_trace_leaf_items(trans, eb); |
5665 | if (ret) { |
5666 | btrfs_err_rl(fs_info, |
5667 | "error %d accounting leaf items, quota is out of sync, rescan required" , |
5668 | ret); |
5669 | } |
5670 | } |
5671 | } |
5672 | /* Make block locked assertion in btrfs_clear_buffer_dirty happy. */ |
5673 | if (!path->locks[level]) { |
5674 | btrfs_tree_lock(eb); |
5675 | path->locks[level] = BTRFS_WRITE_LOCK; |
5676 | } |
5677 | btrfs_clear_buffer_dirty(trans, buf: eb); |
5678 | } |
5679 | |
5680 | if (eb == root->node) { |
5681 | if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) |
5682 | parent = eb->start; |
5683 | else if (root->root_key.objectid != btrfs_header_owner(eb)) |
5684 | goto owner_mismatch; |
5685 | } else { |
5686 | if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF) |
5687 | parent = path->nodes[level + 1]->start; |
5688 | else if (root->root_key.objectid != |
5689 | btrfs_header_owner(eb: path->nodes[level + 1])) |
5690 | goto owner_mismatch; |
5691 | } |
5692 | |
5693 | btrfs_free_tree_block(trans, root_id: btrfs_root_id(root), buf: eb, parent, |
5694 | last_ref: wc->refs[level] == 1); |
5695 | out: |
5696 | wc->refs[level] = 0; |
5697 | wc->flags[level] = 0; |
5698 | return 0; |
5699 | |
5700 | owner_mismatch: |
5701 | btrfs_err_rl(fs_info, "unexpected tree owner, have %llu expect %llu",
5702 | btrfs_header_owner(eb), root->root_key.objectid); |
5703 | return -EUCLEAN; |
5704 | } |
5705 | |
5706 | static noinline int walk_down_tree(struct btrfs_trans_handle *trans, |
5707 | struct btrfs_root *root, |
5708 | struct btrfs_path *path, |
5709 | struct walk_control *wc) |
5710 | { |
5711 | int level = wc->level; |
5712 | int lookup_info = 1; |
5713 | int ret = 0; |
5714 | |
5715 | while (level >= 0) { |
5716 | ret = walk_down_proc(trans, root, path, wc, lookup_info); |
5717 | if (ret) |
5718 | break; |
5719 | |
5720 | if (level == 0) |
5721 | break; |
5722 | |
5723 | if (path->slots[level] >= |
5724 | btrfs_header_nritems(eb: path->nodes[level])) |
5725 | break; |
5726 | |
5727 | ret = do_walk_down(trans, root, path, wc, lookup_info: &lookup_info); |
5728 | if (ret > 0) { |
5729 | path->slots[level]++; |
5730 | continue; |
5731 | } else if (ret < 0) |
5732 | break; |
5733 | level = wc->level; |
5734 | } |
5735 | return (ret == 1) ? 0 : ret; |
5736 | } |
5737 | |
5738 | static noinline int walk_up_tree(struct btrfs_trans_handle *trans, |
5739 | struct btrfs_root *root, |
5740 | struct btrfs_path *path, |
5741 | struct walk_control *wc, int max_level) |
5742 | { |
5743 | int level = wc->level; |
5744 | int ret; |
5745 | |
5746 | path->slots[level] = btrfs_header_nritems(eb: path->nodes[level]); |
5747 | while (level < max_level && path->nodes[level]) { |
5748 | wc->level = level; |
5749 | if (path->slots[level] + 1 < |
5750 | btrfs_header_nritems(eb: path->nodes[level])) { |
5751 | path->slots[level]++; |
5752 | return 0; |
5753 | } else { |
5754 | ret = walk_up_proc(trans, root, path, wc); |
5755 | if (ret > 0) |
5756 | return 0; |
5757 | if (ret < 0) |
5758 | return ret; |
5759 | |
5760 | if (path->locks[level]) { |
5761 | btrfs_tree_unlock_rw(eb: path->nodes[level], |
5762 | rw: path->locks[level]); |
5763 | path->locks[level] = 0; |
5764 | } |
5765 | free_extent_buffer(eb: path->nodes[level]); |
5766 | path->nodes[level] = NULL; |
5767 | level++; |
5768 | } |
5769 | } |
5770 | return 1; |
5771 | } |
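
/*
 * The two walkers pair into the loop the callers below use (a sketch of
 * the driving pattern):
 *
 *	while (1) {
 *		ret = walk_down_tree(trans, root, path, wc);
 *		if (ret < 0)
 *			break;
 *		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
 *		if (ret)
 *			break;
 *	}
 *
 * where a negative walk_up_tree() return is an error and a positive one
 * means the whole tree has been dropped.
 */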
5772 | |
5773 | /* |
5774 | * drop a subvolume tree. |
5775 | * |
5776 | * this function traverses the tree freeing any blocks that are only
5777 | * referenced by the tree.
5778 | *
5779 | * when a shared tree block is found, this function decreases its
5780 | * reference count by one. if update_ref is true, this function
5781 | * also makes sure backrefs for the shared block and all lower level
5782 | * blocks are properly updated. |
5783 | * |
5784 | * If called with for_reloc == 0, may exit early with -EAGAIN |
5785 | */ |
5786 | int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref, int for_reloc) |
5787 | { |
5788 | const bool is_reloc_root = (root->root_key.objectid == |
5789 | BTRFS_TREE_RELOC_OBJECTID); |
5790 | struct btrfs_fs_info *fs_info = root->fs_info; |
5791 | struct btrfs_path *path; |
5792 | struct btrfs_trans_handle *trans; |
5793 | struct btrfs_root *tree_root = fs_info->tree_root; |
5794 | struct btrfs_root_item *root_item = &root->root_item; |
5795 | struct walk_control *wc; |
5796 | struct btrfs_key key; |
5797 | int err = 0; |
5798 | int ret; |
5799 | int level; |
5800 | bool root_dropped = false; |
5801 | bool unfinished_drop = false; |
5802 | |
5803 | btrfs_debug(fs_info, "Drop subvolume %llu" , root->root_key.objectid); |
5804 | |
5805 | path = btrfs_alloc_path(); |
5806 | if (!path) { |
5807 | err = -ENOMEM; |
5808 | goto out; |
5809 | } |
5810 | |
5811 | wc = kzalloc(size: sizeof(*wc), GFP_NOFS); |
5812 | if (!wc) { |
5813 | btrfs_free_path(p: path); |
5814 | err = -ENOMEM; |
5815 | goto out; |
5816 | } |
5817 | |
5818 | /* |
5819 | * Use join to avoid potential EINTR from transaction start. See |
5820 | * wait_reserve_ticket and the whole reservation callchain. |
5821 | */ |
5822 | if (for_reloc) |
5823 | trans = btrfs_join_transaction(root: tree_root); |
5824 | else |
5825 | trans = btrfs_start_transaction(root: tree_root, num_items: 0); |
5826 | if (IS_ERR(ptr: trans)) { |
5827 | err = PTR_ERR(ptr: trans); |
5828 | goto out_free; |
5829 | } |
5830 | |
5831 | err = btrfs_run_delayed_items(trans); |
5832 | if (err) |
5833 | goto out_end_trans; |
5834 | |
5835 | /* |
5836 | * This will help us catch people modifying the fs tree while we're |
5837 | * dropping it. It is unsafe to mess with the fs tree while it's being |
5838 | * dropped as we unlock the root node and parent nodes as we walk down |
5839 | * the tree, assuming nothing will change. If something does change |
5840 | * then we'll have stale information and drop references to blocks we've |
5841 | * already dropped. |
5842 | */ |
5843 | set_bit(nr: BTRFS_ROOT_DELETING, addr: &root->state); |
5844 | unfinished_drop = test_bit(BTRFS_ROOT_UNFINISHED_DROP, &root->state); |
5845 | |
5846 | if (btrfs_disk_key_objectid(s: &root_item->drop_progress) == 0) { |
5847 | level = btrfs_header_level(eb: root->node); |
5848 | path->nodes[level] = btrfs_lock_root_node(root); |
5849 | path->slots[level] = 0; |
5850 | path->locks[level] = BTRFS_WRITE_LOCK; |
5851 | memset(&wc->update_progress, 0, |
5852 | sizeof(wc->update_progress)); |
5853 | } else { |
5854 | btrfs_disk_key_to_cpu(cpu_key: &key, disk_key: &root_item->drop_progress); |
5855 | memcpy(&wc->update_progress, &key, |
5856 | sizeof(wc->update_progress)); |
5857 | |
5858 | level = btrfs_root_drop_level(s: root_item); |
5859 | BUG_ON(level == 0); |
5860 | path->lowest_level = level; |
5861 | ret = btrfs_search_slot(NULL, root, key: &key, p: path, ins_len: 0, cow: 0); |
5862 | path->lowest_level = 0; |
5863 | if (ret < 0) { |
5864 | err = ret; |
5865 | goto out_end_trans; |
5866 | } |
5867 | WARN_ON(ret > 0); |
5868 | |
5869 | /* |
5870 | * unlock our path, this is safe because only this |
5871 | * function is allowed to delete this snapshot |
5872 | */ |
5873 | btrfs_unlock_up_safe(path, level: 0); |
5874 | |
5875 | level = btrfs_header_level(eb: root->node); |
5876 | while (1) { |
5877 | btrfs_tree_lock(eb: path->nodes[level]); |
5878 | path->locks[level] = BTRFS_WRITE_LOCK; |
5879 | |
5880 | ret = btrfs_lookup_extent_info(trans, fs_info, |
5881 | bytenr: path->nodes[level]->start, |
5882 | offset: level, metadata: 1, refs: &wc->refs[level], |
5883 | flags: &wc->flags[level]); |
5884 | if (ret < 0) { |
5885 | err = ret; |
5886 | goto out_end_trans; |
5887 | } |
5888 | BUG_ON(wc->refs[level] == 0); |
5889 | |
5890 | if (level == btrfs_root_drop_level(s: root_item)) |
5891 | break; |
5892 | |
5893 | btrfs_tree_unlock(eb: path->nodes[level]); |
5894 | path->locks[level] = 0; |
5895 | WARN_ON(wc->refs[level] != 1); |
5896 | level--; |
5897 | } |
5898 | } |
5899 | |
5900 | wc->restarted = test_bit(BTRFS_ROOT_DEAD_TREE, &root->state); |
5901 | wc->level = level; |
5902 | wc->shared_level = -1; |
5903 | wc->stage = DROP_REFERENCE; |
5904 | wc->update_ref = update_ref; |
5905 | wc->keep_locks = 0; |
5906 | wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(info: fs_info); |
5907 | |
5908 | while (1) {
5910 | ret = walk_down_tree(trans, root, path, wc); |
5911 | if (ret < 0) { |
5912 | btrfs_abort_transaction(trans, ret); |
5913 | err = ret; |
5914 | break; |
5915 | } |
5916 | |
5917 | ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL); |
5918 | if (ret < 0) { |
5919 | btrfs_abort_transaction(trans, ret); |
5920 | err = ret; |
5921 | break; |
5922 | } |
5923 | |
5924 | if (ret > 0) { |
5925 | BUG_ON(wc->stage != DROP_REFERENCE); |
5926 | break; |
5927 | } |
5928 | |
5929 | if (wc->stage == DROP_REFERENCE) { |
5930 | wc->drop_level = wc->level; |
5931 | btrfs_node_key_to_cpu(eb: path->nodes[wc->drop_level], |
5932 | cpu_key: &wc->drop_progress, |
5933 | nr: path->slots[wc->drop_level]); |
5934 | } |
5935 | btrfs_cpu_key_to_disk(disk_key: &root_item->drop_progress, |
5936 | cpu_key: &wc->drop_progress); |
5937 | btrfs_set_root_drop_level(s: root_item, val: wc->drop_level); |
5938 | |
5939 | BUG_ON(wc->level == 0); |
5940 | if (btrfs_should_end_transaction(trans) || |
5941 | (!for_reloc && btrfs_need_cleaner_sleep(fs_info))) { |
5942 | ret = btrfs_update_root(trans, root: tree_root, |
5943 | key: &root->root_key, |
5944 | item: root_item); |
5945 | if (ret) { |
5946 | btrfs_abort_transaction(trans, ret); |
5947 | err = ret; |
5948 | goto out_end_trans; |
5949 | } |
5950 | |
5951 | if (!is_reloc_root) |
5952 | btrfs_set_last_root_drop_gen(fs_info, gen: trans->transid); |
5953 | |
5954 | btrfs_end_transaction_throttle(trans); |
5955 | if (!for_reloc && btrfs_need_cleaner_sleep(fs_info)) { |
5956 | btrfs_debug(fs_info, |
5957 | "drop snapshot early exit" ); |
5958 | err = -EAGAIN; |
5959 | goto out_free; |
5960 | } |
5961 | |
5962 | /* |
5963 | * Use join to avoid potential EINTR from transaction |
5964 | * start. See wait_reserve_ticket and the whole |
5965 | * reservation callchain. |
5966 | */ |
5967 | if (for_reloc) |
5968 | trans = btrfs_join_transaction(root: tree_root); |
5969 | else |
5970 | trans = btrfs_start_transaction(root: tree_root, num_items: 0); |
5971 | if (IS_ERR(ptr: trans)) { |
5972 | err = PTR_ERR(ptr: trans); |
5973 | goto out_free; |
5974 | } |
5975 | } |
5976 | } |
5977 | btrfs_release_path(p: path); |
5978 | if (err) |
5979 | goto out_end_trans; |
5980 | |
5981 | ret = btrfs_del_root(trans, key: &root->root_key); |
5982 | if (ret) { |
5983 | btrfs_abort_transaction(trans, ret); |
5984 | err = ret; |
5985 | goto out_end_trans; |
5986 | } |
5987 | |
5988 | if (!is_reloc_root) { |
5989 | ret = btrfs_find_root(root: tree_root, search_key: &root->root_key, path, |
5990 | NULL, NULL); |
5991 | if (ret < 0) { |
5992 | btrfs_abort_transaction(trans, ret); |
5993 | err = ret; |
5994 | goto out_end_trans; |
5995 | } else if (ret > 0) { |
5996 | /* if we fail to delete the orphan item this time |
5997 | * around, it'll get picked up the next time. |
5998 | * |
5999 | * The most common failure here is just -ENOENT. |
6000 | */ |
6001 | btrfs_del_orphan_item(trans, root: tree_root, |
6002 | offset: root->root_key.objectid); |
6003 | } |
6004 | } |
6005 | |
6006 | /* |
6007 | * This subvolume is going to be completely dropped, and won't be |
6008 | * recorded as dirty roots, thus pertrans meta rsv will not be freed at |
6009 | * commit transaction time. So free it here manually. |
6010 | */ |
6011 | btrfs_qgroup_convert_reserved_meta(root, INT_MAX); |
6012 | btrfs_qgroup_free_meta_all_pertrans(root); |
6013 | |
6014 | if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) |
6015 | btrfs_add_dropped_root(trans, root); |
6016 | else |
6017 | btrfs_put_root(root); |
6018 | root_dropped = true; |
6019 | out_end_trans: |
6020 | if (!is_reloc_root) |
6021 | btrfs_set_last_root_drop_gen(fs_info, gen: trans->transid); |
6022 | |
6023 | btrfs_end_transaction_throttle(trans); |
6024 | out_free: |
6025 | kfree(objp: wc); |
6026 | btrfs_free_path(p: path); |
6027 | out: |
6028 | /* |
6029 | * We were an unfinished drop root, check to see if there are any |
6030 | * pending, and if not clear and wake up any waiters. |
6031 | */ |
6032 | if (!err && unfinished_drop) |
6033 | btrfs_maybe_wake_unfinished_drop(fs_info); |
6034 | |
6035 | /* |
6036 | * So if we need to stop dropping the snapshot for whatever reason we |
6037 | * need to make sure to add it back to the dead root list so that we |
6038 | * keep trying to do the work later. This also cleans up roots if we |
6039 | * don't have it in the radix (like when we recover after a power fail |
6040 | * or unmount) so we don't leak memory. |
6041 | */ |
6042 | if (!for_reloc && !root_dropped) |
6043 | btrfs_add_dead_root(root); |
6044 | return err; |
6045 | } |
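
/*
 * Resume behaviour (sketch): when the loop above bails out with -EAGAIN,
 * or the machine goes down mid-drop, the last position persists in
 * root_item->drop_progress and root_item->drop_level; the next invocation
 * re-seeds the walk from that key via btrfs_search_slot() with
 * path->lowest_level set, as done near the top of this function.
 */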
6046 | |
6047 | /* |
6048 | * drop subtree rooted at tree block 'node'. |
6049 | * |
6050 | * NOTE: this function will unlock and release tree block 'node'.
6051 | * only used by relocation code.
6052 | */ |
6053 | int btrfs_drop_subtree(struct btrfs_trans_handle *trans, |
6054 | struct btrfs_root *root, |
6055 | struct extent_buffer *node, |
6056 | struct extent_buffer *parent) |
6057 | { |
6058 | struct btrfs_fs_info *fs_info = root->fs_info; |
6059 | struct btrfs_path *path; |
6060 | struct walk_control *wc; |
6061 | int level; |
6062 | int parent_level; |
6063 | int ret = 0; |
6064 | int wret; |
6065 | |
6066 | BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); |
6067 | |
6068 | path = btrfs_alloc_path(); |
6069 | if (!path) |
6070 | return -ENOMEM; |
6071 | |
6072 | wc = kzalloc(size: sizeof(*wc), GFP_NOFS); |
6073 | if (!wc) { |
6074 | btrfs_free_path(p: path); |
6075 | return -ENOMEM; |
6076 | } |
6077 | |
6078 | btrfs_assert_tree_write_locked(eb: parent); |
6079 | parent_level = btrfs_header_level(eb: parent); |
6080 | atomic_inc(v: &parent->refs); |
6081 | path->nodes[parent_level] = parent; |
6082 | path->slots[parent_level] = btrfs_header_nritems(eb: parent); |
6083 | |
6084 | btrfs_assert_tree_write_locked(eb: node); |
6085 | level = btrfs_header_level(eb: node); |
6086 | path->nodes[level] = node; |
6087 | path->slots[level] = 0; |
6088 | path->locks[level] = BTRFS_WRITE_LOCK; |
6089 | |
6090 | wc->refs[parent_level] = 1; |
6091 | wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF; |
6092 | wc->level = level; |
6093 | wc->shared_level = -1; |
6094 | wc->stage = DROP_REFERENCE; |
6095 | wc->update_ref = 0; |
6096 | wc->keep_locks = 1; |
6097 | wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(info: fs_info); |
6098 | |
6099 | while (1) { |
6100 | wret = walk_down_tree(trans, root, path, wc); |
6101 | if (wret < 0) { |
6102 | ret = wret; |
6103 | break; |
6104 | } |
6105 | |
6106 | wret = walk_up_tree(trans, root, path, wc, max_level: parent_level); |
6107 | if (wret < 0) |
6108 | ret = wret; |
6109 | if (wret != 0) |
6110 | break; |
6111 | } |
6112 | |
6113 | kfree(objp: wc); |
6114 | btrfs_free_path(p: path); |
6115 | return ret; |
6116 | } |
6117 | |
6118 | int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info, |
6119 | u64 start, u64 end) |
6120 | { |
6121 | return unpin_extent_range(fs_info, start, end, return_free_space: false); |
6122 | } |
6123 | |
6124 | /* |
6125 | * It used to be that old block groups would be left around forever. |
6126 | * Iterating over them would be enough to trim unused space. Since we |
6127 | * now automatically remove them, we also need to iterate over unallocated |
6128 | * space. |
6129 | * |
6130 | * We don't want a transaction for this since the discard may take a |
6131 | * substantial amount of time. We don't require that a transaction be |
6132 | * running, but we do need to take a running transaction into account |
6133 | * to ensure that we're not discarding chunks that were released or |
6134 | * allocated in the current transaction. |
6135 | * |
6136 | * Holding the chunks lock will prevent other threads from allocating |
6137 | * or releasing chunks, but it won't prevent a running transaction |
6138 | * from committing and releasing the memory that the pending chunks |
6139 | * list head uses. For that, we need to take a reference to the |
6140 | * transaction and hold the commit root sem. We only need to hold |
6141 | * it while performing the free space search since we have already |
6142 | * held back allocations. |
6143 | */ |
6144 | static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed) |
6145 | { |
6146 | u64 start = BTRFS_DEVICE_RANGE_RESERVED, len = 0, end = 0; |
6147 | int ret; |
6148 | |
6149 | *trimmed = 0; |
6150 | |
6151 | /* Discard not supported = nothing to do. */ |
6152 | if (!bdev_max_discard_sectors(bdev: device->bdev)) |
6153 | return 0; |
6154 | |
6155 | /* Not writable = nothing to do. */ |
6156 | if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) |
6157 | return 0; |
6158 | |
6159 | /* No free space = nothing to do. */ |
6160 | if (device->total_bytes <= device->bytes_used) |
6161 | return 0; |
6162 | |
6163 | ret = 0; |
6164 | |
6165 | while (1) { |
6166 | struct btrfs_fs_info *fs_info = device->fs_info; |
6167 | u64 bytes; |
6168 | |
6169 | ret = mutex_lock_interruptible(&fs_info->chunk_mutex); |
6170 | if (ret) |
6171 | break; |
6172 | |
6173 | find_first_clear_extent_bit(tree: &device->alloc_state, start, |
6174 | start_ret: &start, end_ret: &end, |
6175 | CHUNK_TRIMMED | CHUNK_ALLOCATED); |
6176 | |
6177 | /* Check if there are any CHUNK_* bits left */ |
6178 | if (start > device->total_bytes) { |
6179 | WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); |
6180 | btrfs_warn_in_rcu(fs_info, |
6181 | "ignoring attempt to trim beyond device size: offset %llu length %llu device %s device size %llu" , |
6182 | start, end - start + 1, |
6183 | btrfs_dev_name(device), |
6184 | device->total_bytes); |
6185 | mutex_unlock(lock: &fs_info->chunk_mutex); |
6186 | ret = 0; |
6187 | break; |
6188 | } |
6189 | |
6190 | /* Ensure we skip the reserved space on each device. */ |
6191 | start = max_t(u64, start, BTRFS_DEVICE_RANGE_RESERVED); |
6192 | |
6193 | /* |
6194 | * If find_first_clear_extent_bit finds a range that spans the
6195 | * end of the device it will set end to -1; in this case it's up
6196 | * to the caller to trim the value to the size of the device. |
6197 | */ |
6198 | end = min(end, device->total_bytes - 1); |
6199 | |
6200 | len = end - start + 1; |
6201 | |
6202 | /* We didn't find any extents */ |
6203 | if (!len) { |
6204 | mutex_unlock(lock: &fs_info->chunk_mutex); |
6205 | ret = 0; |
6206 | break; |
6207 | } |
6208 | |
6209 | ret = btrfs_issue_discard(bdev: device->bdev, start, len, |
6210 | discarded_bytes: &bytes); |
6211 | if (!ret) |
6212 | set_extent_bit(tree: &device->alloc_state, start, |
6213 | end: start + bytes - 1, CHUNK_TRIMMED, NULL); |
6214 | mutex_unlock(lock: &fs_info->chunk_mutex); |
6215 | |
6216 | if (ret) |
6217 | break; |
6218 | |
6219 | start += len; |
6220 | *trimmed += bytes; |
6221 | |
6222 | if (fatal_signal_pending(current)) { |
6223 | ret = -ERESTARTSYS; |
6224 | break; |
6225 | } |
6226 | |
6227 | cond_resched(); |
6228 | } |
6229 | |
6230 | return ret; |
6231 | } |
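
/*
 * Clamping worked through (assuming a 10 GiB device): when the last clear
 * range runs off the device, find_first_clear_extent_bit() reports
 * end == (u64)-1; the min() above pulls end back to total_bytes - 1, so
 * for start == 8 GiB the discard length becomes exactly 2 GiB.
 */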
6232 | |
6233 | /* |
6234 | * Trim the whole filesystem by: |
6235 | * 1) trimming the free space in each block group |
6236 | * 2) trimming the unallocated space on each device |
6237 | * |
6238 | * This will also continue trimming even if a block group or device encounters |
6239 | * an error. The return value will be the last error, or 0 if nothing bad |
6240 | * happens. |
6241 | */ |
6242 | int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range) |
6243 | { |
6244 | struct btrfs_fs_devices *fs_devices = fs_info->fs_devices; |
6245 | struct btrfs_block_group *cache = NULL; |
6246 | struct btrfs_device *device; |
6247 | u64 group_trimmed; |
6248 | u64 range_end = U64_MAX; |
6249 | u64 start; |
6250 | u64 end; |
6251 | u64 trimmed = 0; |
6252 | u64 bg_failed = 0; |
6253 | u64 dev_failed = 0; |
6254 | int bg_ret = 0; |
6255 | int dev_ret = 0; |
6256 | int ret = 0; |
6257 | |
6258 | if (range->start == U64_MAX) |
6259 | return -EINVAL; |
6260 | |
6261 | /* |
6262 | * Check range overflow if range->len is set. |
6263 | * The default range->len is U64_MAX. |
6264 | */ |
6265 | if (range->len != U64_MAX && |
6266 | check_add_overflow(range->start, range->len, &range_end)) |
6267 | return -EINVAL; |
6268 | |
6269 | cache = btrfs_lookup_first_block_group(info: fs_info, bytenr: range->start); |
6270 | for (; cache; cache = btrfs_next_block_group(cache)) { |
6271 | if (cache->start >= range_end) { |
6272 | btrfs_put_block_group(cache); |
6273 | break; |
6274 | } |
6275 | |
6276 | start = max(range->start, cache->start); |
6277 | end = min(range_end, cache->start + cache->length); |
6278 | |
6279 | if (end - start >= range->minlen) { |
6280 | if (!btrfs_block_group_done(cache)) { |
6281 | ret = btrfs_cache_block_group(cache, wait: true); |
6282 | if (ret) { |
6283 | bg_failed++; |
6284 | bg_ret = ret; |
6285 | continue; |
6286 | } |
6287 | } |
6288 | ret = btrfs_trim_block_group(block_group: cache, |
6289 | trimmed: &group_trimmed, |
6290 | start, |
6291 | end, |
6292 | minlen: range->minlen); |
6293 | |
6294 | trimmed += group_trimmed; |
6295 | if (ret) { |
6296 | bg_failed++; |
6297 | bg_ret = ret; |
6298 | continue; |
6299 | } |
6300 | } |
6301 | } |
6302 | |
6303 | if (bg_failed) |
6304 | btrfs_warn(fs_info, |
6305 | "failed to trim %llu block group(s), last error %d" , |
6306 | bg_failed, bg_ret); |
6307 | |
6308 | mutex_lock(&fs_devices->device_list_mutex); |
6309 | list_for_each_entry(device, &fs_devices->devices, dev_list) { |
6310 | if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) |
6311 | continue; |
6312 | |
6313 | ret = btrfs_trim_free_extents(device, trimmed: &group_trimmed); |
6314 | if (ret) { |
6315 | dev_failed++; |
6316 | dev_ret = ret; |
6317 | break; |
6318 | } |
6319 | |
6320 | trimmed += group_trimmed; |
6321 | } |
6322 | mutex_unlock(lock: &fs_devices->device_list_mutex); |
6323 | |
6324 | if (dev_failed) |
6325 | btrfs_warn(fs_info, |
6326 | "failed to trim %llu device(s), last error %d" , |
6327 | dev_failed, dev_ret); |
6328 | range->len = trimmed; |
6329 | if (bg_ret) |
6330 | return bg_ret; |
6331 | return dev_ret; |
6332 | } |
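
/*
 * Illustrative caller (a sketch of how an fstrim-style user is assumed to
 * drive this; the real ioctl handler lives elsewhere):
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = U64_MAX,
 *		.minlen = SZ_64K,	(hypothetical minimum length)
 *	};
 *	int ret = btrfs_trim_fs(fs_info, &range);
 *
 * On return, range.len has been rewritten to the total bytes trimmed.
 */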
6333 | |