// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include <linux/sched/mm.h>
#include "messages.h"
#include "misc.h"
#include "ctree.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "extent_io.h"
#include "disk-io.h"
#include "compression.h"
#include "delalloc-space.h"
#include "qgroup.h"
#include "subpage.h"
#include "file.h"
#include "super.h"

static struct kmem_cache *btrfs_ordered_extent_cache;

static u64 entry_end(struct btrfs_ordered_extent *entry)
{
	if (entry->file_offset + entry->num_bytes < entry->file_offset)
		return (u64)-1;
	return entry->file_offset + entry->num_bytes;
}

/*
 * Returns NULL if the insertion worked, or the node it collided with in
 * the tree otherwise.
 */
static struct rb_node *tree_insert(struct rb_root *root, u64 file_offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_ordered_extent *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset)
			p = &(*p)->rb_left;
		else if (file_offset >= entry_end(entry))
			p = &(*p)->rb_right;
		else
			return parent;
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * Look for a given offset in the tree; if it can't be found, return the
 * entry at the first lesser offset.
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *test;
	struct btrfs_ordered_extent *entry;
	struct btrfs_ordered_extent *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct btrfs_ordered_extent, rb_node);
		prev = n;
		prev_entry = entry;

		if (file_offset < entry->file_offset)
			n = n->rb_left;
		else if (file_offset >= entry_end(entry))
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;

	while (prev && file_offset >= entry_end(prev_entry)) {
		test = rb_next(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		if (file_offset < entry_end(prev_entry))
			break;

		prev = test;
	}
	if (prev)
		prev_entry = rb_entry(prev, struct btrfs_ordered_extent,
				      rb_node);
	while (prev && file_offset < entry_end(prev_entry)) {
		test = rb_prev(prev);
		if (!test)
			break;
		prev_entry = rb_entry(test, struct btrfs_ordered_extent,
				      rb_node);
		prev = test;
	}
	*prev_ret = prev;
	return NULL;
}

static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
			  u64 len)
{
	if (file_offset + len <= entry->file_offset ||
	    entry->file_offset + entry->num_bytes <= file_offset)
		return 0;
	return 1;
}
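
/*
 * Worked example (illustrative, not part of the original file): both
 * ranges are treated as half-open, so byte-adjacent ranges do not
 * overlap. With an entry covering [0, 4096):
 *
 *	range_overlaps(entry, 4096, 4096);	// 0: [4096, 8192) only touches
 *	range_overlaps(entry, 4095, 2);		// 1: [4095, 4097) crosses into it
 */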

/*
 * Look for the first ordered struct that has this offset, otherwise
 * the first one less than this offset.
 */
static inline struct rb_node *ordered_tree_search(struct btrfs_inode *inode,
						  u64 file_offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;
	struct btrfs_ordered_extent *entry;

	if (inode->ordered_tree_last) {
		entry = rb_entry(inode->ordered_tree_last, struct btrfs_ordered_extent,
				 rb_node);
		if (in_range(file_offset, entry->file_offset, entry->num_bytes))
			return inode->ordered_tree_last;
	}
	ret = __tree_search(&inode->ordered_tree, file_offset, &prev);
	if (!ret)
		ret = prev;
	if (ret)
		inode->ordered_tree_last = ret;
	return ret;
}

static struct btrfs_ordered_extent *alloc_ordered_extent(
			struct btrfs_inode *inode, u64 file_offset, u64 num_bytes,
			u64 ram_bytes, u64 disk_bytenr, u64 disk_num_bytes,
			u64 offset, unsigned long flags, int compress_type)
{
	struct btrfs_ordered_extent *entry;
	int ret;

	if (flags &
	    ((1 << BTRFS_ORDERED_NOCOW) | (1 << BTRFS_ORDERED_PREALLOC))) {
		/* For nocow write, we can release the qgroup rsv right now */
		ret = btrfs_qgroup_free_data(inode, NULL, file_offset, num_bytes);
		if (ret < 0)
			return ERR_PTR(ret);
	} else {
		/*
		 * The ordered extent has reserved qgroup space, release now
		 * and pass the reserved number for qgroup_record to free.
		 */
		ret = btrfs_qgroup_release_data(inode, file_offset, num_bytes);
		if (ret < 0)
			return ERR_PTR(ret);
	}
	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
	if (!entry)
		return ERR_PTR(-ENOMEM);

	entry->file_offset = file_offset;
	entry->num_bytes = num_bytes;
	entry->ram_bytes = ram_bytes;
	entry->disk_bytenr = disk_bytenr;
	entry->disk_num_bytes = disk_num_bytes;
	entry->offset = offset;
	entry->bytes_left = num_bytes;
	entry->inode = igrab(&inode->vfs_inode);
	entry->compress_type = compress_type;
	entry->truncated_len = (u64)-1;
	entry->qgroup_rsv = ret;
	entry->flags = flags;
	refcount_set(&entry->refs, 1);
	init_waitqueue_head(&entry->wait);
	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->log_list);
	INIT_LIST_HEAD(&entry->root_extent_list);
	INIT_LIST_HEAD(&entry->work_list);
	INIT_LIST_HEAD(&entry->bioc_list);
	init_completion(&entry->completion);

	/*
	 * We don't need the count_max_extents here, we can assume that all of
	 * that work has been done at higher layers, so this is truly the
	 * smallest the extent is going to get.
	 */
	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);

	return entry;
}

static void insert_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct btrfs_inode *inode = BTRFS_I(entry->inode);
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;

	trace_btrfs_ordered_extent_add(inode, entry);

	percpu_counter_add_batch(&fs_info->ordered_bytes, entry->num_bytes,
				 fs_info->delalloc_batch);

	/* One ref for the tree. */
	refcount_inc(&entry->refs);

	spin_lock_irq(&inode->ordered_tree_lock);
	node = tree_insert(&inode->ordered_tree, entry->file_offset,
			   &entry->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			    "inconsistency in ordered tree at offset %llu",
			    entry->file_offset);
	spin_unlock_irq(&inode->ordered_tree_lock);

	spin_lock(&root->ordered_extent_lock);
	list_add_tail(&entry->root_extent_list,
		      &root->ordered_extents);
	root->nr_ordered_extents++;
	if (root->nr_ordered_extents == 1) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(!list_empty(&root->ordered_root));
		list_add_tail(&root->ordered_root, &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
}

/*
 * Add an ordered extent to the per-inode tree.
 *
 * @inode:           Inode that this extent is for.
 * @file_offset:     Logical offset in file where the extent starts.
 * @num_bytes:       Logical length of extent in file.
 * @ram_bytes:       Full length of unencoded data.
 * @disk_bytenr:     Offset of extent on disk.
 * @disk_num_bytes:  Size of extent on disk.
 * @offset:          Offset into unencoded data where file data starts.
 * @flags:           Flags specifying type of extent (1 << BTRFS_ORDERED_*).
 * @compress_type:   Compression algorithm used for data.
 *
 * Most of these parameters correspond to &struct btrfs_file_extent_item. The
 * tree is given a single reference on the ordered extent that was inserted, and
 * the returned pointer is given a second reference.
 *
 * Return: the new ordered extent or error pointer.
 */
struct btrfs_ordered_extent *btrfs_alloc_ordered_extent(
			struct btrfs_inode *inode, u64 file_offset,
			u64 num_bytes, u64 ram_bytes, u64 disk_bytenr,
			u64 disk_num_bytes, u64 offset, unsigned long flags,
			int compress_type)
{
	struct btrfs_ordered_extent *entry;

	ASSERT((flags & ~BTRFS_ORDERED_TYPE_FLAGS) == 0);

	entry = alloc_ordered_extent(inode, file_offset, num_bytes, ram_bytes,
				     disk_bytenr, disk_num_bytes, offset, flags,
				     compress_type);
	if (!IS_ERR(entry))
		insert_ordered_extent(entry);
	return entry;
}
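
/*
 * Usage sketch (illustrative, not part of the original file): a COW
 * writeback path might create an ordered extent for a freshly reserved
 * disk extent. The @ins values are assumptions for the example:
 *
 *	ordered = btrfs_alloc_ordered_extent(inode, start, len, len,
 *					     ins.objectid, ins.offset, 0,
 *					     (1 << BTRFS_ORDERED_REGULAR),
 *					     BTRFS_COMPRESS_NONE);
 *	if (IS_ERR(ordered))
 *		return PTR_ERR(ordered);
 *	...
 *	btrfs_put_ordered_extent(ordered);	// drop the returned reference
 */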

/*
 * Add a struct btrfs_ordered_sum into the list of checksums to be inserted
 * when an ordered extent is finished. If the list covers more than one
 * ordered extent, it is split across multiples.
 */
void btrfs_add_ordered_sum(struct btrfs_ordered_extent *entry,
			   struct btrfs_ordered_sum *sum)
{
	struct btrfs_inode *inode = BTRFS_I(entry->inode);

	spin_lock_irq(&inode->ordered_tree_lock);
	list_add_tail(&sum->list, &entry->list);
	spin_unlock_irq(&inode->ordered_tree_lock);
}

static void finish_ordered_fn(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered_extent;

	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
	btrfs_finish_ordered_io(ordered_extent);
}

static bool can_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
				      struct page *page, u64 file_offset,
				      u64 len, bool uptodate)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	lockdep_assert_held(&inode->ordered_tree_lock);

	if (page) {
		ASSERT(page->mapping);
		ASSERT(page_offset(page) <= file_offset);
		ASSERT(file_offset + len <= page_offset(page) + PAGE_SIZE);

		/*
		 * Ordered (Private2) bit indicates whether we still have
		 * pending io unfinished for the ordered extent.
		 *
		 * If there's no such bit, we need to skip to next range.
		 */
		if (!btrfs_page_test_ordered(fs_info, page, file_offset, len))
			return false;
		btrfs_page_clear_ordered(fs_info, page, file_offset, len);
	}

	/* Now we're fine to update the accounting. */
	if (WARN_ON_ONCE(len > ordered->bytes_left)) {
		btrfs_crit(fs_info,
"bad ordered extent accounting, root=%llu ino=%llu OE offset=%llu OE len=%llu to_dec=%llu left=%llu",
			   inode->root->root_key.objectid, btrfs_ino(inode),
			   ordered->file_offset, ordered->num_bytes,
			   len, ordered->bytes_left);
		ordered->bytes_left = 0;
	} else {
		ordered->bytes_left -= len;
	}

	if (!uptodate)
		set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);

	if (ordered->bytes_left)
		return false;

	/*
	 * All the IO of the ordered extent is finished, we need to queue
	 * the finish_func to be executed.
	 */
	set_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags);
	cond_wake_up(&ordered->wait);
	refcount_inc(&ordered->refs);
	trace_btrfs_ordered_extent_mark_finished(inode, ordered);
	return true;
}

static void btrfs_queue_ordered_fn(struct btrfs_ordered_extent *ordered)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_workqueue *wq = btrfs_is_free_space_inode(inode) ?
		fs_info->endio_freespace_worker : fs_info->endio_write_workers;

	btrfs_init_work(&ordered->work, finish_ordered_fn, NULL);
	btrfs_queue_work(wq, &ordered->work);
}

bool btrfs_finish_ordered_extent(struct btrfs_ordered_extent *ordered,
				 struct page *page, u64 file_offset, u64 len,
				 bool uptodate)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	unsigned long flags;
	bool ret;

	trace_btrfs_finish_ordered_extent(inode, file_offset, len, uptodate);

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	ret = can_finish_ordered_extent(ordered, page, file_offset, len, uptodate);
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);

	if (ret)
		btrfs_queue_ordered_fn(ordered);
	return ret;
}
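
/*
 * Usage sketch (illustrative, not part of the original file): a write
 * bio completion handler would call this once per range the bio covered.
 * The bio/ordered variables here are assumptions for the example:
 *
 *	btrfs_finish_ordered_extent(ordered, page, start, len,
 *				    !bio->bi_status);
 */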

/*
 * Mark all ordered extents io inside the specified range finished.
 *
 * @page:	 The involved page for the operation.
 *		 For uncompressed buffered IO, the page status also needs to be
 *		 updated to indicate whether the pending ordered io is finished.
 *		 Can be NULL for direct IO and compressed write.
 *		 For these cases, callers are ensured they won't execute the
 *		 endio function twice.
 *
 * This function is called for endio, thus the range must have ordered
 * extent(s) covering it.
 */
void btrfs_mark_ordered_io_finished(struct btrfs_inode *inode,
				    struct page *page, u64 file_offset,
				    u64 num_bytes, bool uptodate)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	u64 cur = file_offset;

	trace_btrfs_writepage_end_io_hook(inode, file_offset,
					  file_offset + num_bytes - 1,
					  uptodate);

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	while (cur < file_offset + num_bytes) {
		u64 entry_end;
		u64 end;
		u32 len;

		node = ordered_tree_search(inode, cur);
		/* No ordered extents at all */
		if (!node)
			break;

		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
		entry_end = entry->file_offset + entry->num_bytes;
		/*
		 * |<-- OE --->|  |
		 *		  cur
		 * Go to next OE.
		 */
		if (cur >= entry_end) {
			node = rb_next(node);
			/* No more ordered extents, exit */
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_ordered_extent,
					 rb_node);

			/* Go to next ordered extent and continue */
			cur = entry->file_offset;
			continue;
		}
		/*
		 * |	|<--- OE --->|
		 * cur
		 * Go to the start of OE.
		 */
		if (cur < entry->file_offset) {
			cur = entry->file_offset;
			continue;
		}

		/*
		 * Now we are definitely inside one ordered extent.
		 *
		 * |<--- OE --->|
		 *	|
		 *	cur
		 */
		end = min(entry->file_offset + entry->num_bytes,
			  file_offset + num_bytes) - 1;
		ASSERT(end + 1 - cur < U32_MAX);
		len = end + 1 - cur;

		if (can_finish_ordered_extent(entry, page, cur, len, uptodate)) {
			spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
			btrfs_queue_ordered_fn(entry);
			spin_lock_irqsave(&inode->ordered_tree_lock, flags);
		}
		cur += len;
	}
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
}
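
/*
 * Usage sketch (illustrative, not part of the original file): a buffered
 * writeback endio hook finishing one page worth of data, which may span
 * several small ordered extents:
 *
 *	btrfs_mark_ordered_io_finished(inode, page, page_offset(page),
 *				       PAGE_SIZE, !err);
 */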

/*
 * Finish IO for one ordered extent across a given range. The range can only
 * contain one ordered extent.
 *
 * @cached:	 The cached ordered extent. If not NULL, we can skip the tree
 *		 search and use the ordered extent directly.
 *		 Will also be used to store the finished ordered extent.
 * @file_offset: File offset for the finished IO
 * @io_size:	 Length of the finished IO range
 *
 * Return true if the ordered extent is finished in the range, and update
 * @cached.
 * Return false otherwise.
 *
 * NOTE: The range can NOT cross multiple ordered extents.
 * Thus the caller should ensure the range doesn't cross ordered extents.
 */
bool btrfs_dec_test_ordered_pending(struct btrfs_inode *inode,
				    struct btrfs_ordered_extent **cached,
				    u64 file_offset, u64 io_size)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;
	bool finished = false;

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	if (cached && *cached) {
		entry = *cached;
		goto have_entry;
	}

	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
have_entry:
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		goto out;

	if (io_size > entry->bytes_left)
		btrfs_crit(inode->root->fs_info,
			   "bad ordered accounting left %llu size %llu",
			   entry->bytes_left, io_size);

	entry->bytes_left -= io_size;

	if (entry->bytes_left == 0) {
		/*
		 * Ensure only one caller can set the flag and finished_ret
		 * accordingly
		 */
		finished = !test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
		/* test_and_set_bit implies a barrier */
		cond_wake_up_nomb(&entry->wait);
	}
out:
	if (finished && cached && entry) {
		*cached = entry;
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_dec_test_pending(inode, entry);
	}
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
	return finished;
}
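
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * tracking one ordered extent across several IO completions, assuming
 * each completed range stays within a single ordered extent:
 *
 *	struct btrfs_ordered_extent *cached = NULL;
 *
 *	if (btrfs_dec_test_ordered_pending(inode, &cached, offset, bytes)) {
 *		btrfs_finish_ordered_io(cached);	// all IO is done
 *		btrfs_put_ordered_extent(cached);	// drop @cached's ref
 *	}
 */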

/*
 * Used to drop a reference on an ordered extent. This will free
 * the extent if the last reference is dropped.
 */
void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
{
	struct list_head *cur;
	struct btrfs_ordered_sum *sum;

	trace_btrfs_ordered_extent_put(BTRFS_I(entry->inode), entry);

	if (refcount_dec_and_test(&entry->refs)) {
		ASSERT(list_empty(&entry->root_extent_list));
		ASSERT(list_empty(&entry->log_list));
		ASSERT(RB_EMPTY_NODE(&entry->rb_node));
		if (entry->inode)
			btrfs_add_delayed_iput(BTRFS_I(entry->inode));
		while (!list_empty(&entry->list)) {
			cur = entry->list.next;
			sum = list_entry(cur, struct btrfs_ordered_sum, list);
			list_del(&sum->list);
			kvfree(sum);
		}
		kmem_cache_free(btrfs_ordered_extent_cache, entry);
	}
}

/*
 * Remove an ordered extent from the tree. No references are dropped
 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
				 struct btrfs_ordered_extent *entry)
{
	struct btrfs_root *root = btrfs_inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	bool pending;
	bool freespace_inode;

	/*
	 * If this is a free space inode the thread has not acquired the ordered
	 * extents lockdep map.
	 */
	freespace_inode = btrfs_is_free_space_inode(btrfs_inode);

	btrfs_lockdep_acquire(fs_info, btrfs_trans_pending_ordered);
	/* This is paired with btrfs_alloc_ordered_extent. */
	spin_lock(&btrfs_inode->lock);
	btrfs_mod_outstanding_extents(btrfs_inode, -1);
	spin_unlock(&btrfs_inode->lock);
	if (root != fs_info->tree_root) {
		u64 release;

		if (test_bit(BTRFS_ORDERED_ENCODED, &entry->flags))
			release = entry->disk_num_bytes;
		else
			release = entry->num_bytes;
		btrfs_delalloc_release_metadata(btrfs_inode, release, false);
	}

	percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
				 fs_info->delalloc_batch);

	spin_lock_irq(&btrfs_inode->ordered_tree_lock);
	node = &entry->rb_node;
	rb_erase(node, &btrfs_inode->ordered_tree);
	RB_CLEAR_NODE(node);
	if (btrfs_inode->ordered_tree_last == node)
		btrfs_inode->ordered_tree_last = NULL;
	set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
	pending = test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags);
	spin_unlock_irq(&btrfs_inode->ordered_tree_lock);

	/*
	 * The current running transaction is waiting on us, we need to let it
	 * know that we're complete and wake it up.
	 */
	if (pending) {
		struct btrfs_transaction *trans;

		/*
		 * The checks for trans are just a formality, it should be set,
		 * but if it isn't we don't want to deref/assert under the spin
		 * lock, so be nice and check if trans is set, but ASSERT() so
		 * if it isn't set a developer will notice.
		 */
		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			refcount_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ASSERT(trans || BTRFS_FS_ERROR(fs_info));
		if (trans) {
			if (atomic_dec_and_test(&trans->pending_ordered))
				wake_up(&trans->pending_wait);
			btrfs_put_transaction(trans);
		}
	}

	btrfs_lockdep_release(fs_info, btrfs_trans_pending_ordered);

	spin_lock(&root->ordered_extent_lock);
	list_del_init(&entry->root_extent_list);
	root->nr_ordered_extents--;

	trace_btrfs_ordered_extent_remove(btrfs_inode, entry);

	if (!root->nr_ordered_extents) {
		spin_lock(&fs_info->ordered_root_lock);
		BUG_ON(list_empty(&root->ordered_root));
		list_del_init(&root->ordered_root);
		spin_unlock(&fs_info->ordered_root_lock);
	}
	spin_unlock(&root->ordered_extent_lock);
	wake_up(&entry->wait);
	if (!freespace_inode)
		btrfs_lockdep_release(fs_info, btrfs_ordered_extent);
}

static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered;

	ordered = container_of(work, struct btrfs_ordered_extent, flush_work);
	btrfs_start_ordered_extent(ordered);
	complete(&ordered->completion);
}

/*
 * Wait for all the ordered extents in a root. This is done when balancing
 * space between drives.
 */
u64 btrfs_wait_ordered_extents(struct btrfs_root *root, u64 nr,
			       const u64 range_start, const u64 range_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	LIST_HEAD(splice);
	LIST_HEAD(skipped);
	LIST_HEAD(works);
	struct btrfs_ordered_extent *ordered, *next;
	u64 count = 0;
	const u64 range_end = range_start + range_len;

	mutex_lock(&root->ordered_extent_mutex);
	spin_lock(&root->ordered_extent_lock);
	list_splice_init(&root->ordered_extents, &splice);
	while (!list_empty(&splice) && nr) {
		ordered = list_first_entry(&splice, struct btrfs_ordered_extent,
					   root_extent_list);

		if (range_end <= ordered->disk_bytenr ||
		    ordered->disk_bytenr + ordered->disk_num_bytes <= range_start) {
			list_move_tail(&ordered->root_extent_list, &skipped);
			cond_resched_lock(&root->ordered_extent_lock);
			continue;
		}

		list_move_tail(&ordered->root_extent_list,
			       &root->ordered_extents);
		refcount_inc(&ordered->refs);
		spin_unlock(&root->ordered_extent_lock);

		btrfs_init_work(&ordered->flush_work,
				btrfs_run_ordered_extent_work, NULL);
		list_add_tail(&ordered->work_list, &works);
		btrfs_queue_work(fs_info->flush_workers, &ordered->flush_work);

		cond_resched();
		spin_lock(&root->ordered_extent_lock);
		if (nr != U64_MAX)
			nr--;
		count++;
	}
	list_splice_tail(&skipped, &root->ordered_extents);
	list_splice_tail(&splice, &root->ordered_extents);
	spin_unlock(&root->ordered_extent_lock);

	list_for_each_entry_safe(ordered, next, &works, work_list) {
		list_del_init(&ordered->work_list);
		wait_for_completion(&ordered->completion);
		btrfs_put_ordered_extent(ordered);
		cond_resched();
	}
	mutex_unlock(&root->ordered_extent_mutex);

	return count;
}

void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, u64 nr,
			      const u64 range_start, const u64 range_len)
{
	struct btrfs_root *root;
	LIST_HEAD(splice);
	u64 done;

	mutex_lock(&fs_info->ordered_operations_mutex);
	spin_lock(&fs_info->ordered_root_lock);
	list_splice_init(&fs_info->ordered_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					ordered_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->ordered_root,
			       &fs_info->ordered_roots);
		spin_unlock(&fs_info->ordered_root_lock);

		done = btrfs_wait_ordered_extents(root, nr,
						  range_start, range_len);
		btrfs_put_root(root);

		spin_lock(&fs_info->ordered_root_lock);
		if (nr != U64_MAX)
			nr -= done;
	}
	list_splice_tail(&splice, &fs_info->ordered_roots);
	spin_unlock(&fs_info->ordered_root_lock);
	mutex_unlock(&fs_info->ordered_operations_mutex);
}

/*
 * Start IO and wait for a given ordered extent to finish.
 *
 * Wait on page writeback for all the pages in the extent and the IO completion
 * code to insert metadata into the btree corresponding to the extent.
 */
void btrfs_start_ordered_extent(struct btrfs_ordered_extent *entry)
{
	u64 start = entry->file_offset;
	u64 end = start + entry->num_bytes - 1;
	struct btrfs_inode *inode = BTRFS_I(entry->inode);
	bool freespace_inode;

	trace_btrfs_ordered_extent_start(inode, entry);

	/*
	 * If this is a free space inode do not take the ordered extents lockdep
	 * map.
	 */
	freespace_inode = btrfs_is_free_space_inode(inode);

	/*
	 * Pages in the range can be dirty, clean or writeback. We start IO
	 * on any dirty ones so the wait doesn't stall waiting for the
	 * flusher thread to find them.
	 */
	if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
		filemap_fdatawrite_range(inode->vfs_inode.i_mapping, start, end);

	if (!freespace_inode)
		btrfs_might_wait_for_event(inode->root->fs_info, btrfs_ordered_extent);
	wait_event(entry->wait, test_bit(BTRFS_ORDERED_COMPLETE, &entry->flags));
}
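
/*
 * Usage sketch (illustrative, not part of the original file): flushing
 * and waiting out a single ordered extent found at some offset:
 *
 *	ordered = btrfs_lookup_ordered_extent(inode, offset);
 *	if (ordered) {
 *		btrfs_start_ordered_extent(ordered);
 *		btrfs_put_ordered_extent(ordered);
 *	}
 */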

/*
 * Used to wait on ordered extents across a large range of bytes.
 */
int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
{
	int ret = 0;
	int ret_wb = 0;
	u64 end;
	u64 orig_end;
	struct btrfs_ordered_extent *ordered;

	if (start + len < start) {
		orig_end = OFFSET_MAX;
	} else {
		orig_end = start + len - 1;
		if (orig_end > OFFSET_MAX)
			orig_end = OFFSET_MAX;
	}

	/*
	 * Start IO across the range first to instantiate any delalloc
	 * extents.
	 */
	ret = btrfs_fdatawrite_range(inode, start, orig_end);
	if (ret)
		return ret;

	/*
	 * If we have a writeback error don't return immediately. Wait first
	 * for any ordered extents that haven't completed yet. This is to make
	 * sure no one can dirty the same page ranges and call writepages()
	 * before the ordered extents complete - to avoid failures (-EEXIST)
	 * when adding the new ordered extents to the ordered tree.
	 */
	ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);

	end = orig_end;
	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(BTRFS_I(inode), end);
		if (!ordered)
			break;
		if (ordered->file_offset > orig_end) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered->file_offset + ordered->num_bytes <= start) {
			btrfs_put_ordered_extent(ordered);
			break;
		}
		btrfs_start_ordered_extent(ordered);
		end = ordered->file_offset;
		/*
		 * If the ordered extent had an error save the error but don't
		 * exit without waiting first for all other ordered extents in
		 * the range to complete.
		 */
		if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
			ret = -EIO;
		btrfs_put_ordered_extent(ordered);
		if (end == 0 || end == start)
			break;
		end--;
	}
	return ret_wb ? ret_wb : ret;
}
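
/*
 * Usage sketch (illustrative, not part of the original file): an
 * fsync-like path flushing and waiting on a byte range before touching
 * the corresponding metadata:
 *
 *	ret = btrfs_wait_ordered_range(&inode->vfs_inode, start,
 *				       end - start + 1);
 *	if (ret)
 *		return ret;	// writeback or ordered extent IO error
 */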

/*
 * Find an ordered extent corresponding to @file_offset. Return NULL if
 * nothing is found, otherwise take a reference on the extent and return it.
 */
struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct btrfs_inode *inode,
							 u64 file_offset)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(&inode->ordered_tree_lock, flags);
	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	if (!in_range(file_offset, entry->file_offset, entry->num_bytes))
		entry = NULL;
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup(inode, entry);
	}
out:
	spin_unlock_irqrestore(&inode->ordered_tree_lock, flags);
	return entry;
}

893 | |
894 | /* Since the DIO code tries to lock a wide area we need to look for any ordered |
895 | * extents that exist in the range, rather than just the start of the range. |
896 | */ |
897 | struct btrfs_ordered_extent *btrfs_lookup_ordered_range( |
898 | struct btrfs_inode *inode, u64 file_offset, u64 len) |
899 | { |
900 | struct rb_node *node; |
901 | struct btrfs_ordered_extent *entry = NULL; |
902 | |
903 | spin_lock_irq(lock: &inode->ordered_tree_lock); |
904 | node = ordered_tree_search(inode, file_offset); |
905 | if (!node) { |
906 | node = ordered_tree_search(inode, file_offset: file_offset + len); |
907 | if (!node) |
908 | goto out; |
909 | } |
910 | |
911 | while (1) { |
912 | entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); |
913 | if (range_overlaps(entry, file_offset, len)) |
914 | break; |
915 | |
916 | if (entry->file_offset >= file_offset + len) { |
917 | entry = NULL; |
918 | break; |
919 | } |
920 | entry = NULL; |
921 | node = rb_next(node); |
922 | if (!node) |
923 | break; |
924 | } |
925 | out: |
926 | if (entry) { |
927 | refcount_inc(r: &entry->refs); |
928 | trace_btrfs_ordered_extent_lookup_range(inode, ordered: entry); |
929 | } |
930 | spin_unlock_irq(lock: &inode->ordered_tree_lock); |
931 | return entry; |
932 | } |
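
/*
 * Usage sketch (illustrative, not part of the original file): a nowait
 * style caller probing whether any ordered extent intersects the range
 * it is about to touch:
 *
 *	ordered = btrfs_lookup_ordered_range(inode, pos, count);
 *	if (ordered) {
 *		btrfs_put_ordered_extent(ordered);
 *		return -EAGAIN;		// busy, fall back to a blocking path
 *	}
 */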

/*
 * Adds all ordered extents to the given list. The list ends up sorted by the
 * file_offset of the ordered extents.
 */
void btrfs_get_ordered_extents_for_logging(struct btrfs_inode *inode,
					   struct list_head *list)
{
	struct rb_node *n;

	ASSERT(inode_is_locked(&inode->vfs_inode));

	spin_lock_irq(&inode->ordered_tree_lock);
	for (n = rb_first(&inode->ordered_tree); n; n = rb_next(n)) {
		struct btrfs_ordered_extent *ordered;

		ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);

		if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
			continue;

		ASSERT(list_empty(&ordered->log_list));
		list_add_tail(&ordered->log_list, list);
		refcount_inc(&ordered->refs);
		trace_btrfs_ordered_extent_lookup_for_logging(inode, ordered);
	}
	spin_unlock_irq(&inode->ordered_tree_lock);
}

/*
 * Lookup and return any extent before @file_offset. NULL is returned if
 * none is found.
 */
struct btrfs_ordered_extent *
btrfs_lookup_first_ordered_extent(struct btrfs_inode *inode, u64 file_offset)
{
	struct rb_node *node;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = ordered_tree_search(inode, file_offset);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
	refcount_inc(&entry->refs);
	trace_btrfs_ordered_extent_lookup_first(inode, entry);
out:
	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}

/*
 * Lookup the first ordered extent that overlaps the range
 * [@file_offset, @file_offset + @len).
 *
 * The difference between this and btrfs_lookup_first_ordered_extent() is
 * that this one won't return any ordered extent that does not overlap the range.
 * And the difference against btrfs_lookup_ordered_extent() is, this function
 * ensures the first ordered extent gets returned.
 */
struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
			struct btrfs_inode *inode, u64 file_offset, u64 len)
{
	struct rb_node *node;
	struct rb_node *cur;
	struct rb_node *prev;
	struct rb_node *next;
	struct btrfs_ordered_extent *entry = NULL;

	spin_lock_irq(&inode->ordered_tree_lock);
	node = inode->ordered_tree.rb_node;
	/*
	 * Here we don't want to use ordered_tree_search(), which will use
	 * inode->ordered_tree_last and screw up the search order.
	 * And __tree_search() can't return the adjacent ordered extents
	 * either, thus here we do our own search.
	 */
	while (node) {
		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);

		if (file_offset < entry->file_offset) {
			node = node->rb_left;
		} else if (file_offset >= entry_end(entry)) {
			node = node->rb_right;
		} else {
			/*
			 * Direct hit, got an ordered extent that starts at
			 * @file_offset
			 */
			goto out;
		}
	}
	if (!entry) {
		/* Empty tree */
		goto out;
	}

	cur = &entry->rb_node;
	/* We got an entry around @file_offset, check adjacent entries */
	if (entry->file_offset < file_offset) {
		prev = cur;
		next = rb_next(cur);
	} else {
		prev = rb_prev(cur);
		next = cur;
	}
	if (prev) {
		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	if (next) {
		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
		if (range_overlaps(entry, file_offset, len))
			goto out;
	}
	/* No ordered extent in the range */
	entry = NULL;
out:
	if (entry) {
		refcount_inc(&entry->refs);
		trace_btrfs_ordered_extent_lookup_first_range(inode, entry);
	}

	spin_unlock_irq(&inode->ordered_tree_lock);
	return entry;
}


/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion.
 *
 * @inode:        Inode whose ordered tree is to be searched
 * @start:        Beginning of range to flush
 * @end:          Last byte of range to lock
 * @cached_state: If passed, will return the extent state responsible for the
 *                locked range. It's the caller's responsibility to free the
 *                cached state.
 *
 * Always return with the given range locked, ensuring after it's called no
 * ordered extent can be pending.
 */
void btrfs_lock_and_flush_ordered_range(struct btrfs_inode *inode, u64 start,
					u64 end,
					struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cache = NULL;
	struct extent_state **cachedp = &cache;

	if (cached_state)
		cachedp = cached_state;

	while (1) {
		lock_extent(&inode->io_tree, start, end, cachedp);
		ordered = btrfs_lookup_ordered_range(inode, start,
						     end - start + 1);
		if (!ordered) {
			/*
			 * If no external cached_state has been passed then
			 * decrement the extra ref taken for cachedp since we
			 * aren't exposing it outside of this function
			 */
			if (!cached_state)
				refcount_dec(&cache->refs);
			break;
		}
		unlock_extent(&inode->io_tree, start, end, cachedp);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
}
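
/*
 * Usage sketch (illustrative, not part of the original file): preparing
 * a range for an operation that must not race with pending writeback:
 *
 *	struct extent_state *cached = NULL;
 *
 *	btrfs_lock_and_flush_ordered_range(inode, start, end, &cached);
 *	...		// range locked, no ordered extents pending in it
 *	unlock_extent(&inode->io_tree, start, end, &cached);
 */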

/*
 * Lock the passed range and ensure all pending ordered extents in it are run
 * to completion in nowait mode.
 *
 * Return true if btrfs_lock_ordered_range does not return any extents,
 * otherwise false.
 */
bool btrfs_try_lock_ordered_range(struct btrfs_inode *inode, u64 start, u64 end,
				  struct extent_state **cached_state)
{
	struct btrfs_ordered_extent *ordered;

	if (!try_lock_extent(&inode->io_tree, start, end, cached_state))
		return false;

	ordered = btrfs_lookup_ordered_range(inode, start, end - start + 1);
	if (!ordered)
		return true;

	btrfs_put_ordered_extent(ordered);
	unlock_extent(&inode->io_tree, start, end, cached_state);

	return false;
}
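
/*
 * Usage sketch (illustrative, not part of the original file): an
 * RWF_NOWAIT style write probe that bails out instead of blocking:
 *
 *	if (!btrfs_try_lock_ordered_range(inode, start, end, &cached))
 *		return -EAGAIN;
 */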

/* Split out a new ordered extent for this first @len bytes of @ordered. */
struct btrfs_ordered_extent *btrfs_split_ordered_extent(
			struct btrfs_ordered_extent *ordered, u64 len)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 file_offset = ordered->file_offset;
	u64 disk_bytenr = ordered->disk_bytenr;
	unsigned long flags = ordered->flags;
	struct btrfs_ordered_sum *sum, *tmpsum;
	struct btrfs_ordered_extent *new;
	struct rb_node *node;
	u64 offset = 0;

	trace_btrfs_ordered_extent_split(inode, ordered);

	ASSERT(!(flags & (1U << BTRFS_ORDERED_COMPRESSED)));

	/*
	 * The entire bio must be covered by the ordered extent, but we can't
	 * reduce the original extent to a zero length either.
	 */
	if (WARN_ON_ONCE(len >= ordered->num_bytes))
		return ERR_PTR(-EINVAL);
	/* We cannot split partially completed ordered extents. */
	if (ordered->bytes_left) {
		ASSERT(!(flags & ~BTRFS_ORDERED_TYPE_FLAGS));
		if (WARN_ON_ONCE(ordered->bytes_left != ordered->disk_num_bytes))
			return ERR_PTR(-EINVAL);
	}
	/* We cannot split a compressed ordered extent. */
	if (WARN_ON_ONCE(ordered->disk_num_bytes != ordered->num_bytes))
		return ERR_PTR(-EINVAL);

	new = alloc_ordered_extent(inode, file_offset, len, len, disk_bytenr,
				   len, 0, flags, ordered->compress_type);
	if (IS_ERR(new))
		return new;

	/* One ref for the tree. */
	refcount_inc(&new->refs);

	spin_lock_irq(&root->ordered_extent_lock);
	spin_lock(&inode->ordered_tree_lock);
	/* Remove from tree once */
	node = &ordered->rb_node;
	rb_erase(node, &inode->ordered_tree);
	RB_CLEAR_NODE(node);
	if (inode->ordered_tree_last == node)
		inode->ordered_tree_last = NULL;

	ordered->file_offset += len;
	ordered->disk_bytenr += len;
	ordered->num_bytes -= len;
	ordered->disk_num_bytes -= len;

	if (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)) {
		ASSERT(ordered->bytes_left == 0);
		new->bytes_left = 0;
	} else {
		ordered->bytes_left -= len;
	}

	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags)) {
		if (ordered->truncated_len > len) {
			ordered->truncated_len -= len;
		} else {
			new->truncated_len = ordered->truncated_len;
			ordered->truncated_len = 0;
		}
	}

	list_for_each_entry_safe(sum, tmpsum, &ordered->list, list) {
		if (offset == len)
			break;
		list_move_tail(&sum->list, &new->list);
		offset += sum->len;
	}

	/* Re-insert the node */
	node = tree_insert(&inode->ordered_tree, ordered->file_offset,
			   &ordered->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			    "zoned: inconsistency in ordered tree at offset %llu",
			    ordered->file_offset);

	node = tree_insert(&inode->ordered_tree, new->file_offset, &new->rb_node);
	if (node)
		btrfs_panic(fs_info, -EEXIST,
			    "zoned: inconsistency in ordered tree at offset %llu",
			    new->file_offset);
	spin_unlock(&inode->ordered_tree_lock);

	list_add_tail(&new->root_extent_list, &root->ordered_extents);
	root->nr_ordered_extents++;
	spin_unlock_irq(&root->ordered_extent_lock);
	return new;
}
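
/*
 * Worked example (illustrative, not part of the original file): splitting
 * a 1 MiB ordered extent at 256 KiB. The new entry keeps the original
 * file_offset/disk_bytenr and covers the first @len bytes, while the
 * original is shifted forward:
 *
 *	new = btrfs_split_ordered_extent(ordered, SZ_256K);
 *	// new:     file_offset == X,         num_bytes == 256K
 *	// ordered: file_offset == X + 256K,  num_bytes == 768K
 */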

int __init ordered_data_init(void)
{
	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
					sizeof(struct btrfs_ordered_extent), 0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_ordered_extent_cache)
		return -ENOMEM;

	return 0;
}

void __cold ordered_data_exit(void)
{
	kmem_cache_destroy(btrfs_ordered_extent_cache);
}
1249 | |