// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2010 David Chinner.
 * Copyright (c) 2011 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_ag.h"

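/*
 * Allocate a new busy extent record covering [bno, bno + len), insert it
 * into the per-AG busy extent rbtree and queue it on the caller-supplied
 * list (a transaction busy list or a discard list) so that it can be
 * cleared later.
 */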
static void
xfs_extent_busy_insert_list(
        struct xfs_perag *pag,
        xfs_agblock_t bno,
        xfs_extlen_t len,
        unsigned int flags,
        struct list_head *busy_list)
{
        struct xfs_extent_busy *new;
        struct xfs_extent_busy *busyp;
        struct rb_node **rbp;
        struct rb_node *parent = NULL;

        new = kmem_zalloc(sizeof(struct xfs_extent_busy), 0);
        new->agno = pag->pag_agno;
        new->bno = bno;
        new->length = len;
        INIT_LIST_HEAD(&new->list);
        new->flags = flags;

        /* trace before insert to be able to see failed inserts */
        trace_xfs_extent_busy(pag->pag_mount, pag->pag_agno, bno, len);

        spin_lock(&pag->pagb_lock);
        rbp = &pag->pagb_tree.rb_node;
        while (*rbp) {
                parent = *rbp;
                busyp = rb_entry(parent, struct xfs_extent_busy, rb_node);

                if (new->bno < busyp->bno) {
                        rbp = &(*rbp)->rb_left;
                        ASSERT(new->bno + new->length <= busyp->bno);
                } else if (new->bno > busyp->bno) {
                        rbp = &(*rbp)->rb_right;
                        ASSERT(bno >= busyp->bno + busyp->length);
                } else {
                        ASSERT(0);
                }
        }

        rb_link_node(&new->rb_node, parent, rbp);
        rb_insert_color(&new->rb_node, &pag->pagb_tree);

        /* always process discard lists in fifo order */
        list_add_tail(&new->list, busy_list);
        spin_unlock(&pag->pagb_lock);
}

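/*
 * Mark the extent [bno, bno + len) busy for the given transaction by
 * queueing the busy record on tp->t_busy.
 */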
void
xfs_extent_busy_insert(
        struct xfs_trans *tp,
        struct xfs_perag *pag,
        xfs_agblock_t bno,
        xfs_extlen_t len,
        unsigned int flags)
{
        xfs_extent_busy_insert_list(pag, bno, len, flags, &tp->t_busy);
}

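/*
 * As above, but for extents queued for discard: the record is flagged
 * XFS_EXTENT_BUSY_DISCARDED and placed on the caller's discard list instead
 * of a transaction busy list.
 */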
void
xfs_extent_busy_insert_discard(
        struct xfs_perag *pag,
        xfs_agblock_t bno,
        xfs_extlen_t len,
        struct list_head *busy_list)
{
        xfs_extent_busy_insert_list(pag, bno, len, XFS_EXTENT_BUSY_DISCARDED,
                        busy_list);
}

/*
 * Search for a busy extent within the range of the extent we are about to
 * allocate.  You need to be holding the busy extent tree lock when calling
 * xfs_extent_busy_search(). This function returns 0 for no overlapping busy
 * extent, -1 for an overlapping but not exact busy extent, and 1 for an exact
 * match.  This is done so that a non-zero return indicates an overlap that
 * will require a synchronous transaction, while still allowing callers to
 * distinguish between a partial and an exact match.
 */
int
xfs_extent_busy_search(
        struct xfs_mount *mp,
        struct xfs_perag *pag,
        xfs_agblock_t bno,
        xfs_extlen_t len)
{
        struct rb_node *rbp;
        struct xfs_extent_busy *busyp;
        int match = 0;

        /* find closest start bno overlap */
        spin_lock(&pag->pagb_lock);
        rbp = pag->pagb_tree.rb_node;
        while (rbp) {
                busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node);
                if (bno < busyp->bno) {
                        /* may overlap, but exact start block is lower */
                        if (bno + len > busyp->bno)
                                match = -1;
                        rbp = rbp->rb_left;
                } else if (bno > busyp->bno) {
                        /* may overlap, but exact start block is higher */
                        if (bno < busyp->bno + busyp->length)
                                match = -1;
                        rbp = rbp->rb_right;
                } else {
                        /* bno matches busyp, length determines exact match */
                        match = (busyp->length == len) ? 1 : -1;
                        break;
                }
        }
        spin_unlock(&pag->pagb_lock);
        return match;
}

/*
 * The found free extent [fbno, fend] overlaps part or all of the given busy
 * extent.  If the overlap covers the beginning, the end, or all of the busy
 * extent, the overlapping portion can be made unbusy and used for the
 * allocation.  We can't split a busy extent because we can't modify a
 * transaction/CIL context busy list, but we can update an entry's block
 * number or length.
 *
 * Returns true if the extent can safely be reused, or false if the search
 * needs to be restarted.
 */
STATIC bool
xfs_extent_busy_update_extent(
        struct xfs_mount *mp,
        struct xfs_perag *pag,
        struct xfs_extent_busy *busyp,
        xfs_agblock_t fbno,
        xfs_extlen_t flen,
        bool userdata) __releases(&pag->pagb_lock)
                       __acquires(&pag->pagb_lock)
{
        xfs_agblock_t fend = fbno + flen;
        xfs_agblock_t bbno = busyp->bno;
        xfs_agblock_t bend = bbno + busyp->length;

        /*
         * This extent is currently being discarded.  Give the thread
         * performing the discard a chance to mark the extent unbusy
         * and retry.
         */
        if (busyp->flags & XFS_EXTENT_BUSY_DISCARDED) {
                spin_unlock(&pag->pagb_lock);
                delay(1);
                spin_lock(&pag->pagb_lock);
                return false;
        }

        /*
         * If there is a busy extent overlapping a user allocation, we have
         * no choice but to force the log and retry the search.
         *
         * Fortunately this does not happen during normal operation, but
         * only if the filesystem is very low on space and has to dip into
         * the AGFL for normal allocations.
         */
        if (userdata)
                goto out_force_log;

        if (bbno < fbno && bend > fend) {
                /*
                 * Case 1:
                 *    bbno           bend
                 *    +BBBBBBBBBBBBBBBBB+
                 *        +---------+
                 *        fbno   fend
                 */

                /*
                 * We would have to split the busy extent to be able to track
                 * it correctly, which we cannot do because we would have to
                 * modify the list of busy extents attached to the transaction
                 * or CIL context, which is immutable.
                 *
                 * Force out the log to clear the busy extent and retry the
                 * search.
                 */
                goto out_force_log;
        } else if (bbno >= fbno && bend <= fend) {
                /*
                 * Case 2:
                 *    bbno           bend
                 *    +BBBBBBBBBBBBBBBBB+
                 *    +-----------------+
                 *    fbno           fend
                 *
                 * Case 3:
                 *    bbno           bend
                 *    +BBBBBBBBBBBBBBBBB+
                 *    +--------------------------+
                 *    fbno                    fend
                 *
                 * Case 4:
                 *             bbno           bend
                 *             +BBBBBBBBBBBBBBBBB+
                 *    +--------------------------+
                 *    fbno                    fend
                 *
                 * Case 5:
                 *             bbno           bend
                 *             +BBBBBBBBBBBBBBBBB+
                 *    +-----------------------------------+
                 *    fbno                             fend
                 *
                 */

                /*
                 * The busy extent is fully covered by the extent we are
                 * allocating, and can simply be removed from the rbtree.
                 * However we cannot remove it from the immutable list
                 * tracking busy extents in the transaction or CIL context,
                 * so set the length to zero to mark it invalid.
                 *
                 * We also need to restart the busy extent search from the
                 * tree root, because erasing the node can rearrange the
                 * tree topology.
                 */
                rb_erase(&busyp->rb_node, &pag->pagb_tree);
                busyp->length = 0;
                return false;
        } else if (fend < bend) {
                /*
                 * Case 6:
                 *    bbno           bend
                 *    +BBBBBBBBBBBBBBBBB+
                 *    +---------+
                 *    fbno   fend
                 *
                 * Case 7:
                 *             bbno           bend
                 *             +BBBBBBBBBBBBBBBBB+
                 *    +------------------+
                 *    fbno            fend
                 *
                 */
                busyp->bno = fend;
                busyp->length = bend - fend;
        } else if (bbno < fbno) {
                /*
                 * Case 8:
                 *    bbno           bend
                 *    +BBBBBBBBBBBBBBBBB+
                 *        +-------------+
                 *        fbno       fend
                 *
                 * Case 9:
                 *    bbno           bend
                 *    +BBBBBBBBBBBBBBBBB+
                 *        +----------------------+
                 *        fbno                fend
                 */
                busyp->length = fbno - busyp->bno;
        } else {
                ASSERT(0);
        }

        trace_xfs_extent_busy_reuse(mp, pag->pag_agno, fbno, flen);
        return true;

out_force_log:
        spin_unlock(&pag->pagb_lock);
        xfs_log_force(mp, XFS_LOG_SYNC);
        trace_xfs_extent_busy_force(mp, pag->pag_agno, fbno, flen);
        spin_lock(&pag->pagb_lock);
        return false;
}


/*
 * For a given extent [fbno, flen], make sure we can reuse it safely.
 */
void
xfs_extent_busy_reuse(
        struct xfs_mount *mp,
        struct xfs_perag *pag,
        xfs_agblock_t fbno,
        xfs_extlen_t flen,
        bool userdata)
{
        struct rb_node *rbp;

        ASSERT(flen > 0);
        spin_lock(&pag->pagb_lock);
restart:
        rbp = pag->pagb_tree.rb_node;
        while (rbp) {
                struct xfs_extent_busy *busyp =
                        rb_entry(rbp, struct xfs_extent_busy, rb_node);
                xfs_agblock_t bbno = busyp->bno;
                xfs_agblock_t bend = bbno + busyp->length;

                if (fbno + flen <= bbno) {
                        rbp = rbp->rb_left;
                        continue;
                } else if (fbno >= bend) {
                        rbp = rbp->rb_right;
                        continue;
                }

                if (!xfs_extent_busy_update_extent(mp, pag, busyp, fbno, flen,
                                userdata))
                        goto restart;
        }
        spin_unlock(&pag->pagb_lock);
}

/*
 * For a given extent [fbno, flen], search the busy extent list to find a
 * subset of the extent that is not busy.  If the returned *len is smaller
 * than args->minlen no suitable extent could be found, and the higher level
 * code needs to force out the log and retry the allocation.
 *
 * Return the current busy generation for the AG if the extent is busy. This
 * value can be used to wait for at least one of the currently busy extents
 * to be cleared. Note that the busy list is not guaranteed to be empty after
 * the gen is woken. The state of a specific extent must always be confirmed
 * with another call to xfs_extent_busy_trim() before it can be used.
 */
bool
xfs_extent_busy_trim(
        struct xfs_alloc_arg *args,
        xfs_agblock_t *bno,
        xfs_extlen_t *len,
        unsigned *busy_gen)
{
        xfs_agblock_t fbno;
        xfs_extlen_t flen;
        struct rb_node *rbp;
        bool ret = false;

        ASSERT(*len > 0);

        spin_lock(&args->pag->pagb_lock);
        fbno = *bno;
        flen = *len;
        rbp = args->pag->pagb_tree.rb_node;
        while (rbp && flen >= args->minlen) {
                struct xfs_extent_busy *busyp =
                        rb_entry(rbp, struct xfs_extent_busy, rb_node);
                xfs_agblock_t fend = fbno + flen;
                xfs_agblock_t bbno = busyp->bno;
                xfs_agblock_t bend = bbno + busyp->length;

                if (fend <= bbno) {
                        rbp = rbp->rb_left;
                        continue;
                } else if (fbno >= bend) {
                        rbp = rbp->rb_right;
                        continue;
                }

                if (bbno <= fbno) {
                        /* start overlap */

                        /*
                         * Case 1:
                         *    bbno           bend
                         *    +BBBBBBBBBBBBBBBBB+
                         *        +---------+
                         *        fbno   fend
                         *
                         * Case 2:
                         *    bbno           bend
                         *    +BBBBBBBBBBBBBBBBB+
                         *        +-------------+
                         *        fbno       fend
                         *
                         * Case 3:
                         *    bbno           bend
                         *    +BBBBBBBBBBBBBBBBB+
                         *    +-------------+
                         *    fbno       fend
                         *
                         * Case 4:
                         *    bbno           bend
                         *    +BBBBBBBBBBBBBBBBB+
                         *    +-----------------+
                         *    fbno           fend
                         *
                         * No unbusy region in extent, return failure.
                         */
                        if (fend <= bend)
                                goto fail;

                        /*
                         * Case 5:
                         *    bbno           bend
                         *    +BBBBBBBBBBBBBBBBB+
                         *        +----------------------+
                         *        fbno                fend
                         *
                         * Case 6:
                         *    bbno           bend
                         *    +BBBBBBBBBBBBBBBBB+
                         *    +--------------------------+
                         *    fbno                    fend
                         *
                         * Needs to be trimmed to:
                         *                       +-------+
                         *                       fbno fend
                         */
                        fbno = bend;
                } else if (bend >= fend) {
                        /* end overlap */

                        /*
                         * Case 7:
                         *             bbno           bend
                         *             +BBBBBBBBBBBBBBBBB+
                         *    +------------------+
                         *    fbno            fend
                         *
                         * Case 8:
                         *             bbno           bend
                         *             +BBBBBBBBBBBBBBBBB+
                         *    +--------------------------+
                         *    fbno                    fend
                         *
                         * Needs to be trimmed to:
                         *    +-------+
                         *    fbno fend
                         */
                        fend = bbno;
                } else {
                        /* middle overlap */

                        /*
                         * Case 9:
                         *             bbno           bend
                         *             +BBBBBBBBBBBBBBBBB+
                         *    +-----------------------------------+
                         *    fbno                             fend
                         *
                         * Can be trimmed to:
                         *    +-------+        OR         +-------+
                         *    fbno fend                   fbno fend
                         *
                         * Backward allocation leads to significant
                         * fragmentation of directories, which degrades
                         * directory performance, therefore we always want to
                         * choose the option that produces forward allocation
                         * patterns.
                         * Preferring the lower bno extent will make the next
                         * request use "fend" as the start of the next
                         * allocation; if the segment is no longer busy at
                         * that point, we'll get a contiguous allocation, but
                         * even if it is still busy, we will get a forward
                         * allocation.
                         * We try to avoid choosing the segment at "bend",
                         * because that can lead to the next allocation
                         * taking the segment at "fbno", which would be a
                         * backward allocation.  We only use the segment at
                         * "fbno" if it is much larger than the current
                         * requested size, because in that case there's a
                         * good chance subsequent allocations will be
                         * contiguous.
                         */
                        if (bbno - fbno >= args->maxlen) {
                                /* left candidate fits perfectly */
                                fend = bbno;
                        } else if (fend - bend >= args->maxlen * 4) {
                                /* right candidate has enough free space */
                                fbno = bend;
                        } else if (bbno - fbno >= args->minlen) {
                                /* left candidate fits minimum requirement */
                                fend = bbno;
                        } else {
                                goto fail;
                        }
                }

                flen = fend - fbno;
        }
out:

        if (fbno != *bno || flen != *len) {
                trace_xfs_extent_busy_trim(args->mp, args->agno, *bno, *len,
                                fbno, flen);
                *bno = fbno;
                *len = flen;
                *busy_gen = args->pag->pagb_gen;
                ret = true;
        }
        spin_unlock(&args->pag->pagb_lock);
        return ret;
fail:
        /*
         * Return a zero extent length as a failure indication.  All callers
         * re-check if the trimmed extent satisfies the minlen requirement.
         */
        flen = 0;
        goto out;
}

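/*
 * Remove a single busy extent: if it is still valid (non-zero length), erase
 * it from the per-AG rbtree, then drop it from its busy list and free it.
 * The caller must hold pag->pagb_lock.
 */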
STATIC void
xfs_extent_busy_clear_one(
        struct xfs_mount *mp,
        struct xfs_perag *pag,
        struct xfs_extent_busy *busyp)
{
        if (busyp->length) {
                trace_xfs_extent_busy_clear(mp, busyp->agno, busyp->bno,
                                busyp->length);
                rb_erase(&busyp->rb_node, &pag->pagb_tree);
        }

        list_del_init(&busyp->list);
        kmem_free(busyp);
}

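/*
 * Finish processing busy extents for one AG: bump the busy generation and
 * wake any waiters if extents were cleared, then drop pagb_lock and release
 * the perag reference.
 */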
static void
xfs_extent_busy_put_pag(
        struct xfs_perag *pag,
        bool wakeup)
                __releases(pag->pagb_lock)
{
        if (wakeup) {
                pag->pagb_gen++;
                wake_up_all(&pag->pagb_wait);
        }

        spin_unlock(&pag->pagb_lock);
        xfs_perag_put(pag);
}

/*
 * Remove all extents on the passed in list from the busy extents tree.
 * If do_discard is set, skip extents that need to be discarded, and mark
 * these as undergoing a discard operation instead.
 */
void
xfs_extent_busy_clear(
        struct xfs_mount *mp,
        struct list_head *list,
        bool do_discard)
{
        struct xfs_extent_busy *busyp, *n;
        struct xfs_perag *pag = NULL;
        xfs_agnumber_t agno = NULLAGNUMBER;
        bool wakeup = false;

        list_for_each_entry_safe(busyp, n, list, list) {
                if (busyp->agno != agno) {
                        if (pag)
                                xfs_extent_busy_put_pag(pag, wakeup);
                        agno = busyp->agno;
                        pag = xfs_perag_get(mp, agno);
                        spin_lock(&pag->pagb_lock);
                        wakeup = false;
                }

                if (do_discard && busyp->length &&
                    !(busyp->flags & XFS_EXTENT_BUSY_SKIP_DISCARD)) {
                        busyp->flags = XFS_EXTENT_BUSY_DISCARDED;
                } else {
                        xfs_extent_busy_clear_one(mp, pag, busyp);
                        wakeup = true;
                }
        }

        if (pag)
                xfs_extent_busy_put_pag(pag, wakeup);
}

/*
 * Flush out all busy extents for this AG.
 *
 * If the current transaction is holding busy extents, the caller may not want
 * to wait for committed busy extents to resolve. If we are being told just to
 * try a flush or progress has been made since we last skipped a busy extent,
 * return immediately to allow the caller to try again.
 *
 * If we are freeing extents, we might actually be holding the only free
 * extents in the transaction busy list and the log force won't resolve that
 * situation. In this case, we must return -EAGAIN to avoid a deadlock by
 * informing the caller it needs to commit the busy extents it holds before
 * retrying the extent free operation.
 */
int
xfs_extent_busy_flush(
        struct xfs_trans *tp,
        struct xfs_perag *pag,
        unsigned busy_gen,
        uint32_t alloc_flags)
{
        DEFINE_WAIT(wait);
        int error;

        error = xfs_log_force(tp->t_mountp, XFS_LOG_SYNC);
        if (error)
                return error;

        /* Avoid deadlocks on uncommitted busy extents. */
        if (!list_empty(&tp->t_busy)) {
                if (alloc_flags & XFS_ALLOC_FLAG_TRYFLUSH)
                        return 0;

                if (busy_gen != READ_ONCE(pag->pagb_gen))
                        return 0;

                if (alloc_flags & XFS_ALLOC_FLAG_FREEING)
                        return -EAGAIN;
        }

        /* Wait for committed busy extents to resolve. */
        do {
                prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
                if (busy_gen != READ_ONCE(pag->pagb_gen))
                        break;
                schedule();
        } while (1);

        finish_wait(&pag->pagb_wait, &wait);
        return 0;
}

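/*
 * Wait for all busy extents in the filesystem to clear, walking every AG and
 * sleeping until its busy extent tree is empty.
 */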
void
xfs_extent_busy_wait_all(
        struct xfs_mount *mp)
{
        struct xfs_perag *pag;
        DEFINE_WAIT(wait);
        xfs_agnumber_t agno;

        for_each_perag(mp, agno, pag) {
                do {
                        prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
                        if (RB_EMPTY_ROOT(&pag->pagb_tree))
                                break;
                        schedule();
                } while (1);
                finish_wait(&pag->pagb_wait, &wait);
        }
}

/*
 * Callback for list_sort to sort busy extents by the AG they reside in.
 */
int
xfs_extent_busy_ag_cmp(
        void *priv,
        const struct list_head *l1,
        const struct list_head *l2)
{
        struct xfs_extent_busy *b1 =
                container_of(l1, struct xfs_extent_busy, list);
        struct xfs_extent_busy *b2 =
                container_of(l2, struct xfs_extent_busy, list);
        s32 diff;

        diff = b1->agno - b2->agno;
        if (!diff)
                diff = b1->bno - b2->bno;
        return diff;
}
