// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2010 David Chinner.
 * Copyright (c) 2011 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_log.h"

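/*
 * Insert a new busy extent record covering [bno, bno + len) in AG agno.
 *
 * The record is added to the per-AG rbtree, which is keyed by block number
 * and must never contain overlapping entries (the ASSERTs below check this),
 * and is also linked onto the transaction's t_busy list so that it can later
 * be removed by xfs_extent_busy_clear().
 */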
void
xfs_extent_busy_insert(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	unsigned int		flags)
{
	struct xfs_extent_busy	*new;
	struct xfs_extent_busy	*busyp;
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent = NULL;

	new = kmem_zalloc(sizeof(struct xfs_extent_busy), 0);
	new->agno = agno;
	new->bno = bno;
	new->length = len;
	INIT_LIST_HEAD(&new->list);
	new->flags = flags;

	/* trace before insert to be able to see failed inserts */
	trace_xfs_extent_busy(tp->t_mountp, agno, bno, len);

	pag = xfs_perag_get(tp->t_mountp, new->agno);
	spin_lock(&pag->pagb_lock);
	rbp = &pag->pagb_tree.rb_node;
	while (*rbp) {
		parent = *rbp;
		busyp = rb_entry(parent, struct xfs_extent_busy, rb_node);

		if (new->bno < busyp->bno) {
			rbp = &(*rbp)->rb_left;
			ASSERT(new->bno + new->length <= busyp->bno);
		} else if (new->bno > busyp->bno) {
			rbp = &(*rbp)->rb_right;
			ASSERT(bno >= busyp->bno + busyp->length);
		} else {
			ASSERT(0);
		}
	}

	rb_link_node(&new->rb_node, parent, rbp);
	rb_insert_color(&new->rb_node, &pag->pagb_tree);

	list_add(&new->list, &tp->t_busy);
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}

/*
 * Search for a busy extent within the range of the extent we are about to
 * allocate.  The per-AG busy extent tree lock is taken internally.  This
 * function returns 0 for no overlapping busy extent, -1 for an overlapping
 * but not exact busy extent, and 1 for an exact match.  A non-zero return
 * thus indicates an overlap that will require a synchronous transaction,
 * while still distinguishing between a partial and an exact match.
 */
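/*
 * For example (hypothetical block numbers): with a single busy extent
 * covering blocks [10, 15) in the tree, searching for (bno 10, len 5)
 * returns 1 (exact match), (bno 8, len 4) returns -1 (partial overlap at
 * blocks 10-11), and (bno 20, len 2) returns 0 (no overlap).
 */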
int
xfs_extent_busy_search(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	struct xfs_perag	*pag;
	struct rb_node		*rbp;
	struct xfs_extent_busy	*busyp;
	int			match = 0;

	pag = xfs_perag_get(mp, agno);
	spin_lock(&pag->pagb_lock);

	rbp = pag->pagb_tree.rb_node;

	/* find closest start bno overlap */
	while (rbp) {
		busyp = rb_entry(rbp, struct xfs_extent_busy, rb_node);
		if (bno < busyp->bno) {
			/* may overlap, but exact start block is lower */
			if (bno + len > busyp->bno)
				match = -1;
			rbp = rbp->rb_left;
		} else if (bno > busyp->bno) {
			/* may overlap, but exact start block is higher */
			if (bno < busyp->bno + busyp->length)
				match = -1;
			rbp = rbp->rb_right;
		} else {
			/* bno matches busyp, length determines exact match */
			match = (busyp->length == len) ? 1 : -1;
			break;
		}
	}
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
	return match;
}

/*
 * The found free extent [fbno, fend] overlaps part or all of the given busy
 * extent.  If the overlap covers the beginning, the end, or all of the busy
 * extent, the overlapping portion can be made unbusy and used for the
 * allocation.  We can't split a busy extent because we can't modify a
 * transaction/CIL context busy list, but we can update an entry's block
 * number or length.
 *
 * Returns true if the extent can safely be reused, or false if the search
 * needs to be restarted.
 */
STATIC bool
xfs_extent_busy_update_extent(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_extent_busy	*busyp,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata) __releases(&pag->pagb_lock)
					  __acquires(&pag->pagb_lock)
{
	xfs_agblock_t		fend = fbno + flen;
	xfs_agblock_t		bbno = busyp->bno;
	xfs_agblock_t		bend = bbno + busyp->length;

	/*
	 * This extent is currently being discarded.  Give the thread
	 * performing the discard a chance to mark the extent unbusy
	 * and retry.
	 */
	if (busyp->flags & XFS_EXTENT_BUSY_DISCARDED) {
		spin_unlock(&pag->pagb_lock);
		delay(1);
		spin_lock(&pag->pagb_lock);
		return false;
	}

	/*
	 * If there is a busy extent overlapping a user allocation, we have
	 * no choice but to force the log and retry the search.
	 *
	 * Fortunately this does not happen during normal operation, but
	 * only if the filesystem is very low on space and has to dip into
	 * the AGFL for normal allocations.
	 */
	if (userdata)
		goto out_force_log;

	if (bbno < fbno && bend > fend) {
		/*
		 * Case 1:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +---------+
		 *        fbno   fend
		 */

		/*
		 * We would have to split the busy extent to be able to track
		 * it correctly, which we cannot do because we would have to
		 * modify the list of busy extents attached to the transaction
		 * or CIL context, which is immutable.
		 *
		 * Force out the log to clear the busy extent and retry the
		 * search.
		 */
		goto out_force_log;
	} else if (bbno >= fbno && bend <= fend) {
		/*
		 * Case 2:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *    +-----------------+
		 *    fbno           fend
		 *
		 * Case 3:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *    +--------------------------+
		 *    fbno                    fend
		 *
		 * Case 4:
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +--------------------------+
		 *    fbno                    fend
		 *
		 * Case 5:
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +-----------------------------------+
		 *    fbno                             fend
		 *
		 */

		/*
		 * The busy extent is fully covered by the extent we are
		 * allocating, and can simply be removed from the rbtree.
		 * However we cannot remove it from the immutable list
		 * tracking busy extents in the transaction or CIL context,
		 * so set the length to zero to mark it invalid.
		 *
		 * We also need to restart the busy extent search from the
		 * tree root, because erasing the node can rearrange the
		 * tree topology.
		 */
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
		busyp->length = 0;
		return false;
	} else if (fend < bend) {
		/*
		 * Case 6:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *    +---------+
		 *    fbno   fend
		 *
		 * Case 7:
		 *             bbno           bend
		 *             +BBBBBBBBBBBBBBBBB+
		 *    +------------------+
		 *    fbno            fend
		 *
		 */
		busyp->bno = fend;
		busyp->length = bend - fend;
	} else if (bbno < fbno) {
		/*
		 * Case 8:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +-------------+
		 *        fbno       fend
		 *
		 * Case 9:
		 *    bbno           bend
		 *    +BBBBBBBBBBBBBBBBB+
		 *        +----------------------+
		 *        fbno                fend
		 */
		busyp->length = fbno - busyp->bno;
	} else {
		ASSERT(0);
	}

	trace_xfs_extent_busy_reuse(mp, pag->pag_agno, fbno, flen);
	return true;

out_force_log:
	spin_unlock(&pag->pagb_lock);
	xfs_log_force(mp, XFS_LOG_SYNC);
	trace_xfs_extent_busy_force(mp, pag->pag_agno, fbno, flen);
	spin_lock(&pag->pagb_lock);
	return false;
}


/*
 * For a given extent [fbno, flen], make sure we can reuse it safely.
 */
void
xfs_extent_busy_reuse(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		fbno,
	xfs_extlen_t		flen,
	bool			userdata)
{
	struct xfs_perag	*pag;
	struct rb_node		*rbp;

	ASSERT(flen > 0);

	pag = xfs_perag_get(mp, agno);
	spin_lock(&pag->pagb_lock);
restart:
	rbp = pag->pagb_tree.rb_node;
	while (rbp) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fbno + flen <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		if (!xfs_extent_busy_update_extent(mp, pag, busyp, fbno, flen,
						  userdata))
			goto restart;
	}
	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}

/*
 * For a given extent [fbno, flen], search the busy extent list to find a
 * subset of the extent that is not busy.  If *len is smaller than
 * args->minlen no suitable extent could be found, and the higher level
 * code needs to force out the log and retry the allocation.
 *
 * Return the current busy generation for the AG if the extent is busy. This
 * value can be used to wait for at least one of the currently busy extents
 * to be cleared. Note that the busy list is not guaranteed to be empty after
 * the gen is woken. The state of a specific extent must always be confirmed
 * with another call to xfs_extent_busy_trim() before it can be used.
 */
bool
xfs_extent_busy_trim(
	struct xfs_alloc_arg	*args,
	xfs_agblock_t		*bno,
	xfs_extlen_t		*len,
	unsigned		*busy_gen)
{
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	struct rb_node		*rbp;
	bool			ret = false;

	ASSERT(*len > 0);

	spin_lock(&args->pag->pagb_lock);
	fbno = *bno;
	flen = *len;
	rbp = args->pag->pagb_tree.rb_node;
	while (rbp && flen >= args->minlen) {
		struct xfs_extent_busy *busyp =
			rb_entry(rbp, struct xfs_extent_busy, rb_node);
		xfs_agblock_t	fend = fbno + flen;
		xfs_agblock_t	bbno = busyp->bno;
		xfs_agblock_t	bend = bbno + busyp->length;

		if (fend <= bbno) {
			rbp = rbp->rb_left;
			continue;
		} else if (fbno >= bend) {
			rbp = rbp->rb_right;
			continue;
		}

		if (bbno <= fbno) {
			/* start overlap */

			/*
			 * Case 1:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +---------+
			 *        fbno   fend
			 *
			 * Case 2:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +-------------+
			 *    fbno       fend
			 *
			 * Case 3:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +-------------+
			 *        fbno       fend
			 *
			 * Case 4:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +-----------------+
			 *    fbno           fend
			 *
			 * No unbusy region in extent, return failure.
			 */
			if (fend <= bend)
				goto fail;

			/*
			 * Case 5:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *        +----------------------+
			 *        fbno                fend
			 *
			 * Case 6:
			 *    bbno           bend
			 *    +BBBBBBBBBBBBBBBBB+
			 *    +--------------------------+
			 *    fbno                    fend
			 *
			 * Needs to be trimmed to:
			 *                       +-------+
			 *                       fbno fend
			 */
			fbno = bend;
		} else if (bend >= fend) {
			/* end overlap */

			/*
			 * Case 7:
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +------------------+
			 *    fbno            fend
			 *
			 * Case 8:
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +--------------------------+
			 *    fbno                    fend
			 *
			 * Needs to be trimmed to:
			 *    +-------+
			 *    fbno fend
			 */
			fend = bbno;
		} else {
			/* middle overlap */

			/*
			 * Case 9:
			 *             bbno           bend
			 *             +BBBBBBBBBBBBBBBBB+
			 *    +-----------------------------------+
			 *    fbno                             fend
			 *
			 * Can be trimmed to:
			 *    +-------+        OR         +-------+
			 *    fbno fend                   fbno fend
			 *
			 * Backward allocation leads to significant
			 * fragmentation of directories, which degrades
			 * directory performance, therefore we always want to
			 * choose the option that produces forward allocation
			 * patterns.
			 *
			 * Preferring the lower bno extent will make the next
			 * request use "fend" as the start of the next
			 * allocation; if the segment is no longer busy at
			 * that point, we'll get a contiguous allocation, but
			 * even if it is still busy, we will get a forward
			 * allocation.
			 *
			 * We try to avoid choosing the segment at "bend",
			 * because that can lead to the next allocation
			 * taking the segment at "fbno", which would be a
			 * backward allocation.  We only use the segment at
			 * "bend" if it is much larger than the current
			 * requested size, because in that case there's a
			 * good chance subsequent allocations will be
			 * contiguous.
			 */
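			/*
			 * Worked example with hypothetical numbers: for a
			 * free extent fbno = 0, fend = 100 and a busy extent
			 * bbno = 30, bend = 40, with args->maxlen = 20 the
			 * left candidate is 30 blocks and satisfies maxlen,
			 * so we trim to [0, 30).  With args->maxlen = 50 the
			 * right candidate (60 blocks) is smaller than
			 * maxlen * 4 = 200, so we still fall back to the
			 * left candidate as long as it covers args->minlen.
			 */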
			if (bbno - fbno >= args->maxlen) {
				/* left candidate fits perfectly */
				fend = bbno;
			} else if (fend - bend >= args->maxlen * 4) {
				/* right candidate has enough free space */
				fbno = bend;
			} else if (bbno - fbno >= args->minlen) {
				/* left candidate fits minimum requirement */
				fend = bbno;
			} else {
				goto fail;
			}
		}

		flen = fend - fbno;
	}
out:

	if (fbno != *bno || flen != *len) {
		trace_xfs_extent_busy_trim(args->mp, args->agno, *bno, *len,
					  fbno, flen);
		*bno = fbno;
		*len = flen;
		*busy_gen = args->pag->pagb_gen;
		ret = true;
	}
	spin_unlock(&args->pag->pagb_lock);
	return ret;
fail:
	/*
	 * Return a zero extent length as a failure indication.  All callers
	 * re-check if the trimmed extent satisfies the minlen requirement.
	 */
	flen = 0;
	goto out;
}

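/*
 * Remove a single busy extent record: take it out of the per-AG rbtree if it
 * is still linked there (a zero length marks an entry that was already
 * erased by xfs_extent_busy_update_extent()), unlink it from the busy list
 * and free it.
 */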
STATIC void
xfs_extent_busy_clear_one(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_extent_busy	*busyp)
{
	if (busyp->length) {
		trace_xfs_extent_busy_clear(mp, busyp->agno, busyp->bno,
					   busyp->length);
		rb_erase(&busyp->rb_node, &pag->pagb_tree);
	}

	list_del_init(&busyp->list);
	kmem_free(busyp);
}

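/*
 * Drop the busy extent tree lock and the perag reference, bumping the busy
 * generation and waking any waiters first if the caller cleared at least
 * one busy extent.
 */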
static void
xfs_extent_busy_put_pag(
	struct xfs_perag	*pag,
	bool			wakeup)
		__releases(pag->pagb_lock)
{
	if (wakeup) {
		pag->pagb_gen++;
		wake_up_all(&pag->pagb_wait);
	}

	spin_unlock(&pag->pagb_lock);
	xfs_perag_put(pag);
}

/*
 * Remove all extents on the passed in list from the busy extents tree.
 * If do_discard is set, skip extents that need to be discarded, and mark
 * these as undergoing a discard operation instead.
 */
void
xfs_extent_busy_clear(
	struct xfs_mount	*mp,
	struct list_head	*list,
	bool			do_discard)
{
	struct xfs_extent_busy	*busyp, *n;
	struct xfs_perag	*pag = NULL;
	xfs_agnumber_t		agno = NULLAGNUMBER;
	bool			wakeup = false;

	list_for_each_entry_safe(busyp, n, list, list) {
		if (busyp->agno != agno) {
			if (pag)
				xfs_extent_busy_put_pag(pag, wakeup);
			agno = busyp->agno;
			pag = xfs_perag_get(mp, agno);
			spin_lock(&pag->pagb_lock);
			wakeup = false;
		}

		if (do_discard && busyp->length &&
		    !(busyp->flags & XFS_EXTENT_BUSY_SKIP_DISCARD)) {
			busyp->flags = XFS_EXTENT_BUSY_DISCARDED;
		} else {
			xfs_extent_busy_clear_one(mp, pag, busyp);
			wakeup = true;
		}
	}

	if (pag)
		xfs_extent_busy_put_pag(pag, wakeup);
}

/*
 * Flush out all busy extents for this AG.
 *
 * Forces the log and then waits until the AG's busy generation moves past
 * busy_gen (typically a value previously returned by xfs_extent_busy_trim()),
 * i.e. until at least one busy extent has been cleared since that value was
 * sampled.  Returns early if the log force fails.
 */
void
xfs_extent_busy_flush(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	unsigned		busy_gen)
{
	DEFINE_WAIT(wait);
	int			error;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return;

	do {
		prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
		if (busy_gen != READ_ONCE(pag->pagb_gen))
			break;
		schedule();
	} while (1);

	finish_wait(&pag->pagb_wait, &wait);
}

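/*
 * Wait for all busy extents in the filesystem to be cleared.  Walks every
 * AG in turn and sleeps until that AG's busy extent tree is empty before
 * moving on to the next one.
 */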
void
xfs_extent_busy_wait_all(
	struct xfs_mount	*mp)
{
	DEFINE_WAIT(wait);
	xfs_agnumber_t		agno;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		struct xfs_perag *pag = xfs_perag_get(mp, agno);

		do {
			prepare_to_wait(&pag->pagb_wait, &wait, TASK_KILLABLE);
			if (RB_EMPTY_ROOT(&pag->pagb_tree))
				break;
			schedule();
		} while (1);
		finish_wait(&pag->pagb_wait, &wait);

		xfs_perag_put(pag);
	}
}

/*
 * Callback for list_sort to sort busy extents by the AG they reside in,
 * and by block number within each AG.
 */
int
xfs_extent_busy_ag_cmp(
	void			*priv,
	struct list_head	*l1,
	struct list_head	*l2)
{
	struct xfs_extent_busy	*b1 =
		container_of(l1, struct xfs_extent_busy, list);
	struct xfs_extent_busy	*b2 =
		container_of(l2, struct xfs_extent_busy, list);
	s32 diff;

	diff = b1->agno - b2->agno;
	if (!diff)
		diff = b1->bno - b2->bno;
	return diff;
}