1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_format.h"
9 #include "xfs_log_format.h"
10 #include "xfs_shared.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_bit.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_defer.h"
16 #include "xfs_btree.h"
17 #include "xfs_rmap.h"
18 #include "xfs_alloc_btree.h"
19 #include "xfs_alloc.h"
20 #include "xfs_extent_busy.h"
21 #include "xfs_errortag.h"
22 #include "xfs_error.h"
23 #include "xfs_trace.h"
24 #include "xfs_trans.h"
25 #include "xfs_buf_item.h"
26 #include "xfs_log.h"
27 #include "xfs_ag_resv.h"
28 #include "xfs_bmap.h"
29 
30 extern kmem_zone_t	*xfs_bmap_free_item_zone;
31 
32 struct workqueue_struct *xfs_alloc_wq;
33 
34 #define XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
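/*
 * XFS_ABSDIFF() yields the absolute difference |a - b| without risking
 * unsigned underflow, e.g. XFS_ABSDIFF(3, 10) == 7.
 */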
35 
36 #define	XFSA_FIXUP_BNO_OK	1
37 #define	XFSA_FIXUP_CNT_OK	2
38 
39 STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
40 STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
41 STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
42 
43 /*
44  * Size of the AGFL.  For CRC-enabled filesystems we steal a couple of slots in
45  * the beginning of the block for a proper header with the location information
46  * and CRC.
47  */
48 unsigned int
49 xfs_agfl_size(
50 	struct xfs_mount	*mp)
51 {
52 	unsigned int		size = mp->m_sb.sb_sectsize;
53 
54 	if (xfs_sb_version_hascrc(&mp->m_sb))
55 		size -= sizeof(struct xfs_agfl);
56 
57 	return size / sizeof(xfs_agblock_t);
58 }
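/*
 * Illustrative sizing: with 512-byte sectors and no CRC header this is
 * 512 / sizeof(xfs_agblock_t) = 128 slots; CRC-enabled filesystems give
 * up a few of those slots to the struct xfs_agfl header.
 */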
59 
60 unsigned int
61 xfs_refc_block(
62 	struct xfs_mount	*mp)
63 {
64 	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
65 		return XFS_RMAP_BLOCK(mp) + 1;
66 	if (xfs_sb_version_hasfinobt(&mp->m_sb))
67 		return XFS_FIBT_BLOCK(mp) + 1;
68 	return XFS_IBT_BLOCK(mp) + 1;
69 }
70 
71 xfs_extlen_t
72 xfs_prealloc_blocks(
73 	struct xfs_mount	*mp)
74 {
75 	if (xfs_sb_version_hasreflink(&mp->m_sb))
76 		return xfs_refc_block(mp) + 1;
77 	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
78 		return XFS_RMAP_BLOCK(mp) + 1;
79 	if (xfs_sb_version_hasfinobt(&mp->m_sb))
80 		return XFS_FIBT_BLOCK(mp) + 1;
81 	return XFS_IBT_BLOCK(mp) + 1;
82 }
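/*
 * Note: each optional btree root above is placed immediately after the
 * previous one, so both helpers return the block just past whichever root
 * comes last for the feature set enabled on this filesystem.
 */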
83 
84 /*
85  * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
86  * AGF buffer (PV 947395), we place constraints on the relationship among
87  * actual allocations for data blocks, freelist blocks, and potential file data
88  * bmap btree blocks. However, these restrictions may result in no actual space
89  * allocated for a delayed extent, for example, a data block in a certain AG is
90  * allocated but there is no additional block for the additional bmap btree
91  * block due to a split of the bmap btree of the file. The result of this may
92  * lead to an infinite loop when the file gets flushed to disk and all delayed
93  * extents need to be actually allocated. To get around this, we explicitly set
94  * aside a few blocks which will not be reserved in delayed allocation.
95  *
96  * We need to reserve 4 fsbs _per AG_ for the freelist and 4 more to handle a
97  * potential split of the file's bmap btree.
98  */
99 unsigned int
100 xfs_alloc_set_aside(
101 	struct xfs_mount	*mp)
102 {
103 	return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4);
104 }
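/*
 * Worked example (assuming XFS_ALLOC_AGFL_RESERVE covers the 4 freelist
 * blocks described above): each AG sets aside 4 + 4 = 8 blocks, so a 16-AG
 * filesystem hides 128 blocks from delayed allocation.
 */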
105 
106 /*
107  * When deciding how much space to allocate out of an AG, we limit the
108  * allocation maximum size to the size of the AG. However, we cannot use all the
109  * blocks in the AG - some are permanently used by metadata. These
110  * blocks are generally:
111  *	- the AG superblock, AGF, AGI and AGFL
112  *	- the AGF (bno and cnt) and AGI btree root blocks, and optionally
113  *	  the AGI free inode and rmap btree root blocks.
114  *	- blocks on the AGFL according to xfs_alloc_set_aside() limits
115  *	- the rmapbt root block
116  *
117  * The AG headers are sector sized, so the amount of space they take up is
118  * dependent on filesystem geometry. The others are all single blocks.
119  */
120 unsigned int
121 xfs_alloc_ag_max_usable(
122 	struct xfs_mount	*mp)
123 {
124 	unsigned int		blocks;
125 
126 	blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
127 	blocks += XFS_ALLOC_AGFL_RESERVE;
128 	blocks += 3;			/* AGF, AGI btree root blocks */
129 	if (xfs_sb_version_hasfinobt(&mp->m_sb))
130 		blocks++;		/* finobt root block */
131 	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
132 		blocks++; 		/* rmap root block */
133 	if (xfs_sb_version_hasreflink(&mp->m_sb))
134 		blocks++;		/* refcount root block */
135 
136 	return mp->m_sb.sb_agblocks - blocks;
137 }
138 
139 /*
140  * Lookup the record equal to [bno, len] in the btree given by cur.
141  */
142 STATIC int				/* error */
143 xfs_alloc_lookup_eq(
144 	struct xfs_btree_cur	*cur,	/* btree cursor */
145 	xfs_agblock_t		bno,	/* starting block of extent */
146 	xfs_extlen_t		len,	/* length of extent */
147 	int			*stat)	/* success/failure */
148 {
149 	cur->bc_rec.a.ar_startblock = bno;
150 	cur->bc_rec.a.ar_blockcount = len;
151 	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
152 }
153 
154 /*
155  * Lookup the first record greater than or equal to [bno, len]
156  * in the btree given by cur.
157  */
158 int				/* error */
159 xfs_alloc_lookup_ge(
160 	struct xfs_btree_cur	*cur,	/* btree cursor */
161 	xfs_agblock_t		bno,	/* starting block of extent */
162 	xfs_extlen_t		len,	/* length of extent */
163 	int			*stat)	/* success/failure */
164 {
165 	cur->bc_rec.a.ar_startblock = bno;
166 	cur->bc_rec.a.ar_blockcount = len;
167 	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
168 }
169 
170 /*
171  * Lookup the first record less than or equal to [bno, len]
172  * in the btree given by cur.
173  */
174 int					/* error */
175 xfs_alloc_lookup_le(
176 	struct xfs_btree_cur	*cur,	/* btree cursor */
177 	xfs_agblock_t		bno,	/* starting block of extent */
178 	xfs_extlen_t		len,	/* length of extent */
179 	int			*stat)	/* success/failure */
180 {
181 	cur->bc_rec.a.ar_startblock = bno;
182 	cur->bc_rec.a.ar_blockcount = len;
183 	return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
184 }
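/*
 * All three lookup helpers stage [bno, len] in cur->bc_rec.a and differ
 * only in the XFS_LOOKUP_EQ/GE/LE mode passed to xfs_btree_lookup().  A
 * typical caller positions the cursor and then reads the record back:
 *
 *	error = xfs_alloc_lookup_le(bno_cur, agbno, minlen, &i);
 *	if (!error && i)
 *		error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
 */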
185 
186 /*
187  * Update the record referred to by cur to the value given
188  * by [bno, len].
189  * This either works (return 0) or gets an EFSCORRUPTED error.
190  */
191 STATIC int				/* error */
192 xfs_alloc_update(
193 	struct xfs_btree_cur	*cur,	/* btree cursor */
194 	xfs_agblock_t		bno,	/* starting block of extent */
195 	xfs_extlen_t		len)	/* length of extent */
196 {
197 	union xfs_btree_rec	rec;
198 
199 	rec.alloc.ar_startblock = cpu_to_be32(bno);
200 	rec.alloc.ar_blockcount = cpu_to_be32(len);
201 	return xfs_btree_update(cur, &rec);
202 }
203 
204 /*
205  * Get the data from the pointed-to record.
206  */
207 int					/* error */
208 xfs_alloc_get_rec(
209 	struct xfs_btree_cur	*cur,	/* btree cursor */
210 	xfs_agblock_t		*bno,	/* output: starting block of extent */
211 	xfs_extlen_t		*len,	/* output: length of extent */
212 	int			*stat)	/* output: success/failure */
213 {
214 	struct xfs_mount	*mp = cur->bc_mp;
215 	xfs_agnumber_t		agno = cur->bc_private.a.agno;
216 	union xfs_btree_rec	*rec;
217 	int			error;
218 
219 	error = xfs_btree_get_rec(cur, &rec, stat);
220 	if (error || !(*stat))
221 		return error;
222 
223 	*bno = be32_to_cpu(rec->alloc.ar_startblock);
224 	*len = be32_to_cpu(rec->alloc.ar_blockcount);
225 
226 	if (*len == 0)
227 		goto out_bad_rec;
228 
229 	/* check for valid extent range, including overflow */
230 	if (!xfs_verify_agbno(mp, agno, *bno))
231 		goto out_bad_rec;
232 	if (*bno > *bno + *len)
233 		goto out_bad_rec;
234 	if (!xfs_verify_agbno(mp, agno, *bno + *len - 1))
235 		goto out_bad_rec;
236 
237 	return 0;
238 
239 out_bad_rec:
240 	xfs_warn(mp,
241 		"%s Freespace BTree record corruption in AG %d detected!",
242 		cur->bc_btnum == XFS_BTNUM_BNO ? "Block" : "Size", agno);
243 	xfs_warn(mp,
244 		"start block 0x%x block count 0x%x", *bno, *len);
245 	return -EFSCORRUPTED;
246 }
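/*
 * The checks above reject a record whose length is zero, whose start block
 * falls outside the AG, or whose [bno, bno + len) range wraps the 32-bit
 * block number space (the "*bno > *bno + *len" test) or runs past the AG.
 */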
247 
248 /*
249  * Compute aligned version of the found extent.
250  * Takes alignment and min length into account.
251  */
252 STATIC bool
253 xfs_alloc_compute_aligned(
254 	xfs_alloc_arg_t	*args,		/* allocation argument structure */
255 	xfs_agblock_t	foundbno,	/* starting block in found extent */
256 	xfs_extlen_t	foundlen,	/* length in found extent */
257 	xfs_agblock_t	*resbno,	/* result block number */
258 	xfs_extlen_t	*reslen,	/* result length */
259 	unsigned	*busy_gen)
260 {
261 	xfs_agblock_t	bno = foundbno;
262 	xfs_extlen_t	len = foundlen;
263 	xfs_extlen_t	diff;
264 	bool		busy;
265 
266 	/* Trim busy sections out of found extent */
267 	busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);
268 
269 	/*
270 	 * If we have a largish extent that happens to start before min_agbno,
271 	 * see if we can shift it into range...
272 	 */
273 	if (bno < args->min_agbno && bno + len > args->min_agbno) {
274 		diff = args->min_agbno - bno;
275 		if (len > diff) {
276 			bno += diff;
277 			len -= diff;
278 		}
279 	}
280 
281 	if (args->alignment > 1 && len >= args->minlen) {
282 		xfs_agblock_t	aligned_bno = roundup(bno, args->alignment);
283 
284 		diff = aligned_bno - bno;
285 
286 		*resbno = aligned_bno;
287 		*reslen = diff >= len ? 0 : len - diff;
288 	} else {
289 		*resbno = bno;
290 		*reslen = len;
291 	}
292 
293 	return busy;
294 }
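/*
 * Example: with args->alignment = 4, a busy-trimmed extent [10, 30) comes
 * back as resbno 12 (rounded up) and reslen 18; the two skipped blocks are
 * lost to alignment.
 */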
295 
296 /*
297  * Compute best start block and diff for "near" allocations.
298  * freelen >= wantlen already checked by caller.
299  */
300 STATIC xfs_extlen_t			/* difference value (absolute) */
301 xfs_alloc_compute_diff(
302 	xfs_agblock_t	wantbno,	/* target starting block */
303 	xfs_extlen_t	wantlen,	/* target length */
304 	xfs_extlen_t	alignment,	/* target alignment */
305 	int		datatype,	/* are we allocating data? */
306 	xfs_agblock_t	freebno,	/* freespace's starting block */
307 	xfs_extlen_t	freelen,	/* freespace's length */
308 	xfs_agblock_t	*newbnop)	/* result: best start block from free */
309 {
310 	xfs_agblock_t	freeend;	/* end of freespace extent */
311 	xfs_agblock_t	newbno1;	/* return block number */
312 	xfs_agblock_t	newbno2;	/* other new block number */
313 	xfs_extlen_t	newlen1=0;	/* length with newbno1 */
314 	xfs_extlen_t	newlen2=0;	/* length with newbno2 */
315 	xfs_agblock_t	wantend;	/* end of target extent */
316 	bool		userdata = xfs_alloc_is_userdata(datatype);
317 
318 	ASSERT(freelen >= wantlen);
319 	freeend = freebno + freelen;
320 	wantend = wantbno + wantlen;
321 	/*
322 	 * We want to allocate from the start of a free extent if it is past
323 	 * the desired block or if we are allocating user data and the free
324 	 * extent is before desired block. The second case is there to allow
325 	 * for contiguous allocation from the remaining free space if the file
326 	 * grows in the short term.
327 	 */
328 	if (freebno >= wantbno || (userdata && freeend < wantend)) {
329 		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
330 			newbno1 = NULLAGBLOCK;
331 	} else if (freeend >= wantend && alignment > 1) {
332 		newbno1 = roundup(wantbno, alignment);
333 		newbno2 = newbno1 - alignment;
334 		if (newbno1 >= freeend)
335 			newbno1 = NULLAGBLOCK;
336 		else
337 			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
338 		if (newbno2 < freebno)
339 			newbno2 = NULLAGBLOCK;
340 		else
341 			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
342 		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
343 			if (newlen1 < newlen2 ||
344 			    (newlen1 == newlen2 &&
345 			     XFS_ABSDIFF(newbno1, wantbno) >
346 			     XFS_ABSDIFF(newbno2, wantbno)))
347 				newbno1 = newbno2;
348 		} else if (newbno2 != NULLAGBLOCK)
349 			newbno1 = newbno2;
350 	} else if (freeend >= wantend) {
351 		newbno1 = wantbno;
352 	} else if (alignment > 1) {
353 		newbno1 = roundup(freeend - wantlen, alignment);
354 		if (newbno1 > freeend - wantlen &&
355 		    newbno1 - alignment >= freebno)
356 			newbno1 -= alignment;
357 		else if (newbno1 >= freeend)
358 			newbno1 = NULLAGBLOCK;
359 	} else
360 		newbno1 = freeend - wantlen;
361 	*newbnop = newbno1;
362 	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
363 }
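/*
 * Example: an unaligned request for 8 blocks at block 100 served from a
 * free extent starting at block 120 takes the start of that extent
 * (newbno 120) and returns a diff of 20; "near" allocation picks the
 * candidate with the smallest such diff.
 */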
364 
365 /*
366  * Fix up the length, based on mod and prod.
367  * len should be k * prod + mod for some k.
368  * If len is too small it is returned unchanged.
369  * If len hits maxlen it is left alone.
370  */
371 STATIC void
372 xfs_alloc_fix_len(
373 	xfs_alloc_arg_t	*args)		/* allocation argument structure */
374 {
375 	xfs_extlen_t	k;
376 	xfs_extlen_t	rlen;
377 
378 	ASSERT(args->mod < args->prod);
379 	rlen = args->len;
380 	ASSERT(rlen >= args->minlen);
381 	ASSERT(rlen <= args->maxlen);
382 	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
383 	    (args->mod == 0 && rlen < args->prod))
384 		return;
385 	k = rlen % args->prod;
386 	if (k == args->mod)
387 		return;
388 	if (k > args->mod)
389 		rlen = rlen - (k - args->mod);
390 	else
391 		rlen = rlen - args->prod + (args->mod - k);
392 	/* casts to (int) catch length underflows */
393 	if ((int)rlen < (int)args->minlen)
394 		return;
395 	ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
396 	ASSERT(rlen % args->prod == args->mod);
397 	ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
398 		rlen + args->minleft);
399 	args->len = rlen;
400 }
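/*
 * Example: with args->prod = 4 and args->mod = 1, a candidate length of 11
 * is trimmed to 9 (k = 11 % 4 = 3 > mod, so 11 - (3 - 1) = 9 = 2 * 4 + 1).
 */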
401 
402 /*
403  * Update the two btrees, logically removing from freespace the extent
404  * starting at rbno, rlen blocks.  The extent is contained within the
405  * actual (current) free extent fbno for flen blocks.
406  * Flags are passed in indicating whether the cursors are set to the
407  * relevant records.
408  */
409 STATIC int				/* error code */
410 xfs_alloc_fixup_trees(
411 	xfs_btree_cur_t	*cnt_cur,	/* cursor for by-size btree */
412 	xfs_btree_cur_t	*bno_cur,	/* cursor for by-block btree */
413 	xfs_agblock_t	fbno,		/* starting block of free extent */
414 	xfs_extlen_t	flen,		/* length of free extent */
415 	xfs_agblock_t	rbno,		/* starting block of returned extent */
416 	xfs_extlen_t	rlen,		/* length of returned extent */
417 	int		flags)		/* flags, XFSA_FIXUP_... */
418 {
419 	int		error;		/* error code */
420 	int		i;		/* operation results */
421 	xfs_agblock_t	nfbno1;		/* first new free startblock */
422 	xfs_agblock_t	nfbno2;		/* second new free startblock */
423 	xfs_extlen_t	nflen1=0;	/* first new free length */
424 	xfs_extlen_t	nflen2=0;	/* second new free length */
425 	struct xfs_mount *mp;
426 
427 	mp = cnt_cur->bc_mp;
428 
429 	/*
430 	 * Look up the record in the by-size tree if necessary.
431 	 */
432 	if (flags & XFSA_FIXUP_CNT_OK) {
433 #ifdef DEBUG
434 		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
435 			return error;
436 		XFS_WANT_CORRUPTED_RETURN(mp,
437 			i == 1 && nfbno1 == fbno && nflen1 == flen);
438 #endif
439 	} else {
440 		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
441 			return error;
442 		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
443 	}
444 	/*
445 	 * Look up the record in the by-block tree if necessary.
446 	 */
447 	if (flags & XFSA_FIXUP_BNO_OK) {
448 #ifdef DEBUG
449 		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
450 			return error;
451 		XFS_WANT_CORRUPTED_RETURN(mp,
452 			i == 1 && nfbno1 == fbno && nflen1 == flen);
453 #endif
454 	} else {
455 		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
456 			return error;
457 		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
458 	}
459 
460 #ifdef DEBUG
461 	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
462 		struct xfs_btree_block	*bnoblock;
463 		struct xfs_btree_block	*cntblock;
464 
465 		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
466 		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);
467 
468 		XFS_WANT_CORRUPTED_RETURN(mp,
469 			bnoblock->bb_numrecs == cntblock->bb_numrecs);
470 	}
471 #endif
472 
473 	/*
474 	 * Deal with all four cases: the allocated record is contained
475 	 * within the freespace record, so we can have new freespace
476 	 * at either (or both) end, or no freespace remaining.
477 	 */
478 	if (rbno == fbno && rlen == flen)
479 		nfbno1 = nfbno2 = NULLAGBLOCK;
480 	else if (rbno == fbno) {
481 		nfbno1 = rbno + rlen;
482 		nflen1 = flen - rlen;
483 		nfbno2 = NULLAGBLOCK;
484 	} else if (rbno + rlen == fbno + flen) {
485 		nfbno1 = fbno;
486 		nflen1 = flen - rlen;
487 		nfbno2 = NULLAGBLOCK;
488 	} else {
489 		nfbno1 = fbno;
490 		nflen1 = rbno - fbno;
491 		nfbno2 = rbno + rlen;
492 		nflen2 = (fbno + flen) - nfbno2;
493 	}
494 	/*
495 	 * Delete the entry from the by-size btree.
496 	 */
497 	if ((error = xfs_btree_delete(cnt_cur, &i)))
498 		return error;
499 	XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
500 	/*
501 	 * Add new by-size btree entry(s).
502 	 */
503 	if (nfbno1 != NULLAGBLOCK) {
504 		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
505 			return error;
506 		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
507 		if ((error = xfs_btree_insert(cnt_cur, &i)))
508 			return error;
509 		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
510 	}
511 	if (nfbno2 != NULLAGBLOCK) {
512 		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
513 			return error;
514 		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
515 		if ((error = xfs_btree_insert(cnt_cur, &i)))
516 			return error;
517 		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
518 	}
519 	/*
520 	 * Fix up the by-block btree entry(s).
521 	 */
522 	if (nfbno1 == NULLAGBLOCK) {
523 		/*
524 		 * No remaining freespace, just delete the by-block tree entry.
525 		 */
526 		if ((error = xfs_btree_delete(bno_cur, &i)))
527 			return error;
528 		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
529 	} else {
530 		/*
531 		 * Update the by-block entry to start later|be shorter.
532 		 */
533 		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
534 			return error;
535 	}
536 	if (nfbno2 != NULLAGBLOCK) {
537 		/*
538 		 * 2 resulting free entries, need to add one.
539 		 */
540 		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
541 			return error;
542 		XFS_WANT_CORRUPTED_RETURN(mp, i == 0);
543 		if ((error = xfs_btree_insert(bno_cur, &i)))
544 			return error;
545 		XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
546 	}
547 	return 0;
548 }
549 
550 static xfs_failaddr_t
551 xfs_agfl_verify(
552 	struct xfs_buf	*bp)
553 {
554 	struct xfs_mount *mp = bp->b_mount;
555 	struct xfs_agfl	*agfl = XFS_BUF_TO_AGFL(bp);
556 	int		i;
557 
558 	/*
559 	 * There is no verification of non-crc AGFLs because mkfs does not
560 	 * initialise the AGFL to zero or NULL. Hence the only valid part of the
561 	 * AGFL is what the AGF says is active. We can't get to the AGF, so we
562 	 * can't verify just those entries are valid.
563 	 */
564 	if (!xfs_sb_version_hascrc(&mp->m_sb))
565 		return NULL;
566 
567 	if (!xfs_verify_magic(bp, agfl->agfl_magicnum))
568 		return __this_address;
569 	if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
570 		return __this_address;
571 	/*
572 	 * during growfs operations, the perag is not fully initialised,
573 	 * so we can't use it for any useful checking. growfs ensures we can't
574 	 * use it by using uncached buffers that don't have the perag attached
575 	 * so we can detect and avoid this problem.
576 	 */
577 	if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
578 		return __this_address;
579 
580 	for (i = 0; i < xfs_agfl_size(mp); i++) {
581 		if (be32_to_cpu(agfl->agfl_bno[i]) != NULLAGBLOCK &&
582 		    be32_to_cpu(agfl->agfl_bno[i]) >= mp->m_sb.sb_agblocks)
583 			return __this_address;
584 	}
585 
586 	if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn)))
587 		return __this_address;
588 	return NULL;
589 }
590 
591 static void
592 xfs_agfl_read_verify(
593 	struct xfs_buf	*bp)
594 {
595 	struct xfs_mount *mp = bp->b_mount;
596 	xfs_failaddr_t	fa;
597 
598 	/*
599 	 * There is no verification of non-crc AGFLs because mkfs does not
600 	 * initialise the AGFL to zero or NULL. Hence the only valid part of the
601 	 * AGFL is what the AGF says is active. We can't get to the AGF, so we
602 	 * can't verify just those entries are valid.
603 	 */
604 	if (!xfs_sb_version_hascrc(&mp->m_sb))
605 		return;
606 
607 	if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
608 		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
609 	else {
610 		fa = xfs_agfl_verify(bp);
611 		if (fa)
612 			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
613 	}
614 }
615 
616 static void
617 xfs_agfl_write_verify(
618 	struct xfs_buf	*bp)
619 {
620 	struct xfs_mount	*mp = bp->b_mount;
621 	struct xfs_buf_log_item	*bip = bp->b_log_item;
622 	xfs_failaddr_t		fa;
623 
624 	/* no verification of non-crc AGFLs */
625 	if (!xfs_sb_version_hascrc(&mp->m_sb))
626 		return;
627 
628 	fa = xfs_agfl_verify(bp);
629 	if (fa) {
630 		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
631 		return;
632 	}
633 
634 	if (bip)
635 		XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);
636 
637 	xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
638 }
639 
640 const struct xfs_buf_ops xfs_agfl_buf_ops = {
641 	.name = "xfs_agfl",
642 	.magic = { cpu_to_be32(XFS_AGFL_MAGIC), cpu_to_be32(XFS_AGFL_MAGIC) },
643 	.verify_read = xfs_agfl_read_verify,
644 	.verify_write = xfs_agfl_write_verify,
645 	.verify_struct = xfs_agfl_verify,
646 };
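/*
 * As with other XFS buffer ops, ->verify_read runs once the buffer has been
 * read from disk (CRC check first, then structure), ->verify_write runs just
 * before it is written (stamping the LSN and recomputing the CRC), and
 * ->verify_struct does the structural checks alone.
 */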
647 
648 /*
649  * Read in the allocation group free block array.
650  */
651 int					/* error */
652 xfs_alloc_read_agfl(
653 	xfs_mount_t	*mp,		/* mount point structure */
654 	xfs_trans_t	*tp,		/* transaction pointer */
655 	xfs_agnumber_t	agno,		/* allocation group number */
656 	xfs_buf_t	**bpp)		/* buffer for the ag free block array */
657 {
658 	xfs_buf_t	*bp;		/* return value */
659 	int		error;
660 
661 	ASSERT(agno != NULLAGNUMBER);
662 	error = xfs_trans_read_buf(
663 			mp, tp, mp->m_ddev_targp,
664 			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
665 			XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
666 	if (error)
667 		return error;
668 	xfs_buf_set_ref(bp, XFS_AGFL_REF);
669 	*bpp = bp;
670 	return 0;
671 }
672 
673 STATIC int
674 xfs_alloc_update_counters(
675 	struct xfs_trans	*tp,
676 	struct xfs_perag	*pag,
677 	struct xfs_buf		*agbp,
678 	long			len)
679 {
680 	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
681 
682 	pag->pagf_freeblks += len;
683 	be32_add_cpu(&agf->agf_freeblks, len);
684 
685 	xfs_trans_agblocks_delta(tp, len);
686 	if (unlikely(be32_to_cpu(agf->agf_freeblks) >
687 		     be32_to_cpu(agf->agf_length))) {
688 		xfs_buf_mark_corrupt(agbp);
689 		return -EFSCORRUPTED;
690 	}
691 
692 	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
693 	return 0;
694 }
695 
696 /*
697  * Allocation group level functions.
698  */
699 
700 /*
701  * Deal with the case where only small freespaces remain. Either return the
702  * contents of the last freespace record, or allocate space from the freelist if
703  * there is nothing in the tree.
704  */
705 STATIC int			/* error */
706 xfs_alloc_ag_vextent_small(
707 	struct xfs_alloc_arg	*args,	/* allocation argument structure */
708 	struct xfs_btree_cur	*ccur,	/* optional by-size cursor */
709 	xfs_agblock_t		*fbnop,	/* result block number */
710 	xfs_extlen_t		*flenp,	/* result length */
711 	int			*stat)	/* status: 0-freelist, 1-normal/none */
712 {
713 	int			error = 0;
714 	xfs_agblock_t		fbno = NULLAGBLOCK;
715 	xfs_extlen_t		flen = 0;
716 	int			i = 0;
717 
718 	/*
719 	 * If a cntbt cursor is provided, try to allocate the largest record in
720 	 * the tree. Try the AGFL if the cntbt is empty, otherwise fail the
721 	 * allocation. Make sure to respect minleft even when pulling from the
722 	 * freelist.
723 	 */
724 	if (ccur)
725 		error = xfs_btree_decrement(ccur, 0, &i);
726 	if (error)
727 		goto error;
728 	if (i) {
729 		error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i);
730 		if (error)
731 			goto error;
732 		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error);
733 		goto out;
734 	}
735 
736 	if (args->minlen != 1 || args->alignment != 1 ||
737 	    args->resv == XFS_AG_RESV_AGFL ||
738 	    (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount) <=
739 	     args->minleft))
740 		goto out;
741 
742 	error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
743 	if (error)
744 		goto error;
745 	if (fbno == NULLAGBLOCK)
746 		goto out;
747 
748 	xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
749 			      xfs_alloc_allow_busy_reuse(args->datatype));
750 
751 	if (xfs_alloc_is_userdata(args->datatype)) {
752 		struct xfs_buf	*bp;
753 
754 		bp = xfs_btree_get_bufs(args->mp, args->tp, args->agno, fbno);
755 		if (!bp) {
756 			XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, args->mp);
757 			error = -EFSCORRUPTED;
758 			goto error;
759 		}
760 		xfs_trans_binval(args->tp, bp);
761 	}
762 	*fbnop = args->agbno = fbno;
763 	*flenp = args->len = 1;
764 	XFS_WANT_CORRUPTED_GOTO(args->mp,
765 		fbno < be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
766 		error);
767 	args->wasfromfl = 1;
768 	trace_xfs_alloc_small_freelist(args);
769 
770 	/*
771 	 * If we're feeding an AGFL block to something that doesn't live in the
772 	 * free space, we need to clear out the OWN_AG rmap.
773 	 */
774 	error = xfs_rmap_free(args->tp, args->agbp, args->agno, fbno, 1,
775 			      &XFS_RMAP_OINFO_AG);
776 	if (error)
777 		goto error;
778 
779 	*stat = 0;
780 	return 0;
781 
782 out:
783 	/*
784 	 * Can't do the allocation, give up.
785 	 */
786 	if (flen < args->minlen) {
787 		args->agbno = NULLAGBLOCK;
788 		trace_xfs_alloc_small_notenough(args);
789 		flen = 0;
790 	}
791 	*fbnop = fbno;
792 	*flenp = flen;
793 	*stat = 1;
794 	trace_xfs_alloc_small_done(args);
795 	return 0;
796 
797 error:
798 	trace_xfs_alloc_small_error(args);
799 	return error;
800 }
801 
802 /*
803  * Allocate a variable extent in the allocation group agno.
804  * Type and bno are used to determine where in the allocation group the
805  * extent will start.
806  * Extent's length (returned in *len) will be between minlen and maxlen,
807  * and of the form k * prod + mod unless there's nothing that large.
808  * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
809  */
810 STATIC int			/* error */
811 xfs_alloc_ag_vextent(
812 	xfs_alloc_arg_t	*args)	/* argument structure for allocation */
813 {
814 	int		error=0;
815 
816 	ASSERT(args->minlen > 0);
817 	ASSERT(args->maxlen > 0);
818 	ASSERT(args->minlen <= args->maxlen);
819 	ASSERT(args->mod < args->prod);
820 	ASSERT(args->alignment > 0);
821 
822 	/*
823 	 * Branch to correct routine based on the type.
824 	 */
825 	args->wasfromfl = 0;
826 	switch (args->type) {
827 	case XFS_ALLOCTYPE_THIS_AG:
828 		error = xfs_alloc_ag_vextent_size(args);
829 		break;
830 	case XFS_ALLOCTYPE_NEAR_BNO:
831 		error = xfs_alloc_ag_vextent_near(args);
832 		break;
833 	case XFS_ALLOCTYPE_THIS_BNO:
834 		error = xfs_alloc_ag_vextent_exact(args);
835 		break;
836 	default:
837 		ASSERT(0);
838 		/* NOTREACHED */
839 	}
840 
841 	if (error || args->agbno == NULLAGBLOCK)
842 		return error;
843 
844 	ASSERT(args->len >= args->minlen);
845 	ASSERT(args->len <= args->maxlen);
846 	ASSERT(!args->wasfromfl || args->resv != XFS_AG_RESV_AGFL);
847 	ASSERT(args->agbno % args->alignment == 0);
848 
849 	/* if not file data, insert new block into the reverse map btree */
850 	if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
851 		error = xfs_rmap_alloc(args->tp, args->agbp, args->agno,
852 				       args->agbno, args->len, &args->oinfo);
853 		if (error)
854 			return error;
855 	}
856 
857 	if (!args->wasfromfl) {
858 		error = xfs_alloc_update_counters(args->tp, args->pag,
859 						  args->agbp,
860 						  -((long)(args->len)));
861 		if (error)
862 			return error;
863 
864 		ASSERT(!xfs_extent_busy_search(args->mp, args->agno,
865 					      args->agbno, args->len));
866 	}
867 
868 	xfs_ag_resv_alloc_extent(args->pag, args->resv, args);
869 
870 	XFS_STATS_INC(args->mp, xs_allocx);
871 	XFS_STATS_ADD(args->mp, xs_allocb, args->len);
872 	return error;
873 }
874 
875 /*
876  * Allocate a variable extent at exactly agno/bno.
877  * Extent's length (returned in *len) will be between minlen and maxlen,
878  * and of the form k * prod + mod unless there's nothing that large.
879  * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
880  */
881 STATIC int			/* error */
882 xfs_alloc_ag_vextent_exact(
883 	xfs_alloc_arg_t	*args)	/* allocation argument structure */
884 {
885 	xfs_btree_cur_t	*bno_cur;/* by block-number btree cursor */
886 	xfs_btree_cur_t	*cnt_cur;/* by count btree cursor */
887 	int		error;
888 	xfs_agblock_t	fbno;	/* start block of found extent */
889 	xfs_extlen_t	flen;	/* length of found extent */
890 	xfs_agblock_t	tbno;	/* start block of busy extent */
891 	xfs_extlen_t	tlen;	/* length of busy extent */
892 	xfs_agblock_t	tend;	/* end block of busy extent */
893 	int		i;	/* success/failure of operation */
894 	unsigned	busy_gen;
895 
896 	ASSERT(args->alignment == 1);
897 
898 	/*
899 	 * Allocate/initialize a cursor for the by-number freespace btree.
900 	 */
901 	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
902 					  args->agno, XFS_BTNUM_BNO);
903 
904 	/*
905 	 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
906 	 * Look for the closest free block <= bno, it must contain bno
907 	 * if any free block does.
908 	 */
909 	error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
910 	if (error)
911 		goto error0;
912 	if (!i)
913 		goto not_found;
914 
915 	/*
916 	 * Grab the freespace record.
917 	 */
918 	error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
919 	if (error)
920 		goto error0;
921 	XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
922 	ASSERT(fbno <= args->agbno);
923 
924 	/*
925 	 * Check for overlapping busy extents.
926 	 */
927 	tbno = fbno;
928 	tlen = flen;
929 	xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen);
930 
931 	/*
932 	 * Give up if the start of the extent is busy, or the freespace isn't
933 	 * long enough for the minimum request.
934 	 */
935 	if (tbno > args->agbno)
936 		goto not_found;
937 	if (tlen < args->minlen)
938 		goto not_found;
939 	tend = tbno + tlen;
940 	if (tend < args->agbno + args->minlen)
941 		goto not_found;
942 
943 	/*
944 	 * End of extent will be smaller of the freespace end and the
945 	 * maximal requested end.
946 	 *
947 	 * Fix the length according to mod and prod if given.
948 	 */
949 	args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
950 						- args->agbno;
951 	xfs_alloc_fix_len(args);
952 	ASSERT(args->agbno + args->len <= tend);
953 
954 	/*
955 	 * We are allocating agbno for args->len
956 	 * Allocate/initialize a cursor for the by-size btree.
957 	 */
958 	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
959 		args->agno, XFS_BTNUM_CNT);
960 	ASSERT(args->agbno + args->len <=
961 		be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
962 	error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
963 				      args->len, XFSA_FIXUP_BNO_OK);
964 	if (error) {
965 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
966 		goto error0;
967 	}
968 
969 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
970 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
971 
972 	args->wasfromfl = 0;
973 	trace_xfs_alloc_exact_done(args);
974 	return 0;
975 
976 not_found:
977 	/* Didn't find it, return null. */
978 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
979 	args->agbno = NULLAGBLOCK;
980 	trace_xfs_alloc_exact_notfound(args);
981 	return 0;
982 
983 error0:
984 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
985 	trace_xfs_alloc_exact_error(args);
986 	return error;
987 }
988 
989 /*
990  * Search the btree in a given direction via the search cursor and compare
991  * the records found against the good extent we've already found.
992  */
993 STATIC int
994 xfs_alloc_find_best_extent(
995 	struct xfs_alloc_arg	*args,	/* allocation argument structure */
996 	struct xfs_btree_cur	**gcur,	/* good cursor */
997 	struct xfs_btree_cur	**scur,	/* searching cursor */
998 	xfs_agblock_t		gdiff,	/* difference for search comparison */
999 	xfs_agblock_t		*sbno,	/* extent found by search */
1000 	xfs_extlen_t		*slen,	/* extent length */
1001 	xfs_agblock_t		*sbnoa,	/* aligned extent found by search */
1002 	xfs_extlen_t		*slena,	/* aligned extent length */
1003 	int			dir)	/* 0 = search right, 1 = search left */
1004 {
1005 	xfs_agblock_t		new;
1006 	xfs_agblock_t		sdiff;
1007 	int			error;
1008 	int			i;
1009 	unsigned		busy_gen;
1010 
1011 	/* The good extent is perfect, no need to  search. */
1012 	if (!gdiff)
1013 		goto out_use_good;
1014 
1015 	/*
1016 	 * Look until we find a better one, run out of space or run off the end.
1017 	 */
1018 	do {
1019 		error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
1020 		if (error)
1021 			goto error0;
1022 		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1023 		xfs_alloc_compute_aligned(args, *sbno, *slen,
1024 				sbnoa, slena, &busy_gen);
1025 
1026 		/*
1027 		 * The good extent is closer than this one.
1028 		 */
1029 		if (!dir) {
1030 			if (*sbnoa > args->max_agbno)
1031 				goto out_use_good;
1032 			if (*sbnoa >= args->agbno + gdiff)
1033 				goto out_use_good;
1034 		} else {
1035 			if (*sbnoa < args->min_agbno)
1036 				goto out_use_good;
1037 			if (*sbnoa <= args->agbno - gdiff)
1038 				goto out_use_good;
1039 		}
1040 
1041 		/*
1042 		 * Same distance, compare length and pick the best.
1043 		 */
1044 		if (*slena >= args->minlen) {
1045 			args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
1046 			xfs_alloc_fix_len(args);
1047 
1048 			sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1049 						       args->alignment,
1050 						       args->datatype, *sbnoa,
1051 						       *slena, &new);
1052 
1053 			/*
1054 			 * Choose closer size and invalidate other cursor.
1055 			 */
1056 			if (sdiff < gdiff)
1057 				goto out_use_search;
1058 			goto out_use_good;
1059 		}
1060 
1061 		if (!dir)
1062 			error = xfs_btree_increment(*scur, 0, &i);
1063 		else
1064 			error = xfs_btree_decrement(*scur, 0, &i);
1065 		if (error)
1066 			goto error0;
1067 	} while (i);
1068 
1069 out_use_good:
1070 	xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
1071 	*scur = NULL;
1072 	return 0;
1073 
1074 out_use_search:
1075 	xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
1076 	*gcur = NULL;
1077 	return 0;
1078 
1079 error0:
1080 	/* caller invalidates cursors */
1081 	return error;
1082 }
1083 
1084 /*
1085  * Allocate a variable extent near bno in the allocation group agno.
1086  * Extent's length (returned in len) will be between minlen and maxlen,
1087  * and of the form k * prod + mod unless there's nothing that large.
1088  * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1089  */
1090 STATIC int				/* error */
1091 xfs_alloc_ag_vextent_near(
1092 	xfs_alloc_arg_t	*args)		/* allocation argument structure */
1093 {
1094 	xfs_btree_cur_t	*bno_cur_gt;	/* cursor for bno btree, right side */
1095 	xfs_btree_cur_t	*bno_cur_lt;	/* cursor for bno btree, left side */
1096 	xfs_btree_cur_t	*cnt_cur;	/* cursor for count btree */
1097 	xfs_agblock_t	gtbno;		/* start bno of right side entry */
1098 	xfs_agblock_t	gtbnoa;		/* aligned ... */
1099 	xfs_extlen_t	gtdiff;		/* difference to right side entry */
1100 	xfs_extlen_t	gtlen;		/* length of right side entry */
1101 	xfs_extlen_t	gtlena;		/* aligned ... */
1102 	xfs_agblock_t	gtnew;		/* useful start bno of right side */
1103 	int		error;		/* error code */
1104 	int		i;		/* result code, temporary */
1105 	int		j;		/* result code, temporary */
1106 	xfs_agblock_t	ltbno;		/* start bno of left side entry */
1107 	xfs_agblock_t	ltbnoa;		/* aligned ... */
1108 	xfs_extlen_t	ltdiff;		/* difference to left side entry */
1109 	xfs_extlen_t	ltlen;		/* length of left side entry */
1110 	xfs_extlen_t	ltlena;		/* aligned ... */
1111 	xfs_agblock_t	ltnew;		/* useful start bno of left side */
1112 	xfs_extlen_t	rlen;		/* length of returned extent */
1113 	bool		busy;
1114 	unsigned	busy_gen;
1115 #ifdef DEBUG
1116 	/*
1117 	 * Randomly don't execute the first algorithm.
1118 	 */
1119 	int		dofirst;	/* set to do first algorithm */
1120 
1121 	dofirst = prandom_u32() & 1;
1122 #endif
1123 
1124 	/* handle uninitialized agbno range so caller doesn't have to */
1125 	if (!args->min_agbno && !args->max_agbno)
1126 		args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
1127 	ASSERT(args->min_agbno <= args->max_agbno);
1128 
1129 	/* clamp agbno to the range if it's outside */
1130 	if (args->agbno < args->min_agbno)
1131 		args->agbno = args->min_agbno;
1132 	if (args->agbno > args->max_agbno)
1133 		args->agbno = args->max_agbno;
1134 
1135 restart:
1136 	bno_cur_lt = NULL;
1137 	bno_cur_gt = NULL;
1138 	ltlen = 0;
1139 	gtlena = 0;
1140 	ltlena = 0;
1141 	busy = false;
1142 
1143 	/*
1144 	 * Get a cursor for the by-size btree.
1145 	 */
1146 	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1147 		args->agno, XFS_BTNUM_CNT);
1148 
1149 	/*
1150 	 * See if there are any free extents as big as maxlen.
1151 	 */
1152 	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
1153 		goto error0;
1154 	/*
1155 	 * If none, then pick up the last entry in the tree unless the
1156 	 * tree is empty.
1157 	 */
1158 	if (!i) {
1159 		if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
1160 				&ltlen, &i)))
1161 			goto error0;
1162 		if (i == 0 || ltlen == 0) {
1163 			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1164 			trace_xfs_alloc_near_noentry(args);
1165 			return 0;
1166 		}
1167 		ASSERT(i == 1);
1168 	}
1169 	args->wasfromfl = 0;
1170 
1171 	/*
1172 	 * First algorithm.
1173 	 * If the requested extent is large wrt the freespaces available
1174 	 * in this a.g., then the cursor will be pointing to a btree entry
1175 	 * near the right edge of the tree.  If it's in the last btree leaf
1176 	 * block, then we just examine all the entries in that block
1177 	 * that are big enough, and pick the best one.
1178 	 * This is written as a while loop so we can break out of it,
1179 	 * but we never loop back to the top.
1180 	 */
1181 	while (xfs_btree_islastblock(cnt_cur, 0)) {
1182 		xfs_extlen_t	bdiff;
1183 		int		besti=0;
1184 		xfs_extlen_t	blen=0;
1185 		xfs_agblock_t	bnew=0;
1186 
1187 #ifdef DEBUG
1188 		if (dofirst)
1189 			break;
1190 #endif
1191 		/*
1192 		 * Start from the entry that lookup found, sequence through
1193 		 * all larger free blocks.  If we're actually pointing at a
1194 		 * record smaller than maxlen, go to the start of this block,
1195 		 * and skip all those smaller than minlen.
1196 		 */
1197 		if (ltlen || args->alignment > 1) {
1198 			cnt_cur->bc_ptrs[0] = 1;
1199 			do {
1200 				if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
1201 						&ltlen, &i)))
1202 					goto error0;
1203 				XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1204 				if (ltlen >= args->minlen)
1205 					break;
1206 				if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
1207 					goto error0;
1208 			} while (i);
1209 			ASSERT(ltlen >= args->minlen);
1210 			if (!i)
1211 				break;
1212 		}
1213 		i = cnt_cur->bc_ptrs[0];
1214 		for (j = 1, blen = 0, bdiff = 0;
1215 		     !error && j && (blen < args->maxlen || bdiff > 0);
1216 		     error = xfs_btree_increment(cnt_cur, 0, &j)) {
1217 			/*
1218 			 * For each entry, decide if it's better than
1219 			 * the previous best entry.
1220 			 */
1221 			if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
1222 				goto error0;
1223 			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1224 			busy = xfs_alloc_compute_aligned(args, ltbno, ltlen,
1225 					&ltbnoa, &ltlena, &busy_gen);
1226 			if (ltlena < args->minlen)
1227 				continue;
1228 			if (ltbnoa < args->min_agbno || ltbnoa > args->max_agbno)
1229 				continue;
1230 			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1231 			xfs_alloc_fix_len(args);
1232 			ASSERT(args->len >= args->minlen);
1233 			if (args->len < blen)
1234 				continue;
1235 			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1236 				args->alignment, args->datatype, ltbnoa,
1237 				ltlena, &ltnew);
1238 			if (ltnew != NULLAGBLOCK &&
1239 			    (args->len > blen || ltdiff < bdiff)) {
1240 				bdiff = ltdiff;
1241 				bnew = ltnew;
1242 				blen = args->len;
1243 				besti = cnt_cur->bc_ptrs[0];
1244 			}
1245 		}
1246 		/*
1247 		 * It didn't work.  We COULD be in a case where
1248 		 * there's a good record somewhere, so try again.
1249 		 */
1250 		if (blen == 0)
1251 			break;
1252 		/*
1253 		 * Point at the best entry, and retrieve it again.
1254 		 */
1255 		cnt_cur->bc_ptrs[0] = besti;
1256 		if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
1257 			goto error0;
1258 		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1259 		ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
1260 		args->len = blen;
1261 
1262 		/*
1263 		 * We are allocating starting at bnew for blen blocks.
1264 		 */
1265 		args->agbno = bnew;
1266 		ASSERT(bnew >= ltbno);
1267 		ASSERT(bnew + blen <= ltbno + ltlen);
1268 		/*
1269 		 * Set up a cursor for the by-bno tree.
1270 		 */
1271 		bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
1272 			args->agbp, args->agno, XFS_BTNUM_BNO);
1273 		/*
1274 		 * Fix up the btree entries.
1275 		 */
1276 		if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
1277 				ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
1278 			goto error0;
1279 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1280 		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1281 
1282 		trace_xfs_alloc_near_first(args);
1283 		return 0;
1284 	}
1285 	/*
1286 	 * Second algorithm.
1287 	 * Search in the by-bno tree to the left and to the right
1288 	 * simultaneously, until in each case we find a space big enough,
1289 	 * or run into the edge of the tree.  When we run into the edge,
1290 	 * we deallocate that cursor.
1291 	 * If both searches succeed, we compare the two spaces and pick
1292 	 * the better one.
1293 	 * With alignment, it's possible for both to fail; the upper
1294 	 * level algorithm that picks allocation groups for allocations
1295 	 * is not supposed to do this.
1296 	 */
1297 	/*
1298 	 * Allocate and initialize the cursor for the leftward search.
1299 	 */
1300 	bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1301 		args->agno, XFS_BTNUM_BNO);
1302 	/*
1303 	 * Lookup <= bno to find the leftward search's starting point.
1304 	 */
1305 	if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
1306 		goto error0;
1307 	if (!i) {
1308 		/*
1309 		 * Didn't find anything; use this cursor for the rightward
1310 		 * search.
1311 		 */
1312 		bno_cur_gt = bno_cur_lt;
1313 		bno_cur_lt = NULL;
1314 	}
1315 	/*
1316 	 * Found something.  Duplicate the cursor for the rightward search.
1317 	 */
1318 	else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
1319 		goto error0;
1320 	/*
1321 	 * Increment the cursor, so we will point at the entry just right
1322 	 * of the leftward entry if any, or to the leftmost entry.
1323 	 */
1324 	if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
1325 		goto error0;
1326 	if (!i) {
1327 		/*
1328 		 * It failed, there are no rightward entries.
1329 		 */
1330 		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
1331 		bno_cur_gt = NULL;
1332 	}
1333 	/*
1334 	 * Loop going left with the leftward cursor, right with the
1335 	 * rightward cursor, until either both directions give up or
1336 	 * we find an entry at least as big as minlen.
1337 	 */
1338 	do {
1339 		if (bno_cur_lt) {
1340 			if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
1341 				goto error0;
1342 			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1343 			busy |= xfs_alloc_compute_aligned(args, ltbno, ltlen,
1344 					&ltbnoa, &ltlena, &busy_gen);
1345 			if (ltlena >= args->minlen && ltbnoa >= args->min_agbno)
1346 				break;
1347 			if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
1348 				goto error0;
1349 			if (!i || ltbnoa < args->min_agbno) {
1350 				xfs_btree_del_cursor(bno_cur_lt,
1351 						     XFS_BTREE_NOERROR);
1352 				bno_cur_lt = NULL;
1353 			}
1354 		}
1355 		if (bno_cur_gt) {
1356 			if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
1357 				goto error0;
1358 			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1359 			busy |= xfs_alloc_compute_aligned(args, gtbno, gtlen,
1360 					&gtbnoa, &gtlena, &busy_gen);
1361 			if (gtlena >= args->minlen && gtbnoa <= args->max_agbno)
1362 				break;
1363 			if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
1364 				goto error0;
1365 			if (!i || gtbnoa > args->max_agbno) {
1366 				xfs_btree_del_cursor(bno_cur_gt,
1367 						     XFS_BTREE_NOERROR);
1368 				bno_cur_gt = NULL;
1369 			}
1370 		}
1371 	} while (bno_cur_lt || bno_cur_gt);
1372 
1373 	/*
1374 	 * Got both cursors still active, need to find better entry.
1375 	 */
1376 	if (bno_cur_lt && bno_cur_gt) {
1377 		if (ltlena >= args->minlen) {
1378 			/*
1379 			 * Left side is good, look for a right side entry.
1380 			 */
1381 			args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1382 			xfs_alloc_fix_len(args);
1383 			ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1384 				args->alignment, args->datatype, ltbnoa,
1385 				ltlena, &ltnew);
1386 
1387 			error = xfs_alloc_find_best_extent(args,
1388 						&bno_cur_lt, &bno_cur_gt,
1389 						ltdiff, &gtbno, &gtlen,
1390 						&gtbnoa, &gtlena,
1391 						0 /* search right */);
1392 		} else {
1393 			ASSERT(gtlena >= args->minlen);
1394 
1395 			/*
1396 			 * Right side is good, look for a left side entry.
1397 			 */
1398 			args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
1399 			xfs_alloc_fix_len(args);
1400 			gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1401 				args->alignment, args->datatype, gtbnoa,
1402 				gtlena, &gtnew);
1403 
1404 			error = xfs_alloc_find_best_extent(args,
1405 						&bno_cur_gt, &bno_cur_lt,
1406 						gtdiff, &ltbno, &ltlen,
1407 						&ltbnoa, &ltlena,
1408 						1 /* search left */);
1409 		}
1410 
1411 		if (error)
1412 			goto error0;
1413 	}
1414 
1415 	/*
1416 	 * If we couldn't get anything, give up.
1417 	 */
1418 	if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
1419 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1420 
1421 		if (busy) {
1422 			trace_xfs_alloc_near_busy(args);
1423 			xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
1424 			goto restart;
1425 		}
1426 		trace_xfs_alloc_size_neither(args);
1427 		args->agbno = NULLAGBLOCK;
1428 		return 0;
1429 	}
1430 
1431 	/*
1432 	 * At this point we have selected a freespace entry, either to the
1433 	 * left or to the right.  If it's on the right, copy all the
1434 	 * useful variables to the "left" set so we only have one
1435 	 * copy of this code.
1436 	 */
1437 	if (bno_cur_gt) {
1438 		bno_cur_lt = bno_cur_gt;
1439 		bno_cur_gt = NULL;
1440 		ltbno = gtbno;
1441 		ltbnoa = gtbnoa;
1442 		ltlen = gtlen;
1443 		ltlena = gtlena;
1444 		j = 1;
1445 	} else
1446 		j = 0;
1447 
1448 	/*
1449 	 * Fix up the length and compute the useful address.
1450 	 */
1451 	args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1452 	xfs_alloc_fix_len(args);
1453 	rlen = args->len;
1454 	(void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
1455 				     args->datatype, ltbnoa, ltlena, &ltnew);
1456 	ASSERT(ltnew >= ltbno);
1457 	ASSERT(ltnew + rlen <= ltbnoa + ltlena);
1458 	ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
1459 	ASSERT(ltnew >= args->min_agbno && ltnew <= args->max_agbno);
1460 	args->agbno = ltnew;
1461 
1462 	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
1463 			ltnew, rlen, XFSA_FIXUP_BNO_OK)))
1464 		goto error0;
1465 
1466 	if (j)
1467 		trace_xfs_alloc_near_greater(args);
1468 	else
1469 		trace_xfs_alloc_near_lesser(args);
1470 
1471 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1472 	xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1473 	return 0;
1474 
1475  error0:
1476 	trace_xfs_alloc_near_error(args);
1477 	if (cnt_cur != NULL)
1478 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1479 	if (bno_cur_lt != NULL)
1480 		xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
1481 	if (bno_cur_gt != NULL)
1482 		xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
1483 	return error;
1484 }
1485 
1486 /*
1487  * Allocate a variable extent anywhere in the allocation group agno.
1488  * Extent's length (returned in len) will be between minlen and maxlen,
1489  * and of the form k * prod + mod unless there's nothing that large.
1490  * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1491  */
1492 STATIC int				/* error */
1493 xfs_alloc_ag_vextent_size(
1494 	xfs_alloc_arg_t	*args)		/* allocation argument structure */
1495 {
1496 	xfs_btree_cur_t	*bno_cur;	/* cursor for bno btree */
1497 	xfs_btree_cur_t	*cnt_cur;	/* cursor for cnt btree */
1498 	int		error;		/* error result */
1499 	xfs_agblock_t	fbno;		/* start of found freespace */
1500 	xfs_extlen_t	flen;		/* length of found freespace */
1501 	int		i;		/* temp status variable */
1502 	xfs_agblock_t	rbno;		/* returned block number */
1503 	xfs_extlen_t	rlen;		/* length of returned extent */
1504 	bool		busy;
1505 	unsigned	busy_gen;
1506 
1507 restart:
1508 	/*
1509 	 * Allocate and initialize a cursor for the by-size btree.
1510 	 */
1511 	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1512 		args->agno, XFS_BTNUM_CNT);
1513 	bno_cur = NULL;
1514 	busy = false;
1515 
1516 	/*
1517 	 * Look for an entry >= maxlen+alignment-1 blocks.
1518 	 */
1519 	if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
1520 			args->maxlen + args->alignment - 1, &i)))
1521 		goto error0;
1522 
1523 	/*
1524 	 * If none then we have to settle for a smaller extent. In the case that
1525 	 * there are no large extents, this will return the last entry in the
1526 	 * tree unless the tree is empty. In the case that there are only busy
1527 	 * large extents, this will return the largest small extent unless there
1528 	 * are no smaller extents available.
1529 	 */
1530 	if (!i) {
1531 		error = xfs_alloc_ag_vextent_small(args, cnt_cur,
1532 						   &fbno, &flen, &i);
1533 		if (error)
1534 			goto error0;
1535 		if (i == 0 || flen == 0) {
1536 			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1537 			trace_xfs_alloc_size_noentry(args);
1538 			return 0;
1539 		}
1540 		ASSERT(i == 1);
1541 		busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno,
1542 				&rlen, &busy_gen);
1543 	} else {
1544 		/*
1545 		 * Search for a non-busy extent that is large enough.
1546 		 */
1547 		for (;;) {
1548 			error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
1549 			if (error)
1550 				goto error0;
1551 			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1552 
1553 			busy = xfs_alloc_compute_aligned(args, fbno, flen,
1554 					&rbno, &rlen, &busy_gen);
1555 
1556 			if (rlen >= args->maxlen)
1557 				break;
1558 
1559 			error = xfs_btree_increment(cnt_cur, 0, &i);
1560 			if (error)
1561 				goto error0;
1562 			if (i == 0) {
1563 				/*
1564 				 * Our only valid extents must have been busy.
1565 				 * Make it unbusy by forcing the log out and
1566 				 * retrying.
1567 				 */
1568 				xfs_btree_del_cursor(cnt_cur,
1569 						     XFS_BTREE_NOERROR);
1570 				trace_xfs_alloc_size_busy(args);
1571 				xfs_extent_busy_flush(args->mp,
1572 							args->pag, busy_gen);
1573 				goto restart;
1574 			}
1575 		}
1576 	}
1577 
1578 	/*
1579 	 * In the first case above, we got the last entry in the
1580 	 * by-size btree.  Now we check to see if the space hits maxlen
1581 	 * once aligned; if not, we search left for something better.
1582 	 * This can't happen in the second case above.
1583 	 */
1584 	rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1585 	XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
1586 			(rlen <= flen && rbno + rlen <= fbno + flen), error0);
1587 	if (rlen < args->maxlen) {
1588 		xfs_agblock_t	bestfbno;
1589 		xfs_extlen_t	bestflen;
1590 		xfs_agblock_t	bestrbno;
1591 		xfs_extlen_t	bestrlen;
1592 
1593 		bestrlen = rlen;
1594 		bestrbno = rbno;
1595 		bestflen = flen;
1596 		bestfbno = fbno;
1597 		for (;;) {
1598 			if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
1599 				goto error0;
1600 			if (i == 0)
1601 				break;
1602 			if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
1603 					&i)))
1604 				goto error0;
1605 			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1606 			if (flen < bestrlen)
1607 				break;
1608 			busy = xfs_alloc_compute_aligned(args, fbno, flen,
1609 					&rbno, &rlen, &busy_gen);
1610 			rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1611 			XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
1612 				(rlen <= flen && rbno + rlen <= fbno + flen),
1613 				error0);
1614 			if (rlen > bestrlen) {
1615 				bestrlen = rlen;
1616 				bestrbno = rbno;
1617 				bestflen = flen;
1618 				bestfbno = fbno;
1619 				if (rlen == args->maxlen)
1620 					break;
1621 			}
1622 		}
1623 		if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
1624 				&i)))
1625 			goto error0;
1626 		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
1627 		rlen = bestrlen;
1628 		rbno = bestrbno;
1629 		flen = bestflen;
1630 		fbno = bestfbno;
1631 	}
1632 	args->wasfromfl = 0;
1633 	/*
1634 	 * Fix up the length.
1635 	 */
1636 	args->len = rlen;
1637 	if (rlen < args->minlen) {
1638 		if (busy) {
1639 			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1640 			trace_xfs_alloc_size_busy(args);
1641 			xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
1642 			goto restart;
1643 		}
1644 		goto out_nominleft;
1645 	}
1646 	xfs_alloc_fix_len(args);
1647 
1648 	rlen = args->len;
1649 	XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0);
1650 	/*
1651 	 * Allocate and initialize a cursor for the by-block tree.
1652 	 */
1653 	bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1654 		args->agno, XFS_BTNUM_BNO);
1655 	if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
1656 			rbno, rlen, XFSA_FIXUP_CNT_OK)))
1657 		goto error0;
1658 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1659 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1660 	cnt_cur = bno_cur = NULL;
1661 	args->len = rlen;
1662 	args->agbno = rbno;
1663 	XFS_WANT_CORRUPTED_GOTO(args->mp,
1664 		args->agbno + args->len <=
1665 			be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
1666 		error0);
1667 	trace_xfs_alloc_size_done(args);
1668 	return 0;
1669 
1670 error0:
1671 	trace_xfs_alloc_size_error(args);
1672 	if (cnt_cur)
1673 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1674 	if (bno_cur)
1675 		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1676 	return error;
1677 
1678 out_nominleft:
1679 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1680 	trace_xfs_alloc_size_nominleft(args);
1681 	args->agbno = NULLAGBLOCK;
1682 	return 0;
1683 }
1684 
1685 /*
1686  * Free the extent starting at agno/bno for length.
1687  */
1688 STATIC int
1689 xfs_free_ag_extent(
1690 	struct xfs_trans		*tp,
1691 	struct xfs_buf			*agbp,
1692 	xfs_agnumber_t			agno,
1693 	xfs_agblock_t			bno,
1694 	xfs_extlen_t			len,
1695 	const struct xfs_owner_info	*oinfo,
1696 	enum xfs_ag_resv_type		type)
1697 {
1698 	struct xfs_mount		*mp;
1699 	struct xfs_perag		*pag;
1700 	struct xfs_btree_cur		*bno_cur;
1701 	struct xfs_btree_cur		*cnt_cur;
1702 	xfs_agblock_t			gtbno; /* start of right neighbor */
1703 	xfs_extlen_t			gtlen; /* length of right neighbor */
1704 	xfs_agblock_t			ltbno; /* start of left neighbor */
1705 	xfs_extlen_t			ltlen; /* length of left neighbor */
1706 	xfs_agblock_t			nbno; /* new starting block of freesp */
1707 	xfs_extlen_t			nlen; /* new length of freespace */
1708 	int				haveleft; /* have a left neighbor */
1709 	int				haveright; /* have a right neighbor */
1710 	int				i;
1711 	int				error;
1712 
1713 	bno_cur = cnt_cur = NULL;
1714 	mp = tp->t_mountp;
1715 
1716 	if (!xfs_rmap_should_skip_owner_update(oinfo)) {
1717 		error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo);
1718 		if (error)
1719 			goto error0;
1720 	}
1721 
1722 	/*
1723 	 * Allocate and initialize a cursor for the by-block btree.
1724 	 */
1725 	bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
1726 	/*
1727 	 * Look for a neighboring block on the left (lower block numbers)
1728 	 * that is contiguous with this space.
1729 	 */
1730 	if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
1731 		goto error0;
1732 	if (haveleft) {
1733 		/*
1734 		 * There is a block to our left.
1735 		 */
1736 		if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
1737 			goto error0;
1738 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1739 		/*
1740 		 * It's not contiguous, though.
1741 		 */
1742 		if (ltbno + ltlen < bno)
1743 			haveleft = 0;
1744 		else {
1745 			/*
1746 			 * If this failure happens the request to free this
1747 			 * space was invalid, it's (partly) already free.
1748 			 * Very bad.
1749 			 */
1750 			XFS_WANT_CORRUPTED_GOTO(mp,
1751 						ltbno + ltlen <= bno, error0);
1752 		}
1753 	}
1754 	/*
1755 	 * Look for a neighboring block on the right (higher block numbers)
1756 	 * that is contiguous with this space.
1757 	 */
1758 	if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
1759 		goto error0;
1760 	if (haveright) {
1761 		/*
1762 		 * There is a block to our right.
1763 		 */
1764 		if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
1765 			goto error0;
1766 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1767 		/*
1768 		 * It's not contiguous, though.
1769 		 */
1770 		if (bno + len < gtbno)
1771 			haveright = 0;
1772 		else {
1773 			/*
1774 			 * If this failure happens the request to free this
1775 			 * space was invalid, it's (partly) already free.
1776 			 * Very bad.
1777 			 */
1778 			XFS_WANT_CORRUPTED_GOTO(mp, gtbno >= bno + len, error0);
1779 		}
1780 	}
1781 	/*
1782 	 * Now allocate and initialize a cursor for the by-size tree.
1783 	 */
1784 	cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
1785 	/*
1786 	 * Have both left and right contiguous neighbors.
1787 	 * Merge all three into a single free block.
1788 	 */
1789 	if (haveleft && haveright) {
1790 		/*
1791 		 * Delete the old by-size entry on the left.
1792 		 */
1793 		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1794 			goto error0;
1795 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1796 		if ((error = xfs_btree_delete(cnt_cur, &i)))
1797 			goto error0;
1798 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1799 		/*
1800 		 * Delete the old by-size entry on the right.
1801 		 */
1802 		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1803 			goto error0;
1804 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1805 		if ((error = xfs_btree_delete(cnt_cur, &i)))
1806 			goto error0;
1807 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1808 		/*
1809 		 * Delete the old by-block entry for the right block.
1810 		 */
1811 		if ((error = xfs_btree_delete(bno_cur, &i)))
1812 			goto error0;
1813 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1814 		/*
1815 		 * Move the by-block cursor back to the left neighbor.
1816 		 */
1817 		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1818 			goto error0;
1819 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1820 #ifdef DEBUG
1821 		/*
1822 		 * Check that this is the right record: delete didn't
1823 		 * mangle the cursor.
1824 		 */
1825 		{
1826 			xfs_agblock_t	xxbno;
1827 			xfs_extlen_t	xxlen;
1828 
1829 			if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
1830 					&i)))
1831 				goto error0;
1832 			XFS_WANT_CORRUPTED_GOTO(mp,
1833 				i == 1 && xxbno == ltbno && xxlen == ltlen,
1834 				error0);
1835 		}
1836 #endif
1837 		/*
1838 		 * Update remaining by-block entry to the new, joined block.
1839 		 */
1840 		nbno = ltbno;
1841 		nlen = len + ltlen + gtlen;
1842 		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1843 			goto error0;
1844 	}
1845 	/*
1846 	 * Have only a left contiguous neighbor.
1847 	 * Merge it together with the new freespace.
1848 	 */
1849 	else if (haveleft) {
1850 		/*
1851 		 * Delete the old by-size entry on the left.
1852 		 */
1853 		if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1854 			goto error0;
1855 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1856 		if ((error = xfs_btree_delete(cnt_cur, &i)))
1857 			goto error0;
1858 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1859 		/*
1860 		 * Back up the by-block cursor to the left neighbor, and
1861 		 * update its length.
1862 		 */
1863 		if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1864 			goto error0;
1865 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1866 		nbno = ltbno;
1867 		nlen = len + ltlen;
1868 		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1869 			goto error0;
1870 	}
1871 	/*
1872 	 * Have only a right contiguous neighbor.
1873 	 * Merge it together with the new freespace.
1874 	 */
1875 	else if (haveright) {
1876 		/*
1877 		 * Delete the old by-size entry on the right.
1878 		 */
1879 		if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1880 			goto error0;
1881 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1882 		if ((error = xfs_btree_delete(cnt_cur, &i)))
1883 			goto error0;
1884 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1885 		/*
1886 		 * Update the starting block and length of the right
1887 		 * neighbor in the by-block tree.
1888 		 */
1889 		nbno = bno;
1890 		nlen = len + gtlen;
1891 		if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1892 			goto error0;
1893 	}
1894 	/*
1895 	 * No contiguous neighbors.
1896 	 * Insert the new freespace into the by-block tree.
1897 	 */
1898 	else {
1899 		nbno = bno;
1900 		nlen = len;
1901 		if ((error = xfs_btree_insert(bno_cur, &i)))
1902 			goto error0;
1903 		XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1904 	}
1905 	xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1906 	bno_cur = NULL;
1907 	/*
1908 	 * In all cases we need to insert the new freespace in the by-size tree.
1909 	 */
1910 	if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
1911 		goto error0;
1912 	XFS_WANT_CORRUPTED_GOTO(mp, i == 0, error0);
1913 	if ((error = xfs_btree_insert(cnt_cur, &i)))
1914 		goto error0;
1915 	XFS_WANT_CORRUPTED_GOTO(mp, i == 1, error0);
1916 	xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1917 	cnt_cur = NULL;
1918 
1919 	/*
1920 	 * Update the freespace totals in the ag and superblock.
1921 	 */
1922 	pag = xfs_perag_get(mp, agno);
1923 	error = xfs_alloc_update_counters(tp, pag, agbp, len);
1924 	xfs_ag_resv_free_extent(pag, type, tp, len);
1925 	xfs_perag_put(pag);
1926 	if (error)
1927 		goto error0;
1928 
1929 	XFS_STATS_INC(mp, xs_freex);
1930 	XFS_STATS_ADD(mp, xs_freeb, len);
1931 
1932 	trace_xfs_free_extent(mp, agno, bno, len, type, haveleft, haveright);
1933 
1934 	return 0;
1935 
1936  error0:
1937 	trace_xfs_free_extent(mp, agno, bno, len, type, -1, -1);
1938 	if (bno_cur)
1939 		xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1940 	if (cnt_cur)
1941 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1942 	return error;
1943 }
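
/*
 * Worked example of the merge logic above (hypothetical values, added for
 * illustration): freeing the 8-block extent [100, 108) while the by-block
 * btree already holds a left neighbor [92, 100) and a right neighbor
 * [108, 124) takes the haveleft && haveright path.  Both old by-size
 * records (lengths 8 and 16) are deleted, the right by-block record is
 * deleted, the left by-block record is rewritten as [92, 124), and a
 * single new 32-block record is inserted into the by-size tree.
 */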
1944 
1945 /*
1946  * Visible (exported) allocation/free functions.
1947  * Some of these are used just by xfs_alloc_btree.c and this file.
1948  */
1949 
1950 /*
1951  * Compute and fill in value of m_ag_maxlevels.
1952  */
1953 void
1954 xfs_alloc_compute_maxlevels(
1955 	xfs_mount_t	*mp)	/* file system mount structure */
1956 {
1957 	mp->m_ag_maxlevels = xfs_btree_compute_maxlevels(mp->m_alloc_mnr,
1958 			(mp->m_sb.sb_agblocks + 1) / 2);
1959 }
1960 
1961 /*
1962  * Find the length of the longest extent in an AG.  The 'need' parameter
1963  * specifies how much space we're going to need for the AGFL and the
1964  * 'reserved' parameter tells us how many blocks in this AG are reserved for
1965  * other callers.
1966  */
1967 xfs_extlen_t
1968 xfs_alloc_longest_free_extent(
1969 	struct xfs_perag	*pag,
1970 	xfs_extlen_t		need,
1971 	xfs_extlen_t		reserved)
1972 {
1973 	xfs_extlen_t		delta = 0;
1974 
1975 	/*
1976 	 * If the AGFL needs a recharge, we'll have to subtract that from the
1977 	 * longest extent.
1978 	 */
1979 	if (need > pag->pagf_flcount)
1980 		delta = need - pag->pagf_flcount;
1981 
1982 	/*
1983 	 * If we cannot maintain others' reservations with space from the
1984 	 * not-longest freesp extents, we'll have to subtract /that/ from
1985 	 * the longest extent too.
1986 	 */
1987 	if (pag->pagf_freeblks - pag->pagf_longest < reserved)
1988 		delta += reserved - (pag->pagf_freeblks - pag->pagf_longest);
1989 
1990 	/*
1991 	 * If the longest extent is long enough to satisfy all the
1992 	 * reservations and AGFL rules in place, we can return this extent.
1993 	 */
1994 	if (pag->pagf_longest > delta)
1995 		return pag->pagf_longest - delta;
1996 
1997 	/* Otherwise, let the caller try for 1 block if there's space. */
1998 	return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
1999 }
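
/*
 * Worked example (hypothetical values, added for illustration): with
 * need = 6 and pagf_flcount = 2, the AGFL refill shortfall gives
 * delta = 4.  If pagf_freeblks = 100, pagf_longest = 80 and
 * reserved = 30, the non-longest free space (100 - 80 = 20) cannot cover
 * the reservation, so delta += 30 - 20 = 10, giving delta = 14.  Since
 * 80 > 14, the usable longest extent reported to the caller is
 * 80 - 14 = 66 blocks.
 */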
2000 
2001 /*
2002  * Compute the minimum length of the AGFL in the given AG.  If @pag is NULL,
2003  * return the largest possible minimum length.
2004  */
2005 unsigned int
2006 xfs_alloc_min_freelist(
2007 	struct xfs_mount	*mp,
2008 	struct xfs_perag	*pag)
2009 {
2010 	/* AG btrees have at least 1 level. */
2011 	static const uint8_t	fake_levels[XFS_BTNUM_AGF] = {1, 1, 1};
2012 	const uint8_t		*levels = pag ? pag->pagf_levels : fake_levels;
2013 	unsigned int		min_free;
2014 
2015 	ASSERT(mp->m_ag_maxlevels > 0);
2016 
2017 	/* space needed by-bno freespace btree */
2018 	min_free = min_t(unsigned int, levels[XFS_BTNUM_BNOi] + 1,
2019 				       mp->m_ag_maxlevels);
2020 	/* space needed by-size freespace btree */
2021 	min_free += min_t(unsigned int, levels[XFS_BTNUM_CNTi] + 1,
2022 				       mp->m_ag_maxlevels);
2023 	/* space needed reverse mapping used space btree */
2024 	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
2025 		min_free += min_t(unsigned int, levels[XFS_BTNUM_RMAPi] + 1,
2026 						mp->m_rmap_maxlevels);
2027 
2028 	return min_free;
2029 }
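
/*
 * Worked example (hypothetical values, added for illustration): with
 * bnobt and cntbt levels of 2, an rmapbt level of 3, m_ag_maxlevels = 5
 * and m_rmap_maxlevels = 5 on an rmapbt filesystem, the minimum freelist
 * length is min(3, 5) + min(3, 5) + min(4, 5) = 10 blocks.
 */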
2030 
2031 /*
2032  * Check if the operation we are fixing up the freelist for should go ahead or
2033  * not. If we are freeing blocks, we always allow it, otherwise the allocation
2034  * is dependent on whether the size and shape of free space available will
2035  * permit the requested allocation to take place.
2036  */
2037 static bool
2038 xfs_alloc_space_available(
2039 	struct xfs_alloc_arg	*args,
2040 	xfs_extlen_t		min_free,
2041 	int			flags)
2042 {
2043 	struct xfs_perag	*pag = args->pag;
2044 	xfs_extlen_t		alloc_len, longest;
2045 	xfs_extlen_t		reservation; /* blocks that are still reserved */
2046 	int			available;
2047 	xfs_extlen_t		agflcount;
2048 
2049 	if (flags & XFS_ALLOC_FLAG_FREEING)
2050 		return true;
2051 
2052 	reservation = xfs_ag_resv_needed(pag, args->resv);
2053 
2054 	/* do we have enough contiguous free space for the allocation? */
2055 	alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
2056 	longest = xfs_alloc_longest_free_extent(pag, min_free, reservation);
2057 	if (longest < alloc_len)
2058 		return false;
2059 
2060 	/*
2061 	 * Do we have enough free space remaining for the allocation? Don't
2062 	 * account extra agfl blocks because we are about to defer freeing them,
2063 	 * making them unavailable until the current transaction commits.
2064 	 */
2065 	agflcount = min_t(xfs_extlen_t, pag->pagf_flcount, min_free);
2066 	available = (int)(pag->pagf_freeblks + agflcount -
2067 			  reservation - min_free - args->minleft);
2068 	if (available < (int)max(args->total, alloc_len))
2069 		return false;
2070 
2071 	/*
2072 	 * Clamp maxlen to the amount of free space available for the actual
2073 	 * extent allocation.
2074 	 */
2075 	if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
2076 		args->maxlen = available;
2077 		ASSERT(args->maxlen > 0);
2078 		ASSERT(args->maxlen >= args->minlen);
2079 	}
2080 
2081 	return true;
2082 }
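
/*
 * Worked example (hypothetical values, added for illustration): a request
 * with minlen = 8, alignment = 4, minalignslop = 0, total = 20 and
 * minleft = 0 needs a contiguous run of alloc_len = 8 + 3 = 11 blocks, so
 * the longest usable free extent must be at least 11.  With
 * pagf_freeblks = 60, pagf_flcount = 4, min_free = 10 and a reservation
 * of 16, available = 60 + min(4, 10) - 16 - 10 - 0 = 38, which covers
 * max(total, alloc_len) = 20, so the AG is usable.
 */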
2083 
2084 int
2085 xfs_free_agfl_block(
2086 	struct xfs_trans	*tp,
2087 	xfs_agnumber_t		agno,
2088 	xfs_agblock_t		agbno,
2089 	struct xfs_buf		*agbp,
2090 	struct xfs_owner_info	*oinfo)
2091 {
2092 	int			error;
2093 	struct xfs_buf		*bp;
2094 
2095 	error = xfs_free_ag_extent(tp, agbp, agno, agbno, 1, oinfo,
2096 				   XFS_AG_RESV_AGFL);
2097 	if (error)
2098 		return error;
2099 
2100 	bp = xfs_btree_get_bufs(tp->t_mountp, tp, agno, agbno);
2101 	if (!bp) {
2102 		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, tp->t_mountp);
2103 		return -EFSCORRUPTED;
2104 	}
2105 	xfs_trans_binval(tp, bp);
2106 
2107 	return 0;
2108 }
2109 
2110 /*
2111  * Check the agfl fields of the agf for inconsistency or corruption. The purpose
2112  * is to detect an agfl header padding mismatch between current and early v5
2113  * kernels. This problem manifests as a 1-slot size difference between the
2114  * on-disk flcount and the active [first, last] range of a wrapped agfl. This
2115  * may also catch variants of agfl count corruption unrelated to padding. Either
2116  * way, we'll reset the agfl and warn the user.
2117  *
2118  * Return true if a reset is required before the agfl can be used, false
2119  * otherwise.
2120  */
2121 static bool
2122 xfs_agfl_needs_reset(
2123 	struct xfs_mount	*mp,
2124 	struct xfs_agf		*agf)
2125 {
2126 	uint32_t		f = be32_to_cpu(agf->agf_flfirst);
2127 	uint32_t		l = be32_to_cpu(agf->agf_fllast);
2128 	uint32_t		c = be32_to_cpu(agf->agf_flcount);
2129 	int			agfl_size = xfs_agfl_size(mp);
2130 	int			active;
2131 
2132 	/* no agfl header on v4 supers */
2133 	if (!xfs_sb_version_hascrc(&mp->m_sb))
2134 		return false;
2135 
2136 	/*
2137 	 * The agf read verifier catches severe corruption of these fields.
2138 	 * Repeat some sanity checks to cover a packed -> unpacked mismatch if
2139 	 * the verifier allows it.
2140 	 */
2141 	if (f >= agfl_size || l >= agfl_size)
2142 		return true;
2143 	if (c > agfl_size)
2144 		return true;
2145 
2146 	/*
2147 	 * Check consistency between the on-disk count and the active range. An
2148 	 * agfl padding mismatch manifests as an inconsistent flcount.
2149 	 */
2150 	if (c && l >= f)
2151 		active = l - f + 1;
2152 	else if (c)
2153 		active = agfl_size - f + l + 1;
2154 	else
2155 		active = 0;
2156 
2157 	return active != c;
2158 }
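
/*
 * Worked example (hypothetical values, added for illustration): on a v5
 * filesystem with 512-byte sectors, xfs_agfl_size() works out to
 * (512 - sizeof(struct xfs_agfl)) / 4, i.e. 119 slots assuming a 36-byte
 * agfl header.  A wrapped agfl with flfirst = 115 and fllast = 2 spans
 * 119 - 115 + 2 + 1 = 7 active slots; if the on-disk flcount reads 8
 * (written by a kernel that sized the agfl differently because of the
 * padding mismatch), active != flcount and a reset is required.
 */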
2159 
2160 /*
2161  * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
2162  * agfl content cannot be trusted. Warn the user that a repair is required to
2163  * recover leaked blocks.
2164  *
2165  * The purpose of this mechanism is to handle filesystems affected by the agfl
2166  * header padding mismatch problem. A reset keeps the filesystem online with a
2167  * relatively minor free space accounting inconsistency rather than suffer the
2168  * inevitable crash from use of an invalid agfl block.
2169  */
2170 static void
2171 xfs_agfl_reset(
2172 	struct xfs_trans	*tp,
2173 	struct xfs_buf		*agbp,
2174 	struct xfs_perag	*pag)
2175 {
2176 	struct xfs_mount	*mp = tp->t_mountp;
2177 	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
2178 
2179 	ASSERT(pag->pagf_agflreset);
2180 	trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
2181 
2182 	xfs_warn(mp,
2183 	       "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
2184 	       "Please unmount and run xfs_repair.",
2185 	         pag->pag_agno, pag->pagf_flcount);
2186 
2187 	agf->agf_flfirst = 0;
2188 	agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1);
2189 	agf->agf_flcount = 0;
2190 	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
2191 				    XFS_AGF_FLCOUNT);
2192 
2193 	pag->pagf_flcount = 0;
2194 	pag->pagf_agflreset = false;
2195 }
2196 
2197 /*
2198  * Defer an AGFL block free. This is effectively equivalent to
2199  * xfs_bmap_add_free() with some special handling particular to AGFL blocks.
2200  *
2201  * Deferring AGFL frees helps prevent log reservation overruns due to too many
2202  * allocation operations in a transaction. AGFL frees are prone to this problem
2203  * because for one they are always freed one at a time. Further, an immediate
2204  * AGFL block free can cause a btree join and require another block free before
2205  * the real allocation can proceed. Deferring the free disconnects freeing up
2206  * the AGFL slot from freeing the block.
2207  */
2208 STATIC void
2209 xfs_defer_agfl_block(
2210 	struct xfs_trans		*tp,
2211 	xfs_agnumber_t			agno,
2212 	xfs_fsblock_t			agbno,
2213 	struct xfs_owner_info		*oinfo)
2214 {
2215 	struct xfs_mount		*mp = tp->t_mountp;
2216 	struct xfs_extent_free_item	*new;		/* new element */
2217 
2218 	ASSERT(xfs_bmap_free_item_zone != NULL);
2219 	ASSERT(oinfo != NULL);
2220 
2221 	new = kmem_zone_alloc(xfs_bmap_free_item_zone, 0);
2222 	new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno);
2223 	new->xefi_blockcount = 1;
2224 	new->xefi_oinfo = *oinfo;
2225 	new->xefi_skip_discard = false;
2226 
2227 	trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);
2228 
2229 	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_AGFL_FREE, &new->xefi_list);
2230 }
2231 
2232 /*
2233  * Decide whether to use this allocation group for this allocation.
2234  * If so, fix up the btree freelist's size.
2235  */
2236 int			/* error */
2237 xfs_alloc_fix_freelist(
2238 	struct xfs_alloc_arg	*args,	/* allocation argument structure */
2239 	int			flags)	/* XFS_ALLOC_FLAG_... */
2240 {
2241 	struct xfs_mount	*mp = args->mp;
2242 	struct xfs_perag	*pag = args->pag;
2243 	struct xfs_trans	*tp = args->tp;
2244 	struct xfs_buf		*agbp = NULL;
2245 	struct xfs_buf		*agflbp = NULL;
2246 	struct xfs_alloc_arg	targs;	/* local allocation arguments */
2247 	xfs_agblock_t		bno;	/* freelist block */
2248 	xfs_extlen_t		need;	/* total blocks needed in freelist */
2249 	int			error = 0;
2250 
2251 	/* deferred ops (AGFL block frees) require permanent transactions */
2252 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
2253 
2254 	if (!pag->pagf_init) {
2255 		error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
2256 		if (error)
2257 			goto out_no_agbp;
2258 		if (!pag->pagf_init) {
2259 			ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
2260 			ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
2261 			goto out_agbp_relse;
2262 		}
2263 	}
2264 
2265 	/*
2266 	 * If this is a metadata preferred pag and we are allocating user
2267 	 * data, try somewhere else unless we are being asked to try harder
2268 	 * at this point.
2269 	 */
2270 	if (pag->pagf_metadata && xfs_alloc_is_userdata(args->datatype) &&
2271 	    (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
2272 		ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
2273 		goto out_agbp_relse;
2274 	}
2275 
2276 	need = xfs_alloc_min_freelist(mp, pag);
2277 	if (!xfs_alloc_space_available(args, need, flags |
2278 			XFS_ALLOC_FLAG_CHECK))
2279 		goto out_agbp_relse;
2280 
2281 	/*
2282 	 * Get the a.g. freespace buffer.
2283 	 * Can fail if we're not blocking on locks, and it's held.
2284 	 */
2285 	if (!agbp) {
2286 		error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
2287 		if (error)
2288 			goto out_no_agbp;
2289 		if (!agbp) {
2290 			ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
2291 			ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
2292 			goto out_no_agbp;
2293 		}
2294 	}
2295 
2296 	/* reset a padding mismatched agfl before final free space check */
2297 	if (pag->pagf_agflreset)
2298 		xfs_agfl_reset(tp, agbp, pag);
2299 
2300 	/* If there isn't enough total space or a large enough single extent, reject it. */
2301 	need = xfs_alloc_min_freelist(mp, pag);
2302 	if (!xfs_alloc_space_available(args, need, flags))
2303 		goto out_agbp_relse;
2304 
2305 	/*
2306 	 * Make the freelist shorter if it's too long.
2307 	 *
2308 	 * Note that from this point onwards, we will always release the agf and
2309 	 * agfl buffers on error. This handles the case where we error out and
2310 	 * the buffers are clean or may not have been joined to the transaction
2311 	 * and hence need to be released manually. If they have been joined to
2312 	 * the transaction, then xfs_trans_brelse() will handle them
2313 	 * appropriately based on the recursion count and dirty state of the
2314 	 * buffer.
2315 	 *
2316 	 * XXX (dgc): When we have lots of free space, does this buy us
2317 	 * anything other than extra overhead when we need to put more blocks
2318 	 * back on the free list? Maybe we should only do this when space is
2319 	 * getting low or the AGFL is more than half full?
2320 	 *
2321 	 * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
2322 	 * big; the NORMAP flag prevents AGFL expand/shrink operations from
2323 	 * updating the rmapbt.  Both flags are used in xfs_repair while we're
2324 	 * rebuilding the rmapbt, and neither are used by the kernel.  They're
2325 	 * both required to ensure that rmaps are correctly recorded for the
2326 	 * regenerated AGFL, bnobt, and cntbt.  See repair/phase5.c and
2327 	 * repair/rmap.c in xfsprogs for details.
2328 	 */
2329 	memset(&targs, 0, sizeof(targs));
2330 	/* struct copy below */
2331 	if (flags & XFS_ALLOC_FLAG_NORMAP)
2332 		targs.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
2333 	else
2334 		targs.oinfo = XFS_RMAP_OINFO_AG;
2335 	while (!(flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) {
2336 		error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
2337 		if (error)
2338 			goto out_agbp_relse;
2339 
2340 		/* defer agfl frees */
2341 		xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo);
2342 	}
2343 
2344 	targs.tp = tp;
2345 	targs.mp = mp;
2346 	targs.agbp = agbp;
2347 	targs.agno = args->agno;
2348 	targs.alignment = targs.minlen = targs.prod = 1;
2349 	targs.type = XFS_ALLOCTYPE_THIS_AG;
2350 	targs.pag = pag;
2351 	error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp);
2352 	if (error)
2353 		goto out_agbp_relse;
2354 
2355 	/* Make the freelist longer if it's too short. */
2356 	while (pag->pagf_flcount < need) {
2357 		targs.agbno = 0;
2358 		targs.maxlen = need - pag->pagf_flcount;
2359 		targs.resv = XFS_AG_RESV_AGFL;
2360 
2361 		/* Allocate as many blocks as possible at once. */
2362 		error = xfs_alloc_ag_vextent(&targs);
2363 		if (error)
2364 			goto out_agflbp_relse;
2365 
2366 		/*
2367 		 * Stop if we run out.  Won't happen if callers are obeying
2368 		 * the restrictions correctly.  Can happen for free calls
2369 		 * on a completely full ag.
2370 		 */
2371 		if (targs.agbno == NULLAGBLOCK) {
2372 			if (flags & XFS_ALLOC_FLAG_FREEING)
2373 				break;
2374 			goto out_agflbp_relse;
2375 		}
2376 		/*
2377 		 * Put each allocated block on the list.
2378 		 */
2379 		for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
2380 			error = xfs_alloc_put_freelist(tp, agbp,
2381 							agflbp, bno, 0);
2382 			if (error)
2383 				goto out_agflbp_relse;
2384 		}
2385 	}
2386 	xfs_trans_brelse(tp, agflbp);
2387 	args->agbp = agbp;
2388 	return 0;
2389 
2390 out_agflbp_relse:
2391 	xfs_trans_brelse(tp, agflbp);
2392 out_agbp_relse:
2393 	if (agbp)
2394 		xfs_trans_brelse(tp, agbp);
2395 out_no_agbp:
2396 	args->agbp = NULL;
2397 	return error;
2398 }
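
/*
 * Illustration of the fixup above (hypothetical values): if
 * xfs_alloc_min_freelist() says we need 6 freelist blocks and
 * pagf_flcount is 9, three blocks are popped off the AGFL and their
 * frees deferred.  If pagf_flcount is 4 instead, a THIS_AG allocation of
 * up to targs.maxlen = need - flcount = 2 blocks is made against the
 * XFS_AG_RESV_AGFL reservation and each allocated block is put back on
 * the freelist until the count reaches 6.
 */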
2399 
2400 /*
2401  * Get a block from the freelist.
2402  * Returns with the buffer for the block gotten.
2403  */
2404 int				/* error */
2405 xfs_alloc_get_freelist(
2406 	xfs_trans_t	*tp,	/* transaction pointer */
2407 	xfs_buf_t	*agbp,	/* buffer containing the agf structure */
2408 	xfs_agblock_t	*bnop,	/* block address retrieved from freelist */
2409 	int		btreeblk) /* destination is an AGF btree */
2410 {
2411 	xfs_agf_t	*agf;	/* a.g. freespace structure */
2412 	xfs_buf_t	*agflbp;/* buffer for a.g. freelist structure */
2413 	xfs_agblock_t	bno;	/* block number returned */
2414 	__be32		*agfl_bno;
2415 	int		error;
2416 	int		logflags;
2417 	xfs_mount_t	*mp = tp->t_mountp;
2418 	xfs_perag_t	*pag;	/* per allocation group data */
2419 
2420 	/*
2421 	 * Freelist is empty, give up.
2422 	 */
2423 	agf = XFS_BUF_TO_AGF(agbp);
2424 	if (!agf->agf_flcount) {
2425 		*bnop = NULLAGBLOCK;
2426 		return 0;
2427 	}
2428 	/*
2429 	 * Read the array of free blocks.
2430 	 */
2431 	error = xfs_alloc_read_agfl(mp, tp, be32_to_cpu(agf->agf_seqno),
2432 				    &agflbp);
2433 	if (error)
2434 		return error;
2435 
2436 
2437 	/*
2438 	 * Get the block number and update the data structures.
2439 	 */
2440 	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
2441 	bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
2442 	be32_add_cpu(&agf->agf_flfirst, 1);
2443 	xfs_trans_brelse(tp, agflbp);
2444 	if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
2445 		agf->agf_flfirst = 0;
2446 
2447 	pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
2448 	ASSERT(!pag->pagf_agflreset);
2449 	be32_add_cpu(&agf->agf_flcount, -1);
2450 	xfs_trans_agflist_delta(tp, -1);
2451 	pag->pagf_flcount--;
2452 
2453 	logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
2454 	if (btreeblk) {
2455 		be32_add_cpu(&agf->agf_btreeblks, 1);
2456 		pag->pagf_btreeblks++;
2457 		logflags |= XFS_AGF_BTREEBLKS;
2458 	}
2459 	xfs_perag_put(pag);
2460 
2461 	xfs_alloc_log_agf(tp, agbp, logflags);
2462 	*bnop = bno;
2463 
2464 	return 0;
2465 }
2466 
2467 /*
2468  * Log the given fields from the agf structure.
2469  */
2470 void
2471 xfs_alloc_log_agf(
2472 	xfs_trans_t	*tp,	/* transaction pointer */
2473 	xfs_buf_t	*bp,	/* buffer for a.g. freelist header */
2474 	int		fields)	/* mask of fields to be logged (XFS_AGF_...) */
2475 {
2476 	int	first;		/* first byte offset */
2477 	int	last;		/* last byte offset */
2478 	static const short	offsets[] = {
2479 		offsetof(xfs_agf_t, agf_magicnum),
2480 		offsetof(xfs_agf_t, agf_versionnum),
2481 		offsetof(xfs_agf_t, agf_seqno),
2482 		offsetof(xfs_agf_t, agf_length),
2483 		offsetof(xfs_agf_t, agf_roots[0]),
2484 		offsetof(xfs_agf_t, agf_levels[0]),
2485 		offsetof(xfs_agf_t, agf_flfirst),
2486 		offsetof(xfs_agf_t, agf_fllast),
2487 		offsetof(xfs_agf_t, agf_flcount),
2488 		offsetof(xfs_agf_t, agf_freeblks),
2489 		offsetof(xfs_agf_t, agf_longest),
2490 		offsetof(xfs_agf_t, agf_btreeblks),
2491 		offsetof(xfs_agf_t, agf_uuid),
2492 		offsetof(xfs_agf_t, agf_rmap_blocks),
2493 		offsetof(xfs_agf_t, agf_refcount_blocks),
2494 		offsetof(xfs_agf_t, agf_refcount_root),
2495 		offsetof(xfs_agf_t, agf_refcount_level),
2496 		/* needed so that we don't log the whole rest of the structure: */
2497 		offsetof(xfs_agf_t, agf_spare64),
2498 		sizeof(xfs_agf_t)
2499 	};
2500 
2501 	trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
2502 
2503 	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
2504 
2505 	xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
2506 	xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
2507 }
2508 
2509 /*
2510  * Interface for inode allocation to force the pag data to be initialized.
2511  */
2512 int					/* error */
2513 xfs_alloc_pagf_init(
2514 	xfs_mount_t		*mp,	/* file system mount structure */
2515 	xfs_trans_t		*tp,	/* transaction pointer */
2516 	xfs_agnumber_t		agno,	/* allocation group number */
2517 	int			flags)	/* XFS_ALLOC_FLAGS_... */
2518 {
2519 	xfs_buf_t		*bp;
2520 	int			error;
2521 
2522 	if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
2523 		return error;
2524 	if (bp)
2525 		xfs_trans_brelse(tp, bp);
2526 	return 0;
2527 }
2528 
2529 /*
2530  * Put the block on the freelist for the allocation group.
2531  */
2532 int					/* error */
2533 xfs_alloc_put_freelist(
2534 	xfs_trans_t		*tp,	/* transaction pointer */
2535 	xfs_buf_t		*agbp,	/* buffer for a.g. freelist header */
2536 	xfs_buf_t		*agflbp,/* buffer for a.g. free block array */
2537 	xfs_agblock_t		bno,	/* block being freed */
2538 	int			btreeblk) /* block came from an AGF btree */
2539 {
2540 	xfs_agf_t		*agf;	/* a.g. freespace structure */
2541 	__be32			*blockp;/* pointer to array entry */
2542 	int			error;
2543 	int			logflags;
2544 	xfs_mount_t		*mp;	/* mount structure */
2545 	xfs_perag_t		*pag;	/* per allocation group data */
2546 	__be32			*agfl_bno;
2547 	int			startoff;
2548 
2549 	agf = XFS_BUF_TO_AGF(agbp);
2550 	mp = tp->t_mountp;
2551 
2552 	if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
2553 			be32_to_cpu(agf->agf_seqno), &agflbp)))
2554 		return error;
2555 	be32_add_cpu(&agf->agf_fllast, 1);
2556 	if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
2557 		agf->agf_fllast = 0;
2558 
2559 	pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
2560 	ASSERT(!pag->pagf_agflreset);
2561 	be32_add_cpu(&agf->agf_flcount, 1);
2562 	xfs_trans_agflist_delta(tp, 1);
2563 	pag->pagf_flcount++;
2564 
2565 	logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
2566 	if (btreeblk) {
2567 		be32_add_cpu(&agf->agf_btreeblks, -1);
2568 		pag->pagf_btreeblks--;
2569 		logflags |= XFS_AGF_BTREEBLKS;
2570 	}
2571 	xfs_perag_put(pag);
2572 
2573 	xfs_alloc_log_agf(tp, agbp, logflags);
2574 
2575 	ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
2576 
2577 	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
2578 	blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
2579 	*blockp = cpu_to_be32(bno);
2580 	startoff = (char *)blockp - (char *)agflbp->b_addr;
2581 
2582 	xfs_alloc_log_agf(tp, agbp, logflags);
2583 
2584 	xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
2585 	xfs_trans_log_buf(tp, agflbp, startoff,
2586 			  startoff + sizeof(xfs_agblock_t) - 1);
2587 	return 0;
2588 }
2589 
2590 static xfs_failaddr_t
2591 xfs_agf_verify(
2592 	struct xfs_buf		*bp)
2593 {
2594 	struct xfs_mount	*mp = bp->b_mount;
2595 	struct xfs_agf		*agf = XFS_BUF_TO_AGF(bp);
2596 
2597 	if (xfs_sb_version_hascrc(&mp->m_sb)) {
2598 		if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
2599 			return __this_address;
2600 		if (!xfs_log_check_lsn(mp,
2601 				be64_to_cpu(XFS_BUF_TO_AGF(bp)->agf_lsn)))
2602 			return __this_address;
2603 	}
2604 
2605 	if (!xfs_verify_magic(bp, agf->agf_magicnum))
2606 		return __this_address;
2607 
2608 	if (!(XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
2609 	      be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
2610 	      be32_to_cpu(agf->agf_flfirst) < xfs_agfl_size(mp) &&
2611 	      be32_to_cpu(agf->agf_fllast) < xfs_agfl_size(mp) &&
2612 	      be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
2613 		return __this_address;
2614 
2615 	if (be32_to_cpu(agf->agf_length) > mp->m_sb.sb_dblocks)
2616 		return __this_address;
2617 
2618 	if (be32_to_cpu(agf->agf_freeblks) < be32_to_cpu(agf->agf_longest) ||
2619 	    be32_to_cpu(agf->agf_freeblks) > be32_to_cpu(agf->agf_length))
2620 		return __this_address;
2621 
2622 	if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
2623 	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
2624 	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
2625 	    be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > XFS_BTREE_MAXLEVELS)
2626 		return __this_address;
2627 
2628 	if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
2629 	    (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
2630 	     be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS))
2631 		return __this_address;
2632 
2633 	if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
2634 	    be32_to_cpu(agf->agf_rmap_blocks) > be32_to_cpu(agf->agf_length))
2635 		return __this_address;
2636 
2637 	/*
2638 	 * during growfs operations, the perag is not fully initialised,
2639 	 * so we can't use it for any useful checking. growfs ensures we can't
2640 	 * use it by using uncached buffers that don't have the perag attached
2641 	 * so we can detect and avoid this problem.
2642 	 */
2643 	if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno)
2644 		return __this_address;
2645 
2646 	if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
2647 	    be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
2648 		return __this_address;
2649 
2650 	if (xfs_sb_version_hasreflink(&mp->m_sb) &&
2651 	    be32_to_cpu(agf->agf_refcount_blocks) >
2652 	    be32_to_cpu(agf->agf_length))
2653 		return __this_address;
2654 
2655 	if (xfs_sb_version_hasreflink(&mp->m_sb) &&
2656 	    (be32_to_cpu(agf->agf_refcount_level) < 1 ||
2657 	     be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS))
2658 		return __this_address;
2659 
2660 	return NULL;
2661 
2662 }
2663 
2664 static void
2665 xfs_agf_read_verify(
2666 	struct xfs_buf	*bp)
2667 {
2668 	struct xfs_mount *mp = bp->b_mount;
2669 	xfs_failaddr_t	fa;
2670 
2671 	if (xfs_sb_version_hascrc(&mp->m_sb) &&
2672 	    !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
2673 		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
2674 	else {
2675 		fa = xfs_agf_verify(bp);
2676 		if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF))
2677 			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
2678 	}
2679 }
2680 
2681 static void
2682 xfs_agf_write_verify(
2683 	struct xfs_buf	*bp)
2684 {
2685 	struct xfs_mount	*mp = bp->b_mount;
2686 	struct xfs_buf_log_item	*bip = bp->b_log_item;
2687 	xfs_failaddr_t		fa;
2688 
2689 	fa = xfs_agf_verify(bp);
2690 	if (fa) {
2691 		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
2692 		return;
2693 	}
2694 
2695 	if (!xfs_sb_version_hascrc(&mp->m_sb))
2696 		return;
2697 
2698 	if (bip)
2699 		XFS_BUF_TO_AGF(bp)->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
2700 
2701 	xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
2702 }
2703 
2704 const struct xfs_buf_ops xfs_agf_buf_ops = {
2705 	.name = "xfs_agf",
2706 	.magic = { cpu_to_be32(XFS_AGF_MAGIC), cpu_to_be32(XFS_AGF_MAGIC) },
2707 	.verify_read = xfs_agf_read_verify,
2708 	.verify_write = xfs_agf_write_verify,
2709 	.verify_struct = xfs_agf_verify,
2710 };
2711 
2712 /*
2713  * Read in the allocation group header (free/alloc section).
2714  */
2715 int					/* error */
2716 xfs_read_agf(
2717 	struct xfs_mount	*mp,	/* mount point structure */
2718 	struct xfs_trans	*tp,	/* transaction pointer */
2719 	xfs_agnumber_t		agno,	/* allocation group number */
2720 	int			flags,	/* XFS_BUF_ */
2721 	struct xfs_buf		**bpp)	/* buffer for the ag freelist header */
2722 {
2723 	int		error;
2724 
2725 	trace_xfs_read_agf(mp, agno);
2726 
2727 	ASSERT(agno != NULLAGNUMBER);
2728 	error = xfs_trans_read_buf(
2729 			mp, tp, mp->m_ddev_targp,
2730 			XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
2731 			XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops);
2732 	if (error)
2733 		return error;
2734 	if (!*bpp)
2735 		return 0;
2736 
2737 	ASSERT(!(*bpp)->b_error);
2738 	xfs_buf_set_ref(*bpp, XFS_AGF_REF);
2739 	return 0;
2740 }
2741 
2742 /*
2743  * Read in the allocation group header (free/alloc section).
2744  */
2745 int					/* error */
2746 xfs_alloc_read_agf(
2747 	struct xfs_mount	*mp,	/* mount point structure */
2748 	struct xfs_trans	*tp,	/* transaction pointer */
2749 	xfs_agnumber_t		agno,	/* allocation group number */
2750 	int			flags,	/* XFS_ALLOC_FLAG_... */
2751 	struct xfs_buf		**bpp)	/* buffer for the ag freelist header */
2752 {
2753 	struct xfs_agf		*agf;		/* ag freelist header */
2754 	struct xfs_perag	*pag;		/* per allocation group data */
2755 	int			error;
2756 
2757 	trace_xfs_alloc_read_agf(mp, agno);
2758 
2759 	ASSERT(agno != NULLAGNUMBER);
2760 	error = xfs_read_agf(mp, tp, agno,
2761 			(flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
2762 			bpp);
2763 	if (error)
2764 		return error;
2765 	if (!*bpp)
2766 		return 0;
2767 	ASSERT(!(*bpp)->b_error);
2768 
2769 	agf = XFS_BUF_TO_AGF(*bpp);
2770 	pag = xfs_perag_get(mp, agno);
2771 	if (!pag->pagf_init) {
2772 		pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
2773 		pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
2774 		pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
2775 		pag->pagf_longest = be32_to_cpu(agf->agf_longest);
2776 		pag->pagf_levels[XFS_BTNUM_BNOi] =
2777 			be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
2778 		pag->pagf_levels[XFS_BTNUM_CNTi] =
2779 			be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
2780 		pag->pagf_levels[XFS_BTNUM_RMAPi] =
2781 			be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
2782 		pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
2783 		pag->pagf_init = 1;
2784 		pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
2785 	}
2786 #ifdef DEBUG
2787 	else if (!XFS_FORCED_SHUTDOWN(mp)) {
2788 		ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
2789 		ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
2790 		ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
2791 		ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
2792 		ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
2793 		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
2794 		ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
2795 		       be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
2796 	}
2797 #endif
2798 	xfs_perag_put(pag);
2799 	return 0;
2800 }
2801 
2802 /*
2803  * Allocate an extent (variable-size).
2804  * Depending on the allocation type, we either look in a single allocation
2805  * group or loop over the allocation groups to find the result.
2806  */
2807 int				/* error */
2808 xfs_alloc_vextent(
2809 	struct xfs_alloc_arg	*args)	/* allocation argument structure */
2810 {
2811 	xfs_agblock_t		agsize;	/* allocation group size */
2812 	int			error;
2813 	int			flags;	/* XFS_ALLOC_FLAG_... locking flags */
2814 	struct xfs_mount	*mp;	/* mount structure pointer */
2815 	xfs_agnumber_t		sagno;	/* starting allocation group number */
2816 	xfs_alloctype_t		type;	/* input allocation type */
2817 	int			bump_rotor = 0;
2818 	xfs_agnumber_t		rotorstep = xfs_rotorstep; /* inode32 agf stepper */
2819 
2820 	mp = args->mp;
2821 	type = args->otype = args->type;
2822 	args->agbno = NULLAGBLOCK;
2823 	/*
2824 	 * Just fix this up, for the case where the last a.g. is shorter
2825 	 * (or there's only one a.g.) and the caller couldn't easily figure
2826 	 * that out (xfs_bmap_alloc).
2827 	 */
2828 	agsize = mp->m_sb.sb_agblocks;
2829 	if (args->maxlen > agsize)
2830 		args->maxlen = agsize;
2831 	if (args->alignment == 0)
2832 		args->alignment = 1;
2833 	ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
2834 	ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
2835 	ASSERT(args->minlen <= args->maxlen);
2836 	ASSERT(args->minlen <= agsize);
2837 	ASSERT(args->mod < args->prod);
2838 	if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
2839 	    XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
2840 	    args->minlen > args->maxlen || args->minlen > agsize ||
2841 	    args->mod >= args->prod) {
2842 		args->fsbno = NULLFSBLOCK;
2843 		trace_xfs_alloc_vextent_badargs(args);
2844 		return 0;
2845 	}
2846 
2847 	switch (type) {
2848 	case XFS_ALLOCTYPE_THIS_AG:
2849 	case XFS_ALLOCTYPE_NEAR_BNO:
2850 	case XFS_ALLOCTYPE_THIS_BNO:
2851 		/*
2852 		 * These three force us into a single a.g.
2853 		 */
2854 		args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2855 		args->pag = xfs_perag_get(mp, args->agno);
2856 		error = xfs_alloc_fix_freelist(args, 0);
2857 		if (error) {
2858 			trace_xfs_alloc_vextent_nofix(args);
2859 			goto error0;
2860 		}
2861 		if (!args->agbp) {
2862 			trace_xfs_alloc_vextent_noagbp(args);
2863 			break;
2864 		}
2865 		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2866 		if ((error = xfs_alloc_ag_vextent(args)))
2867 			goto error0;
2868 		break;
2869 	case XFS_ALLOCTYPE_START_BNO:
2870 		/*
2871 		 * Try near allocation first, then anywhere-in-ag after
2872 		 * the first a.g. fails.
2873 		 */
2874 		if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
2875 		    (mp->m_flags & XFS_MOUNT_32BITINODES)) {
2876 			args->fsbno = XFS_AGB_TO_FSB(mp,
2877 					((mp->m_agfrotor / rotorstep) %
2878 					mp->m_sb.sb_agcount), 0);
2879 			bump_rotor = 1;
2880 		}
2881 		args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2882 		args->type = XFS_ALLOCTYPE_NEAR_BNO;
2883 		/* FALLTHROUGH */
2884 	case XFS_ALLOCTYPE_FIRST_AG:
2885 		/*
2886 		 * Rotate through the allocation groups looking for a winner.
2887 		 */
2888 		if (type == XFS_ALLOCTYPE_FIRST_AG) {
2889 			/*
2890 			 * Start with allocation group given by bno.
2891 			 */
2892 			args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2893 			args->type = XFS_ALLOCTYPE_THIS_AG;
2894 			sagno = 0;
2895 			flags = 0;
2896 		} else {
2897 			/*
2898 			 * Start with the given allocation group.
2899 			 */
2900 			args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2901 			flags = XFS_ALLOC_FLAG_TRYLOCK;
2902 		}
2903 		/*
2904 		 * Loop over allocation groups twice; first time with
2905 		 * trylock set, second time without.
2906 		 */
2907 		for (;;) {
2908 			args->pag = xfs_perag_get(mp, args->agno);
2909 			error = xfs_alloc_fix_freelist(args, flags);
2910 			if (error) {
2911 				trace_xfs_alloc_vextent_nofix(args);
2912 				goto error0;
2913 			}
2914 			/*
2915 			 * If we get a buffer back then the allocation will fly.
2916 			 */
2917 			if (args->agbp) {
2918 				if ((error = xfs_alloc_ag_vextent(args)))
2919 					goto error0;
2920 				break;
2921 			}
2922 
2923 			trace_xfs_alloc_vextent_loopfailed(args);
2924 
2925 			/*
2926 			 * Didn't work, figure out the next iteration.
2927 			 */
2928 			if (args->agno == sagno &&
2929 			    type == XFS_ALLOCTYPE_START_BNO)
2930 				args->type = XFS_ALLOCTYPE_THIS_AG;
2931 			/*
2932 			 * For the first allocation, we can try any AG to get
2933 			 * space.  However, if we have already allocated a
2934 			 * block, we don't want to try AGs whose number is below
2935 			 * sagno. Otherwise, we may end up with out-of-order
2936 			 * locking of AGF, which might cause deadlock.
2937 			 */
2938 			if (++(args->agno) == mp->m_sb.sb_agcount) {
2939 				if (args->tp->t_firstblock != NULLFSBLOCK)
2940 					args->agno = sagno;
2941 				else
2942 					args->agno = 0;
2943 			}
2944 			/*
2945 			 * Reached the starting a.g., must either be done
2946 			 * or switch to non-trylock mode.
2947 			 */
2948 			if (args->agno == sagno) {
2949 				if (flags == 0) {
2950 					args->agbno = NULLAGBLOCK;
2951 					trace_xfs_alloc_vextent_allfailed(args);
2952 					break;
2953 				}
2954 
2955 				flags = 0;
2956 				if (type == XFS_ALLOCTYPE_START_BNO) {
2957 					args->agbno = XFS_FSB_TO_AGBNO(mp,
2958 						args->fsbno);
2959 					args->type = XFS_ALLOCTYPE_NEAR_BNO;
2960 				}
2961 			}
2962 			xfs_perag_put(args->pag);
2963 		}
2964 		if (bump_rotor) {
2965 			if (args->agno == sagno)
2966 				mp->m_agfrotor = (mp->m_agfrotor + 1) %
2967 					(mp->m_sb.sb_agcount * rotorstep);
2968 			else
2969 				mp->m_agfrotor = (args->agno * rotorstep + 1) %
2970 					(mp->m_sb.sb_agcount * rotorstep);
2971 		}
2972 		break;
2973 	default:
2974 		ASSERT(0);
2975 		/* NOTREACHED */
2976 	}
2977 	if (args->agbno == NULLAGBLOCK)
2978 		args->fsbno = NULLFSBLOCK;
2979 	else {
2980 		args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
2981 #ifdef DEBUG
2982 		ASSERT(args->len >= args->minlen);
2983 		ASSERT(args->len <= args->maxlen);
2984 		ASSERT(args->agbno % args->alignment == 0);
2985 		XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
2986 			args->len);
2987 #endif
2988 
2989 		/* Zero the extent if we were asked to do so */
2990 		if (args->datatype & XFS_ALLOC_USERDATA_ZERO) {
2991 			error = xfs_zero_extent(args->ip, args->fsbno, args->len);
2992 			if (error)
2993 				goto error0;
2994 		}
2995 
2996 	}
2997 	xfs_perag_put(args->pag);
2998 	return 0;
2999 error0:
3000 	xfs_perag_put(args->pag);
3001 	return error;
3002 }
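
/*
 * Illustration of the AG rotation above (hypothetical values): a
 * XFS_ALLOCTYPE_START_BNO request targeting AG 5 of an 8-AG filesystem
 * first tries a NEAR_BNO allocation in AG 5, then THIS_AG allocations in
 * AGs 6, 7, 0, ... 4 with XFS_ALLOC_FLAG_TRYLOCK set, assuming this is
 * the first allocation in the transaction (t_firstblock == NULLFSBLOCK)
 * so the loop may wrap below sagno.  If every AG is skipped or fails,
 * the sweep returns to AG 5, drops the trylock flag and repeats in
 * blocking mode before giving up with NULLAGBLOCK.
 */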
3003 
3004 /* Ensure that the freelist is at full capacity. */
3005 int
3006 xfs_free_extent_fix_freelist(
3007 	struct xfs_trans	*tp,
3008 	xfs_agnumber_t		agno,
3009 	struct xfs_buf		**agbp)
3010 {
3011 	struct xfs_alloc_arg	args;
3012 	int			error;
3013 
3014 	memset(&args, 0, sizeof(struct xfs_alloc_arg));
3015 	args.tp = tp;
3016 	args.mp = tp->t_mountp;
3017 	args.agno = agno;
3018 
3019 	/*
3020 	 * validate that the AG number is legal - this enables us to detect
3021 	 * and handle silent filesystem corruption rather than crashing.
3022 	 */
3023 	if (args.agno >= args.mp->m_sb.sb_agcount)
3024 		return -EFSCORRUPTED;
3025 
3026 	args.pag = xfs_perag_get(args.mp, args.agno);
3027 	ASSERT(args.pag);
3028 
3029 	error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
3030 	if (error)
3031 		goto out;
3032 
3033 	*agbp = args.agbp;
3034 out:
3035 	xfs_perag_put(args.pag);
3036 	return error;
3037 }
3038 
3039 /*
3040  * Free an extent.
3041  * Just break up the extent address and hand off to xfs_free_ag_extent
3042  * after fixing up the freelist.
3043  */
3044 int
3045 __xfs_free_extent(
3046 	struct xfs_trans		*tp,
3047 	xfs_fsblock_t			bno,
3048 	xfs_extlen_t			len,
3049 	const struct xfs_owner_info	*oinfo,
3050 	enum xfs_ag_resv_type		type,
3051 	bool				skip_discard)
3052 {
3053 	struct xfs_mount		*mp = tp->t_mountp;
3054 	struct xfs_buf			*agbp;
3055 	xfs_agnumber_t			agno = XFS_FSB_TO_AGNO(mp, bno);
3056 	xfs_agblock_t			agbno = XFS_FSB_TO_AGBNO(mp, bno);
3057 	int				error;
3058 	unsigned int			busy_flags = 0;
3059 
3060 	ASSERT(len != 0);
3061 	ASSERT(type != XFS_AG_RESV_AGFL);
3062 
3063 	if (XFS_TEST_ERROR(false, mp,
3064 			XFS_ERRTAG_FREE_EXTENT))
3065 		return -EIO;
3066 
3067 	error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
3068 	if (error)
3069 		return error;
3070 
3071 	XFS_WANT_CORRUPTED_GOTO(mp, agbno < mp->m_sb.sb_agblocks, err);
3072 
3073 	/* validate the extent size is legal now we have the agf locked */
3074 	XFS_WANT_CORRUPTED_GOTO(mp,
3075 		agbno + len <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_length),
3076 				err);
3077 
3078 	error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, type);
3079 	if (error)
3080 		goto err;
3081 
3082 	if (skip_discard)
3083 		busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD;
3084 	xfs_extent_busy_insert(tp, agno, agbno, len, busy_flags);
3085 	return 0;
3086 
3087 err:
3088 	xfs_trans_brelse(tp, agbp);
3089 	return error;
3090 }
3091 
3092 struct xfs_alloc_query_range_info {
3093 	xfs_alloc_query_range_fn	fn;
3094 	void				*priv;
3095 };
3096 
3097 /* Format btree record and pass to our callback. */
3098 STATIC int
3099 xfs_alloc_query_range_helper(
3100 	struct xfs_btree_cur		*cur,
3101 	union xfs_btree_rec		*rec,
3102 	void				*priv)
3103 {
3104 	struct xfs_alloc_query_range_info	*query = priv;
3105 	struct xfs_alloc_rec_incore		irec;
3106 
3107 	irec.ar_startblock = be32_to_cpu(rec->alloc.ar_startblock);
3108 	irec.ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount);
3109 	return query->fn(cur, &irec, query->priv);
3110 }
3111 
3112 /* Find all free space within a given range of blocks. */
3113 int
3114 xfs_alloc_query_range(
3115 	struct xfs_btree_cur			*cur,
3116 	struct xfs_alloc_rec_incore		*low_rec,
3117 	struct xfs_alloc_rec_incore		*high_rec,
3118 	xfs_alloc_query_range_fn		fn,
3119 	void					*priv)
3120 {
3121 	union xfs_btree_irec			low_brec;
3122 	union xfs_btree_irec			high_brec;
3123 	struct xfs_alloc_query_range_info	query;
3124 
3125 	ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
3126 	low_brec.a = *low_rec;
3127 	high_brec.a = *high_rec;
3128 	query.priv = priv;
3129 	query.fn = fn;
3130 	return xfs_btree_query_range(cur, &low_brec, &high_brec,
3131 			xfs_alloc_query_range_helper, &query);
3132 }
3133 
3134 /* Find all free space records. */
3135 int
3136 xfs_alloc_query_all(
3137 	struct xfs_btree_cur			*cur,
3138 	xfs_alloc_query_range_fn		fn,
3139 	void					*priv)
3140 {
3141 	struct xfs_alloc_query_range_info	query;
3142 
3143 	ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
3144 	query.priv = priv;
3145 	query.fn = fn;
3146 	return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
3147 }
3148 
3149 /* Is there a record covering a given extent? */
3150 int
3151 xfs_alloc_has_record(
3152 	struct xfs_btree_cur	*cur,
3153 	xfs_agblock_t		bno,
3154 	xfs_extlen_t		len,
3155 	bool			*exists)
3156 {
3157 	union xfs_btree_irec	low;
3158 	union xfs_btree_irec	high;
3159 
3160 	memset(&low, 0, sizeof(low));
3161 	low.a.ar_startblock = bno;
3162 	memset(&high, 0xFF, sizeof(high));
3163 	high.a.ar_startblock = bno + len - 1;
3164 
3165 	return xfs_btree_has_record(cur, &low, &high, exists);
3166 }
3167 
3168 /*
3169  * Walk all the blocks in the AGFL.  The @walk_fn can return any negative
3170  * error code or XFS_ITER_*.
3171  */
3172 int
3173 xfs_agfl_walk(
3174 	struct xfs_mount	*mp,
3175 	struct xfs_agf		*agf,
3176 	struct xfs_buf		*agflbp,
3177 	xfs_agfl_walk_fn	walk_fn,
3178 	void			*priv)
3179 {
3180 	__be32			*agfl_bno;
3181 	unsigned int		i;
3182 	int			error;
3183 
3184 	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, agflbp);
3185 	i = be32_to_cpu(agf->agf_flfirst);
3186 
3187 	/* Nothing to walk in an empty AGFL. */
3188 	if (agf->agf_flcount == cpu_to_be32(0))
3189 		return 0;
3190 
3191 	/* Otherwise, walk from first to last, wrapping as needed. */
3192 	for (;;) {
3193 		error = walk_fn(mp, be32_to_cpu(agfl_bno[i]), priv);
3194 		if (error)
3195 			return error;
3196 		if (i == be32_to_cpu(agf->agf_fllast))
3197 			break;
3198 		if (++i == xfs_agfl_size(mp))
3199 			i = 0;
3200 	}
3201 
3202 	return 0;
3203 }
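
/*
 * Illustration of the wrap-around walk above (hypothetical values): with
 * xfs_agfl_size() = 119, agf_flfirst = 117 and agf_fllast = 1, the loop
 * visits slots 117 and 118, wraps the index back to 0, then visits slots
 * 0 and 1 before hitting fllast and returning; four callbacks in total,
 * matching an agf_flcount of 4.
 */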
3204