/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2018 Red Hat, Inc.
 * All rights reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_rmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_health.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_defer.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_trace.h"
#include "xfs_inode.h"
#include "xfs_icache.h"

/*
 * Passive reference counting access wrappers to the perag structures. If the
 * per-ag structure is to be freed, the freeing code is responsible for
 * cleaning up objects with passive references before freeing the structure,
 * such as cached buffers.
 */
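
/*
 * Typical usage is to take a reference for the duration of an access and
 * drop it when done; a minimal sketch (illustrative only, not a real call
 * site):
 *
 *	struct xfs_perag	*pag = xfs_perag_get(mp, agno);
 *
 *	if (pag) {
 *		...use pag->pagf_freeblks, pag->pagi_count, etc...
 *		xfs_perag_put(pag);
 *	}
 */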
struct xfs_perag *
xfs_perag_get(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;
	int			ref = 0;

	rcu_read_lock();
	pag = radix_tree_lookup(&mp->m_perag_tree, agno);
	if (pag) {
		ASSERT(atomic_read(&pag->pag_ref) >= 0);
		ref = atomic_inc_return(&pag->pag_ref);
	}
	rcu_read_unlock();
	trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
	return pag;
}

/*
 * Search from @first to find the next perag with the given tag set.
 */
struct xfs_perag *
xfs_perag_get_tag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		first,
	unsigned int		tag)
{
	struct xfs_perag	*pag;
	int			found;
	int			ref;

	rcu_read_lock();
	found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
					   (void **)&pag, first, 1, tag);
	if (found <= 0) {
		rcu_read_unlock();
		return NULL;
	}
	ref = atomic_inc_return(&pag->pag_ref);
	rcu_read_unlock();
	trace_xfs_perag_get_tag(mp, pag->pag_agno, ref, _RET_IP_);
	return pag;
}
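
/*
 * Callers walk all tagged AGs by feeding the last AG number found, plus one,
 * back in as @first. A minimal sketch of that loop (illustrative only; @tag
 * would be a perag radix tree tag such as XFS_ICI_BLOCKGC_TAG):
 *
 *	xfs_agnumber_t		agno = 0;
 *	struct xfs_perag	*pag;
 *
 *	while ((pag = xfs_perag_get_tag(mp, agno, tag)) != NULL) {
 *		agno = pag->pag_agno + 1;
 *		...process this AG...
 *		xfs_perag_put(pag);
 *	}
 */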

void
xfs_perag_put(
	struct xfs_perag	*pag)
{
	int			ref;

	ASSERT(atomic_read(&pag->pag_ref) > 0);
	ref = atomic_dec_return(&pag->pag_ref);
	trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
}

/*
 * xfs_initialize_perag_data
 *
 * Read in each per-ag structure so we can count up the number of
 * allocated inodes, free inodes and used filesystem blocks as this
 * information is no longer persistent in the superblock. Once we have
 * this information, write it into the in-core superblock structure.
 */
int
xfs_initialize_perag_data(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agcount)
{
	xfs_agnumber_t		index;
	struct xfs_perag	*pag;
	struct xfs_sb		*sbp = &mp->m_sb;
	uint64_t		ifree = 0;
	uint64_t		ialloc = 0;
	uint64_t		bfree = 0;
	uint64_t		bfreelst = 0;
	uint64_t		btree = 0;
	uint64_t		fdblocks;
	int			error = 0;

	for (index = 0; index < agcount; index++) {
		/*
		 * Read the AGF, then the AGI. This gets us all the
		 * information we need and populates the per-ag
		 * structures for us.
		 */
		error = xfs_alloc_pagf_init(mp, NULL, index, 0);
		if (error)
			return error;

		error = xfs_ialloc_pagi_init(mp, NULL, index);
		if (error)
			return error;
		pag = xfs_perag_get(mp, index);
		ifree += pag->pagi_freecount;
		ialloc += pag->pagi_count;
		bfree += pag->pagf_freeblks;
		bfreelst += pag->pagf_flcount;
		btree += pag->pagf_btreeblks;
		xfs_perag_put(pag);
	}
	fdblocks = bfree + bfreelst + btree;

	/*
	 * If the new summary counts are obviously incorrect, fail the
	 * mount operation because that implies the AGFs are also corrupt.
	 * Clear FS_COUNTERS so that we don't unmount with a dirty log, which
	 * will prevent xfs_repair from fixing anything.
	 */
	if (fdblocks > sbp->sb_dblocks || ifree > ialloc) {
		xfs_alert(mp, "AGF corruption. Please run xfs_repair.");
		error = -EFSCORRUPTED;
		goto out;
	}

	/* Overwrite incore superblock counters with just-read data */
	spin_lock(&mp->m_sb_lock);
	sbp->sb_ifree = ifree;
	sbp->sb_icount = ialloc;
	sbp->sb_fdblocks = fdblocks;
	spin_unlock(&mp->m_sb_lock);

	xfs_reinit_percpu_counters(mp);
out:
	xfs_fs_mark_healthy(mp, XFS_SICK_FS_COUNTERS);
	return error;
}
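
/*
 * A mount-time caller recomputing lazy superblock counters would typically do
 * something like the following (illustrative sketch only, not a real call
 * site):
 *
 *	error = xfs_initialize_perag_data(mp, mp->m_sb.sb_agcount);
 *	if (error)
 *		...fail the mount...
 */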

STATIC void
__xfs_free_perag(
	struct rcu_head	*head)
{
	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);

	ASSERT(!delayed_work_pending(&pag->pag_blockgc_work));
	kmem_free(pag);
}

/*
 * Free up the per-ag resources associated with the mount structure.
 */
void
xfs_free_perag(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		spin_lock(&mp->m_perag_lock);
		pag = radix_tree_delete(&mp->m_perag_tree, agno);
		spin_unlock(&mp->m_perag_lock);
		ASSERT(pag);
		XFS_IS_CORRUPT(pag->pag_mount, atomic_read(&pag->pag_ref) != 0);

		cancel_delayed_work_sync(&pag->pag_blockgc_work);
		xfs_iunlink_destroy(pag);
		xfs_buf_hash_destroy(pag);

		call_rcu(&pag->rcu_head, __xfs_free_perag);
	}
}

int
xfs_initialize_perag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agcount,
	xfs_agnumber_t		*maxagi)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		index;
	xfs_agnumber_t		first_initialised = NULLAGNUMBER;
	int			error;

	/*
	 * Walk the current per-ag tree so we don't try to initialise AGs
	 * that already exist (growfs case). Allocate and insert all the
	 * AGs we don't find ready for initialisation.
	 */
	for (index = 0; index < agcount; index++) {
		pag = xfs_perag_get(mp, index);
		if (pag) {
			xfs_perag_put(pag);
			continue;
		}

		pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
		if (!pag) {
			error = -ENOMEM;
			goto out_unwind_new_pags;
		}
		pag->pag_agno = index;
		pag->pag_mount = mp;

		error = radix_tree_preload(GFP_NOFS);
		if (error)
			goto out_free_pag;

		spin_lock(&mp->m_perag_lock);
		if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
			WARN_ON_ONCE(1);
			spin_unlock(&mp->m_perag_lock);
			radix_tree_preload_end();
			error = -EEXIST;
			goto out_free_pag;
		}
		spin_unlock(&mp->m_perag_lock);
		radix_tree_preload_end();

		/* Place kernel structure only init below this point. */
		spin_lock_init(&pag->pag_ici_lock);
		spin_lock_init(&pag->pagb_lock);
		spin_lock_init(&pag->pag_state_lock);
		INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
		init_waitqueue_head(&pag->pagb_wait);
		pag->pagb_count = 0;
		pag->pagb_tree = RB_ROOT;

		error = xfs_buf_hash_init(pag);
		if (error)
			goto out_remove_pag;

		error = xfs_iunlink_init(pag);
		if (error)
			goto out_hash_destroy;

		/* first new pag is fully initialized */
		if (first_initialised == NULLAGNUMBER)
			first_initialised = index;
	}

	index = xfs_set_inode_alloc(mp, agcount);

	if (maxagi)
		*maxagi = index;

	mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
	return 0;

out_hash_destroy:
	xfs_buf_hash_destroy(pag);
out_remove_pag:
	radix_tree_delete(&mp->m_perag_tree, index);
out_free_pag:
	kmem_free(pag);
out_unwind_new_pags:
	/* unwind any prior newly initialized pags */
	for (index = first_initialised; index < agcount; index++) {
		pag = radix_tree_delete(&mp->m_perag_tree, index);
		if (!pag)
			break;
		xfs_buf_hash_destroy(pag);
		xfs_iunlink_destroy(pag);
		kmem_free(pag);
	}
	return error;
}

static int
xfs_get_aghdr_buf(
	struct xfs_mount	*mp,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, 0, &bp);
	if (error)
		return error;

	bp->b_maps[0].bm_bn = blkno;
	bp->b_ops = ops;

	*bpp = bp;
	return 0;
}

static inline bool is_log_ag(struct xfs_mount *mp, struct aghdr_init_data *id)
{
	return mp->m_sb.sb_logstart > 0 &&
	       id->agno == XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart);
}

/*
 * Generic btree root block init function
 */
static void
xfs_btroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, id->type, 0, 0, id->agno);
}

/* Finish initializing a free space btree. */
static void
xfs_freesp_init_recs(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_alloc_rec	*arec;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);

	arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
	arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);

	if (is_log_ag(mp, id)) {
		struct xfs_alloc_rec	*nrec;
		xfs_agblock_t		start = XFS_FSB_TO_AGBNO(mp,
							mp->m_sb.sb_logstart);

		ASSERT(start >= mp->m_ag_prealloc_blocks);
		if (start != mp->m_ag_prealloc_blocks) {
			/*
			 * Modify the first record to pad out the stripe
			 * alignment of the log.
			 */
			arec->ar_blockcount = cpu_to_be32(start -
						mp->m_ag_prealloc_blocks);
			nrec = arec + 1;

			/*
			 * Insert a second record at the start of the internal
			 * log, which then gets trimmed.
			 */
			nrec->ar_startblock = cpu_to_be32(
					be32_to_cpu(arec->ar_startblock) +
					be32_to_cpu(arec->ar_blockcount));
			arec = nrec;
			be16_add_cpu(&block->bb_numrecs, 1);
		}
		/*
		 * Change the record start to after the internal log.
		 */
		be32_add_cpu(&arec->ar_startblock, mp->m_sb.sb_logblocks);
	}

	/*
	 * Calculate the record block count and check for the case where
	 * the log might have consumed all available space in the AG. If
	 * so, reset the record count to 0 to avoid exposure of an invalid
	 * record start block.
	 */
	arec->ar_blockcount = cpu_to_be32(id->agsize -
					  be32_to_cpu(arec->ar_startblock));
	if (!arec->ar_blockcount)
		block->bb_numrecs = 0;
}

/*
 * Alloc btree root block init functions
 */
static void
xfs_bnoroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, XFS_BTNUM_BNO, 0, 1, id->agno);
	xfs_freesp_init_recs(mp, bp, id);
}

static void
xfs_cntroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, XFS_BTNUM_CNT, 0, 1, id->agno);
	xfs_freesp_init_recs(mp, bp, id);
}

/*
 * Reverse map root block init
 */
static void
xfs_rmaproot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_rmap_rec	*rrec;

	xfs_btree_init_block(mp, bp, XFS_BTNUM_RMAP, 0, 4, id->agno);

	/*
	 * Mark the AG header regions as static metadata. The BNO
	 * btree block is the first block after the headers, so its
	 * location defines the size of the region the static
	 * metadata consumes.
	 *
	 * Note: unlike mkfs, we never have to account for log
	 * space when growing the data regions.
	 */
	rrec = XFS_RMAP_REC_ADDR(block, 1);
	rrec->rm_startblock = 0;
	rrec->rm_blockcount = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_FS);
	rrec->rm_offset = 0;

	/* account freespace btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 2);
	rrec->rm_startblock = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(2);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account inode btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 3);
	rrec->rm_startblock = cpu_to_be32(XFS_IBT_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(XFS_RMAP_BLOCK(mp) -
					  XFS_IBT_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_INOBT);
	rrec->rm_offset = 0;

	/* account for rmap btree root */
	rrec = XFS_RMAP_REC_ADDR(block, 4);
	rrec->rm_startblock = cpu_to_be32(XFS_RMAP_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(1);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account for refc btree root */
	if (xfs_has_reflink(mp)) {
		rrec = XFS_RMAP_REC_ADDR(block, 5);
		rrec->rm_startblock = cpu_to_be32(xfs_refc_block(mp));
		rrec->rm_blockcount = cpu_to_be32(1);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_REFC);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);
	}

	/* account for the log space */
	if (is_log_ag(mp, id)) {
		rrec = XFS_RMAP_REC_ADDR(block,
				be16_to_cpu(block->bb_numrecs) + 1);
		rrec->rm_startblock = cpu_to_be32(
				XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart));
		rrec->rm_blockcount = cpu_to_be32(mp->m_sb.sb_logblocks);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_LOG);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);
	}
}

/*
 * Initialise new secondary superblocks with the pre-grow geometry, but mark
 * them as "in progress" so we know they haven't yet been activated. This will
 * get cleared when the update with the new geometry information is done after
 * changes to the primary are committed. This isn't strictly necessary, but we
 * get it for free with the delayed buffer write lists and it means we can tell
 * if a grow operation didn't complete properly after the fact.
 */
static void
xfs_sbblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_dsb		*dsb = bp->b_addr;

	xfs_sb_to_disk(dsb, &mp->m_sb);
	dsb->sb_inprogress = 1;
}
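
/*
 * An interrupted grow can therefore be detected after the fact by reading a
 * secondary superblock back in and checking the flag, roughly (illustrative
 * sketch only):
 *
 *	struct xfs_dsb	*dsb = bp->b_addr;
 *
 *	if (dsb->sb_inprogress)
 *		...this secondary superblock was never activated...
 */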

static void
xfs_agfblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agf		*agf = bp->b_addr;
	xfs_extlen_t		tmpsize;

	agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
	agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
	agf->agf_seqno = cpu_to_be32(id->agno);
	agf->agf_length = cpu_to_be32(id->agsize);
	agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
	agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
	agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
	agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
	if (xfs_has_rmapbt(mp)) {
		agf->agf_roots[XFS_BTNUM_RMAPi] =
					cpu_to_be32(XFS_RMAP_BLOCK(mp));
		agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1);
		agf->agf_rmap_blocks = cpu_to_be32(1);
	}

	agf->agf_flfirst = cpu_to_be32(1);
	agf->agf_fllast = 0;
	agf->agf_flcount = 0;
	tmpsize = id->agsize - mp->m_ag_prealloc_blocks;
	agf->agf_freeblks = cpu_to_be32(tmpsize);
	agf->agf_longest = cpu_to_be32(tmpsize);
	if (xfs_has_crc(mp))
		uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_has_reflink(mp)) {
		agf->agf_refcount_root = cpu_to_be32(
				xfs_refc_block(mp));
		agf->agf_refcount_level = cpu_to_be32(1);
		agf->agf_refcount_blocks = cpu_to_be32(1);
	}

	if (is_log_ag(mp, id)) {
		int64_t	logblocks = mp->m_sb.sb_logblocks;

		be32_add_cpu(&agf->agf_freeblks, -logblocks);
		agf->agf_longest = cpu_to_be32(id->agsize -
			XFS_FSB_TO_AGBNO(mp, mp->m_sb.sb_logstart) - logblocks);
	}
}

static void
xfs_agflblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agfl		*agfl = XFS_BUF_TO_AGFL(bp);
	__be32			*agfl_bno;
	int			bucket;

	if (xfs_has_crc(mp)) {
		agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
		agfl->agfl_seqno = cpu_to_be32(id->agno);
		uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
	}

	agfl_bno = xfs_buf_to_agfl_bno(bp);
	for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
		agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
}

static void
xfs_agiblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agi		*agi = bp->b_addr;
	int			bucket;

	agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
	agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
	agi->agi_seqno = cpu_to_be32(id->agno);
	agi->agi_length = cpu_to_be32(id->agsize);
	agi->agi_count = 0;
	agi->agi_root = cpu_to_be32(XFS_IBT_BLOCK(mp));
	agi->agi_level = cpu_to_be32(1);
	agi->agi_freecount = 0;
	agi->agi_newino = cpu_to_be32(NULLAGINO);
	agi->agi_dirino = cpu_to_be32(NULLAGINO);
	if (xfs_has_crc(mp))
		uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_has_finobt(mp)) {
		agi->agi_free_root = cpu_to_be32(XFS_FIBT_BLOCK(mp));
		agi->agi_free_level = cpu_to_be32(1);
	}
	for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++)
		agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
	if (xfs_has_inobtcounts(mp)) {
		agi->agi_iblocks = cpu_to_be32(1);
		if (xfs_has_finobt(mp))
			agi->agi_fblocks = cpu_to_be32(1);
	}
}

typedef void (*aghdr_init_work_f)(struct xfs_mount *mp, struct xfs_buf *bp,
				  struct aghdr_init_data *id);

static int
xfs_ag_init_hdr(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id,
	aghdr_init_work_f	work,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	int			error;

	error = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, &bp, ops);
	if (error)
		return error;

	(*work)(mp, bp, id);

	xfs_buf_delwri_queue(bp, &id->buffer_list);
	xfs_buf_relse(bp);
	return 0;
}

struct xfs_aghdr_grow_data {
	xfs_daddr_t		daddr;		/* disk address of the header */
	size_t			numblks;	/* header buffer length (BBs) */
	const struct xfs_buf_ops *ops;		/* buffer verifier ops */
	aghdr_init_work_f	work;		/* header init callback */
	xfs_btnum_t		type;		/* btree type, where relevant */
	bool			need_init;	/* false to skip this header */
};

/*
 * Prepare new AG headers to be written to disk. We use uncached buffers here,
 * as it is assumed these new AG headers are beyond the currently valid
 * filesystem address space. Using cached buffers would trip over EOFS
 * corruption detection algorithms in the buffer cache lookup routines.
 *
 * This is a non-transactional function, but the prepared buffers are added to
 * a delayed write buffer list supplied by the caller so they can submit them
 * to disk and wait on them as required.
 */
int
xfs_ag_init_headers(
	struct xfs_mount	*mp,
	struct aghdr_init_data	*id)
{
	struct xfs_aghdr_grow_data aghdr_data[] = {
	{ /* SB */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_SB_DADDR),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_sb_buf_ops,
		.work = &xfs_sbblock_init,
		.need_init = true
	},
	{ /* AGF */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGF_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agf_buf_ops,
		.work = &xfs_agfblock_init,
		.need_init = true
	},
	{ /* AGFL */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGFL_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agfl_buf_ops,
		.work = &xfs_agflblock_init,
		.need_init = true
	},
	{ /* AGI */
		.daddr = XFS_AG_DADDR(mp, id->agno, XFS_AGI_DADDR(mp)),
		.numblks = XFS_FSS_TO_BB(mp, 1),
		.ops = &xfs_agi_buf_ops,
		.work = &xfs_agiblock_init,
		.need_init = true
	},
	{ /* BNO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_BNO_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_bnobt_buf_ops,
		.work = &xfs_bnoroot_init,
		.need_init = true
	},
	{ /* CNT root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_CNT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_cntbt_buf_ops,
		.work = &xfs_cntroot_init,
		.need_init = true
	},
	{ /* INO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_IBT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_inobt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_INO,
		.need_init = true
	},
	{ /* FINO root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_FIBT_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_finobt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_FINO,
		.need_init = xfs_has_finobt(mp)
	},
	{ /* RMAP root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, XFS_RMAP_BLOCK(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_rmapbt_buf_ops,
		.work = &xfs_rmaproot_init,
		.need_init = xfs_has_rmapbt(mp)
	},
	{ /* REFC root block */
		.daddr = XFS_AGB_TO_DADDR(mp, id->agno, xfs_refc_block(mp)),
		.numblks = BTOBB(mp->m_sb.sb_blocksize),
		.ops = &xfs_refcountbt_buf_ops,
		.work = &xfs_btroot_init,
		.type = XFS_BTNUM_REFC,
		.need_init = xfs_has_reflink(mp)
	},
	{ /* NULL terminating block */
		.daddr = XFS_BUF_DADDR_NULL,
	}
	};
	struct xfs_aghdr_grow_data *dp;
	int			error = 0;

	/* Account for AG free space in new AG */
	id->nfree += id->agsize - mp->m_ag_prealloc_blocks;
	for (dp = &aghdr_data[0]; dp->daddr != XFS_BUF_DADDR_NULL; dp++) {
		if (!dp->need_init)
			continue;

		id->daddr = dp->daddr;
		id->numblks = dp->numblks;
		id->type = dp->type;
		error = xfs_ag_init_hdr(mp, id, dp->work, dp->ops);
		if (error)
			break;
	}
	return error;
}
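
/*
 * A grow-style caller is expected to drive the prepared buffer list itself,
 * roughly as follows (illustrative sketch only; the real callers live in the
 * growfs code):
 *
 *	struct aghdr_init_data	id = {};
 *
 *	INIT_LIST_HEAD(&id.buffer_list);
 *	id.agno = agno;
 *	id.agsize = agsize;
 *	error = xfs_ag_init_headers(mp, &id);
 *	if (!error)
 *		error = xfs_buf_delwri_submit(&id.buffer_list);
 */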

int
xfs_ag_shrink_space(
	struct xfs_mount	*mp,
	struct xfs_trans	**tpp,
	xfs_agnumber_t		agno,
	xfs_extlen_t		delta)
{
	struct xfs_alloc_arg	args = {
		.tp	= *tpp,
		.mp	= mp,
		.type	= XFS_ALLOCTYPE_THIS_BNO,
		.minlen = delta,
		.maxlen = delta,
		.oinfo	= XFS_RMAP_OINFO_SKIP_UPDATE,
		.resv	= XFS_AG_RESV_NONE,
		.prod	= 1
	};
	struct xfs_buf		*agibp, *agfbp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	xfs_agblock_t		aglen;
	int			error, err2;

	ASSERT(agno == mp->m_sb.sb_agcount - 1);
	error = xfs_ialloc_read_agi(mp, *tpp, agno, &agibp);
	if (error)
		return error;

	agi = agibp->b_addr;

	error = xfs_alloc_read_agf(mp, *tpp, agno, 0, &agfbp);
	if (error)
		return error;

	agf = agfbp->b_addr;
	aglen = be32_to_cpu(agi->agi_length);
	/* Some extra paranoid checks before we shrink the AG. */
	if (XFS_IS_CORRUPT(mp, agf->agf_length != agi->agi_length))
		return -EFSCORRUPTED;
	if (delta >= aglen)
		return -EINVAL;

	args.fsbno = XFS_AGB_TO_FSB(mp, agno, aglen - delta);

	/*
	 * Make sure that the last inode cluster cannot overlap with the new
	 * end of the AG, even if it's sparse.
	 */
	error = xfs_ialloc_check_shrink(*tpp, agno, agibp, aglen - delta);
	if (error)
		return error;

	/*
	 * Disable perag reservations so they don't cause the allocation
	 * request to fail. We'll reestablish the reservation before we return.
	 */
	error = xfs_ag_resv_free(agibp->b_pag);
	if (error)
		return error;

	/* The internal log shouldn't also show up in the free space btrees. */
	error = xfs_alloc_vextent(&args);
	if (!error && args.agbno == NULLAGBLOCK)
		error = -ENOSPC;

	if (error) {
		/*
		 * If extent allocation fails, we need to roll the transaction
		 * to ensure that the AGFL fixup has been committed anyway.
		 */
		xfs_trans_bhold(*tpp, agfbp);
		err2 = xfs_trans_roll(tpp);
		if (err2)
			return err2;
		xfs_trans_bjoin(*tpp, agfbp);
		goto resv_init_out;
	}

	/*
	 * If the extent was successfully removed from the free space btrees,
	 * we need to confirm that the per-AG reservation still works as
	 * expected.
	 */
	be32_add_cpu(&agi->agi_length, -delta);
	be32_add_cpu(&agf->agf_length, -delta);

	err2 = xfs_ag_resv_init(agibp->b_pag, *tpp);
	if (err2) {
		be32_add_cpu(&agi->agi_length, delta);
		be32_add_cpu(&agf->agf_length, delta);
		if (err2 != -ENOSPC)
			goto resv_err;

		__xfs_bmap_add_free(*tpp, args.fsbno, delta, NULL, true);

		/*
		 * Roll the transaction before trying to re-init the per-ag
		 * reservation. The new transaction is clean so it will cancel
		 * without any side effects.
		 */
		error = xfs_defer_finish(tpp);
		if (error)
			return error;

		error = -ENOSPC;
		goto resv_init_out;
	}
	xfs_ialloc_log_agi(*tpp, agibp, XFS_AGI_LENGTH);
	xfs_alloc_log_agf(*tpp, agfbp, XFS_AGF_LENGTH);
	return 0;
resv_init_out:
	err2 = xfs_ag_resv_init(agibp->b_pag, *tpp);
	if (!err2)
		return error;
resv_err:
	xfs_warn(mp, "Error %d reserving per-AG metadata reserve pool.", err2);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return err2;
}

/*
 * Extend the AG indicated by @id by the length passed in.
 */
int
xfs_ag_extend_space(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct aghdr_init_data	*id,
	xfs_extlen_t		len)
{
	struct xfs_buf		*bp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	int			error;

	/*
	 * Change the agi length.
	 */
	error = xfs_ialloc_read_agi(mp, tp, id->agno, &bp);
	if (error)
		return error;

	agi = bp->b_addr;
	be32_add_cpu(&agi->agi_length, len);
	ASSERT(id->agno == mp->m_sb.sb_agcount - 1 ||
	       be32_to_cpu(agi->agi_length) == mp->m_sb.sb_agblocks);
	xfs_ialloc_log_agi(tp, bp, XFS_AGI_LENGTH);

	/*
	 * Change agf length.
	 */
	error = xfs_alloc_read_agf(mp, tp, id->agno, 0, &bp);
	if (error)
		return error;

	agf = bp->b_addr;
	be32_add_cpu(&agf->agf_length, len);
	ASSERT(agf->agf_length == agi->agi_length);
	xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);

	/*
	 * Free the new space.
	 *
	 * XFS_RMAP_OINFO_SKIP_UPDATE is used here to tell the rmap btree that
	 * this doesn't actually exist in the rmap btree.
	 */
	error = xfs_rmap_free(tp, bp, bp->b_pag,
				be32_to_cpu(agf->agf_length) - len,
				len, &XFS_RMAP_OINFO_SKIP_UPDATE);
	if (error)
		return error;

	return xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, id->agno,
					be32_to_cpu(agf->agf_length) - len),
				len, &XFS_RMAP_OINFO_SKIP_UPDATE,
				XFS_AG_RESV_NONE);
}

/* Retrieve AG geometry. */
int
xfs_ag_get_geometry(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	struct xfs_ag_geometry	*ageo)
{
	struct xfs_buf		*agi_bp;
	struct xfs_buf		*agf_bp;
	struct xfs_agi		*agi;
	struct xfs_agf		*agf;
	struct xfs_perag	*pag;
	unsigned int		freeblks;
	int			error;

	if (agno >= mp->m_sb.sb_agcount)
		return -EINVAL;

	/* Lock the AG headers. */
	error = xfs_ialloc_read_agi(mp, NULL, agno, &agi_bp);
	if (error)
		return error;
	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agf_bp);
	if (error)
		goto out_agi;

	pag = agi_bp->b_pag;

	/* Fill out form. */
	memset(ageo, 0, sizeof(*ageo));
	ageo->ag_number = agno;

	agi = agi_bp->b_addr;
	ageo->ag_icount = be32_to_cpu(agi->agi_count);
	ageo->ag_ifree = be32_to_cpu(agi->agi_freecount);

	agf = agf_bp->b_addr;
	ageo->ag_length = be32_to_cpu(agf->agf_length);
	freeblks = pag->pagf_freeblks +
		   pag->pagf_flcount +
		   pag->pagf_btreeblks -
		   xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE);
	ageo->ag_freeblks = freeblks;
	xfs_ag_geom_health(pag, ageo);

	/* Release resources. */
	xfs_buf_relse(agf_bp);
out_agi:
	xfs_buf_relse(agi_bp);
	return error;
}