// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2018-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_extent_busy.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_defer.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/bitmap.h"
#include "scrub/stats.h"

/*
 * Attempt to repair some metadata, if the metadata is corrupt and userspace
 * told us to fix it.  This function returns -EAGAIN to mean "re-run scrub",
 * and will set run->repair_succeeded if it thinks it repaired anything.
 */
int
xrep_attempt(
	struct xfs_scrub	*sc,
	struct xchk_stats_run	*run)
{
	u64			repair_start;
	int			error = 0;

	trace_xrep_attempt(XFS_I(file_inode(sc->file)), sc->sm, error);

	xchk_ag_btcur_free(&sc->sa);

	/* Repair whatever's broken. */
	ASSERT(sc->ops->repair);
	run->repair_attempted = true;
	repair_start = xchk_stats_now();
	error = sc->ops->repair(sc);
	trace_xrep_done(XFS_I(file_inode(sc->file)), sc->sm, error);
	run->repair_ns += xchk_stats_elapsed_ns(repair_start);
	switch (error) {
	case 0:
		/*
		 * Repair succeeded.  Commit the fixes and perform a second
		 * scrub so that we can tell userspace if we fixed the problem.
		 */
		sc->sm->sm_flags &= ~XFS_SCRUB_FLAGS_OUT;
		sc->flags |= XREP_ALREADY_FIXED;
		run->repair_succeeded = true;
		return -EAGAIN;
	case -ECHRNG:
		sc->flags |= XCHK_NEED_DRAIN;
		run->retries++;
		return -EAGAIN;
	case -EDEADLOCK:
		/* Tell the caller to try again having grabbed all the locks. */
		if (!(sc->flags & XCHK_TRY_HARDER)) {
			sc->flags |= XCHK_TRY_HARDER;
			run->retries++;
			return -EAGAIN;
		}
		/*
		 * We tried harder but still couldn't grab all the resources
		 * we needed to fix it.  The corruption has not been fixed,
		 * so exit to userspace with the scan's output flags unchanged.
		 */
		return 0;
	default:
		/*
		 * EAGAIN tells the caller to re-scrub, so we cannot return
		 * that here.
		 */
		ASSERT(error != -EAGAIN);
		return error;
	}
}
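
/*
 * Illustrative sketch (not the actual dispatch code) of how a caller is
 * expected to consume the -EAGAIN convention above; the setup and
 * repair-eligibility helpers named here are hypothetical:
 *
 *	do {
 *		error = scrub_setup_and_check(sc);
 *		if (!error && repair_wanted(sc))
 *			error = xrep_attempt(sc, run);
 *	} while (error == -EAGAIN);
 */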

/*
 * Complain about unfixable problems in the filesystem.  We don't log
 * corruptions when IFLAG_REPAIR wasn't set on the assumption that the driver
 * program is xfs_scrub, which will call back with IFLAG_REPAIR set if the
 * administrator isn't running xfs_scrub in no-repairs mode.
 *
 * Use this helper function because _ratelimited silently declares a static
 * structure to track rate limiting information.
 */
void
xrep_failure(
	struct xfs_mount	*mp)
{
	xfs_alert_ratelimited(mp,
"Corruption not fixed during online repair.  Unmount and run xfs_repair.");
}

/*
 * Repair probe -- userspace uses this to probe if we're willing to repair a
 * given mountpoint.
 */
int
xrep_probe(
	struct xfs_scrub	*sc)
{
	int			error = 0;

	if (xchk_should_terminate(sc, &error))
		return error;

	return 0;
}
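
/*
 * Userspace probes (illustratively) by invoking the scrub ioctl with
 * sm_type set to XFS_SCRUB_TYPE_PROBE and XFS_SCRUB_IFLAG_REPAIR set in
 * sm_flags; a zero return indicates that this kernel can attempt repairs
 * on this filesystem.
 */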

/*
 * Roll a transaction, keeping the AG header buffers locked and held across
 * the roll.
 */
int
xrep_roll_ag_trans(
	struct xfs_scrub	*sc)
{
	int			error;

	/*
	 * Keep the AG header buffers locked while we roll the transaction.
	 * Ensure that both AG buffers are dirty and held when we roll the
	 * transaction so that they move forward in the log without losing the
	 * bli (and hence the bli type) when the transaction commits.
	 *
	 * Normal code would never hold clean buffers across a roll, but repair
	 * needs both buffers to maintain a total lock on the AG.
	 */
	if (sc->sa.agi_bp) {
		xfs_ialloc_log_agi(sc->tp, sc->sa.agi_bp, XFS_AGI_MAGICNUM);
		xfs_trans_bhold(sc->tp, sc->sa.agi_bp);
	}

	if (sc->sa.agf_bp) {
		xfs_alloc_log_agf(sc->tp, sc->sa.agf_bp, XFS_AGF_MAGICNUM);
		xfs_trans_bhold(sc->tp, sc->sa.agf_bp);
	}

	/*
	 * Roll the transaction.  We still hold the AG header buffers locked
	 * regardless of whether or not that succeeds.  On failure, the buffers
	 * will be released during teardown on our way out of the kernel.  If
	 * successful, join the buffers to the new transaction and move on.
	 */
	error = xfs_trans_roll(&sc->tp);
	if (error)
		return error;

	/* Join the AG headers to the new transaction. */
	if (sc->sa.agi_bp)
		xfs_trans_bjoin(sc->tp, sc->sa.agi_bp);
	if (sc->sa.agf_bp)
		xfs_trans_bjoin(sc->tp, sc->sa.agf_bp);

	return 0;
}

/* Finish all deferred work attached to the repair transaction. */
int
xrep_defer_finish(
	struct xfs_scrub	*sc)
{
	int			error;

	/*
	 * Keep the AG header buffers locked while we complete deferred work
	 * items.  Ensure that both AG buffers are dirty and held when we roll
	 * the transaction so that they move forward in the log without losing
	 * the bli (and hence the bli type) when the transaction commits.
	 *
	 * Normal code would never hold clean buffers across a roll, but repair
	 * needs both buffers to maintain a total lock on the AG.
	 */
	if (sc->sa.agi_bp) {
		xfs_ialloc_log_agi(sc->tp, sc->sa.agi_bp, XFS_AGI_MAGICNUM);
		xfs_trans_bhold(sc->tp, sc->sa.agi_bp);
	}

	if (sc->sa.agf_bp) {
		xfs_alloc_log_agf(sc->tp, sc->sa.agf_bp, XFS_AGF_MAGICNUM);
		xfs_trans_bhold(sc->tp, sc->sa.agf_bp);
	}

	/*
	 * Finish all deferred work items.  We still hold the AG header buffers
	 * locked regardless of whether or not that succeeds.  On failure, the
	 * buffers will be released during teardown on our way out of the
	 * kernel.  If successful, join the buffers to the new transaction
	 * and move on.
	 */
	error = xfs_defer_finish(&sc->tp);
	if (error)
		return error;

	/*
	 * Release the hold that we set above because defer_finish won't do
	 * that for us.  The defer roll code redirties held buffers after each
	 * roll, so the AG header buffers should be ready for logging.
	 */
	if (sc->sa.agi_bp)
		xfs_trans_bhold_release(sc->tp, sc->sa.agi_bp);
	if (sc->sa.agf_bp)
		xfs_trans_bhold_release(sc->tp, sc->sa.agf_bp);

	return 0;
}

/*
 * Does the given AG have enough space to rebuild a btree?  Neither AG
 * reservation can be critical, and we must have enough space (factoring
 * in AG reservations) to construct a whole btree.
 */
bool
xrep_ag_has_space(
	struct xfs_perag	*pag,
	xfs_extlen_t		nr_blocks,
	enum xfs_ag_resv_type	type)
{
	return  !xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) &&
		!xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA) &&
		pag->pagf_freeblks > xfs_ag_resv_needed(pag, type) + nr_blocks;
}
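
/*
 * For example (illustrative only), a btree rebuilder sizing a new structure
 * at nr_blocks blocks might bail out early:
 *
 *	if (!xrep_ag_has_space(sc->sa.pag, nr_blocks, XFS_AG_RESV_NONE))
 *		return -ENOSPC;
 */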

/*
 * Figure out how many blocks to reserve for an AG repair.  We calculate the
 * worst case estimate for the number of blocks we'd need to rebuild one of
 * any type of per-AG btree.
 */
xfs_extlen_t
xrep_calc_ag_resblks(
	struct xfs_scrub		*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_scrub_metadata	*sm = sc->sm;
	struct xfs_perag		*pag;
	struct xfs_buf			*bp;
	xfs_agino_t			icount = NULLAGINO;
	xfs_extlen_t			aglen = NULLAGBLOCK;
	xfs_extlen_t			usedlen;
	xfs_extlen_t			freelen;
	xfs_extlen_t			bnobt_sz;
	xfs_extlen_t			inobt_sz;
	xfs_extlen_t			rmapbt_sz;
	xfs_extlen_t			refcbt_sz;
	int				error;

	if (!(sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR))
		return 0;

	pag = xfs_perag_get(mp, sm->sm_agno);
	if (xfs_perag_initialised_agi(pag)) {
		/* Use in-core icount if possible. */
		icount = pag->pagi_count;
	} else {
		/* Try to get the actual counters from disk. */
		error = xfs_ialloc_read_agi(pag, NULL, &bp);
		if (!error) {
			icount = pag->pagi_count;
			xfs_buf_relse(bp);
		}
	}

	/* Now grab the block counters from the AGF. */
	error = xfs_alloc_read_agf(pag, NULL, 0, &bp);
	if (error) {
		aglen = pag->block_count;
		freelen = aglen;
		usedlen = aglen;
	} else {
		struct xfs_agf	*agf = bp->b_addr;

		aglen = be32_to_cpu(agf->agf_length);
		freelen = be32_to_cpu(agf->agf_freeblks);
		usedlen = aglen - freelen;
		xfs_buf_relse(bp);
	}

	/* If the icount is impossible, make some worst-case assumptions. */
	if (icount == NULLAGINO ||
	    !xfs_verify_agino(pag, icount)) {
		icount = pag->agino_max - pag->agino_min + 1;
	}

	/* If the block counts are impossible, make worst-case assumptions. */
	if (aglen == NULLAGBLOCK ||
	    aglen != pag->block_count ||
	    freelen >= aglen) {
		aglen = pag->block_count;
		freelen = aglen;
		usedlen = aglen;
	}
	xfs_perag_put(pag);

	trace_xrep_calc_ag_resblks(mp, sm->sm_agno, icount, aglen,
			freelen, usedlen);

	/*
	 * Figure out how many blocks we'd need worst case to rebuild
	 * each type of btree.  Note that we can only rebuild the
	 * bnobt/cntbt or inobt/finobt as pairs.
	 */
	bnobt_sz = 2 * xfs_allocbt_calc_size(mp, freelen);
	if (xfs_has_sparseinodes(mp))
		inobt_sz = xfs_iallocbt_calc_size(mp, icount /
				XFS_INODES_PER_HOLEMASK_BIT);
	else
		inobt_sz = xfs_iallocbt_calc_size(mp, icount /
				XFS_INODES_PER_CHUNK);
	if (xfs_has_finobt(mp))
		inobt_sz *= 2;
	if (xfs_has_reflink(mp))
		refcbt_sz = xfs_refcountbt_calc_size(mp, usedlen);
	else
		refcbt_sz = 0;
	if (xfs_has_rmapbt(mp)) {
		/*
		 * Guess how many blocks we need to rebuild the rmapbt.
		 * For non-reflink filesystems we can't have more records than
		 * used blocks.  However, with reflink it's possible to have
		 * more than one rmap record per AG block.  We don't know how
		 * many rmaps there could be in the AG, so we start off with
		 * what we hope is a generous over-estimation.
		 */
		if (xfs_has_reflink(mp))
			rmapbt_sz = xfs_rmapbt_calc_size(mp,
					(unsigned long long)aglen * 2);
		else
			rmapbt_sz = xfs_rmapbt_calc_size(mp, usedlen);
	} else {
		rmapbt_sz = 0;
	}

	trace_xrep_calc_ag_resblks_btsize(mp, sm->sm_agno, bnobt_sz,
			inobt_sz, rmapbt_sz, refcbt_sz);

	return max(max(bnobt_sz, inobt_sz), max(rmapbt_sz, refcbt_sz));
}

/*
 * Reconstructing per-AG Btrees
 *
 * When a space btree is corrupt, we don't bother trying to fix it.  Instead,
 * we scan secondary space metadata to derive the records that should be in
 * the damaged btree, initialize a fresh btree root, and insert the records.
 * Note that for rebuilding the rmapbt we scan all the primary data to
 * generate the new records.
 *
 * However, that leaves the matter of removing all the metadata describing the
 * old broken structure.  For primary metadata we use the rmap data to collect
 * every extent with a matching rmap owner (bitmap); we then iterate all other
 * metadata structures with the same rmap owner to collect the extents that
 * cannot be removed (sublist).  We then subtract sublist from bitmap to
 * derive the blocks that were used by the old btree.  These blocks can be
 * reaped.
 *
 * For rmapbt reconstructions we must use different tactics for extent
 * collection.  First we iterate all primary metadata (this excludes the old
 * rmapbt, obviously) to generate new rmap records.  The gaps in the rmap
 * records are collected as bitmap.  The bnobt records are collected as
 * sublist.  As with the other btrees we subtract sublist from bitmap, and the
 * result (since the rmapbt lives in the free space) are the blocks from the
 * old rmapbt.
 */
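
/*
 * A worked example (illustrative only): to reap an old inobt, collect every
 * extent with rmap owner XFS_RMAP_OWN_INOBT into bitmap; collect the blocks
 * still in use by other structures sharing that owner (here, the finobt and
 * the new inobt) into sublist; bitmap minus sublist is then exactly the set
 * of blocks that belonged only to the old btree and can be reaped.
 */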

/* Ensure the freelist is the correct size. */
int
xrep_fix_freelist(
	struct xfs_scrub	*sc,
	bool			can_shrink)
{
	struct xfs_alloc_arg	args = {0};

	args.mp = sc->mp;
	args.tp = sc->tp;
	args.agno = sc->sa.pag->pag_agno;
	args.alignment = 1;
	args.pag = sc->sa.pag;

	return xfs_alloc_fix_freelist(&args,
			can_shrink ? 0 : XFS_ALLOC_FLAG_NOSHRINK);
}

/*
 * Finding per-AG Btree Roots for AGF/AGI Reconstruction
 *
 * If the AGF or AGI becomes slightly corrupted, it may be necessary to
 * rebuild the AG headers by using the rmap data to rummage through the AG
 * looking for btree roots.  This is not guaranteed to work if the AG is
 * heavily damaged or the rmap data are corrupt.
 *
 * Callers of xrep_find_ag_btree_roots must lock the AGF and AGFL buffers
 * if the AGF is being rebuilt, or the AGF and AGI buffers if the AGI is
 * being rebuilt, and must maintain these locks until it's safe for other
 * threads to change the btrees' shapes.  The caller provides information
 * about the btrees to look for by passing in an array of
 * xrep_find_ag_btree with the (rmap owner, buf_ops, magic) fields set.
 * The (root, height) fields will be set on return if anything is found.  The
 * last element of the array should have a NULL buf_ops to mark the end of the
 * array.
 *
 * For every rmapbt record matching any of the rmap owners in btree_info,
 * read each block referenced by the rmap record.  If the block is a btree
 * block from this filesystem matching any of the magic numbers and has a
 * level higher than what we've already seen, remember the block and the
 * height of the tree required to have such a block.  When the call completes,
 * we return the highest block we've found for each btree description; those
 * should be the roots.
 */
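
/*
 * A hypothetical caller setup might look like this (the buf_ops shown are
 * real verifier structures, but the array contents depend on which header
 * is being rebuilt):
 *
 *	struct xrep_find_ag_btree	fab[] = {
 *		{ .rmap_owner = XFS_RMAP_OWN_AG,
 *		  .buf_ops = &xfs_bnobt_buf_ops },
 *		{ .rmap_owner = XFS_RMAP_OWN_INOBT,
 *		  .buf_ops = &xfs_inobt_buf_ops },
 *		{ .buf_ops = NULL },
 *	};
 *
 *	error = xrep_find_ag_btree_roots(sc, agf_bp, fab, agfl_bp);
 *
 * On return, each fab[i].root is the best root candidate found (or
 * NULLAGBLOCK) and fab[i].height is the implied tree height.
 */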

struct xrep_findroot {
	struct xfs_scrub		*sc;
	struct xfs_buf			*agfl_bp;
	struct xfs_agf			*agf;
	struct xrep_find_ag_btree	*btree_info;
};

/* See if our block is in the AGFL. */
STATIC int
xrep_findroot_agfl_walk(
	struct xfs_mount	*mp,
	xfs_agblock_t		bno,
	void			*priv)
{
	xfs_agblock_t		*agbno = priv;

	/* -ECANCELED aborts the walk, signalling that we found the block. */
	return (*agbno == bno) ? -ECANCELED : 0;
}

/* Does this block match the btree information passed in? */
STATIC int
xrep_findroot_block(
	struct xrep_findroot		*ri,
	struct xrep_find_ag_btree	*fab,
	uint64_t			owner,
	xfs_agblock_t			agbno,
	bool				*done_with_block)
{
	struct xfs_mount		*mp = ri->sc->mp;
	struct xfs_buf			*bp;
	struct xfs_btree_block		*btblock;
	xfs_daddr_t			daddr;
	int				block_level;
	int				error = 0;

	daddr = XFS_AGB_TO_DADDR(mp, ri->sc->sa.pag->pag_agno, agbno);

	/*
	 * Blocks in the AGFL have stale contents that might just happen to
	 * have a matching magic and uuid.  We don't want to pull these blocks
	 * in as part of a tree root, so we have to filter out the AGFL stuff
	 * here.  If the AGFL looks insane we'll just refuse to repair.
	 */
	if (owner == XFS_RMAP_OWN_AG) {
		error = xfs_agfl_walk(mp, ri->agf, ri->agfl_bp,
				xrep_findroot_agfl_walk, &agbno);
		if (error == -ECANCELED)
			return 0;
		if (error)
			return error;
	}

	/*
	 * Read the buffer into memory so that we can see if it's a match for
	 * our btree type.  We have no clue if it is beforehand, and we want to
	 * avoid xfs_trans_read_buf's behavior of dumping the DONE state (which
	 * will cause needless disk reads in subsequent calls to this function)
	 * and logging metadata verifier failures.
	 *
	 * Therefore, pass in NULL buffer ops.  If the buffer was already in
	 * memory from some other caller it will already have b_ops assigned.
	 * If it was in memory from a previous unsuccessful findroot_block
	 * call, the buffer won't have b_ops but it should be clean and ready
	 * for us to try to verify if the read call succeeds.  The same applies
	 * if the buffer wasn't in memory at all.
	 *
	 * Note: If we never match a btree type with this buffer, it will be
	 * left in memory with NULL b_ops.  This shouldn't be a problem unless
	 * the buffer gets written.
	 */
	error = xfs_trans_read_buf(mp, ri->sc->tp, mp->m_ddev_targp, daddr,
			mp->m_bsize, 0, &bp, NULL);
	if (error)
		return error;

	/* Ensure the block magic matches the btree type we're looking for. */
	btblock = XFS_BUF_TO_BLOCK(bp);
	ASSERT(fab->buf_ops->magic[1] != 0);
	if (btblock->bb_magic != fab->buf_ops->magic[1])
		goto out;

	/*
	 * If the buffer already has ops applied and they're not the ones for
	 * this btree type, we know this block doesn't match the btree and we
	 * can bail out.
	 *
	 * If the buffer ops match ours, someone else has already validated
	 * the block for us, so we can move on to checking if this is a root
	 * block candidate.
	 *
	 * If the buffer does not have ops, nobody has successfully validated
	 * the contents and the buffer cannot be dirty.  If the magic, uuid,
	 * and structure match this btree type then we'll move on to checking
	 * if it's a root block candidate.  If there is no match, bail out.
	 */
	if (bp->b_ops) {
		if (bp->b_ops != fab->buf_ops)
			goto out;
	} else {
		ASSERT(!xfs_trans_buf_is_dirty(bp));
		if (!uuid_equal(&btblock->bb_u.s.bb_uuid,
				&mp->m_sb.sb_meta_uuid))
			goto out;
		/*
		 * Read verifiers can reference b_ops, so we set the pointer
		 * here.  If the verifier fails we'll reset the buffer state
		 * to what it was before we touched the buffer.
		 */
		bp->b_ops = fab->buf_ops;
		fab->buf_ops->verify_read(bp);
		if (bp->b_error) {
			bp->b_ops = NULL;
			bp->b_error = 0;
			goto out;
		}

		/*
		 * Some read verifiers will (re)set b_ops, so we must be
		 * careful not to change b_ops after running the verifier.
		 */
	}

	/*
	 * This block passes the magic/uuid and verifier tests for this btree
	 * type.  We don't need the caller to try the other tree types.
	 */
	*done_with_block = true;

	/*
	 * Compare this btree block's level to the height of the current
	 * candidate root block.
	 *
	 * If the level matches the root we found previously, throw away both
	 * blocks because there can't be two candidate roots.
	 *
	 * If level is lower in the tree than the root we found previously,
	 * ignore this block.
	 */
	block_level = xfs_btree_get_level(btblock);
	if (block_level + 1 == fab->height) {
		fab->root = NULLAGBLOCK;
		goto out;
	} else if (block_level < fab->height) {
		goto out;
	}

	/*
	 * This is the highest block in the tree that we've found so far.
	 * Update the btree height to reflect what we've learned from this
	 * block.
	 */
	fab->height = block_level + 1;

	/*
	 * If this block doesn't have sibling pointers, then it's the new root
	 * block candidate.  Otherwise, the root will be found farther up the
	 * tree.
	 */
	if (btblock->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) &&
	    btblock->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
		fab->root = agbno;
	else
		fab->root = NULLAGBLOCK;

	trace_xrep_findroot_block(mp, ri->sc->sa.pag->pag_agno, agbno,
			be32_to_cpu(btblock->bb_magic), fab->height - 1);
out:
	xfs_trans_brelse(ri->sc->tp, bp);
	return error;
}

/*
 * Do any of the blocks in this rmap record match one of the btrees we're
 * looking for?
 */
STATIC int
xrep_findroot_rmap(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	void				*priv)
{
	struct xrep_findroot		*ri = priv;
	struct xrep_find_ag_btree	*fab;
	xfs_agblock_t			b;
	bool				done;
	int				error = 0;

	/* Ignore anything that isn't AG metadata. */
	if (!XFS_RMAP_NON_INODE_OWNER(rec->rm_owner))
		return 0;

	/* Otherwise scan each block + btree type. */
	for (b = 0; b < rec->rm_blockcount; b++) {
		done = false;
		for (fab = ri->btree_info; fab->buf_ops; fab++) {
			if (rec->rm_owner != fab->rmap_owner)
				continue;
			error = xrep_findroot_block(ri, fab,
					rec->rm_owner, rec->rm_startblock + b,
					&done);
			if (error)
				return error;
			if (done)
				break;
		}
	}

	return 0;
}

/* Find the roots of the per-AG btrees described in btree_info. */
int
xrep_find_ag_btree_roots(
	struct xfs_scrub		*sc,
	struct xfs_buf			*agf_bp,
	struct xrep_find_ag_btree	*btree_info,
	struct xfs_buf			*agfl_bp)
{
	struct xfs_mount		*mp = sc->mp;
	struct xrep_findroot		ri;
	struct xrep_find_ag_btree	*fab;
	struct xfs_btree_cur		*cur;
	int				error;

	ASSERT(xfs_buf_islocked(agf_bp));
	ASSERT(agfl_bp == NULL || xfs_buf_islocked(agfl_bp));

	ri.sc = sc;
	ri.btree_info = btree_info;
	ri.agf = agf_bp->b_addr;
	ri.agfl_bp = agfl_bp;
	for (fab = btree_info; fab->buf_ops; fab++) {
		ASSERT(agfl_bp || fab->rmap_owner != XFS_RMAP_OWN_AG);
		ASSERT(XFS_RMAP_NON_INODE_OWNER(fab->rmap_owner));
		fab->root = NULLAGBLOCK;
		fab->height = 0;
	}

	cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);
	error = xfs_rmap_query_all(cur, xrep_findroot_rmap, &ri);
	xfs_btree_del_cursor(cur, error);

	return error;
}

/* Force a quotacheck the next time we mount. */
void
xrep_force_quotacheck(
	struct xfs_scrub	*sc,
	xfs_dqtype_t		type)
{
	uint			flag;

	flag = xfs_quota_chkd_flag(type);
	if (!(flag & sc->mp->m_qflags))
		return;

	mutex_lock(&sc->mp->m_quotainfo->qi_quotaofflock);
	sc->mp->m_qflags &= ~flag;
	spin_lock(&sc->mp->m_sb_lock);
	sc->mp->m_sb.sb_qflags &= ~flag;
	spin_unlock(&sc->mp->m_sb_lock);
	xfs_log_sb(sc->tp);
	mutex_unlock(&sc->mp->m_quotainfo->qi_quotaofflock);
}

/*
 * Attach dquots to this inode, or schedule quotacheck to fix them.
 *
 * This function ensures that the appropriate dquots are attached to an inode.
 * We cannot allow the dquot code to allocate an on-disk dquot block here
 * because we're already in transaction context with the inode locked.  The
 * on-disk dquot should already exist anyway.  If the quota code signals
 * corruption or missing quota information, schedule quotacheck, which will
 * repair corruptions in the quota metadata.
 */
int
xrep_ino_dqattach(
	struct xfs_scrub	*sc)
{
	int			error;

	error = xfs_qm_dqattach_locked(sc->ip, false);
	switch (error) {
	case -EFSBADCRC:
	case -EFSCORRUPTED:
	case -ENOENT:
		xfs_err_ratelimited(sc->mp,
"inode %llu repair encountered quota error %d, quotacheck forced.",
				(unsigned long long)sc->ip->i_ino, error);
		if (XFS_IS_UQUOTA_ON(sc->mp) && !sc->ip->i_udquot)
			xrep_force_quotacheck(sc, XFS_DQTYPE_USER);
		if (XFS_IS_GQUOTA_ON(sc->mp) && !sc->ip->i_gdquot)
			xrep_force_quotacheck(sc, XFS_DQTYPE_GROUP);
		if (XFS_IS_PQUOTA_ON(sc->mp) && !sc->ip->i_pdquot)
			xrep_force_quotacheck(sc, XFS_DQTYPE_PROJ);
		fallthrough;
	case -ESRCH:
		error = 0;
		break;
	default:
		break;
	}

	return error;
}