// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_log.h"
#include "xfs_trans_priv.h"
#include "xfs_attr.h"
#include "xfs_reflink.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/health.h"

/* Common code for the metadata scrubbers. */

/*
 * Handling operational errors.
 *
 * The *_process_error() family of functions are used to process error return
 * codes from functions called as part of a scrub operation.
 *
 * If there's no error, we return true to tell the caller that it's ok
 * to move on to the next check in its list.
 *
 * For non-verifier errors (e.g. ENOMEM) we return false to tell the
 * caller that something bad happened, and we preserve *error so that
 * the caller can return the *error up the stack to userspace.
 *
 * Verifier errors (EFSBADCRC/EFSCORRUPTED) are recorded by setting
 * OFLAG_CORRUPT in sm_flags and the *error is cleared. In other words,
 * we track verifier errors (and failed scrub checks) via OFLAG_CORRUPT,
 * not via return codes. We return false to tell the caller that
 * something bad happened. Since the error has been cleared, the caller
 * will (presumably) return that zero and scrubbing will move on to
 * whatever's next.
 *
 * ftrace can be used to record the precise metadata location and the
 * approximate code location of the failed operation.
 */
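
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * a scrub check feeds each libxfs return code through one of these
 * helpers before moving on, e.g.:
 *
 *	error = xfs_btree_lookup(cur, XFS_LOOKUP_GE, &has_rec);
 *	if (!xchk_process_error(sc, agno, bno, &error))
 *		return error;
 *
 * If the lookup tripped a verifier, OFLAG_CORRUPT is now set in
 * sm_flags, error has been cleared to zero, and the early return
 * reports "success" to userspace while the corruption flag does the
 * talking.
 */
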
61
62 /* Check for operational errors. */
63 static bool
__xchk_process_error(struct xfs_scrub * sc,xfs_agnumber_t agno,xfs_agblock_t bno,int * error,__u32 errflag,void * ret_ip)64 __xchk_process_error(
65 struct xfs_scrub *sc,
66 xfs_agnumber_t agno,
67 xfs_agblock_t bno,
68 int *error,
69 __u32 errflag,
70 void *ret_ip)
71 {
72 switch (*error) {
73 case 0:
74 return true;
75 case -EDEADLOCK:
76 /* Used to restart an op with deadlock avoidance. */
77 trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
78 break;
79 case -EFSBADCRC:
80 case -EFSCORRUPTED:
81 /* Note the badness but don't abort. */
82 sc->sm->sm_flags |= errflag;
83 *error = 0;
84 /* fall through */
85 default:
86 trace_xchk_op_error(sc, agno, bno, *error,
87 ret_ip);
88 break;
89 }
90 return false;
91 }
92
93 bool
xchk_process_error(struct xfs_scrub * sc,xfs_agnumber_t agno,xfs_agblock_t bno,int * error)94 xchk_process_error(
95 struct xfs_scrub *sc,
96 xfs_agnumber_t agno,
97 xfs_agblock_t bno,
98 int *error)
99 {
100 return __xchk_process_error(sc, agno, bno, error,
101 XFS_SCRUB_OFLAG_CORRUPT, __return_address);
102 }
103
104 bool
xchk_xref_process_error(struct xfs_scrub * sc,xfs_agnumber_t agno,xfs_agblock_t bno,int * error)105 xchk_xref_process_error(
106 struct xfs_scrub *sc,
107 xfs_agnumber_t agno,
108 xfs_agblock_t bno,
109 int *error)
110 {
111 return __xchk_process_error(sc, agno, bno, error,
112 XFS_SCRUB_OFLAG_XFAIL, __return_address);
113 }
114
115 /* Check for operational errors for a file offset. */
116 static bool
__xchk_fblock_process_error(struct xfs_scrub * sc,int whichfork,xfs_fileoff_t offset,int * error,__u32 errflag,void * ret_ip)117 __xchk_fblock_process_error(
118 struct xfs_scrub *sc,
119 int whichfork,
120 xfs_fileoff_t offset,
121 int *error,
122 __u32 errflag,
123 void *ret_ip)
124 {
125 switch (*error) {
126 case 0:
127 return true;
128 case -EDEADLOCK:
129 /* Used to restart an op with deadlock avoidance. */
130 trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
131 break;
132 case -EFSBADCRC:
133 case -EFSCORRUPTED:
134 /* Note the badness but don't abort. */
135 sc->sm->sm_flags |= errflag;
136 *error = 0;
137 /* fall through */
138 default:
139 trace_xchk_file_op_error(sc, whichfork, offset, *error,
140 ret_ip);
141 break;
142 }
143 return false;
144 }
145
146 bool
xchk_fblock_process_error(struct xfs_scrub * sc,int whichfork,xfs_fileoff_t offset,int * error)147 xchk_fblock_process_error(
148 struct xfs_scrub *sc,
149 int whichfork,
150 xfs_fileoff_t offset,
151 int *error)
152 {
153 return __xchk_fblock_process_error(sc, whichfork, offset, error,
154 XFS_SCRUB_OFLAG_CORRUPT, __return_address);
155 }
156
157 bool
xchk_fblock_xref_process_error(struct xfs_scrub * sc,int whichfork,xfs_fileoff_t offset,int * error)158 xchk_fblock_xref_process_error(
159 struct xfs_scrub *sc,
160 int whichfork,
161 xfs_fileoff_t offset,
162 int *error)
163 {
164 return __xchk_fblock_process_error(sc, whichfork, offset, error,
165 XFS_SCRUB_OFLAG_XFAIL, __return_address);
166 }
167
168 /*
169 * Handling scrub corruption/optimization/warning checks.
170 *
171 * The *_set_{corrupt,preen,warning}() family of functions are used to
172 * record the presence of metadata that is incorrect (corrupt), could be
173 * optimized somehow (preen), or should be flagged for administrative
174 * review but is not incorrect (warn).
175 *
176 * ftrace can be used to record the precise metadata location and
177 * approximate code location of the failed check.
178 */
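
/*
 * A minimal usage sketch (hypothetical AGF check, not part of this
 * file): unlike the *_process_error() helpers these return nothing,
 * so a caller just records the bad news and keeps scrubbing:
 *
 *	if (be32_to_cpu(agf->agf_freeblks) > agsize)
 *		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
 *
 * (agf and agsize are assumed locals.) The accumulated sm_flags are
 * what userspace eventually sees.
 */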

/* Record a block which could be optimized. */
void
xchk_block_set_preen(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xchk_block_preen(sc, bp->b_bn, __return_address);
}

/*
 * Record an inode which could be optimized. The trace data will
 * include the inode number given.
 */
void
xchk_ino_set_preen(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xchk_ino_preen(sc, ino, __return_address);
}

/* Record something being wrong with the filesystem primary superblock. */
void
xchk_set_corrupt(
	struct xfs_scrub	*sc)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_fs_error(sc, 0, __return_address);
}

/* Record a corrupt block. */
void
xchk_block_set_corrupt(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_block_error(sc, bp->b_bn, __return_address);
}

/* Record a corruption while cross-referencing. */
void
xchk_block_xref_set_corrupt(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_block_error(sc, bp->b_bn, __return_address);
}

/*
 * Record a corrupt inode. The trace data will include the inode
 * number given.
 */
void
xchk_ino_set_corrupt(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_ino_error(sc, ino, __return_address);
}

/* Record a corruption while cross-referencing with an inode. */
void
xchk_ino_xref_set_corrupt(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_ino_error(sc, ino, __return_address);
}

/* Record corruption in a block indexed by a file fork. */
void
xchk_fblock_set_corrupt(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}

/* Record a corruption while cross-referencing a fork block. */
void
xchk_fblock_xref_set_corrupt(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}

/*
 * Warn about inodes that need administrative review but are not
 * incorrect.
 */
void
xchk_ino_set_warning(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
	trace_xchk_ino_warning(sc, ino, __return_address);
}

/* Warn about a block indexed by a file fork that needs review. */
void
xchk_fblock_set_warning(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
	trace_xchk_fblock_warning(sc, whichfork, offset, __return_address);
}

/* Signal an incomplete scrub. */
void
xchk_set_incomplete(
	struct xfs_scrub	*sc)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_INCOMPLETE;
	trace_xchk_incomplete(sc, __return_address);
}

/*
 * rmap scrubbing -- compute the number of blocks with a given owner,
 * at least according to the reverse mapping data.
 */

struct xchk_rmap_ownedby_info {
	const struct xfs_owner_info	*oinfo;
	xfs_filblks_t			*blocks;
};

STATIC int
xchk_count_rmap_ownedby_irec(
	struct xfs_btree_cur		*cur,
	struct xfs_rmap_irec		*rec,
	void				*priv)
{
	struct xchk_rmap_ownedby_info	*sroi = priv;
	bool				irec_attr;
	bool				oinfo_attr;

	irec_attr = rec->rm_flags & XFS_RMAP_ATTR_FORK;
	oinfo_attr = sroi->oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK;

	if (rec->rm_owner != sroi->oinfo->oi_owner)
		return 0;

	if (XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) || irec_attr == oinfo_attr)
		(*sroi->blocks) += rec->rm_blockcount;

	return 0;
}

/*
 * Calculate the number of blocks the rmap thinks are owned by something.
 * The caller should pass us an rmapbt cursor.
 */
int
xchk_count_rmap_ownedby_ag(
	struct xfs_scrub		*sc,
	struct xfs_btree_cur		*cur,
	const struct xfs_owner_info	*oinfo,
	xfs_filblks_t			*blocks)
{
	struct xchk_rmap_ownedby_info	sroi = {
		.oinfo			= oinfo,
		.blocks			= blocks,
	};

	*blocks = 0;
	return xfs_rmap_query_all(cur, xchk_count_rmap_ownedby_irec,
			&sroi);
}
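
/*
 * A minimal usage sketch (hypothetical cross-check; btreeblks is an
 * assumed local): a caller typically compares the rmapbt's idea of
 * the block count against another structure's count:
 *
 *	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
 *			&XFS_RMAP_OINFO_AG, &blocks);
 *	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
 *		return;
 *	if (blocks != btreeblks)
 *		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
 */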

/*
 * AG scrubbing
 *
 * These helpers facilitate locking an allocation group's header
 * buffers, setting up cursors for all btrees that are present, and
 * cleaning everything up once we're through.
 */

/* Decide if we want to return an AG header read failure. */
static inline bool
want_ag_read_header_failure(
	struct xfs_scrub	*sc,
	unsigned int		type)
{
	/* Return all AG header read failures when scanning btrees. */
	if (sc->sm->sm_type != XFS_SCRUB_TYPE_AGF &&
	    sc->sm->sm_type != XFS_SCRUB_TYPE_AGFL &&
	    sc->sm->sm_type != XFS_SCRUB_TYPE_AGI)
		return true;
	/*
	 * If we're scanning a given type of AG header, we only want to
	 * see read failures from that specific header. We'd like the
	 * other headers to cross-check them, but this isn't required.
	 */
	if (sc->sm->sm_type == type)
		return true;
	return false;
}

/*
 * Grab all the headers for an AG.
 *
 * The headers should be released by xchk_ag_free, but as a fail-safe
 * we attach all the buffers we grab to the scrub transaction so
 * they'll all be freed when we cancel it.
 */
int
xchk_ag_read_headers(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xfs_buf		**agi,
	struct xfs_buf		**agf,
	struct xfs_buf		**agfl)
{
	struct xfs_mount	*mp = sc->mp;
	int			error;

	error = xfs_ialloc_read_agi(mp, sc->tp, agno, agi);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
		goto out;

	error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, agf);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGF))
		goto out;

	error = xfs_alloc_read_agfl(mp, sc->tp, agno, agfl);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGFL))
		goto out;
	error = 0;
out:
	return error;
}

/* Release all the AG btree cursors. */
void
xchk_ag_btcur_free(
	struct xchk_ag		*sa)
{
	if (sa->refc_cur)
		xfs_btree_del_cursor(sa->refc_cur, XFS_BTREE_ERROR);
	if (sa->rmap_cur)
		xfs_btree_del_cursor(sa->rmap_cur, XFS_BTREE_ERROR);
	if (sa->fino_cur)
		xfs_btree_del_cursor(sa->fino_cur, XFS_BTREE_ERROR);
	if (sa->ino_cur)
		xfs_btree_del_cursor(sa->ino_cur, XFS_BTREE_ERROR);
	if (sa->cnt_cur)
		xfs_btree_del_cursor(sa->cnt_cur, XFS_BTREE_ERROR);
	if (sa->bno_cur)
		xfs_btree_del_cursor(sa->bno_cur, XFS_BTREE_ERROR);

	sa->refc_cur = NULL;
	sa->rmap_cur = NULL;
	sa->fino_cur = NULL;
	sa->ino_cur = NULL;
	sa->bno_cur = NULL;
	sa->cnt_cur = NULL;
}

/* Initialize all the btree cursors for an AG. */
int
xchk_ag_btcur_init(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agnumber_t		agno = sa->agno;

	xchk_perag_get(sc->mp, sa);
	if (sa->agf_bp &&
	    xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_BNO)) {
		/* Set up a bnobt cursor for cross-referencing. */
		sa->bno_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
				agno, XFS_BTNUM_BNO);
		if (!sa->bno_cur)
			goto err;
	}

	if (sa->agf_bp &&
	    xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_CNT)) {
		/* Set up a cntbt cursor for cross-referencing. */
		sa->cnt_cur = xfs_allocbt_init_cursor(mp, sc->tp, sa->agf_bp,
				agno, XFS_BTNUM_CNT);
		if (!sa->cnt_cur)
			goto err;
	}

	/* Set up an inobt cursor for cross-referencing. */
	if (sa->agi_bp &&
	    xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_INO)) {
		sa->ino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp,
				agno, XFS_BTNUM_INO);
		if (!sa->ino_cur)
			goto err;
	}

	/* Set up a finobt cursor for cross-referencing. */
	if (sa->agi_bp && xfs_sb_version_hasfinobt(&mp->m_sb) &&
	    xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_FINO)) {
		sa->fino_cur = xfs_inobt_init_cursor(mp, sc->tp, sa->agi_bp,
				agno, XFS_BTNUM_FINO);
		if (!sa->fino_cur)
			goto err;
	}

	/* Set up a rmapbt cursor for cross-referencing. */
	if (sa->agf_bp && xfs_sb_version_hasrmapbt(&mp->m_sb) &&
	    xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_RMAP)) {
		sa->rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp, sa->agf_bp,
				agno);
		if (!sa->rmap_cur)
			goto err;
	}

	/* Set up a refcountbt cursor for cross-referencing. */
	if (sa->agf_bp && xfs_sb_version_hasreflink(&mp->m_sb) &&
	    xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_REFC)) {
		sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp,
				sa->agf_bp, agno);
		if (!sa->refc_cur)
			goto err;
	}

	return 0;
err:
	return -ENOMEM;
}

/* Release the AG header context and btree cursors. */
void
xchk_ag_free(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	xchk_ag_btcur_free(sa);
	if (sa->agfl_bp) {
		xfs_trans_brelse(sc->tp, sa->agfl_bp);
		sa->agfl_bp = NULL;
	}
	if (sa->agf_bp) {
		xfs_trans_brelse(sc->tp, sa->agf_bp);
		sa->agf_bp = NULL;
	}
	if (sa->agi_bp) {
		xfs_trans_brelse(sc->tp, sa->agi_bp);
		sa->agi_bp = NULL;
	}
	if (sa->pag) {
		xfs_perag_put(sa->pag);
		sa->pag = NULL;
	}
	sa->agno = NULLAGNUMBER;
}

/*
 * For scrub, grab the AGI and the AGF headers, in that order. Locking
 * order requires us to get the AGI before the AGF. We use the
 * transaction to avoid deadlocking on crosslinked metadata buffers;
 * either the caller passes one in (bmap scrub) or we have to create a
 * transaction ourselves.
 */
int
xchk_ag_init(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xchk_ag		*sa)
{
	int			error;

	sa->agno = agno;
	error = xchk_ag_read_headers(sc, agno, &sa->agi_bp,
			&sa->agf_bp, &sa->agfl_bp);
	if (error)
		return error;

	return xchk_ag_btcur_init(sc, sa);
}
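
/*
 * A minimal setup/teardown sketch (hypothetical AG scrubber): setup
 * and teardown pair up through sc->sa:
 *
 *	error = xchk_ag_init(sc, agno, &sc->sa);
 *	if (!xchk_process_error(sc, agno, 0, &error))
 *		return error;
 *	... cross-reference against sc->sa.bno_cur et al ...
 *	xchk_ag_free(sc, &sc->sa);
 *
 * In practice the generic scrub teardown path calls xchk_ag_free()
 * for us, so most checkers never call it directly.
 */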

/*
 * Grab the per-ag structure if we haven't already gotten it. Teardown of the
 * xchk_ag will release it for us.
 */
void
xchk_perag_get(
	struct xfs_mount	*mp,
	struct xchk_ag		*sa)
{
	if (!sa->pag)
		sa->pag = xfs_perag_get(mp, sa->agno);
}

/* Per-scrubber setup functions */

/*
 * Grab an empty transaction so that we can re-grab locked buffers if
 * one of our btrees turns out to be cyclic.
 *
 * If we're going to repair something, we need to ask for the largest possible
 * log reservation so that we can handle the worst case scenario for metadata
 * updates while rebuilding a metadata item. We also need to reserve as many
 * blocks in the head transaction as we think we're going to need to rebuild
 * the metadata object.
 */
int
xchk_trans_alloc(
	struct xfs_scrub	*sc,
	uint			resblks)
{
	if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
		return xfs_trans_alloc(sc->mp, &M_RES(sc->mp)->tr_itruncate,
				resblks, 0, 0, &sc->tp);

	return xfs_trans_alloc_empty(sc->mp, &sc->tp);
}

/* Set us up with a transaction and an empty context. */
int
xchk_setup_fs(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	uint			resblks;

	resblks = xrep_calc_ag_resblks(sc);
	return xchk_trans_alloc(sc, resblks);
}

/* Set us up with AG headers and btree cursors. */
int
xchk_setup_ag_btree(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip,
	bool			force_log)
{
	struct xfs_mount	*mp = sc->mp;
	int			error;

	/*
	 * If the caller asks us to checkpoint the log, do so. This
	 * expensive operation should be performed infrequently and only
	 * as a last resort. Any caller that sets force_log should
	 * document why they need to do so.
	 */
	if (force_log) {
		error = xchk_checkpoint_log(mp);
		if (error)
			return error;
	}

	error = xchk_setup_fs(sc, ip);
	if (error)
		return error;

	return xchk_ag_init(sc, sc->sm->sm_agno, &sc->sa);
}

/* Push everything out of the log onto disk. */
int
xchk_checkpoint_log(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return error;
	xfs_ail_push_all_sync(mp->m_ail);
	return 0;
}

/*
 * Given an inode and the scrub control structure, grab either the
 * inode referenced in the control structure or the inode passed in.
 * The inode is not locked.
 */
int
xchk_get_inode(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip_in)
{
	struct xfs_imap		imap;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_inode	*ip = NULL;
	int			error;

	/* We want to scan the inode we already had opened. */
	if (sc->sm->sm_ino == 0 || sc->sm->sm_ino == ip_in->i_ino) {
		sc->ip = ip_in;
		return 0;
	}

	/* Look up the inode, see if the generation number matches. */
	if (xfs_internal_inum(mp, sc->sm->sm_ino))
		return -ENOENT;
	error = xfs_iget(mp, NULL, sc->sm->sm_ino,
			XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE, 0, &ip);
	switch (error) {
	case -ENOENT:
		/* Inode doesn't exist, just bail out. */
		return error;
	case 0:
		/* Got an inode, continue. */
		break;
	case -EINVAL:
		/*
		 * -EINVAL with IGET_UNTRUSTED could mean one of several
		 * things: userspace gave us an inode number that doesn't
		 * correspond to fs space, or doesn't have an inobt entry;
		 * or it could simply mean that the inode buffer failed the
		 * read verifiers.
		 *
		 * Try just the inode mapping lookup -- if it succeeds, then
		 * the inode buffer verifier failed and something needs fixing.
		 * Otherwise, we really couldn't find it so tell userspace
		 * that it no longer exists.
		 */
		error = xfs_imap(sc->mp, sc->tp, sc->sm->sm_ino, &imap,
				XFS_IGET_UNTRUSTED | XFS_IGET_DONTCACHE);
		if (error)
			return -ENOENT;
		error = -EFSCORRUPTED;
		/* fall through */
	default:
		trace_xchk_op_error(sc,
				XFS_INO_TO_AGNO(mp, sc->sm->sm_ino),
				XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino),
				error, __return_address);
		return error;
	}
	if (VFS_I(ip)->i_generation != sc->sm->sm_gen) {
		xfs_irele(ip);
		return -ENOENT;
	}

	sc->ip = ip;
	return 0;
}

/* Set us up to scrub a file's contents. */
int
xchk_setup_inode_contents(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip,
	unsigned int		resblks)
{
	int			error;

	error = xchk_get_inode(sc, ip);
	if (error)
		return error;

	/* Got the inode, lock it and we're ready to go. */
	sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);
	error = xchk_trans_alloc(sc, resblks);
	if (error)
		goto out;
	sc->ilock_flags |= XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);

out:
	/* scrub teardown will unlock and release the inode for us */
	return error;
}

/*
 * Predicate that decides if we need to evaluate the cross-reference check.
 * If there was an error accessing the cross-reference btree, just delete
 * the cursor and skip the check.
 */
bool
xchk_should_check_xref(
	struct xfs_scrub	*sc,
	int			*error,
	struct xfs_btree_cur	**curpp)
{
	/* No point in xref if we already know we're corrupt. */
	if (xchk_skip_xref(sc->sm))
		return false;

	if (*error == 0)
		return true;

	if (curpp) {
		/* If we've already given up on xref, just bail out. */
		if (!*curpp)
			return false;

		/* xref error, delete cursor and bail out. */
		xfs_btree_del_cursor(*curpp, XFS_BTREE_ERROR);
		*curpp = NULL;
	}

	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL;
	trace_xchk_xref_error(sc, *error, __return_address);

	/*
	 * Errors encountered during cross-referencing with another
	 * data structure should not cause this scrubber to abort.
	 */
	*error = 0;
	return false;
}
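
/*
 * A minimal cross-reference sketch (hypothetical check): the usual
 * shape is
 *
 *	error = xfs_alloc_has_record(sc->sa.bno_cur, agbno, len,
 *			&is_freesp);
 *	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
 *		return;
 *	if (is_freesp)
 *		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
 *
 * so a failure in the *other* btree marks this scrub XFAIL and kills
 * only that cursor instead of aborting the whole scrub.
 */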

/* Run the structure verifiers on in-memory buffers to detect bad memory. */
void
xchk_buffer_recheck(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	xfs_failaddr_t		fa;

	if (bp->b_ops == NULL) {
		xchk_block_set_corrupt(sc, bp);
		return;
	}
	if (bp->b_ops->verify_struct == NULL) {
		xchk_set_incomplete(sc);
		return;
	}
	fa = bp->b_ops->verify_struct(bp);
	if (!fa)
		return;
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_block_error(sc, bp->b_bn, fa);
}

/*
 * Scrub the attr/data forks of a metadata inode. The metadata inode must be
 * pointed to by sc->ip and the ILOCK must be held.
 */
int
xchk_metadata_inode_forks(
	struct xfs_scrub	*sc)
{
	__u32			smtype;
	bool			shared;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return 0;

	/* Metadata inodes don't live on the rt device. */
	if (sc->ip->i_d.di_flags & XFS_DIFLAG_REALTIME) {
		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		return 0;
	}

	/* They should never participate in reflink. */
	if (xfs_is_reflink_inode(sc->ip)) {
		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		return 0;
	}

	/* They also should never have extended attributes. */
	if (xfs_inode_hasattr(sc->ip)) {
		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		return 0;
	}

	/* Invoke the data fork scrubber. */
	smtype = sc->sm->sm_type;
	sc->sm->sm_type = XFS_SCRUB_TYPE_BMBTD;
	error = xchk_bmap_data(sc);
	sc->sm->sm_type = smtype;
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/* Look for incorrect shared blocks. */
	if (xfs_sb_version_hasreflink(&sc->mp->m_sb)) {
		error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
				&shared);
		if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0,
				&error))
			return error;
		if (shared)
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
	}

	return error;
}

/*
 * Try to lock an inode in violation of the usual locking order rules. For
 * example, trying to get the IOLOCK while in transaction context, or just
 * plain breaking AG-order or inode-order inode locking rules. Either way,
 * the only way to avoid an ABBA deadlock is to use trylock and back off if
 * we can't.
 */
int
xchk_ilock_inverted(
	struct xfs_inode	*ip,
	uint			lock_mode)
{
	int			i;

	for (i = 0; i < 20; i++) {
		if (xfs_ilock_nowait(ip, lock_mode))
			return 0;
		delay(1);
	}
	return -EDEADLOCK;
}
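
/*
 * A minimal usage sketch (hypothetical caller; dp is an assumed
 * second inode): a scrubber taking a second ILOCK out of order does:
 *
 *	error = xchk_ilock_inverted(dp, XFS_ILOCK_SHARED);
 *	if (error)
 *		goto out_rele;
 *
 * On -EDEADLOCK the caller is expected to back off -- drop its own
 * locks or restart the scrub -- rather than block forever.
 */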

/* Pause background reaping of resources. */
void
xchk_stop_reaping(
	struct xfs_scrub	*sc)
{
	sc->flags |= XCHK_REAPING_DISABLED;
	xfs_stop_block_reaping(sc->mp);
}

/* Restart background reaping of resources. */
void
xchk_start_reaping(
	struct xfs_scrub	*sc)
{
	xfs_start_block_reaping(sc->mp);
	sc->flags &= ~XCHK_REAPING_DISABLED;
}