// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_sb.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "scrub/scrub.h"
#include "scrub/common.h"

/* Superblock */

/* Cross-reference with the other btrees. */
STATIC void
xchk_superblock_xref(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_SB_BLOCK(mp);

	error = xchk_ag_init_existing(sc, agno, &sc->sa);
	if (!xchk_xref_process_error(sc, agno, agbno, &error))
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}

/*
 * Scrub the filesystem superblock.
 *
 * Note: We do /not/ attempt to check AG 0's superblock.  Mount is
 * responsible for validating all the geometry information in sb 0, so
 * if the filesystem is capable of initiating online scrub, then clearly
 * sb 0 is ok and we can use its information to check everything else.
 */
int
xchk_superblock(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*bp;
	struct xfs_dsb		*sb;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	uint32_t		v2_ok;
	__be32			features_mask;
	int			error;
	__be16			vernum_mask;

	agno = sc->sm->sm_agno;
	if (agno == 0)
		return 0;

	/*
	 * Grab an active reference to the perag structure.  If we can't get
	 * it, we're racing with something that's tearing down the AG, so
	 * signal that the AG no longer exists.
	 */
	pag = xfs_perag_get(mp, agno);
	if (!pag)
		return -ENOENT;

	error = xfs_sb_read_secondary(mp, sc->tp, agno, &bp);
	/*
	 * The superblock verifier can return several different error codes
	 * if it thinks the superblock doesn't look right.  For a mount these
	 * would all get bounced back to userspace, but if we're here then the
	 * fs mounted successfully, which means that this secondary superblock
	 * is simply incorrect.  Treat all these codes the same way we treat
	 * any corruption.
	 */
	switch (error) {
	case -EINVAL:	/* also -EWRONGFS */
	case -ENOSYS:
	case -EFBIG:
		error = -EFSCORRUPTED;
		fallthrough;
	default:
		break;
	}
	if (!xchk_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
		goto out_pag;

	sb = bp->b_addr;

	/*
	 * Verify the geometries match.  Fields that are permanently
	 * set by mkfs are checked; fields that can be updated later
	 * (and are not propagated to backup superblocks) are preen
	 * checked.
	 */
	if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
		xchk_block_set_corrupt(sc, bp);

	if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that are set at mkfs time. */
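	/* ~XFS_SB_VERSION_OKBITS makes unknown version bits match sb 0 too. */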
	vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
				  XFS_SB_VERSION_NUMBITS |
				  XFS_SB_VERSION_ALIGNBIT |
				  XFS_SB_VERSION_DALIGNBIT |
				  XFS_SB_VERSION_SHAREDBIT |
				  XFS_SB_VERSION_LOGV2BIT |
				  XFS_SB_VERSION_SECTORBIT |
				  XFS_SB_VERSION_EXTFLGBIT |
				  XFS_SB_VERSION_DIRV2BIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that can be set after mkfs time. */
	vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
				  XFS_SB_VERSION_NLINKBIT |
				  XFS_SB_VERSION_QUOTABIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
		xchk_block_set_corrupt(sc, bp);

	if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the summary counters since we track them in memory anyway.
	 * sb_icount, sb_ifree, sb_fdblocks, sb_frextents
	 */

	if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the quota flags since repair will force quotacheck.
	 * sb_qflags
	 */

	if (sb->sb_flags != mp->m_sb.sb_flags)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
		xchk_block_set_corrupt(sc, bp);

	/* Do we see any invalid bits in sb_features2? */
	if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
		if (sb->sb_features2 != 0)
			xchk_block_set_corrupt(sc, bp);
	} else {
		v2_ok = XFS_SB_VERSION2_OKBITS;
		if (xfs_sb_is_v5(&mp->m_sb))
			v2_ok |= XFS_SB_VERSION2_CRCBIT;

		if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
			xchk_block_set_corrupt(sc, bp);

		if (sb->sb_features2 != sb->sb_bad_features2)
			xchk_block_set_preen(sc, bp);
	}

	/* Check sb_features2 flags that are set at mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT |
				    XFS_SB_VERSION2_PROJID32BIT |
				    XFS_SB_VERSION2_CRCBIT |
				    XFS_SB_VERSION2_FTYPE);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_features2 flags that can be set after mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	if (!xfs_has_crc(mp)) {
		/* all v5 fields must be zero */
		if (memchr_inv(&sb->sb_features_compat, 0,
				sizeof(struct xfs_dsb) -
				offsetof(struct xfs_dsb, sb_features_compat)))
			xchk_block_set_corrupt(sc, bp);
	} else {
		/* Check compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN);
		if ((sb->sb_features_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_compat) & features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check ro compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN |
					    XFS_SB_FEAT_RO_COMPAT_FINOBT |
					    XFS_SB_FEAT_RO_COMPAT_RMAPBT |
					    XFS_SB_FEAT_RO_COMPAT_REFLINK);
		if ((sb->sb_features_ro_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_ro_compat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN |
					    XFS_SB_FEAT_INCOMPAT_FTYPE |
					    XFS_SB_FEAT_INCOMPAT_SPINODES |
					    XFS_SB_FEAT_INCOMPAT_META_UUID);
		if ((sb->sb_features_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_incompat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check log incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN);
		if ((sb->sb_features_log_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_log_incompat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Don't care about sb_crc */

		if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
			xchk_block_set_corrupt(sc, bp);

		if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
			xchk_block_set_preen(sc, bp);

		/* Don't care about sb_lsn */
	}

	if (xfs_has_metauuid(mp)) {
		/* The metadata UUID must be the same for all supers */
		if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
			xchk_block_set_corrupt(sc, bp);
	}

	/* Everything else must be zero. */
	if (memchr_inv(sb + 1, 0,
			BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
		xchk_block_set_corrupt(sc, bp);

	xchk_superblock_xref(sc, bp);
out_pag:
	xfs_perag_put(pag);
	return error;
}
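
/*
 * Example (illustrative sketch only, not part of this file): userspace
 * reaches this scrubber through the XFS_IOC_SCRUB_METADATA ioctl.
 * Assuming the uapi definitions from xfs_fs.h, checking the secondary
 * superblock in AG 1 might look like:
 *
 *	struct xfs_scrub_metadata sm = {
 *		.sm_type = XFS_SCRUB_TYPE_SB,
 *		.sm_agno = 1,		(sb 0 is validated at mount time)
 *	};
 *	int fd = open("/mnt", O_RDONLY);	(any fd inside the fs)
 *
 *	if (ioctl(fd, XFS_IOC_SCRUB_METADATA, &sm) == 0 &&
 *	    (sm.sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
 *		fprintf(stderr, "secondary sb needs repair\n");
 */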

/* AGF */

/* Tally freespace record lengths. */
STATIC int
xchk_agf_record_bno_lengths(
	struct xfs_btree_cur		*cur,
	const struct xfs_alloc_rec_incore *rec,
	void				*priv)
{
	xfs_extlen_t			*blocks = priv;

	(*blocks) += rec->ar_blockcount;
	return 0;
}

/* Check agf_freeblks */
static inline void
xchk_agf_xref_freeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_extlen_t		blocks = 0;
	int			error;

	if (!sc->sa.bno_cur)
		return;

	error = xfs_alloc_query_all(sc->sa.bno_cur,
			xchk_agf_record_bno_lengths, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_freeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross reference the AGF with the cntbt (freespace by length btree) */
static inline void
xchk_agf_xref_cntbt(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_agblock_t		agbno;
	xfs_extlen_t		blocks;
	int			have;
	int			error;

	if (!sc->sa.cnt_cur)
		return;

	/* Any freespace at all? */
	error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have) {
		if (agf->agf_freeblks != cpu_to_be32(0))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
		return;
	}

	/* Check agf_longest */
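	/*
	 * The LE lookup above positioned the cursor at the largest record
	 * in the by-length btree, so its length should equal agf_longest.
	 */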
	error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have || blocks != be32_to_cpu(agf->agf_longest))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Check the btree block counts in the AGF against the btrees. */
STATIC void
xchk_agf_xref_btreeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		blocks;
	xfs_agblock_t		btreeblks;
	int			error;

	/* agf_btreeblks didn't exist before lazysbcount */
	if (!xfs_has_lazysbcount(sc->mp))
		return;

	/* Check agf_rmap_blocks; set up for agf_btreeblks check */
	if (sc->sa.rmap_cur) {
		error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
			return;
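		/* Each btree's root block is not counted in agf_btreeblks. */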
		btreeblks = blocks - 1;
		if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
	} else {
		btreeblks = 0;
	}

	/*
	 * No rmap cursor; we can't xref if we have the rmapbt feature.
	 * We also can't do it if we're missing the free space btree cursors.
	 */
	if ((xfs_has_rmapbt(mp) && !sc->sa.rmap_cur) ||
	    !sc->sa.bno_cur || !sc->sa.cnt_cur)
		return;

	/* Check agf_btreeblks */
	error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	btreeblks += blocks - 1;

	error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	btreeblks += blocks - 1;

	if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Check agf_refcount_blocks against tree size */
static inline void
xchk_agf_xref_refcblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_agblock_t		blocks;
	int			error;

	if (!sc->sa.refc_cur)
		return;

	error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agf_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGF_BLOCK(mp);

	xchk_ag_btcur_init(sc, &sc->sa);

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_agf_xref_freeblks(sc);
	xchk_agf_xref_cntbt(sc);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_agf_xref_btreeblks(sc);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_agf_xref_refcblks(sc);

	/* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGF. */
int
xchk_agf(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agf		*agf;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agblock_t		agfl_first;
	xfs_agblock_t		agfl_last;
	xfs_agblock_t		agfl_count;
	xfs_agblock_t		fl_count;
	int			level;
	int			error = 0;

	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agf_bp);

	agf = sc->sa.agf_bp->b_addr;
	pag = sc->sa.pag;

	/* Check the AG length */
	eoag = be32_to_cpu(agf->agf_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Check the AGF btree roots and levels */
	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	if (xfs_has_rmapbt(mp)) {
		agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	if (xfs_has_reflink(mp)) {
		agbno = be32_to_cpu(agf->agf_refcount_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_refcount_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	/* Check the AGFL counters */
	agfl_first = be32_to_cpu(agf->agf_flfirst);
	agfl_last = be32_to_cpu(agf->agf_fllast);
	agfl_count = be32_to_cpu(agf->agf_flcount);
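	/*
	 * The AGFL is a circular buffer, so the active range of the free
	 * list can wrap past the end; handle both layouts when computing
	 * the expected flcount.
	 */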
	if (agfl_last > agfl_first)
		fl_count = agfl_last - agfl_first + 1;
	else
		fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1;
	if (agfl_count != 0 && fl_count != agfl_count)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Do the incore counters match? */
	if (pag->pagf_freeblks != be32_to_cpu(agf->agf_freeblks))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	if (pag->pagf_flcount != be32_to_cpu(agf->agf_flcount))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	if (xfs_has_lazysbcount(sc->mp) &&
	    pag->pagf_btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	xchk_agf_xref(sc);
out:
	return error;
}

/* AGFL */

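/*
 * Walk context for the AGFL: collect every free list block number in
 * entries[] so that the caller can sort them and look for duplicates.
 */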
struct xchk_agfl_info {
	unsigned int		sz_entries;
	unsigned int		nr_entries;
	xfs_agblock_t		*entries;
	struct xfs_scrub	*sc;
};

/* Cross-reference with the other btrees. */
STATIC void
xchk_agfl_block_xref(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_AG);
	xchk_xref_is_not_shared(sc, agbno, 1);
}

/* Scrub an AGFL block. */
STATIC int
xchk_agfl_block(
	struct xfs_mount	*mp,
	xfs_agblock_t		agbno,
	void			*priv)
{
	struct xchk_agfl_info	*sai = priv;
	struct xfs_scrub	*sc = sai->sc;
	xfs_agnumber_t		agno = sc->sa.pag->pag_agno;

	if (xfs_verify_agbno(mp, agno, agbno) &&
	    sai->nr_entries < sai->sz_entries)
		sai->entries[sai->nr_entries++] = agbno;
	else
		xchk_block_set_corrupt(sc, sc->sa.agfl_bp);

	xchk_agfl_block_xref(sc, agbno);

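	/* Stop the AGFL walk early; the caller treats -ECANCELED as success. */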
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -ECANCELED;

	return 0;
}

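/* Comparison helper for sort(); used below to find duplicate AGFL blocks. */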
static int
xchk_agblock_cmp(
	const void		*pa,
	const void		*pb)
{
	const xfs_agblock_t	*a = pa;
	const xfs_agblock_t	*b = pb;

	return (int)*a - (int)*b;
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agfl_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGFL_BLOCK(mp);

	xchk_ag_btcur_init(sc, &sc->sa);

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/*
	 * Scrub teardown will take care of sc->sa for us.  Leave sc->sa
	 * active so that the agfl block xref can use it too.
	 */
}

/* Scrub the AGFL. */
int
xchk_agfl(
	struct xfs_scrub	*sc)
{
	struct xchk_agfl_info	sai;
	struct xfs_agf		*agf;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	unsigned int		agflcount;
	unsigned int		i;
	int			error;

	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
		goto out;
	if (!sc->sa.agf_bp)
		return -EFSCORRUPTED;
	xchk_buffer_recheck(sc, sc->sa.agfl_bp);

	xchk_agfl_xref(sc);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Allocate buffer to ensure uniqueness of AGFL entries. */
	agf = sc->sa.agf_bp->b_addr;
	agflcount = be32_to_cpu(agf->agf_flcount);
	if (agflcount > xfs_agfl_size(sc->mp)) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out;
	}
	memset(&sai, 0, sizeof(sai));
	sai.sc = sc;
	sai.sz_entries = agflcount;
	sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount,
			KM_MAYFAIL);
	if (!sai.entries) {
		error = -ENOMEM;
		goto out;
	}

	/* Check the blocks in the AGFL. */
	error = xfs_agfl_walk(sc->mp, sc->sa.agf_bp->b_addr,
			sc->sa.agfl_bp, xchk_agfl_block, &sai);
	if (error == -ECANCELED) {
		error = 0;
		goto out_free;
	}
	if (error)
		goto out_free;

	if (agflcount != sai.nr_entries) {
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out_free;
	}

	/* Sort entries, check for duplicates. */
	sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
			xchk_agblock_cmp, NULL);
	for (i = 1; i < sai.nr_entries; i++) {
		if (sai.entries[i] == sai.entries[i - 1]) {
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
			break;
		}
	}

out_free:
	kmem_free(sai.entries);
out:
	return error;
}

/* AGI */

/* Check agi_count/agi_freecount */
static inline void
xchk_agi_xref_icounts(
	struct xfs_scrub	*sc)
{
	struct xfs_agi		*agi = sc->sa.agi_bp->b_addr;
	xfs_agino_t		icount;
	xfs_agino_t		freecount;
	int			error;

	if (!sc->sa.ino_cur)
		return;

	error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
		return;
	if (be32_to_cpu(agi->agi_count) != icount ||
	    be32_to_cpu(agi->agi_freecount) != freecount)
		xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
}

/* Check agi_[fi]blocks against tree size */
static inline void
xchk_agi_xref_fiblocks(
	struct xfs_scrub	*sc)
{
	struct xfs_agi		*agi = sc->sa.agi_bp->b_addr;
	xfs_agblock_t		blocks;
	int			error = 0;

	if (!xfs_has_inobtcounts(sc->mp))
		return;

	if (sc->sa.ino_cur) {
		error = xfs_btree_count_blocks(sc->sa.ino_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
			return;
		if (blocks != be32_to_cpu(agi->agi_iblocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (sc->sa.fino_cur) {
		error = xfs_btree_count_blocks(sc->sa.fino_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur))
			return;
		if (blocks != be32_to_cpu(agi->agi_fblocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
	}
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_agi_xref(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		agbno;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGI_BLOCK(mp);

	xchk_ag_btcur_init(sc, &sc->sa);

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_agi_xref_icounts(sc);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_agi_xref_fiblocks(sc);

	/* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGI. */
int
xchk_agi(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agi		*agi;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agino_t		agino;
	xfs_agino_t		first_agino;
	xfs_agino_t		last_agino;
	xfs_agino_t		icount;
	int			i;
	int			level;
	int			error = 0;

	error = xchk_ag_read_headers(sc, agno, &sc->sa);
	if (!xchk_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agi_bp);

	agi = sc->sa.agi_bp->b_addr;
	pag = sc->sa.pag;

	/* Check the AG length */
	eoag = be32_to_cpu(agi->agi_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check btree roots and levels */
	agbno = be32_to_cpu(agi->agi_root);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	level = be32_to_cpu(agi->agi_level);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	if (xfs_has_finobt(mp)) {
		agbno = be32_to_cpu(agi->agi_free_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);

		level = be32_to_cpu(agi->agi_free_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	/* Check inode counters */
	xfs_agino_range(mp, agno, &first_agino, &last_agino);
	icount = be32_to_cpu(agi->agi_count);
	if (icount > last_agino - first_agino + 1 ||
	    icount < be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check inode pointers */
	agino = be32_to_cpu(agi->agi_newino);
	if (!xfs_verify_agino_or_null(mp, agno, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	agino = be32_to_cpu(agi->agi_dirino);
	if (!xfs_verify_agino_or_null(mp, agno, agino))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check unlinked inode buckets */
	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		agino = be32_to_cpu(agi->agi_unlinked[i]);
		if (!xfs_verify_agino_or_null(mp, agno, agino))
			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (agi->agi_pad32 != cpu_to_be32(0))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Do the incore counters match? */
	if (pag->pagi_count != be32_to_cpu(agi->agi_count))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
	if (pag->pagi_freecount != be32_to_cpu(agi->agi_freecount))
		xchk_block_set_corrupt(sc, sc->sa.agi_bp);

	xchk_agi_xref(sc);
out:
	return error;
}