/fs/xfs/libxfs/
xfs_ag_resv.c:
     70  struct xfs_perag *pag,  in xfs_ag_resv_critical() (argument)
     78  avail = pag->pagf_freeblks - pag->pag_rmapbt_resv.ar_reserved;  in xfs_ag_resv_critical()
     79  orig = pag->pag_meta_resv.ar_asked;  in xfs_ag_resv_critical()
     82  avail = pag->pagf_freeblks + pag->pagf_flcount -  in xfs_ag_resv_critical()
     83  pag->pag_meta_resv.ar_reserved;  in xfs_ag_resv_critical()
     84  orig = pag->pag_rmapbt_resv.ar_asked;  in xfs_ag_resv_critical()
     91  trace_xfs_ag_resv_critical(pag, type, avail);  in xfs_ag_resv_critical()
     95  pag->pag_mount, XFS_ERRTAG_AG_RESV_CRITICAL);  in xfs_ag_resv_critical()
    104  struct xfs_perag *pag,  in xfs_ag_resv_needed() (argument)
    109  len = pag->pag_meta_resv.ar_reserved + pag->pag_rmapbt_resv.ar_reserved;  in xfs_ag_resv_needed()
     …
xfs_ag.c:
     46  struct xfs_perag *pag;  in xfs_perag_get() (local)
     50  pag = radix_tree_lookup(&mp->m_perag_tree, agno);  in xfs_perag_get()
     51  if (pag) {  in xfs_perag_get()
     52  ASSERT(atomic_read(&pag->pag_ref) >= 0);  in xfs_perag_get()
     53  ref = atomic_inc_return(&pag->pag_ref);  in xfs_perag_get()
     57  return pag;  in xfs_perag_get()
     69  struct xfs_perag *pag;  in xfs_perag_get_tag() (local)
     75  (void **)&pag, first, 1, tag);  in xfs_perag_get_tag()
     80  ref = atomic_inc_return(&pag->pag_ref);  in xfs_perag_get_tag()
     82  trace_xfs_perag_get_tag(mp, pag->pag_agno, ref, _RET_IP_);  in xfs_perag_get_tag()
     …
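xfs_perag_get() looks the AG up in the per-mount radix tree and takes a reference; xfs_perag_put() (declared in xfs_ag.h below) drops it. A minimal sketch of that pattern, assuming a valid AG number so the NULL return path can be ignored; the helper name is hypothetical and only the two xfs_perag_* calls come from this listing:

    static xfs_extlen_t
    example_ag_freeblks(struct xfs_mount *mp, xfs_agnumber_t agno)
    {
            struct xfs_perag *pag;
            xfs_extlen_t freeblks;

            pag = xfs_perag_get(mp, agno);  /* takes a reference on pag->pag_ref */
            freeblks = pag->pagf_freeblks;  /* stable only while the reference is held */
            xfs_perag_put(pag);             /* drop the reference */
            return freeblks;
    }

xfs_filestream_peek_ag() further down in this listing follows the same get/read/put shape.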
xfs_ag.h:
    115  void xfs_perag_put(struct xfs_perag *pag);
    122  struct xfs_perag *pag,  in xfs_perag_next() (argument)
    126  struct xfs_mount *mp = pag->pag_mount;  in xfs_perag_next()
    128  *agno = pag->pag_agno + 1;  in xfs_perag_next()
    129  xfs_perag_put(pag);  in xfs_perag_next()
    135  #define for_each_perag_range(mp, agno, end_agno, pag) \  (argument)
    136  for ((pag) = xfs_perag_get((mp), (agno)); \
    137  (pag) != NULL; \
    138  (pag) = xfs_perag_next((pag), &(agno), (end_agno)))
    140  #define for_each_perag_from(mp, agno, pag) \  (argument)
     …
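The for_each_perag_range() and for_each_perag_from() macros wrap the get/next/put sequence above so a loop holds exactly one perag reference at a time. A minimal sketch under that assumption; the helper name and the summed counter are illustrative only:

    static xfs_extlen_t
    example_sum_range_freeblks(struct xfs_mount *mp, xfs_agnumber_t start_agno,
                    xfs_agnumber_t end_agno)
    {
            struct xfs_perag *pag;
            xfs_agnumber_t agno = start_agno;
            xfs_extlen_t freeblks = 0;

            for_each_perag_range(mp, agno, end_agno, pag) {
                    /* one pag reference is held for the duration of each pass */
                    freeblks += pag->pagf_freeblks;
            }
            /*
             * Breaking out of the loop early leaves the current reference
             * held; such callers must call xfs_perag_put() themselves, as
             * xchk_fscount_warmup() does in scrub/fscounters.c below.
             */
            return freeblks;
    }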
xfs_ag_resv.h:
      9  int xfs_ag_resv_free(struct xfs_perag *pag);
     10  int xfs_ag_resv_init(struct xfs_perag *pag, struct xfs_trans *tp);
     12  bool xfs_ag_resv_critical(struct xfs_perag *pag, enum xfs_ag_resv_type type);
     13  xfs_extlen_t xfs_ag_resv_needed(struct xfs_perag *pag,
     16  void xfs_ag_resv_alloc_extent(struct xfs_perag *pag, enum xfs_ag_resv_type type,
     18  void xfs_ag_resv_free_extent(struct xfs_perag *pag, enum xfs_ag_resv_type type,
     23  struct xfs_perag *pag,  in xfs_perag_resv() (argument)
     28  return &pag->pag_meta_resv;  in xfs_perag_resv()
     30  return &pag->pag_rmapbt_resv;  in xfs_perag_resv()
     47  struct xfs_perag *pag;  in xfs_ag_resv_rmapbt_alloc() (local)
     …
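The inline xfs_perag_resv() indexed at line 23 picks one of the two per-AG reservation structures by type. The sketch below is reassembled from the fragments above; the return type and case labels are inferred from the XFS_AG_RESV_* values and ar_* fields used elsewhere in this listing, so the real header may differ in detail:

    static inline struct xfs_ag_resv *
    xfs_perag_resv(struct xfs_perag *pag, enum xfs_ag_resv_type type)
    {
            switch (type) {
            case XFS_AG_RESV_METADATA:
                    return &pag->pag_meta_resv;     /* line 28 above */
            case XFS_AG_RESV_RMAPBT:
                    return &pag->pag_rmapbt_resv;   /* line 30 above */
            default:
                    return NULL;
            }
    }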
xfs_ialloc.c:
    108  xfs_agnumber_t agno = cur->bc_ag.pag->pag_agno;  in xfs_inobt_get_rec()
    175  struct xfs_perag *pag,  in xfs_inobt_insert() (argument)
    185  cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, btnum);  in xfs_inobt_insert()
    245  ASSERT(freecount == cur->bc_ag.pag->pagi_freecount);  in xfs_check_agi_freecount()
    520  struct xfs_perag *pag,  in xfs_inobt_insert_sprec() (argument)
    530  cur = xfs_inobt_init_cursor(mp, tp, agbp, pag, btnum);  in xfs_inobt_insert_sprec()
    577  trace_xfs_irec_merge_pre(mp, pag->pag_agno, rec.ir_startino,  in xfs_inobt_insert_sprec()
    584  trace_xfs_irec_merge_post(mp, pag->pag_agno, nrec->ir_startino,  in xfs_inobt_insert_sprec()
    614  struct xfs_perag *pag)  in xfs_ialloc_ag_alloc() (argument)
    665  args.fsbno = XFS_AGB_TO_FSB(args.mp, pag->pag_agno, args.agbno);  in xfs_ialloc_ag_alloc()
     …
xfs_alloc.c:
    233  xfs_agnumber_t agno = cur->bc_ag.pag->pag_agno;  in xfs_alloc_get_rec()
    415  ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=  in xfs_alloc_fix_len()
    779  args->agbp, args->pag, XFS_BTNUM_CNT);  in xfs_alloc_cur_setup()
    789  args->agbp, args->pag, XFS_BTNUM_BNO);  in xfs_alloc_cur_setup()
    792  args->agbp, args->pag, XFS_BTNUM_BNO);  in xfs_alloc_cur_setup()
   1066  xfs_extent_busy_reuse(args->mp, args->pag, fbno, 1,  in xfs_alloc_ag_vextent_small()
   1092  error = xfs_rmap_free(args->tp, args->agbp, args->pag, fbno, 1,  in xfs_alloc_ag_vextent_small()
   1169  error = xfs_rmap_alloc(args->tp, args->agbp, args->pag,  in xfs_alloc_ag_vextent()
   1181  ASSERT(!xfs_extent_busy_search(args->mp, args->pag,  in xfs_alloc_ag_vextent()
   1185  xfs_ag_resv_alloc_extent(args->pag, args->resv, args);  in xfs_alloc_ag_vextent()
     …
xfs_refcount_btree.c:
     29  cur->bc_ag.agbp, cur->bc_ag.pag);  in xfs_refcountbt_dup_cursor()
     40  struct xfs_perag *pag = agbp->b_pag;  in xfs_refcountbt_set_root() (local)
     46  pag->pagf_refcount_level += inc;  in xfs_refcountbt_set_root()
     68  args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.pag->pag_agno,  in xfs_refcountbt_alloc_block()
     77  trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_ag.pag->pag_agno,  in xfs_refcountbt_alloc_block()
     83  ASSERT(args.agno == cur->bc_ag.pag->pag_agno);  in xfs_refcountbt_alloc_block()
    108  trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_ag.pag->pag_agno,  in xfs_refcountbt_free_block()
    173  ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));  in xfs_refcountbt_init_ptr_from_cur()
    205  struct xfs_perag *pag = bp->b_pag;  in xfs_refcountbt_verify() (local)
    219  if (pag && pag->pagf_init) {  in xfs_refcountbt_verify()
     …
xfs_rmap_btree.c:
     55  cur->bc_ag.agbp, cur->bc_ag.pag);  in xfs_rmapbt_dup_cursor()
     72  cur->bc_ag.pag->pagf_levels[btnum] += inc;  in xfs_rmapbt_set_root()
     86  struct xfs_perag *pag = cur->bc_ag.pag;  in xfs_rmapbt_alloc_block() (local)
     96  trace_xfs_rmapbt_alloc_block(cur->bc_mp, pag->pag_agno, bno, 1);  in xfs_rmapbt_alloc_block()
    102  xfs_extent_busy_reuse(cur->bc_mp, pag, bno, 1, false);  in xfs_rmapbt_alloc_block()
    108  xfs_ag_resv_rmapbt_alloc(cur->bc_mp, pag->pag_agno);  in xfs_rmapbt_alloc_block()
    121  struct xfs_perag *pag = cur->bc_ag.pag;  in xfs_rmapbt_free_block() (local)
    126  trace_xfs_rmapbt_free_block(cur->bc_mp, pag->pag_agno,  in xfs_rmapbt_free_block()
    134  xfs_extent_busy_insert(cur->bc_tp, pag, bno, 1,  in xfs_rmapbt_free_block()
    137  xfs_ag_resv_free_extent(pag, XFS_AG_RESV_RMAPBT, NULL, 1);  in xfs_rmapbt_free_block()
     …
xfs_refcount.c:
     50  trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno, bno,  in xfs_refcount_lookup_le()
     67  trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno, bno,  in xfs_refcount_lookup_ge()
     84  trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_ag.pag->pag_agno, bno,  in xfs_refcount_lookup_eq()
    112  xfs_agnumber_t agno = cur->bc_ag.pag->pag_agno;  in xfs_refcount_get_rec()
    123  agno = cur->bc_ag.pag->pag_agno;  in xfs_refcount_get_rec()
    148  trace_xfs_refcount_get(cur->bc_mp, cur->bc_ag.pag->pag_agno, irec);  in xfs_refcount_get_rec()
    173  trace_xfs_refcount_update(cur->bc_mp, cur->bc_ag.pag->pag_agno, irec);  in xfs_refcount_update()
    180  cur->bc_ag.pag->pag_agno, error, _RET_IP_);  in xfs_refcount_update()
    197  trace_xfs_refcount_insert(cur->bc_mp, cur->bc_ag.pag->pag_agno, irec);  in xfs_refcount_insert()
    212  cur->bc_ag.pag->pag_agno, error, _RET_IP_);  in xfs_refcount_insert()
     …
xfs_ialloc_btree.c:
     38  cur->bc_ag.agbp, cur->bc_ag.pag, cur->bc_btnum);  in xfs_inobt_dup_cursor()
    105  args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_ag.pag->pag_agno, sbno);  in __xfs_inobt_alloc_block()
    238  ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno));  in xfs_inobt_init_ptr_from_cur()
    250  ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agi->agi_seqno));  in xfs_finobt_init_ptr_from_cur()
    430  struct xfs_perag *pag,  in xfs_inobt_init_common() (argument)
    453  atomic_inc(&pag->pag_ref);  in xfs_inobt_init_common()
    454  cur->bc_ag.pag = pag;  in xfs_inobt_init_common()
    464  struct xfs_perag *pag,  in xfs_inobt_init_cursor() (argument)
    470  cur = xfs_inobt_init_common(mp, tp, pag, btnum);  in xfs_inobt_init_cursor()
    484  struct xfs_perag *pag,  in xfs_inobt_stage_cursor() (argument)
     …
xfs_health.h:
    112  void xfs_ag_mark_sick(struct xfs_perag *pag, unsigned int mask);
    113  void xfs_ag_mark_healthy(struct xfs_perag *pag, unsigned int mask);
    114  void xfs_ag_measure_sickness(struct xfs_perag *pag, unsigned int *sick,
    145  xfs_ag_has_sickness(struct xfs_perag *pag, unsigned int mask)  in xfs_ag_has_sickness() (argument)
    149  xfs_ag_measure_sickness(pag, &sick, &checked);  in xfs_ag_has_sickness()
    175  xfs_ag_is_healthy(struct xfs_perag *pag)  in xfs_ag_is_healthy() (argument)
    177  return !xfs_ag_has_sickness(pag, -1U);  in xfs_ag_is_healthy()
    187  void xfs_ag_geom_health(struct xfs_perag *pag, struct xfs_ag_geometry *ageo);
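xfs_ag_measure_sickness(), xfs_ag_has_sickness() and xfs_ag_is_healthy() form the query side of per-AG health tracking; xfs_ag_is_healthy() simply asks about every tracked structure with a mask of -1U, as shown at line 177. A minimal caller-side sketch; the helper name and the yes/no policy are hypothetical:

    static bool
    example_ag_needs_repair(struct xfs_perag *pag)
    {
            unsigned int sick, checked;

            /* Snapshot the sick/checked bitmasks, as xfs_health_unmount() does below. */
            xfs_ag_measure_sickness(pag, &sick, &checked);

            /* Anything still set in the sick mask was found damaged and not repaired. */
            return sick != 0;
    }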
xfs_alloc_btree.c:
     29  cur->bc_ag.agbp, cur->bc_ag.pag, cur->bc_btnum);  in xfs_allocbt_dup_cursor()
     46  cur->bc_ag.pag->pagf_levels[btnum] += inc;  in xfs_allocbt_set_root()
    113  struct xfs_perag *pag;  in xfs_allocbt_update_lastrec() (local)
    157  pag = cur->bc_ag.agbp->b_pag;  in xfs_allocbt_update_lastrec()
    158  pag->pagf_longest = be32_to_cpu(len);  in xfs_allocbt_update_lastrec()
    225  ASSERT(cur->bc_ag.pag->pag_agno == be32_to_cpu(agf->agf_seqno));  in xfs_allocbt_init_ptr_from_cur()
    290  struct xfs_perag *pag = bp->b_pag;  in xfs_allocbt_verify() (local)
    316  if (pag && pag->pagf_init) {  in xfs_allocbt_verify()
    317  if (level >= pag->pagf_levels[btnum])  in xfs_allocbt_verify()
    473  struct xfs_perag *pag,  in xfs_allocbt_init_common() (argument)
     …
xfs_rmap.c:
     84  trace_xfs_rmap_update(cur->bc_mp, cur->bc_ag.pag->pag_agno,  in xfs_rmap_update()
     96  cur->bc_ag.pag->pag_agno, error, _RET_IP_);  in xfs_rmap_update()
    112  trace_xfs_rmap_insert(rcur->bc_mp, rcur->bc_ag.pag->pag_agno, agbno,  in xfs_rmap_insert()
    138  rcur->bc_ag.pag->pag_agno, error, _RET_IP_);  in xfs_rmap_insert()
    154  trace_xfs_rmap_delete(rcur->bc_mp, rcur->bc_ag.pag->pag_agno, agbno,  in xfs_rmap_delete()
    175  rcur->bc_ag.pag->pag_agno, error, _RET_IP_);  in xfs_rmap_delete()
    202  xfs_agnumber_t agno = cur->bc_ag.pag->pag_agno;  in xfs_rmap_get_rec()
    265  cur->bc_ag.pag->pag_agno, rec->rm_startblock,  in xfs_rmap_find_left_neighbor_helper()
    317  cur->bc_ag.pag->pag_agno, bno, 0, owner, offset, flags);  in xfs_rmap_find_left_neighbor()
    325  cur->bc_ag.pag->pag_agno, irec->rm_startblock,  in xfs_rmap_find_left_neighbor()
     …
/fs/xfs/
xfs_extent_busy.c:
     25  struct xfs_perag *pag,  in xfs_extent_busy_insert() (argument)
     36  new->agno = pag->pag_agno;  in xfs_extent_busy_insert()
     43  trace_xfs_extent_busy(tp->t_mountp, pag->pag_agno, bno, len);  in xfs_extent_busy_insert()
     45  spin_lock(&pag->pagb_lock);  in xfs_extent_busy_insert()
     46  rbp = &pag->pagb_tree.rb_node;  in xfs_extent_busy_insert()
     63  rb_insert_color(&new->rb_node, &pag->pagb_tree);  in xfs_extent_busy_insert()
     66  spin_unlock(&pag->pagb_lock);  in xfs_extent_busy_insert()
     81  struct xfs_perag *pag,  in xfs_extent_busy_search() (argument)
     90  spin_lock(&pag->pagb_lock);  in xfs_extent_busy_search()
     91  rbp = pag->pagb_tree.rb_node;  in xfs_extent_busy_search()
     …
xfs_icache.c:
     48  static int xfs_icwalk_ag(struct xfs_perag *pag,
    204  struct xfs_perag *pag)  in xfs_blockgc_queue() (argument)
    206  struct xfs_mount *mp = pag->pag_mount;  in xfs_blockgc_queue()
    212  if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))  in xfs_blockgc_queue()
    213  queue_delayed_work(pag->pag_mount->m_blockgc_wq,  in xfs_blockgc_queue()
    214  &pag->pag_blockgc_work,  in xfs_blockgc_queue()
    222  struct xfs_perag *pag,  in xfs_perag_set_inode_tag() (argument)
    226  struct xfs_mount *mp = pag->pag_mount;  in xfs_perag_set_inode_tag()
    229  lockdep_assert_held(&pag->pag_ici_lock);  in xfs_perag_set_inode_tag()
    231  was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);  in xfs_perag_set_inode_tag()
     …
xfs_iwalk.c:
     55  struct xfs_perag *pag;  (member)
     98  struct xfs_perag *pag,  in xfs_iwalk_ichunk_ra() (argument)
    114  xfs_btree_reada_bufs(mp, pag->pag_agno, agbno,  in xfs_iwalk_ichunk_ra()
    186  struct xfs_perag *pag = iwag->pag;  in xfs_iwalk_ag_recs() (local)
    194  trace_xfs_iwalk_ag_rec(mp, pag->pag_agno, irec);  in xfs_iwalk_ag_recs()
    200  error = iwag->inobt_walk_fn(mp, tp, pag->pag_agno, irec,  in xfs_iwalk_ag_recs()
    218  ino = XFS_AGINO_TO_INO(mp, pag->pag_agno,  in xfs_iwalk_ag_recs()
    272  struct xfs_perag *pag = iwag->pag;  in xfs_iwalk_ag_start() (local)
    278  error = xfs_inobt_cur(mp, tp, pag, XFS_BTNUM_INO, curpp, agi_bpp);  in xfs_iwalk_ag_start()
    312  iwag->lastino = XFS_AGINO_TO_INO(mp, pag->pag_agno,  in xfs_iwalk_ag_start()
     …
xfs_filestream.c:
     43  struct xfs_perag *pag;  in xfs_filestream_peek_ag() (local)
     46  pag = xfs_perag_get(mp, agno);  in xfs_filestream_peek_ag()
     47  ret = atomic_read(&pag->pagf_fstrms);  in xfs_filestream_peek_ag()
     48  xfs_perag_put(pag);  in xfs_filestream_peek_ag()
     57  struct xfs_perag *pag;  in xfs_filestream_get_ag() (local)
     60  pag = xfs_perag_get(mp, agno);  in xfs_filestream_get_ag()
     61  ret = atomic_inc_return(&pag->pagf_fstrms);  in xfs_filestream_get_ag()
     62  xfs_perag_put(pag);  in xfs_filestream_get_ag()
     71  struct xfs_perag *pag;  in xfs_filestream_put_ag() (local)
     73  pag = xfs_perag_get(mp, agno);  in xfs_filestream_put_ag()
     …
xfs_health.c:
     27  struct xfs_perag *pag;  in xfs_health_unmount() (local)
     37  for_each_perag(mp, agno, pag) {  in xfs_health_unmount()
     38  xfs_ag_measure_sickness(pag, &sick, &checked);  in xfs_health_unmount()
    179  struct xfs_perag *pag,  in xfs_ag_mark_sick() (argument)
    183  trace_xfs_ag_mark_sick(pag->pag_mount, pag->pag_agno, mask);  in xfs_ag_mark_sick()
    185  spin_lock(&pag->pag_state_lock);  in xfs_ag_mark_sick()
    186  pag->pag_sick |= mask;  in xfs_ag_mark_sick()
    187  pag->pag_checked |= mask;  in xfs_ag_mark_sick()
    188  spin_unlock(&pag->pag_state_lock);  in xfs_ag_mark_sick()
    194  struct xfs_perag *pag,  in xfs_ag_mark_healthy() (argument)
     …
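The xfs_ag_mark_sick() fragments above join into a short function: both the sick and the checked masks are updated under pag_state_lock. Reassembled from those fragments only, so any unindexed lines are missing and this may not match the tree exactly:

    void
    xfs_ag_mark_sick(struct xfs_perag *pag, unsigned int mask)
    {
            trace_xfs_ag_mark_sick(pag->pag_mount, pag->pag_agno, mask);

            spin_lock(&pag->pag_state_lock);
            pag->pag_sick |= mask;          /* structure known to be damaged */
            pag->pag_checked |= mask;       /* ...and recorded as examined */
            spin_unlock(&pag->pag_state_lock);
    }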
xfs_inode.c:
     48  STATIC int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
   1276  struct xfs_perag *pag;  in xfs_link() (local)
   1278  pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sip->i_ino));  in xfs_link()
   1279  error = xfs_iunlink_remove(tp, pag, sip);  in xfs_link()
   1280  xfs_perag_put(pag);  in xfs_link()
   1881  struct xfs_perag *pag,  in xfs_iunlink_lookup_backref() (argument)
   1886  iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,  in xfs_iunlink_lookup_backref()
   1898  struct xfs_perag *pag,  in xfs_iunlink_insert_backref() (argument)
   1903  error = rhashtable_insert_fast(&pag->pagi_unlinked_hash,  in xfs_iunlink_insert_backref()
   1927  struct xfs_perag *pag,  in xfs_iunlink_add_backref() (argument)
     …
xfs_fsmap.c:
    161  struct xfs_perag *pag;  /* AG info, if applicable */  (member)
    208  if (!info->pag)  in xfs_getfsmap_is_shared()
    213  cur = xfs_refcountbt_init_cursor(mp, tp, info->agf_bp, info->pag);  in xfs_getfsmap_is_shared()
    315  info->pag ? info->pag->pag_agno : NULLAGNUMBER, rec);  in xfs_getfsmap_helper()
    358  fsb = XFS_AGB_TO_FSB(mp, cur->bc_ag.pag->pag_agno, rec->rm_startblock);  in xfs_getfsmap_datadev_helper()
    376  rec_daddr = XFS_AGB_TO_DADDR(mp, cur->bc_ag.pag->pag_agno,  in xfs_getfsmap_datadev_bnobt_helper()
    584  struct xfs_perag *pag;  in __xfs_getfsmap_datadev() (local)
    621  for_each_perag_range(mp, start_ag, end_ag, pag) {  in __xfs_getfsmap_datadev()
    626  info->pag = pag;  in __xfs_getfsmap_datadev()
    627  if (pag->pag_agno == end_ag) {  in __xfs_getfsmap_datadev()
     …
/fs/xfs/scrub/
fscounters.c:
     70  struct xfs_perag *pag = NULL;  in xchk_fscount_warmup() (local)
     74  for_each_perag(mp, agno, pag) {  in xchk_fscount_warmup()
     77  if (pag->pagi_init && pag->pagf_init)  in xchk_fscount_warmup()
     92  if (!pag->pagi_init || !pag->pagf_init) {  in xchk_fscount_warmup()
    107  if (pag)  in xchk_fscount_warmup()
    108  xfs_perag_put(pag);  in xchk_fscount_warmup()
    175  struct xfs_perag *pag;  in xchk_fscount_aggregate_agcounts() (local)
    186  for_each_perag(mp, agno, pag) {  in xchk_fscount_aggregate_agcounts()
    191  if (!pag->pagi_init || !pag->pagf_init) {  in xchk_fscount_aggregate_agcounts()
    197  fsc->icount += pag->pagi_count;  in xchk_fscount_aggregate_agcounts()
     …
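xchk_fscount_aggregate_agcounts() builds filesystem-wide expectations by summing per-AG counters. The sketch below shows how the indexed fields fit together, not the scrubber's exact control flow; the totals structure and helper name are hypothetical, while pagi_init/pagf_init, pagi_count, pagi_freecount and pagf_freeblks are the fields seen throughout this listing:

    struct example_totals {
            uint64_t icount;        /* allocated inodes */
            uint64_t ifree;         /* free inodes */
            uint64_t fdblocks;      /* free data blocks */
    };

    static void
    example_sum_ag_counters(struct xfs_mount *mp, struct example_totals *tot)
    {
            struct xfs_perag *pag;
            xfs_agnumber_t agno;

            for_each_perag(mp, agno, pag) {
                    /* per-AG counters are only valid once the headers are read in */
                    if (!pag->pagi_init || !pag->pagf_init)
                            continue;
                    tot->icount += pag->pagi_count;
                    tot->ifree += pag->pagi_freecount;
                    tot->fdblocks += pag->pagf_freeblks;
            }
    }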
agheader_repair.c:
     97  if (!xfs_verify_agbno(mp, sc->sa.pag->pag_agno, agbno))  in xrep_agf_check_agfl_block()
    191  agf->agf_seqno = cpu_to_be32(sc->sa.pag->pag_agno);  in xrep_agf_init_header()
    193  sc->sa.pag->pag_agno));  in xrep_agf_init_header()
    201  ASSERT(sc->sa.pag->pagf_init);  in xrep_agf_init_header()
    202  sc->sa.pag->pagf_init = 0;  in xrep_agf_init_header()
    251  sc->sa.pag, XFS_BTNUM_BNO);  in xrep_agf_calc_from_btrees()
    265  sc->sa.pag, XFS_BTNUM_CNT);  in xrep_agf_calc_from_btrees()
    273  cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.pag);  in xrep_agf_calc_from_btrees()
    286  sc->sa.pag);  in xrep_agf_calc_from_btrees()
    306  struct xfs_perag *pag;  in xrep_agf_commit_new() (local)
     …
repair.c:
    160  struct xfs_perag *pag,  in xrep_ag_has_space() (argument)
    164  return !xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) &&  in xrep_ag_has_space()
    165  !xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA) &&  in xrep_ag_has_space()
    166  pag->pagf_freeblks > xfs_ag_resv_needed(pag, type) + nr_blocks;  in xrep_ag_has_space()
    180  struct xfs_perag *pag;  in xrep_calc_ag_resblks() (local)
    195  pag = xfs_perag_get(mp, sm->sm_agno);  in xrep_calc_ag_resblks()
    196  if (pag->pagi_init) {  in xrep_calc_ag_resblks()
    198  icount = pag->pagi_count;  in xrep_calc_ag_resblks()
    203  icount = pag->pagi_count;  in xrep_calc_ag_resblks()
    222  xfs_perag_put(pag);  in xrep_calc_ag_resblks()
     …
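xrep_ag_has_space() ties the reservation API from xfs_ag_resv.h to a concrete policy: a repair may pull blocks from an AG only if neither reservation is critically low and the free-space counter still covers the outstanding reservations plus the new allocation. Reassembled from the fragments above, with the full parameter list inferred from how the values are used:

    static bool
    xrep_ag_has_space(struct xfs_perag *pag, xfs_extlen_t nr_blocks,
                    enum xfs_ag_resv_type type)
    {
            return !xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) &&
                   !xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA) &&
                   pag->pagf_freeblks > xfs_ag_resv_needed(pag, type) + nr_blocks;
    }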
common.c:
    412  ASSERT(!sa->pag);  in xchk_ag_read_headers()
    413  sa->pag = xfs_perag_get(mp, agno);  in xchk_ag_read_headers()
    414  if (!sa->pag)  in xchk_ag_read_headers()
    467  xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_BNO)) {  in xchk_ag_btcur_init()
    470  sa->pag, XFS_BTNUM_BNO);  in xchk_ag_btcur_init()
    474  xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_CNT)) {  in xchk_ag_btcur_init()
    477  sa->pag, XFS_BTNUM_CNT);  in xchk_ag_btcur_init()
    482  xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_INO)) {  in xchk_ag_btcur_init()
    484  sa->pag, XFS_BTNUM_INO);  in xchk_ag_btcur_init()
    489  xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_FINO)) {  in xchk_ag_btcur_init()
     …
health.c:
    130  struct xfs_perag *pag;  in xchk_update_health() (local)
    140  pag = xfs_perag_get(sc->mp, sc->sm->sm_agno);  in xchk_update_health()
    142  xfs_ag_mark_sick(pag, sc->sick_mask);  in xchk_update_health()
    144  xfs_ag_mark_healthy(pag, sc->sick_mask);  in xchk_update_health()
    145  xfs_perag_put(pag);  in xchk_update_health()
    177  struct xfs_perag *pag,  in xchk_ag_btree_healthy_enough() (argument)
    225  if (xfs_ag_has_sickness(pag, mask)) {  in xchk_ag_btree_healthy_enough()