
Searched for refs:sc (results 1–25 of 64), sorted by relevance


/fs/xfs/scrub/
agheader.c
25 struct xfs_scrub *sc, in xchk_superblock_xref() argument
28 struct xfs_mount *mp = sc->mp; in xchk_superblock_xref()
29 xfs_agnumber_t agno = sc->sm->sm_agno; in xchk_superblock_xref()
33 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) in xchk_superblock_xref()
38 error = xchk_ag_init(sc, agno, &sc->sa); in xchk_superblock_xref()
39 if (!xchk_xref_process_error(sc, agno, agbno, &error)) in xchk_superblock_xref()
42 xchk_xref_is_used_space(sc, agbno, 1); in xchk_superblock_xref()
43 xchk_xref_is_not_inode_chunk(sc, agbno, 1); in xchk_superblock_xref()
44 xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS); in xchk_superblock_xref()
45 xchk_xref_is_not_shared(sc, agbno, 1); in xchk_superblock_xref()
[all …]
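
The fragments above show the canonical shape of a cross-reference helper: bail out if the main check already flagged corruption, attach the AG headers, then run the individual xref predicates. A sketch of the whole flow, with the elided lines (the declarations and the agbno assignment) reconstructed as assumptions:

    /* Sketch of xchk_superblock_xref(); gap lines are reconstructed. */
    static void
    xchk_superblock_xref(
        struct xfs_scrub *sc,
        struct xfs_buf *bp)
    {
        struct xfs_mount *mp = sc->mp;
        xfs_agnumber_t agno = sc->sm->sm_agno;
        xfs_agblock_t agbno;
        int error;

        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
            return;

        agbno = XFS_SB_BLOCK(mp);    /* assumed: block being cross-checked */

        error = xchk_ag_init(sc, agno, &sc->sa);
        if (!xchk_xref_process_error(sc, agno, agbno, &error))
            return;

        /* Each predicate flags cross-referencing corruption on sc->sm. */
        xchk_xref_is_used_space(sc, agbno, 1);
        xchk_xref_is_not_inode_chunk(sc, agbno, 1);
        xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
        xchk_xref_is_not_shared(sc, agbno, 1);
    }
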
common.c
65 struct xfs_scrub *sc, in __xchk_process_error() argument
77 trace_xchk_deadlock_retry(sc->ip, sc->sm, *error); in __xchk_process_error()
82 sc->sm->sm_flags |= errflag; in __xchk_process_error()
86 trace_xchk_op_error(sc, agno, bno, *error, in __xchk_process_error()
95 struct xfs_scrub *sc, in xchk_process_error() argument
100 return __xchk_process_error(sc, agno, bno, error, in xchk_process_error()
106 struct xfs_scrub *sc, in xchk_xref_process_error() argument
111 return __xchk_process_error(sc, agno, bno, error, in xchk_xref_process_error()
118 struct xfs_scrub *sc, in __xchk_fblock_process_error() argument
130 trace_xchk_deadlock_retry(sc->ip, sc->sm, *error); in __xchk_fblock_process_error()
[all …]
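
Pieced together, __xchk_process_error() classifies errors three ways: a deadlock is traced and retried, filesystem corruption is recorded on the scrub state (errflag lets the two thin wrappers above distinguish ordinary corruption from cross-referencing corruption), and anything else is traced as an operational error. A sketch, with the switch framing assumed from the visible cases:

    /* Sketch of __xchk_process_error(); the switch framing is assumed. */
    static bool
    __xchk_process_error(
        struct xfs_scrub *sc,
        xfs_agnumber_t agno,
        xfs_agblock_t bno,
        int *error,
        __u32 errflag,
        void *ret_ip)
    {
        switch (*error) {
        case 0:
            return true;
        case -EDEADLOCK:
            /* Used to restart an op with deadlock avoidance. */
            trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
            break;
        case -EFSBADCRC:
        case -EFSCORRUPTED:
            /* Note the badness but don't abort. */
            sc->sm->sm_flags |= errflag;
            *error = 0;
            /* fall through */
        default:
            trace_xchk_op_error(sc, agno, bno, *error, ret_ip);
            break;
        }
        return false;
    }
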
inode.c
31 struct xfs_scrub *sc, in xchk_setup_inode() argument
40 error = xchk_get_inode(sc, ip); in xchk_setup_inode()
46 return xchk_trans_alloc(sc, 0); in xchk_setup_inode()
52 sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL; in xchk_setup_inode()
53 xfs_ilock(sc->ip, sc->ilock_flags); in xchk_setup_inode()
54 error = xchk_trans_alloc(sc, 0); in xchk_setup_inode()
57 sc->ilock_flags |= XFS_ILOCK_EXCL; in xchk_setup_inode()
58 xfs_ilock(sc->ip, XFS_ILOCK_EXCL); in xchk_setup_inode()
70 struct xfs_scrub *sc, in xchk_inode_extsize() argument
78 fa = xfs_inode_validate_extsize(sc->mp, be32_to_cpu(dip->di_extsize), in xchk_inode_extsize()
[all …]
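
Note the two-stage locking in xchk_setup_inode(): the IOLOCK and MMAPLOCK are taken before the transaction is allocated, but the ILOCK is only added afterwards, because transaction reservation can wait for log space and must not happen while the ILOCK is held. A minimal sketch of that ordering (the error label is an assumption):

    /* Lock what we can before reserving a transaction... */
    sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
    xfs_ilock(sc->ip, sc->ilock_flags);
    error = xchk_trans_alloc(sc, 0);
    if (error)
        goto out;    /* assumed error path */
    /* ...and only take the ILOCK once the reservation is done. */
    sc->ilock_flags |= XFS_ILOCK_EXCL;
    xfs_ilock(sc->ip, XFS_ILOCK_EXCL);
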
rtbitmap.c
23 struct xfs_scrub *sc, in xchk_setup_rt() argument
28 error = xchk_setup_fs(sc, ip); in xchk_setup_rt()
32 sc->ilock_flags = XFS_ILOCK_EXCL | XFS_ILOCK_RTBITMAP; in xchk_setup_rt()
33 sc->ip = sc->mp->m_rbmip; in xchk_setup_rt()
34 xfs_ilock(sc->ip, sc->ilock_flags); in xchk_setup_rt()
48 struct xfs_scrub *sc = priv; in xchk_rtbitmap_rec() local
56 !xfs_verify_rtbno(sc->mp, startblock) || in xchk_rtbitmap_rec()
57 !xfs_verify_rtbno(sc->mp, startblock + blockcount - 1)) in xchk_rtbitmap_rec()
58 xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); in xchk_rtbitmap_rec()
65 struct xfs_scrub *sc) in xchk_rtbitmap_check_extents() argument
[all …]
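
xchk_rtbitmap_rec() above validates one free extent from the realtime bitmap: both ends must be valid realtime block numbers and the length must not overflow. A sketch with the extent-to-block conversion reconstructed; the signature and the sb_rextsize scaling are assumptions:

    /* Sketch of xchk_rtbitmap_rec(); signature and scaling are assumed. */
    static int
    xchk_rtbitmap_rec(
        struct xfs_trans *tp,
        struct xfs_rtalloc_rec *rec,
        void *priv)
    {
        struct xfs_scrub *sc = priv;
        xfs_rtblock_t startblock;
        xfs_rtblock_t blockcount;

        /* Assumed: convert rt extents into rt block numbers. */
        startblock = rec->ar_startext * tp->t_mountp->m_sb.sb_rextsize;
        blockcount = rec->ar_extcount * tp->t_mountp->m_sb.sb_rextsize;

        if (startblock + blockcount <= startblock ||    /* overflow */
            !xfs_verify_rtbno(sc->mp, startblock) ||
            !xfs_verify_rtbno(sc->mp, startblock + blockcount - 1))
            xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
        return 0;
    }
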
repair.c
41 struct xfs_scrub *sc) in xrep_attempt() argument
45 trace_xrep_attempt(ip, sc->sm, error); in xrep_attempt()
47 xchk_ag_btcur_free(&sc->sa); in xrep_attempt()
50 ASSERT(sc->ops->repair); in xrep_attempt()
51 error = sc->ops->repair(sc); in xrep_attempt()
52 trace_xrep_done(ip, sc->sm, error); in xrep_attempt()
59 sc->sm->sm_flags &= ~XFS_SCRUB_FLAGS_OUT; in xrep_attempt()
60 sc->flags |= XREP_ALREADY_FIXED; in xrep_attempt()
65 if (!(sc->flags & XCHK_TRY_HARDER)) { in xrep_attempt()
66 sc->flags |= XCHK_TRY_HARDER; in xrep_attempt()
[all …]
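
The visible pieces of xrep_attempt() outline the repair loop: drop the btree cursors, call the type-specific ->repair method, and on success clear the outgoing status flags and mark the repair done so the object gets re-scrubbed. A sketch of the decision switch; the branches not visible above are assumptions:

    /* Sketch of the tail of xrep_attempt(); unseen branches assumed. */
    xchk_ag_btcur_free(&sc->sa);

    ASSERT(sc->ops->repair);
    error = sc->ops->repair(sc);
    trace_xrep_done(ip, sc->sm, error);
    switch (error) {
    case 0:
        /* Repair succeeded: clear the status bits and re-run the check. */
        sc->sm->sm_flags &= ~XFS_SCRUB_FLAGS_OUT;
        sc->flags |= XREP_ALREADY_FIXED;
        return -EAGAIN;    /* assumed: caller retries the scrub */
    case -EDEADLOCK:
        /* Ask the caller to try again with every lock held. */
        if (!(sc->flags & XCHK_TRY_HARDER)) {
            sc->flags |= XCHK_TRY_HARDER;
            return -EAGAIN;
        }
        /* Tried harder and still stuck; report the corruption unfixed. */
        return 0;    /* assumed */
    default:
        return error;
    }
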
bmap.c
29 struct xfs_scrub *sc, in xchk_setup_inode_bmap() argument
34 error = xchk_get_inode(sc, ip); in xchk_setup_inode_bmap()
38 sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL; in xchk_setup_inode_bmap()
39 xfs_ilock(sc->ip, sc->ilock_flags); in xchk_setup_inode_bmap()
46 if (S_ISREG(VFS_I(sc->ip)->i_mode) && in xchk_setup_inode_bmap()
47 sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) { in xchk_setup_inode_bmap()
48 struct address_space *mapping = VFS_I(sc->ip)->i_mapping; in xchk_setup_inode_bmap()
50 inode_dio_wait(VFS_I(sc->ip)); in xchk_setup_inode_bmap()
73 error = xchk_trans_alloc(sc, 0); in xchk_setup_inode_bmap()
76 sc->ilock_flags |= XFS_ILOCK_EXCL; in xchk_setup_inode_bmap()
[all …]
agheader_repair.c
34 struct xfs_scrub *sc) in xrep_superblock() argument
36 struct xfs_mount *mp = sc->mp; in xrep_superblock()
42 agno = sc->sm->sm_agno; in xrep_superblock()
46 error = xfs_sb_get_secondary(mp, sc->tp, agno, &bp); in xrep_superblock()
55 xfs_trans_buf_set_type(sc->tp, bp, XFS_BLFT_SB_BUF); in xrep_superblock()
56 xfs_trans_log_buf(sc->tp, bp, 0, BBTOB(bp->b_length) - 1); in xrep_superblock()
63 struct xfs_scrub *sc; member
78 if (xchk_should_terminate(raa->sc, &error)) in xrep_agf_walk_allocbt()
94 struct xfs_scrub *sc = priv; in xrep_agf_check_agfl_block() local
96 if (!xfs_verify_agbno(mp, sc->sa.agno, agbno)) in xrep_agf_check_agfl_block()
[all …]
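
xrep_superblock() repairs a secondary superblock simply by rewriting it from the in-core primary. A sketch; the middle step (zeroing the buffer and serializing mp->m_sb into it) fills the elided lines and is an assumption:

    /* Sketch of xrep_superblock(); the copy step is an assumption. */
    int
    xrep_superblock(struct xfs_scrub *sc)
    {
        struct xfs_mount *mp = sc->mp;
        struct xfs_buf *bp;
        xfs_agnumber_t agno;
        int error;

        /* Assumed guard: don't touch AG 0's primary superblock here. */
        agno = sc->sm->sm_agno;
        if (agno == 0)
            return -EOPNOTSUPP;

        error = xfs_sb_get_secondary(mp, sc->tp, agno, &bp);
        if (error)
            return error;

        /* Assumed: wipe the buffer, then serialize the in-core primary. */
        xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
        xfs_sb_to_disk(bp->b_addr, &mp->m_sb);

        /* Write this to disk. */
        xfs_trans_buf_set_type(sc->tp, bp, XFS_BLFT_SB_BUF);
        xfs_trans_log_buf(sc->tp, bp, 0, BBTOB(bp->b_length) - 1);
        return error;
    }
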
common.h
16 struct xfs_scrub *sc, in xchk_should_terminate() argument
34 int xchk_trans_alloc(struct xfs_scrub *sc, uint resblks);
35 bool xchk_process_error(struct xfs_scrub *sc, xfs_agnumber_t agno,
37 bool xchk_fblock_process_error(struct xfs_scrub *sc, int whichfork,
40 bool xchk_xref_process_error(struct xfs_scrub *sc,
42 bool xchk_fblock_xref_process_error(struct xfs_scrub *sc,
45 void xchk_block_set_preen(struct xfs_scrub *sc,
47 void xchk_ino_set_preen(struct xfs_scrub *sc, xfs_ino_t ino);
49 void xchk_set_corrupt(struct xfs_scrub *sc);
50 void xchk_block_set_corrupt(struct xfs_scrub *sc,
[all …]
dir.c
25 struct xfs_scrub *sc, in xchk_setup_directory() argument
28 return xchk_setup_inode_contents(sc, ip, 0); in xchk_setup_directory()
39 struct xfs_scrub *sc; member
50 struct xfs_mount *mp = sdc->sc->mp; in xchk_dir_check_ftype()
57 xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, in xchk_dir_check_ftype()
70 error = xfs_iget(mp, sdc->sc->tp, inum, 0, 0, &ip); in xchk_dir_check_ftype()
71 if (!xchk_fblock_xref_process_error(sdc->sc, XFS_DATA_FORK, offset, in xchk_dir_check_ftype()
79 xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset); in xchk_dir_check_ftype()
111 ip = sdc->sc->ip; in xchk_dir_actor()
116 if (xchk_should_terminate(sdc->sc, &error)) in xchk_dir_actor()
[all …]
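
The ftype check above cross-references a directory entry's recorded file type against the mode of the inode it points at, routing an iget failure through the usual xref error processing. A sketch of the comparison; the mode-to-ftype conversion step and the declarations are assumptions:

    /* Sketch of the heart of xchk_dir_check_ftype(); conversion assumed. */
    struct xfs_inode *ip;
    unsigned char ino_dtype;
    int error;

    error = xfs_iget(mp, sdc->sc->tp, inum, 0, 0, &ip);
    if (!xchk_fblock_xref_process_error(sdc->sc, XFS_DATA_FORK, offset,
            &error))
        goto out;

    /* Assumed: derive the expected dirent type from the inode's mode. */
    ino_dtype = xfs_dir3_get_dtype(mp,
            xfs_mode_to_ftype(VFS_I(ip)->i_mode));
    if (ino_dtype != dtype)
        xchk_fblock_set_corrupt(sdc->sc, XFS_DATA_FORK, offset);
    xfs_irele(ip);
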
parent.c
23 struct xfs_scrub *sc, in xchk_setup_parent() argument
26 return xchk_setup_inode_contents(sc, ip, 0); in xchk_setup_parent()
35 struct xfs_scrub *sc; member
63 if (xchk_should_terminate(spc->sc, &error)) in xchk_parent_actor()
72 struct xfs_scrub *sc, in xchk_parent_count_parent_dentries() argument
78 .ino = sc->ip->i_ino, in xchk_parent_count_parent_dentries()
79 .sc = sc, in xchk_parent_count_parent_dentries()
108 error = xfs_readdir(sc->tp, parent, &spc.dc, bufsize); in xchk_parent_count_parent_dentries()
131 struct xfs_scrub *sc, in xchk_parent_validate() argument
135 struct xfs_mount *mp = sc->mp; in xchk_parent_validate()
[all …]
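
xchk_parent_count_parent_dentries() counts how many entries in the alleged parent point back at the child, by running xfs_readdir() with an actor that bumps a counter on each match. A sketch of the setup and loop; the actor hookup, loop framing, and result plumbing are assumptions:

    /* Sketch; actor hookup, loop framing, and result plumbing assumed. */
    struct xchk_parent_ctx spc = {
        .dc.actor = xchk_parent_actor,    /* assumed */
        .ino = sc->ip->i_ino,
        .sc = sc,
    };
    loff_t oldpos = 0;

    /* Keep reading until the directory position stops advancing. */
    for (;;) {
        error = xfs_readdir(sc->tp, parent, &spc.dc, bufsize);
        if (error)
            goto out;
        if (oldpos == spc.dc.pos)
            break;
        oldpos = spc.dc.pos;
    }
    *nlink = spc.nlink;    /* assumed out-parameter */
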
quota.c
23 struct xfs_scrub *sc) in xchk_quota_to_dqtype() argument
25 switch (sc->sm->sm_type) { in xchk_quota_to_dqtype()
40 struct xfs_scrub *sc, in xchk_setup_quota() argument
46 if (!XFS_IS_QUOTA_RUNNING(sc->mp) || !XFS_IS_QUOTA_ON(sc->mp)) in xchk_setup_quota()
49 dqtype = xchk_quota_to_dqtype(sc); in xchk_setup_quota()
52 sc->flags |= XCHK_HAS_QUOTAOFFLOCK; in xchk_setup_quota()
53 mutex_lock(&sc->mp->m_quotainfo->qi_quotaofflock); in xchk_setup_quota()
54 if (!xfs_this_quota_on(sc->mp, dqtype)) in xchk_setup_quota()
56 error = xchk_setup_fs(sc, ip); in xchk_setup_quota()
59 sc->ip = xfs_quota_inode(sc->mp, dqtype); in xchk_setup_quota()
[all …]
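
xchk_setup_quota() pins the quotaoff lock before anything else so quota cannot be switched off mid-scrub (the XCHK_HAS_QUOTAOFFLOCK flag tells teardown to drop it), then points sc->ip at the quota inode itself. A sketch; the final ilock step is an assumption:

    /* Sketch of xchk_setup_quota(); the final ilock step is assumed. */
    uint dqtype;
    int error;

    if (!XFS_IS_QUOTA_RUNNING(sc->mp) || !XFS_IS_QUOTA_ON(sc->mp))
        return -ENOENT;

    dqtype = xchk_quota_to_dqtype(sc);
    if (dqtype == 0)
        return -EINVAL;

    /* Hold the quotaoff lock so nobody disables quota mid-scrub. */
    sc->flags |= XCHK_HAS_QUOTAOFFLOCK;
    mutex_lock(&sc->mp->m_quotainfo->qi_quotaofflock);
    if (!xfs_this_quota_on(sc->mp, dqtype))
        return -ENOENT;    /* teardown drops the lock via the flag */

    error = xchk_setup_fs(sc, ip);
    if (error)
        return error;

    /* Point the scrub context at the quota inode and lock it (assumed). */
    sc->ip = xfs_quota_inode(sc->mp, dqtype);
    xfs_ilock(sc->ip, XFS_ILOCK_EXCL);
    sc->ilock_flags = XFS_ILOCK_EXCL;
    return 0;
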
ialloc.c
32 struct xfs_scrub *sc, in xchk_setup_ag_iallocbt() argument
35 return xchk_setup_ag_btree(sc, ip, sc->flags & XCHK_TRY_HARDER); in xchk_setup_ag_iallocbt()
58 struct xfs_scrub *sc, in xchk_iallocbt_chunk_xref_other() argument
66 if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT) in xchk_iallocbt_chunk_xref_other()
67 pcur = &sc->sa.ino_cur; in xchk_iallocbt_chunk_xref_other()
69 pcur = &sc->sa.fino_cur; in xchk_iallocbt_chunk_xref_other()
73 if (!xchk_should_check_xref(sc, &error, pcur)) in xchk_iallocbt_chunk_xref_other()
77 xchk_btree_xref_set_corrupt(sc, *pcur, 0); in xchk_iallocbt_chunk_xref_other()
83 struct xfs_scrub *sc, in xchk_iallocbt_chunk_xref() argument
89 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) in xchk_iallocbt_chunk_xref()
[all …]
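
When scrubbing one inode btree, xchk_iallocbt_chunk_xref_other() probes the other one: a chunk with free inodes must appear in the finobt, and one with none must not. A sketch; the probe call in the gap is an assumption:

    /* Sketch; the probe call in the gap is an assumption. */
    struct xfs_btree_cur **pcur;
    bool has_irec;
    int error;

    if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT)
        pcur = &sc->sa.ino_cur;
    else
        pcur = &sc->sa.fino_cur;
    if (!(*pcur))
        return;

    /* Assumed: ask the other btree whether it has a record for agino. */
    error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec);
    if (!xchk_should_check_xref(sc, &error, pcur))
        return;

    /* A chunk with free inodes must be in the finobt, and vice versa. */
    if ((irec->ir_freecount > 0 && !has_irec) ||
        (irec->ir_freecount == 0 && has_irec))
        xchk_btree_xref_set_corrupt(sc, *pcur, 0);
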
rmap.c
24 struct xfs_scrub *sc, in xchk_setup_ag_rmapbt() argument
27 return xchk_setup_ag_btree(sc, ip, false); in xchk_setup_ag_rmapbt()
35 struct xfs_scrub *sc, in xchk_rmapbt_xref_refc() argument
46 if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm)) in xchk_rmapbt_xref_refc()
55 error = xfs_refcount_find_shared(sc->sa.refc_cur, irec->rm_startblock, in xchk_rmapbt_xref_refc()
57 if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur)) in xchk_rmapbt_xref_refc()
60 xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0); in xchk_rmapbt_xref_refc()
66 struct xfs_scrub *sc, in xchk_rmapbt_xref() argument
72 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) in xchk_rmapbt_xref()
75 xchk_xref_is_used_space(sc, agbno, len); in xchk_rmapbt_xref()
[all …]
refcount.c
22 struct xfs_scrub *sc, in xchk_setup_ag_refcountbt() argument
25 return xchk_setup_ag_btree(sc, ip, false); in xchk_setup_ag_refcountbt()
72 struct xfs_scrub *sc; member
103 if (xchk_should_terminate(refchk->sc, &error)) in xchk_refcountbt_rmap_check()
111 xchk_btree_xref_set_corrupt(refchk->sc, cur, 0); in xchk_refcountbt_rmap_check()
269 struct xfs_scrub *sc, in xchk_refcountbt_xref_rmap() argument
275 .sc = sc, in xchk_refcountbt_xref_rmap()
287 if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm)) in xchk_refcountbt_xref_rmap()
297 error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high, in xchk_refcountbt_xref_rmap()
299 if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur)) in xchk_refcountbt_xref_rmap()
[all …]
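
The refcountbt check confirms each refcount record against the rmapbt by visiting every reverse mapping that overlaps the extent, with xchk_refcountbt_rmap_check() accumulating the overlaps. A sketch of the range query; the key setup, the extra refchk fields, and the final comparison are assumptions:

    /* Sketch of the rmap range query in xchk_refcountbt_xref_rmap(). */
    struct xchk_refcnt_check refchk = { .sc = sc };    /* other fields assumed */
    struct xfs_rmap_irec low, high;
    int error;

    /* Assumed: bracket the refcount extent with low/high rmap keys. */
    memset(&low, 0, sizeof(low));
    low.rm_startblock = bno;
    memset(&high, 0xFF, sizeof(high));
    high.rm_startblock = bno + len - 1;

    error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
            xchk_refcountbt_rmap_check, &refchk);
    if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
        return;

    /* Assumed: compare the accumulated share count with the record. */
    if (refchk.seen != refcount)
        xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
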
scrub.h
87 int xchk_tester(struct xfs_scrub *sc);
88 int xchk_superblock(struct xfs_scrub *sc);
89 int xchk_agf(struct xfs_scrub *sc);
90 int xchk_agfl(struct xfs_scrub *sc);
91 int xchk_agi(struct xfs_scrub *sc);
92 int xchk_bnobt(struct xfs_scrub *sc);
93 int xchk_cntbt(struct xfs_scrub *sc);
94 int xchk_inobt(struct xfs_scrub *sc);
95 int xchk_finobt(struct xfs_scrub *sc);
96 int xchk_rmapbt(struct xfs_scrub *sc);
[all …]
alloc.c
24 struct xfs_scrub *sc, in xchk_setup_ag_allocbt() argument
27 return xchk_setup_ag_btree(sc, ip, false); in xchk_setup_ag_allocbt()
37 struct xfs_scrub *sc, in xchk_allocbt_xref_other() argument
47 if (sc->sm->sm_type == XFS_SCRUB_TYPE_BNOBT) in xchk_allocbt_xref_other()
48 pcur = &sc->sa.cnt_cur; in xchk_allocbt_xref_other()
50 pcur = &sc->sa.bno_cur; in xchk_allocbt_xref_other()
51 if (!*pcur || xchk_skip_xref(sc->sm)) in xchk_allocbt_xref_other()
55 if (!xchk_should_check_xref(sc, &error, pcur)) in xchk_allocbt_xref_other()
58 xchk_btree_xref_set_corrupt(sc, *pcur, 0); in xchk_allocbt_xref_other()
63 if (!xchk_should_check_xref(sc, &error, pcur)) in xchk_allocbt_xref_other()
[all …]
scrub.c
136 struct xfs_scrub *sc) in xchk_probe() argument
140 if (xchk_should_terminate(sc, &error)) in xchk_probe()
151 struct xfs_scrub *sc, in xchk_teardown() argument
155 xchk_ag_free(sc, &sc->sa); in xchk_teardown()
156 if (sc->tp) { in xchk_teardown()
157 if (error == 0 && (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)) in xchk_teardown()
158 error = xfs_trans_commit(sc->tp); in xchk_teardown()
160 xfs_trans_cancel(sc->tp); in xchk_teardown()
161 sc->tp = NULL; in xchk_teardown()
163 if (sc->ip) { in xchk_teardown()
[all …]
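
xchk_teardown() commits the transaction only when a repair was requested (XFS_SCRUB_IFLAG_REPAIR) and nothing failed; otherwise it cancels, so a pure check never dirties the filesystem. A sketch with the inode unlock/release step, which falls in the elided lines, reconstructed as an assumption (ip_in stands for the inode the caller originally passed in):

    /* Sketch of xchk_teardown(); the inode release step is an assumption. */
    xchk_ag_free(sc, &sc->sa);
    if (sc->tp) {
        /* Commit only if a repair ran and succeeded; else throw it away. */
        if (error == 0 && (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR))
            error = xfs_trans_commit(sc->tp);
        else
            xfs_trans_cancel(sc->tp);
        sc->tp = NULL;
    }
    if (sc->ip) {
        if (sc->ilock_flags)
            xfs_iunlock(sc->ip, sc->ilock_flags);    /* assumed */
        if (sc->ip != ip_in)
            xfs_irele(sc->ip);    /* assumed: drop borrowed inodes */
        sc->ip = NULL;
    }
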
health.c
128 struct xfs_scrub *sc) in xchk_update_health() argument
133 if (!sc->sick_mask) in xchk_update_health()
136 bad = (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT); in xchk_update_health()
137 switch (type_to_health_flag[sc->sm->sm_type].group) { in xchk_update_health()
139 pag = xfs_perag_get(sc->mp, sc->sm->sm_agno); in xchk_update_health()
141 xfs_ag_mark_sick(pag, sc->sick_mask); in xchk_update_health()
143 xfs_ag_mark_healthy(pag, sc->sick_mask); in xchk_update_health()
147 if (!sc->ip) in xchk_update_health()
150 xfs_inode_mark_sick(sc->ip, sc->sick_mask); in xchk_update_health()
152 xfs_inode_mark_healthy(sc->ip, sc->sick_mask); in xchk_update_health()
[all …]
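
After a scrub completes, xchk_update_health() folds the result into health tracking: the state named by sc->sick_mask is marked sick if the check found corruption and healthy otherwise, with the target (per-AG vs. per-inode) chosen by the scrub type's health group. A sketch of the two visible branches; the group names, the perag release, and the remaining groups are assumptions:

    /* Sketch of xchk_update_health(); unseen groups are assumed. */
    struct xfs_perag *pag;
    bool bad;

    if (!sc->sick_mask)
        return;

    bad = (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT);
    switch (type_to_health_flag[sc->sm->sm_type].group) {
    case XHG_AG:    /* assumed group name */
        pag = xfs_perag_get(sc->mp, sc->sm->sm_agno);
        if (bad)
            xfs_ag_mark_sick(pag, sc->sick_mask);
        else
            xfs_ag_mark_healthy(pag, sc->sick_mask);
        xfs_perag_put(pag);    /* assumed */
        break;
    case XHG_INO:    /* assumed group name */
        if (!sc->ip)
            return;
        if (bad)
            xfs_inode_mark_sick(sc->ip, sc->sick_mask);
        else
            xfs_inode_mark_healthy(sc->ip, sc->sick_mask);
        break;
    /* Whole-fs and realtime groups handled analogously (assumed). */
    }
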
fscounters.c
64 struct xfs_scrub *sc) in xchk_fscount_warmup() argument
66 struct xfs_mount *mp = sc->mp; in xchk_fscount_warmup()
80 error = xfs_ialloc_read_agi(mp, sc->tp, agno, &agi_bp); in xchk_fscount_warmup()
83 error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, &agf_bp); in xchk_fscount_warmup()
104 if (xchk_should_terminate(sc, &error)) in xchk_fscount_warmup()
119 struct xfs_scrub *sc, in xchk_setup_fscounters() argument
125 sc->buf = kmem_zalloc(sizeof(struct xchk_fscounters), 0); in xchk_setup_fscounters()
126 if (!sc->buf) in xchk_setup_fscounters()
128 fsc = sc->buf; in xchk_setup_fscounters()
130 xfs_icount_range(sc->mp, &fsc->icount_min, &fsc->icount_max); in xchk_setup_fscounters()
[all …]
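
xchk_fscount_warmup() walks every AG and reads its AGI and AGF so the in-core per-AG counters are initialized before the filesystem summary counters are compared against them. A sketch of the loop; the loop framing and buffer release are assumptions:

    /* Sketch of the warmup loop; framing and buffer release assumed. */
    struct xfs_buf *agi_bp, *agf_bp;
    xfs_agnumber_t agno;
    int error = 0;

    for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
        /* Reading the headers forces the perag counters to be set up. */
        error = xfs_ialloc_read_agi(mp, sc->tp, agno, &agi_bp);
        if (error)
            break;
        error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, &agf_bp);
        if (error)
            break;
        xfs_buf_relse(agf_bp);    /* assumed */
        xfs_buf_relse(agi_bp);    /* assumed */
        if (xchk_should_terminate(sc, &error))
            break;
    }
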
btree.c
26 struct xfs_scrub *sc, in __xchk_btree_process_error() argument
39 trace_xchk_deadlock_retry(sc->ip, sc->sm, *error); in __xchk_btree_process_error()
44 sc->sm->sm_flags |= errflag; in __xchk_btree_process_error()
49 trace_xchk_ifork_btree_op_error(sc, cur, level, in __xchk_btree_process_error()
52 trace_xchk_btree_op_error(sc, cur, level, in __xchk_btree_process_error()
61 struct xfs_scrub *sc, in xchk_btree_process_error() argument
66 return __xchk_btree_process_error(sc, cur, level, error, in xchk_btree_process_error()
72 struct xfs_scrub *sc, in xchk_btree_xref_process_error() argument
77 return __xchk_btree_process_error(sc, cur, level, error, in xchk_btree_xref_process_error()
84 struct xfs_scrub *sc, in __xchk_btree_set_corrupt() argument
[all …]
attr.c
30 struct xfs_scrub *sc, in xchk_setup_xattr_buf() argument
35 struct xchk_xattr_buf *ab = sc->buf; in xchk_setup_xattr_buf()
42 sz = 3 * sizeof(long) * BITS_TO_LONGS(sc->mp->m_attr_geo->blksize); in xchk_setup_xattr_buf()
53 sc->buf = NULL; in xchk_setup_xattr_buf()
65 sc->buf = ab; in xchk_setup_xattr_buf()
72 struct xfs_scrub *sc, in xchk_setup_xattr() argument
82 if (sc->flags & XCHK_TRY_HARDER) { in xchk_setup_xattr()
83 error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX, 0); in xchk_setup_xattr()
88 return xchk_setup_inode_contents(sc, ip, 0); in xchk_setup_xattr()
95 struct xfs_scrub *sc; member
[all …]
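
Two details stand out above: the scratch buffer is sized for what appears to be three block-sized bitmaps (the 3 * sizeof(long) * BITS_TO_LONGS() term), and on a TRY_HARDER re-run the worst-case buffer is allocated up front so the retry never has to allocate under load. A sketch of the retry path; the rationale comment is an assumption:

    /* Sketch of the TRY_HARDER path in xchk_setup_xattr(). */
    if (sc->flags & XCHK_TRY_HARDER) {
        /*
         * Assumed rationale: the first pass failed for lack of
         * memory, so grab the largest buffer we could ever need now.
         */
        error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX, 0);
        if (error)
            return error;
    }
    return xchk_setup_inode_contents(sc, ip, 0);
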
repair.h
11 static inline int xrep_notsupported(struct xfs_scrub *sc) in xrep_notsupported() argument
20 int xrep_attempt(struct xfs_inode *ip, struct xfs_scrub *sc);
22 int xrep_roll_ag_trans(struct xfs_scrub *sc);
25 xfs_extlen_t xrep_calc_ag_resblks(struct xfs_scrub *sc);
26 int xrep_alloc_ag_block(struct xfs_scrub *sc,
29 int xrep_init_btblock(struct xfs_scrub *sc, xfs_fsblock_t fsb,
35 int xrep_fix_freelist(struct xfs_scrub *sc, bool can_shrink);
36 int xrep_invalidate_blocks(struct xfs_scrub *sc, struct xbitmap *btlist);
37 int xrep_reap_extents(struct xfs_scrub *sc, struct xbitmap *exlist,
52 int xrep_find_ag_btree_roots(struct xfs_scrub *sc, struct xfs_buf *agf_bp,
[all …]
symlink.c
21 struct xfs_scrub *sc, in xchk_setup_symlink() argument
25 sc->buf = kvzalloc(XFS_SYMLINK_MAXLEN + 1, GFP_KERNEL); in xchk_setup_symlink()
26 if (!sc->buf) in xchk_setup_symlink()
29 return xchk_setup_inode_contents(sc, ip, 0); in xchk_setup_symlink()
36 struct xfs_scrub *sc) in xchk_symlink() argument
38 struct xfs_inode *ip = sc->ip; in xchk_symlink()
50 xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); in xchk_symlink()
58 xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0); in xchk_symlink()
63 error = xfs_readlink_bmap_ilocked(sc->ip, sc->buf); in xchk_symlink()
64 if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error)) in xchk_symlink()
[all …]
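
xchk_setup_symlink() allocates a MAXLEN+1 scratch buffer, and for a remote symlink xchk_symlink() reads the target through xfs_readlink_bmap_ilocked() and verifies the stored string against the inode size. A sketch of the remote branch; the bounds check and the strnlen() comparison fill elided lines and are assumptions:

    /* Sketch of the remote-symlink branch; bounds and strnlen assumed. */
    len = ip->i_d.di_size;

    /* Assumed: a plausible target length is in [1, XFS_SYMLINK_MAXLEN]. */
    if (len > XFS_SYMLINK_MAXLEN || len <= 0) {
        xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
        goto out;
    }

    /* Remote target: read it into the sc->buf allocated at setup time. */
    error = xfs_readlink_bmap_ilocked(sc->ip, sc->buf);
    if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
        goto out;
    if (strnlen(sc->buf, XFS_SYMLINK_MAXLEN) < len)
        xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
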
/fs/ocfs2/cluster/
tcp.c
64 #define SC_NODEF_ARGS(sc) sc->sc_node->nd_name, sc->sc_node->nd_num, \ argument
65 &sc->sc_node->nd_ipv4_address, \
66 ntohs(sc->sc_node->nd_ipv4_port)
83 #define sclog(sc, fmt, args...) do { \ argument
84 typeof(sc) __sc = (sc); \
130 static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
131 static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc);
160 struct o2net_sock_container *sc) in o2net_set_nst_sock_container() argument
162 nst->st_sc = sc; in o2net_set_nst_sock_container()
171 static inline void o2net_set_sock_timer(struct o2net_sock_container *sc) in o2net_set_sock_timer() argument
[all …]
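
The sclog() macro above stashes its argument in a typeof() local before using it, the standard GNU C trick for evaluating a macro argument exactly once even when it expands to a side-effecting expression. A minimal, hypothetical illustration of the idiom (sc_debug and its message are invented for this sketch):

    /* Hypothetical macro showing the single-evaluation idiom of sclog(). */
    #define sc_debug(sc, fmt, args...) do {              \
        typeof(sc) __sc = (sc);  /* evaluate sc once */  \
        printk(KERN_DEBUG "sc %p: " fmt, __sc, ##args);  \
    } while (0)

    /* Without __sc, a call like sc_debug(get_and_ref_sc(), "x\n") would
     * run get_and_ref_sc() once per use of the argument in the body. */
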
netdebug.c
194 void o2net_debug_add_sc(struct o2net_sock_container *sc) in o2net_debug_add_sc() argument
197 list_add(&sc->sc_net_debug_item, &sock_containers); in o2net_debug_add_sc()
201 void o2net_debug_del_sc(struct o2net_sock_container *sc) in o2net_debug_del_sc() argument
204 list_del_init(&sc->sc_net_debug_item); in o2net_debug_del_sc()
216 struct o2net_sock_container *sc, *ret = NULL; in next_sc() local
220 list_for_each_entry(sc, &sc_start->sc_net_debug_item, in next_sc()
223 if (&sc->sc_net_debug_item == &sock_containers) in next_sc()
227 if (sc->sc_page != NULL) { in next_sc()
228 ret = sc; in next_sc()
239 struct o2net_sock_container *sc, *dummy_sc = sd->dbg_sock; in sc_seq_start() local
[all …]
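
next_sc() above walks the global sock_containers list starting just after sc_start, treating only containers with an attached sc_page as live (the dummy_sc in sc_seq_start() suggests the others are seq-file cursor placeholders). A sketch of the walk; the lock assertion is an assumption:

    /* Sketch of the body of next_sc(); the lock assertion is assumed. */
    struct o2net_sock_container *sc, *ret = NULL;

    assert_spin_locked(&o2net_debug_lock);    /* assumed */

    list_for_each_entry(sc, &sc_start->sc_net_debug_item,
                sc_net_debug_item) {
        /* Walked all the way around to the list head: we're done. */
        if (&sc->sc_net_debug_item == &sock_containers)
            break;
        /* sc_page distinguishes real containers from placeholders. */
        if (sc->sc_page != NULL) {
            ret = sc;
            break;
        }
    }
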
