Searched refs:state (Results 1 – 25 of 169) sorted by relevance

/fs/btrfs/
check-integrity.c
189 struct btrfsic_state *state; member
290 static int btrfsic_process_superblock(struct btrfsic_state *state,
292 static int btrfsic_process_metablock(struct btrfsic_state *state,
300 struct btrfsic_state *state,
311 static int btrfsic_handle_extent_data(struct btrfsic_state *state,
315 static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
319 static int btrfsic_read_block(struct btrfsic_state *state,
321 static void btrfsic_dump_database(struct btrfsic_state *state);
322 static int btrfsic_test_for_metadata(struct btrfsic_state *state,
331 struct btrfsic_state *state,
[all …]
extent_io.c
31 static inline bool extent_state_in_tree(const struct extent_state *state) in extent_state_in_tree() argument
33 return !RB_EMPTY_NODE(&state->rb_node); in extent_state_in_tree()
65 struct extent_state *state; in btrfs_leak_debug_check() local
69 state = list_entry(states.next, struct extent_state, leak_list); in btrfs_leak_debug_check()
71 state->start, state->end, state->state, in btrfs_leak_debug_check()
72 extent_state_in_tree(state), in btrfs_leak_debug_check()
73 refcount_read(&state->refs)); in btrfs_leak_debug_check()
74 list_del(&state->leak_list); in btrfs_leak_debug_check()
75 kmem_cache_free(extent_state_cache, state); in btrfs_leak_debug_check()
130 static int add_extent_changeset(struct extent_state *state, unsigned bits, in add_extent_changeset() argument
[all …]
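
The extent_io.c hits above outline btrfs's allocation leak checker: every extent_state joins a global list when allocated, leaves it when freed, and btrfs_leak_debug_check() walks whatever is left at teardown, printing and reclaiming stragglers. A minimal userspace C sketch of that pattern follows; all names in it are hypothetical, not btrfs API.

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct node {
	struct node *next, *prev;
};

struct my_state {
	unsigned long start, end;
	struct node leak_list;          /* links this object into `live` */
};

static struct node live = { &live, &live };     /* circular list head */

static void live_add(struct node *n, struct node *head)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

static void live_del(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static struct my_state *state_alloc(unsigned long start, unsigned long end)
{
	struct my_state *s = calloc(1, sizeof(*s));

	if (!s)
		return NULL;
	s->start = start;
	s->end = end;
	live_add(&s->leak_list, &live); /* track as live */
	return s;
}

static void state_free(struct my_state *s)
{
	live_del(&s->leak_list);        /* no longer live */
	free(s);
}

/* Run at shutdown: anything still on the list was leaked. */
static void leak_debug_check(void)
{
	while (live.next != &live) {
		struct my_state *s = (struct my_state *)
			((char *)live.next - offsetof(struct my_state, leak_list));

		fprintf(stderr, "leaked state [%lu, %lu]\n", s->start, s->end);
		state_free(s);
	}
}
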
/fs/nfs/
nfs4state.c
668 struct nfs4_state *state; in nfs4_alloc_open_state() local
670 state = kzalloc(sizeof(*state), GFP_NOFS); in nfs4_alloc_open_state()
671 if (!state) in nfs4_alloc_open_state()
673 refcount_set(&state->count, 1); in nfs4_alloc_open_state()
674 INIT_LIST_HEAD(&state->lock_states); in nfs4_alloc_open_state()
675 spin_lock_init(&state->state_lock); in nfs4_alloc_open_state()
676 seqlock_init(&state->seqlock); in nfs4_alloc_open_state()
677 init_waitqueue_head(&state->waitq); in nfs4_alloc_open_state()
678 return state; in nfs4_alloc_open_state()
682 nfs4_state_set_mode_locked(struct nfs4_state *state, fmode_t fmode) in nfs4_state_set_mode_locked() argument
[all …]
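
The nfs4_alloc_open_state() hits show the kernel's usual construct-before-publish recipe: a zeroed allocation, a refcount initialized to one for the caller, and embedded locks and wait structures set up before the object becomes visible to anyone else. A rough userspace analog with hypothetical names, assuming pthreads and C11 atomics:

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct open_state {
	atomic_int count;               /* object freed when this hits 0 */
	pthread_mutex_t state_lock;     /* would protect per-state lists */
};

static struct open_state *alloc_open_state(void)
{
	struct open_state *state = calloc(1, sizeof(*state));

	if (!state)
		return NULL;
	atomic_init(&state->count, 1);  /* the caller owns one reference */
	pthread_mutex_init(&state->state_lock, NULL);
	return state;
}

static void put_open_state(struct open_state *state)
{
	/* atomic_fetch_sub returns the old value: 1 means we were last */
	if (atomic_fetch_sub(&state->count, 1) == 1) {
		pthread_mutex_destroy(&state->state_lock);
		free(state);
	}
}
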
nfs4proc.c
454 struct nfs4_state *state = exception->state; in nfs4_do_handle_exception() local
464 if (stateid == NULL && state != NULL) in nfs4_do_handle_exception()
465 stateid = nfs4_recoverable_stateid(&state->stateid); in nfs4_do_handle_exception()
498 if (state == NULL) in nfs4_do_handle_exception()
500 ret = nfs4_schedule_stateid_recovery(server, state); in nfs4_do_handle_exception()
632 struct nfs4_state *state, long *timeout) in nfs4_async_handle_error() argument
635 .state = state, in nfs4_async_handle_error()
1379 if (p->state != NULL) in nfs4_opendata_free()
1380 nfs4_put_open_state(p->state); in nfs4_opendata_free()
1400 static bool nfs4_mode_match_open_stateid(struct nfs4_state *state, in nfs4_mode_match_open_stateid() argument
[all …]
nfs4trace.h
626 const struct nfs4_state *state = ctx->state;
633 if (!IS_ERR_OR_NULL(state)) {
634 inode = state->inode;
636 be32_to_cpu(state->stateid.seqid);
638 nfs_stateid_hash(&state->stateid);
640 be32_to_cpu(state->open_stateid.seqid);
642 nfs_stateid_hash(&state->open_stateid);
695 const struct nfs4_state *state
697 TP_ARGS(state),
708 const struct inode *inode = state->inode;
[all …]
/fs/nfsd/
nfs4acl.c
455 init_state(struct posix_acl_state *state, int cnt) in init_state() argument
459 memset(state, 0, sizeof(struct posix_acl_state)); in init_state()
460 state->empty = 1; in init_state()
468 state->users = kzalloc(alloc, GFP_KERNEL); in init_state()
469 if (!state->users) in init_state()
471 state->groups = kzalloc(alloc, GFP_KERNEL); in init_state()
472 if (!state->groups) { in init_state()
473 kfree(state->users); in init_state()
480 free_state(struct posix_acl_state *state) { in free_state() argument
481 kfree(state->users); in free_state()
[all …]
/fs/xfs/libxfs/
xfs_attr.c
56 STATIC int xfs_attr_fillstate(xfs_da_state_t *state);
57 STATIC int xfs_attr_refillstate(xfs_da_state_t *state);
848 struct xfs_da_state *state; in xfs_attr_node_addname() local
862 state = xfs_da_state_alloc(); in xfs_attr_node_addname()
863 state->args = args; in xfs_attr_node_addname()
864 state->mp = mp; in xfs_attr_node_addname()
870 error = xfs_da3_node_lookup_int(state, &retval); in xfs_attr_node_addname()
873 blk = &state->path.blk[ state->path.active-1 ]; in xfs_attr_node_addname()
901 retval = xfs_attr3_leaf_add(blk->bp, state->args); in xfs_attr_node_addname()
903 if (state->path.active == 1) { in xfs_attr_node_addname()
[all …]
xfs_da_btree.c
39 STATIC int xfs_da3_root_split(xfs_da_state_t *state,
42 STATIC int xfs_da3_node_split(xfs_da_state_t *state,
48 STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
51 STATIC void xfs_da3_node_add(xfs_da_state_t *state,
58 STATIC int xfs_da3_root_join(xfs_da_state_t *state,
60 STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
61 STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
63 STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
70 STATIC int xfs_da3_blk_unlink(xfs_da_state_t *state,
91 xfs_da_state_kill_altpath(xfs_da_state_t *state) in xfs_da_state_kill_altpath() argument
[all …]
xfs_dir2_node.c
29 static void xfs_dir2_leafn_rebalance(xfs_da_state_t *state,
562 xfs_da_state_t *state) /* state to fill in */ in xfs_dir2_leafn_lookup_for_addname() argument
599 if (state->extravalid) { in xfs_dir2_leafn_lookup_for_addname()
601 curbp = state->extrablk.bp; in xfs_dir2_leafn_lookup_for_addname()
602 curfdb = state->extrablk.blkno; in xfs_dir2_leafn_lookup_for_addname()
687 state->extravalid = 1; in xfs_dir2_leafn_lookup_for_addname()
688 state->extrablk.bp = curbp; in xfs_dir2_leafn_lookup_for_addname()
689 state->extrablk.index = fi; in xfs_dir2_leafn_lookup_for_addname()
690 state->extrablk.blkno = curfdb; in xfs_dir2_leafn_lookup_for_addname()
697 state->extrablk.magic = XFS_DIR2_FREE_MAGIC; in xfs_dir2_leafn_lookup_for_addname()
[all …]
xfs_attr_leaf.c
53 STATIC void xfs_attr3_leaf_rebalance(xfs_da_state_t *state,
56 STATIC int xfs_attr3_leaf_figure_balance(xfs_da_state_t *state,
1204 struct xfs_da_state *state, in xfs_attr3_leaf_split() argument
1211 trace_xfs_attr_leaf_split(state->args); in xfs_attr3_leaf_split()
1217 error = xfs_da_grow_inode(state->args, &blkno); in xfs_attr3_leaf_split()
1220 error = xfs_attr3_leaf_create(state->args, blkno, &newblk->bp); in xfs_attr3_leaf_split()
1230 xfs_attr3_leaf_rebalance(state, oldblk, newblk); in xfs_attr3_leaf_split()
1231 error = xfs_da3_blk_link(state, oldblk, newblk); in xfs_attr3_leaf_split()
1242 if (state->inleaf) { in xfs_attr3_leaf_split()
1243 trace_xfs_attr_leaf_add_old(state->args); in xfs_attr3_leaf_split()
[all …]
xfs_bmap.c
1167 int state = xfs_bmap_fork_to_state(whichfork); in xfs_iread_extents() local
1265 xfs_iext_insert(ip, &icur, &new, state); in xfs_iread_extents()
1266 trace_xfs_read_extent(ip, &icur, state, _THIS_IP_); in xfs_iread_extents()
1541 int state = xfs_bmap_fork_to_state(whichfork); in xfs_bmap_add_extent_delay_real() local
1583 state |= BMAP_LEFT_FILLING; in xfs_bmap_add_extent_delay_real()
1585 state |= BMAP_RIGHT_FILLING; in xfs_bmap_add_extent_delay_real()
1592 state |= BMAP_LEFT_VALID; in xfs_bmap_add_extent_delay_real()
1594 state |= BMAP_LEFT_DELAY; in xfs_bmap_add_extent_delay_real()
1597 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) && in xfs_bmap_add_extent_delay_real()
1602 state |= BMAP_LEFT_CONTIG; in xfs_bmap_add_extent_delay_real()
[all …]
xfs_da_btree.h
154 int xfs_da3_split(xfs_da_state_t *state);
159 int xfs_da3_join(xfs_da_state_t *state);
160 void xfs_da3_fixhashpath(struct xfs_da_state *state,
166 int xfs_da3_node_lookup_int(xfs_da_state_t *state, int *result);
167 int xfs_da3_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
172 int xfs_da3_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
203 void xfs_da_state_free(xfs_da_state_t *state);
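
Across the XFS hits, xfs_da_state_t is the recurring cursor: xfs_da3_node_lookup_int() records the chain of btree blocks it descended through, and the split/join/rebalance routines then operate on that recorded path (plus an alternate path for sibling checks, which the scrub results further down also exercise). A hypothetical simplification of that cursor, not the real XFS layout:

#define MAX_DEPTH 5

struct da_blk {
	unsigned long blkno;            /* block visited at this level */
	int index;                      /* entry chosen within the block */
};

struct da_path {
	int active;                     /* depth actually in use */
	struct da_blk blk[MAX_DEPTH];   /* root at [0], leaf at [active-1] */
};

struct da_state {
	struct da_path path;            /* primary path from the lookup */
	struct da_path altpath;         /* sibling path for rebalancing */
};

/* The leaf the lookup landed on is the last active path entry,
 * mirroring `&state->path.blk[state->path.active-1]` above. */
static struct da_blk *da_state_leaf(struct da_state *state)
{
	return &state->path.blk[state->path.active - 1];
}
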
/fs/fscache/
operation.c
39 op->state = FSCACHE_OP_ST_INITIALISED; in fscache_operation_init()
69 ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS, in fscache_enqueue_operation()
70 op->state, ==, FSCACHE_OP_ST_CANCELLED); in fscache_enqueue_operation()
99 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING); in fscache_run_op()
101 op->state = FSCACHE_OP_ST_IN_PROGRESS; in fscache_run_op()
128 op->debug_id, object->debug_id, object->state->name); in fscache_report_unexpected_submission()
129 kdebug("objstate=%s [%s]", object->state->name, ostate->name); in fscache_report_unexpected_submission()
165 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED); in fscache_submit_exclusive_op()
173 ostate = object->state; in fscache_submit_exclusive_op()
176 op->state = FSCACHE_OP_ST_PENDING; in fscache_submit_exclusive_op()
[all …]
object.c
51 #define transit_to(state) ({ prefetch(&STATE(state)->work); STATE(state); }) argument
70 #define TRANSIT_TO(state, emask) \ argument
71 { .events = (emask), .transit_to = STATE(state) }
170 const struct fscache_state *state, *new_state; in fscache_object_sm_dispatcher() local
178 object->debug_id, object->state->name, object->events); in fscache_object_sm_dispatcher()
183 state = object->state; in fscache_object_sm_dispatcher()
194 state = t->transit_to; in fscache_object_sm_dispatcher()
195 ASSERT(state->work != NULL); in fscache_object_sm_dispatcher()
206 if (!state->work) { in fscache_object_sm_dispatcher()
208 for (t = state->transitions; t->events; t++) { in fscache_object_sm_dispatcher()
[all …]
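
The object.c hits outline fscache's table-driven state machine: each state owns a transition table of event-mask/destination pairs, and the dispatcher tests pending events against each entry until one matches. A self-contained sketch of the same shape, with invented states and events:

#include <stdio.h>

struct sm_state;

struct sm_transition {
	unsigned events;                /* event bits that fire this entry */
	const struct sm_state *to;      /* destination state */
};

struct sm_state {
	const char *name;
	const struct sm_transition *transitions; /* {0, NULL}-terminated */
};

static const struct sm_state st_ready, st_busy; /* tentative definitions */

#define EV_START 0x1u
#define EV_DONE  0x2u

static const struct sm_transition ready_t[] = {
	{ EV_START, &st_busy },
	{ 0, NULL },
};
static const struct sm_transition busy_t[] = {
	{ EV_DONE, &st_ready },
	{ 0, NULL },
};

static const struct sm_state st_ready = { "READY", ready_t };
static const struct sm_state st_busy  = { "BUSY",  busy_t };

/* Consume one matching event, if any, and return the resulting state. */
static const struct sm_state *dispatch(const struct sm_state *state,
				       unsigned *events)
{
	const struct sm_transition *t;

	for (t = state->transitions; t->events; t++) {
		if (*events & t->events) {
			*events &= ~t->events;
			printf("%s -> %s\n", state->name, t->to->name);
			return t->to;
		}
	}
	return state;   /* nothing pending for this state */
}
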
/fs/quota/
quota.c
119 struct qc_state state; in quota_getinfo() local
126 ret = sb->s_qcop->get_state(sb, &state); in quota_getinfo()
129 tstate = state.s_state + type; in quota_getinfo()
315 static int quota_state_to_flags(struct qc_state *state) in quota_state_to_flags() argument
319 if (state->s_state[USRQUOTA].flags & QCI_ACCT_ENABLED) in quota_state_to_flags()
321 if (state->s_state[USRQUOTA].flags & QCI_LIMITS_ENFORCED) in quota_state_to_flags()
323 if (state->s_state[GRPQUOTA].flags & QCI_ACCT_ENABLED) in quota_state_to_flags()
325 if (state->s_state[GRPQUOTA].flags & QCI_LIMITS_ENFORCED) in quota_state_to_flags()
327 if (state->s_state[PRJQUOTA].flags & QCI_ACCT_ENABLED) in quota_state_to_flags()
329 if (state->s_state[PRJQUOTA].flags & QCI_LIMITS_ENFORCED) in quota_state_to_flags()
[all …]
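
quota_getinfo() and quota_state_to_flags() above read the filesystem's qc_state and fold each quota type's QCI_ACCT_ENABLED / QCI_LIMITS_ENFORCED bits into one flag word. A compact sketch of that fold; the bit layout and type names below are illustrative, not the kernel's:

enum { TYPE_USR, TYPE_GRP, TYPE_PRJ, TYPE_MAX };

#define ACCT_ENABLED    0x1u    /* accounting is on for a type */
#define LIMITS_ENFORCED 0x2u    /* limits are enforced for a type */

struct type_state { unsigned flags; };
struct fs_qstate  { struct type_state s_state[TYPE_MAX]; };

static unsigned state_to_flags(const struct fs_qstate *state)
{
	unsigned out = 0;
	int type;

	for (type = 0; type < TYPE_MAX; type++) {
		/* two output bits per quota type */
		if (state->s_state[type].flags & ACCT_ENABLED)
			out |= 1u << (2 * type);
		if (state->s_state[type].flags & LIMITS_ENFORCED)
			out |= 1u << (2 * type + 1);
	}
	return out;
}
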
/fs/xfs/scrub/
dabtree.c
54 ds->state->path.blk[level].blkno), in xchk_da_process_error()
76 ds->state->path.blk[level].blkno), in xchk_da_set_corrupt()
92 blk = &ds->state->path.blk[level]; in xchk_da_btree_entry()
138 blks = ds->state->path.blk; in xchk_da_btree_hash()
249 memcpy(&ds->state->altpath, &ds->state->path, in xchk_da_btree_block_check_sibling()
250 sizeof(ds->state->altpath)); in xchk_da_btree_block_check_sibling()
257 error = xfs_da3_path_shift(ds->state, &ds->state->altpath, in xchk_da_btree_block_check_sibling()
266 error = xfs_da3_path_shift(ds->state, &ds->state->altpath, in xchk_da_btree_block_check_sibling()
274 if (ds->state->altpath.blk[level].bp) in xchk_da_btree_block_check_sibling()
276 ds->state->altpath.blk[level].bp); in xchk_da_btree_block_check_sibling()
[all …]
/fs/jfs/
jfs_mount.c
237 if (chkSuper(sb) || (sbi->state != FM_CLEAN)) in jfs_mount_rw()
358 sbi->state = le32_to_cpu(j_sb->s_state); in chkSuper()
396 int updateSuper(struct super_block *sb, uint state) in updateSuper() argument
404 if (state == FM_DIRTY) { in updateSuper()
405 sbi->p_state = state; in updateSuper()
407 } else if (state == FM_MOUNT) { in updateSuper()
408 sbi->p_state = sbi->state; in updateSuper()
409 state = FM_DIRTY; in updateSuper()
410 } else if (state == FM_CLEAN) { in updateSuper()
411 state = sbi->p_state; in updateSuper()
[all …]
/fs/afs/
cell.c
230 enum afs_cell_state state; in afs_lookup_cell() local
290 wait_var_event(&cell->state, in afs_lookup_cell()
292 state = smp_load_acquire(&cell->state); /* vs error */ in afs_lookup_cell()
293 state == AFS_CELL_ACTIVE || state == AFS_CELL_FAILED; in afs_lookup_cell()
297 if (state == AFS_CELL_FAILED) { in afs_lookup_cell()
658 _debug("state %u", cell->state); in afs_manage_cell()
659 switch (cell->state) { in afs_manage_cell()
670 if (cell->state == AFS_CELL_FAILED) in afs_manage_cell()
672 smp_store_release(&cell->state, AFS_CELL_UNSET); in afs_manage_cell()
673 wake_up_var(&cell->state); in afs_manage_cell()
[all …]
rxrpc.c
494 call->state = AFS_CALL_COMPLETE; in afs_make_call()
503 enum afs_call_state state; in afs_deliver_to_call() local
509 while (state = READ_ONCE(call->state), in afs_deliver_to_call()
510 state == AFS_CALL_CL_AWAIT_REPLY || in afs_deliver_to_call()
511 state == AFS_CALL_SV_AWAIT_OP_ID || in afs_deliver_to_call()
512 state == AFS_CALL_SV_AWAIT_REQUEST || in afs_deliver_to_call()
513 state == AFS_CALL_SV_AWAIT_ACK in afs_deliver_to_call()
515 if (state == AFS_CALL_SV_AWAIT_ACK) { in afs_deliver_to_call()
540 state = READ_ONCE(call->state); in afs_deliver_to_call()
544 if (state == AFS_CALL_CL_PROC_REPLY) { in afs_deliver_to_call()
[all …]
flock.c
25 static inline void afs_set_lock_state(struct afs_vnode *vnode, enum afs_lock_state state) in afs_set_lock_state() argument
27 _debug("STATE %u -> %u", vnode->lock_state, state); in afs_set_lock_state()
28 vnode->lock_state = state; in afs_set_lock_state()
99 p->fl_u.afs.state = AFS_LOCK_GRANTED; in afs_grant_locks()
126 p->fl_u.afs.state = error; in afs_next_locker()
142 next->fl_u.afs.state = AFS_LOCK_YOUR_TRY; in afs_next_locker()
167 p->fl_u.afs.state = -ENOENT; in afs_kill_lockers_enoent()
482 fl->fl_u.afs.state = AFS_LOCK_PENDING; in afs_do_setlk()
525 fl->fl_u.afs.state = AFS_LOCK_GRANTED; in afs_do_setlk()
532 fl->fl_u.afs.state = AFS_LOCK_GRANTED; in afs_do_setlk()
[all …]
/fs/xfs/
xfs_quotaops.c
57 struct qc_state *state) in xfs_fs_get_quota_state() argument
62 memset(state, 0, sizeof(*state)); in xfs_fs_get_quota_state()
65 state->s_incoredqs = q->qi_dquots; in xfs_fs_get_quota_state()
67 state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED; in xfs_fs_get_quota_state()
69 state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED; in xfs_fs_get_quota_state()
71 state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED; in xfs_fs_get_quota_state()
73 state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED; in xfs_fs_get_quota_state()
75 state->s_state[PRJQUOTA].flags |= QCI_ACCT_ENABLED; in xfs_fs_get_quota_state()
77 state->s_state[PRJQUOTA].flags |= QCI_LIMITS_ENFORCED; in xfs_fs_get_quota_state()
79 xfs_qm_fill_state(&state->s_state[USRQUOTA], mp, q->qi_uquotaip, in xfs_fs_get_quota_state()
[all …]
/fs/ocfs2/dlm/
dlmthread.c
55 if (res->state & flags) { in __dlm_wait_on_lockres_flags()
91 if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY) in __dlm_lockres_unused()
94 if (res->state & (DLM_LOCK_RES_RECOVERING| in __dlm_lockres_unused()
191 res->state &= ~DLM_LOCK_RES_DROPPING_REF; in __dlm_do_purge_lockres()
209 if (res->state & DLM_LOCK_RES_DROPPING_REF) { in dlm_purge_lockres()
216 res->state |= DLM_LOCK_RES_DROPPING_REF; in dlm_purge_lockres()
273 res->state &= ~DLM_LOCK_RES_DROPPING_REF; in dlm_purge_lockres()
318 (lockres->state & DLM_LOCK_RES_MIGRATING) || in dlm_run_purge_list()
324 !unused, lockres->state, in dlm_run_purge_list()
358 BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING| in dlm_shuffle_lists()
[all …]
dlmdebug.c
92 buf, res->owner, res->state); in __dlm_print_one_lock_resource()
503 res->owner, res->state, res->last_used, in dump_lockres()
666 char *state; in debug_state_print() local
674 state = "NEW"; break; in debug_state_print()
676 state = "JOINED"; break; in debug_state_print()
678 state = "SHUTDOWN"; break; in debug_state_print()
680 state = "LEAVING"; break; in debug_state_print()
682 state = "UNKNOWN"; break; in debug_state_print()
694 task_pid_nr(dlm->dlm_thread_task), dlm->node_num, state); in debug_state_print()
772 if (dlm->reco.state == DLM_RECO_STATE_ACTIVE) in debug_state_print()
[all …]
/fs/ceph/
mdsmap.c
26 if (1 == m->m_num_mds && m->m_info[0].state > 0) in ceph_mdsmap_get_random_mds()
31 if (m->m_info[i].state > 0) in ceph_mdsmap_get_random_mds()
39 while (m->m_info[i].state <= 0) in ceph_mdsmap_get_random_mds()
149 s32 mds, inc, state; in ceph_mdsmap_decode() local
184 state = ceph_decode_32(p); in ceph_mdsmap_decode()
210 ceph_mds_state_name(state)); in ceph_mdsmap_decode()
212 if (mds < 0 || state <= 0) in ceph_mdsmap_decode()
228 info->state = state; in ceph_mdsmap_decode()
248 if (i == 0 || m->m_info[i-1].state > 0) in ceph_mdsmap_decode()
395 if (m->m_info[i].state == CEPH_MDS_STATE_ACTIVE) in ceph_mdsmap_is_cluster_available()
/fs/squashfs/
xz_wrapper.c
25 struct xz_dec *state; member
93 stream->state = xz_dec_init(XZ_PREALLOC, comp_opts->dict_size); in squashfs_xz_init()
94 if (stream->state == NULL) { in squashfs_xz_init()
113 xz_dec_end(stream->state); in squashfs_xz_free()
127 xz_dec_reset(stream->state); in squashfs_xz_uncompress()
152 xz_err = xz_dec_run(stream->state, &stream->buf); in squashfs_xz_uncompress()
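
The xz_wrapper.c hits trace the xz-embedded decoder lifecycle: xz_dec_init(XZ_PREALLOC, dict_size) once at setup, xz_dec_reset() plus xz_dec_run() per compressed block, and xz_dec_end() at teardown. A one-shot userspace sketch of that flow, assuming the whole stream fits in the two buffers (squashfs itself loops, refilling xz_buf.in page by page):

#include <stdint.h>
#include <stddef.h>
#include <xz.h>         /* xz-embedded; <linux/xz.h> in the kernel */

static int decompress_one(struct xz_dec *state,
			  const uint8_t *src, size_t src_len,
			  uint8_t *dst, size_t dst_len)
{
	struct xz_buf buf = {
		.in = src,  .in_pos = 0,  .in_size = src_len,
		.out = dst, .out_pos = 0, .out_size = dst_len,
	};
	enum xz_ret ret;

	xz_dec_reset(state);            /* reuse the preallocated decoder */
	ret = xz_dec_run(state, &buf);  /* whole stream in one call */
	return ret == XZ_STREAM_END ? (int)buf.out_pos : -1;
}

/*
 * Typical bracketing, per the hits above (userspace builds of
 * xz-embedded also need xz_crc32_init() once before decoding):
 *
 *	state = xz_dec_init(XZ_PREALLOC, dict_size);
 *	...decompress_one() per block...
 *	xz_dec_end(state);
 */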
