/fs/btrfs/
D | check-integrity.c |
    199  struct btrfsic_state *state;  member
    301  static int btrfsic_process_superblock(struct btrfsic_state *state,
    303  static int btrfsic_process_metablock(struct btrfsic_state *state,
    311  struct btrfsic_state *state,
    322  static int btrfsic_handle_extent_data(struct btrfsic_state *state,
    326  static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
    329  static int btrfsic_map_superblock(struct btrfsic_state *state, u64 bytenr,
    333  static int btrfsic_read_block(struct btrfsic_state *state,
    335  static void btrfsic_dump_database(struct btrfsic_state *state);
    336  static int btrfsic_test_for_metadata(struct btrfsic_state *state,
    [all …]
|
D | extent_io.c |
     28  static inline bool extent_state_in_tree(const struct extent_state *state)  in extent_state_in_tree() argument
     30  return !RB_EMPTY_NODE(&state->rb_node);  in extent_state_in_tree()
     62  struct extent_state *state;  in btrfs_leak_debug_check() local
     66  state = list_entry(states.next, struct extent_state, leak_list);  in btrfs_leak_debug_check()
     68  state->start, state->end, state->state,  in btrfs_leak_debug_check()
     69  extent_state_in_tree(state),  in btrfs_leak_debug_check()
     70  atomic_read(&state->refs));  in btrfs_leak_debug_check()
     71  list_del(&state->leak_list);  in btrfs_leak_debug_check()
     72  kmem_cache_free(extent_state_cache, state);  in btrfs_leak_debug_check()
    201  tree->state = RB_ROOT;  in extent_io_tree_init()
    [all …]
|
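
The extent_io.c hits above come from btrfs's leak-debugging helper: every extent_state sits on a global leak list while allocated, and btrfs_leak_debug_check() walks whatever is still listed at teardown and reports it. A minimal userspace sketch of that pattern follows; the names and the plain malloc/free (instead of a kmem cache and the kernel list helpers) are illustrative only.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the kernel's extent_state + leak_list pattern. */
struct tracked_state {
	unsigned long start, end;
	struct tracked_state *prev, *next;   /* links on the global leak list */
};

static struct tracked_state *leak_head;

static struct tracked_state *state_alloc(unsigned long start, unsigned long end)
{
	struct tracked_state *s = calloc(1, sizeof(*s));

	if (!s)
		return NULL;
	s->start = start;
	s->end = end;
	s->next = leak_head;                 /* register on the leak list */
	if (leak_head)
		leak_head->prev = s;
	leak_head = s;
	return s;
}

static void state_free(struct tracked_state *s)
{
	if (s->prev)
		s->prev->next = s->next;
	else
		leak_head = s->next;
	if (s->next)
		s->next->prev = s->prev;
	free(s);
}

/* Analogue of btrfs_leak_debug_check(): complain about anything still listed. */
static void leak_debug_check(void)
{
	while (leak_head) {
		struct tracked_state *s = leak_head;

		fprintf(stderr, "leaked state [%lu, %lu]\n", s->start, s->end);
		state_free(s);
	}
}

int main(void)
{
	struct tracked_state *b;

	state_alloc(0, 4095);                /* never freed: reported below */
	b = state_alloc(4096, 8191);
	state_free(b);
	leak_debug_check();
	return 0;
}
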
D | transaction.c |
    143  if (btrfs_blocked_trans_types[cur_trans->state] & type) {  in join_transaction()
    190  cur_trans->state = TRANS_STATE_RUNNING;  in join_transaction()
    245  if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&  in record_root_in_trans()
    255  set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);  in record_root_in_trans()
    294  clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);  in record_root_in_trans()
    303  if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))  in btrfs_record_root_in_trans()
    312  !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))  in btrfs_record_root_in_trans()
    324  return (trans->state >= TRANS_STATE_BLOCKED &&  in is_transaction_blocked()
    325  trans->state < TRANS_STATE_UNBLOCKED &&  in is_transaction_blocked()
    344  cur_trans->state >= TRANS_STATE_UNBLOCKED ||  in wait_current_trans()
    [all …]
|
/fs/nfsd/ |
D | nfs4acl.c |
    467  init_state(struct posix_acl_state *state, int cnt)  in init_state() argument
    471  memset(state, 0, sizeof(struct posix_acl_state));  in init_state()
    472  state->empty = 1;  in init_state()
    480  state->users = kzalloc(alloc, GFP_KERNEL);  in init_state()
    481  if (!state->users)  in init_state()
    483  state->groups = kzalloc(alloc, GFP_KERNEL);  in init_state()
    484  if (!state->groups) {  in init_state()
    485  kfree(state->users);  in init_state()
    492  free_state(struct posix_acl_state *state) {  in free_state() argument
    493  kfree(state->users);  in free_state()
    [all …]
|
/fs/nfs/ |
D | nfs4state.c |
    623  struct nfs4_state *state;  in nfs4_alloc_open_state() local
    625  state = kzalloc(sizeof(*state), GFP_NOFS);  in nfs4_alloc_open_state()
    626  if (!state)  in nfs4_alloc_open_state()
    628  atomic_set(&state->count, 1);  in nfs4_alloc_open_state()
    629  INIT_LIST_HEAD(&state->lock_states);  in nfs4_alloc_open_state()
    630  spin_lock_init(&state->state_lock);  in nfs4_alloc_open_state()
    631  seqlock_init(&state->seqlock);  in nfs4_alloc_open_state()
    632  return state;  in nfs4_alloc_open_state()
    636  nfs4_state_set_mode_locked(struct nfs4_state *state, fmode_t fmode)  in nfs4_state_set_mode_locked() argument
    638  if (state->state == fmode)  in nfs4_state_set_mode_locked()
    [all …]
|
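
nfs4_alloc_open_state() above shows the common shape for creating such a state object: allocate zeroed memory, start the refcount at one, and initialise the embedded locks and list heads before handing it out. A hedged userspace sketch of the same shape, with a pthread mutex standing in for the kernel spinlock/seqlock and illustrative names throughout:

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

/* Illustrative stand-in for a refcounted per-open state object. */
struct open_state {
	atomic_int count;             /* reference count, starts at 1 */
	pthread_mutex_t state_lock;   /* stands in for the kernel spinlock/seqlock */
	unsigned int mode;
};

static struct open_state *alloc_open_state(void)
{
	struct open_state *state = calloc(1, sizeof(*state));   /* zeroed, like kzalloc() */

	if (!state)
		return NULL;
	atomic_store(&state->count, 1);
	pthread_mutex_init(&state->state_lock, NULL);
	return state;
}

static void put_open_state(struct open_state *state)
{
	/* drop a reference; the last one tears the object down */
	if (atomic_fetch_sub(&state->count, 1) == 1) {
		pthread_mutex_destroy(&state->state_lock);
		free(state);
	}
}

int main(void)
{
	struct open_state *state = alloc_open_state();

	if (!state)
		return 1;
	put_open_state(state);
	return 0;
}
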
D | nfs4proc.c |
     87  struct nfs4_state *state, struct nfs4_label *ilabel,
    348  struct nfs4_state *state = exception->state;  in nfs4_handle_exception() local
    362  if (state == NULL)  in nfs4_handle_exception()
    364  ret = nfs4_schedule_stateid_recovery(server, state);  in nfs4_handle_exception()
    371  if (state == NULL)  in nfs4_handle_exception()
    373  ret = nfs4_schedule_stateid_recovery(server, state);  in nfs4_handle_exception()
    378  if (state != NULL) {  in nfs4_handle_exception()
    379  ret = nfs4_schedule_stateid_recovery(server, state);  in nfs4_handle_exception()
    918  struct nfs4_state *state;  member
   1063  if (p->state != NULL)  in nfs4_opendata_free()
    [all …]
|
/fs/xfs/libxfs/ |
D | xfs_da_btree.c |
     58  STATIC int xfs_da3_root_split(xfs_da_state_t *state,
     61  STATIC int xfs_da3_node_split(xfs_da_state_t *state,
     67  STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
     70  STATIC void xfs_da3_node_add(xfs_da_state_t *state,
     77  STATIC int xfs_da3_root_join(xfs_da_state_t *state,
     79  STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
     80  STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
     82  STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
     89  STATIC int xfs_da3_blk_unlink(xfs_da_state_t *state,
    110  xfs_da_state_kill_altpath(xfs_da_state_t *state)  in xfs_da_state_kill_altpath() argument
    [all …]
|
D | xfs_dir2_node.c |
     44  static void xfs_dir2_leafn_rebalance(xfs_da_state_t *state,
    521  xfs_da_state_t *state)  /* state to fill in */  in xfs_dir2_leafn_lookup_for_addname() argument
    558  if (state->extravalid) {  in xfs_dir2_leafn_lookup_for_addname()
    560  curbp = state->extrablk.bp;  in xfs_dir2_leafn_lookup_for_addname()
    561  curfdb = state->extrablk.blkno;  in xfs_dir2_leafn_lookup_for_addname()
    646  state->extravalid = 1;  in xfs_dir2_leafn_lookup_for_addname()
    647  state->extrablk.bp = curbp;  in xfs_dir2_leafn_lookup_for_addname()
    648  state->extrablk.index = fi;  in xfs_dir2_leafn_lookup_for_addname()
    649  state->extrablk.blkno = curfdb;  in xfs_dir2_leafn_lookup_for_addname()
    656  state->extrablk.magic = XFS_DIR2_FREE_MAGIC;  in xfs_dir2_leafn_lookup_for_addname()
    [all …]
|
D | xfs_attr.c |
     75  STATIC int xfs_attr_fillstate(xfs_da_state_t *state);
     76  STATIC int xfs_attr_refillstate(xfs_da_state_t *state);
    882  xfs_da_state_t *state;  in xfs_attr_node_addname() local
    896  state = xfs_da_state_alloc();  in xfs_attr_node_addname()
    897  state->args = args;  in xfs_attr_node_addname()
    898  state->mp = mp;  in xfs_attr_node_addname()
    904  error = xfs_da3_node_lookup_int(state, &retval);  in xfs_attr_node_addname()
    907  blk = &state->path.blk[ state->path.active-1 ];  in xfs_attr_node_addname()
    935  retval = xfs_attr3_leaf_add(blk->bp, state->args);  in xfs_attr_node_addname()
    937  if (state->path.active == 1) {  in xfs_attr_node_addname()
    [all …]
|
D | xfs_bmap.c |
    169  xfs_exntst_t state)  in xfs_bmbt_update() argument
    173  xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);  in xfs_bmbt_update()
    532  int state = 0;  in xfs_bmap_trace_exlist() local
    535  state |= BMAP_ATTRFORK;  in xfs_bmap_trace_exlist()
   1757  int state = 0;  /* state bits, accessed thru macros */  in xfs_bmap_add_extent_delay_real() local
   1795  state |= BMAP_LEFT_FILLING;  in xfs_bmap_add_extent_delay_real()
   1797  state |= BMAP_RIGHT_FILLING;  in xfs_bmap_add_extent_delay_real()
   1804  state |= BMAP_LEFT_VALID;  in xfs_bmap_add_extent_delay_real()
   1808  state |= BMAP_LEFT_DELAY;  in xfs_bmap_add_extent_delay_real()
   1811  if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&  in xfs_bmap_add_extent_delay_real()
    [all …]
|
D | xfs_attr_leaf.c |
     69  STATIC void xfs_attr3_leaf_rebalance(xfs_da_state_t *state,
     72  STATIC int xfs_attr3_leaf_figure_balance(xfs_da_state_t *state,
   1006  struct xfs_da_state *state,  in xfs_attr3_leaf_split() argument
   1013  trace_xfs_attr_leaf_split(state->args);  in xfs_attr3_leaf_split()
   1019  error = xfs_da_grow_inode(state->args, &blkno);  in xfs_attr3_leaf_split()
   1022  error = xfs_attr3_leaf_create(state->args, blkno, &newblk->bp);  in xfs_attr3_leaf_split()
   1032  xfs_attr3_leaf_rebalance(state, oldblk, newblk);  in xfs_attr3_leaf_split()
   1033  error = xfs_da3_blk_link(state, oldblk, newblk);  in xfs_attr3_leaf_split()
   1044  if (state->inleaf) {  in xfs_attr3_leaf_split()
   1045  trace_xfs_attr_leaf_add_old(state->args);  in xfs_attr3_leaf_split()
    [all …]
|
D | xfs_da_btree.h |
    167  int xfs_da3_split(xfs_da_state_t *state);
    172  int xfs_da3_join(xfs_da_state_t *state);
    173  void xfs_da3_fixhashpath(struct xfs_da_state *state,
    179  int xfs_da3_node_lookup_int(xfs_da_state_t *state, int *result);
    180  int xfs_da3_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
    185  int xfs_da3_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
    216  void xfs_da_state_free(xfs_da_state_t *state);
|
/fs/fscache/ |
D | operation.c |
     40  ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);  in fscache_enqueue_operation()
     67  ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);  in fscache_run_op()
     69  op->state = FSCACHE_OP_ST_IN_PROGRESS;  in fscache_run_op()
     90  ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);  in fscache_submit_exclusive_op()
     98  op->state = FSCACHE_OP_ST_PENDING;  in fscache_submit_exclusive_op()
    157  op->debug_id, object->debug_id, object->state->name);  in fscache_report_unexpected_submission()
    158  kdebug("objstate=%s [%s]", object->state->name, ostate->name);  in fscache_report_unexpected_submission()
    195  ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);  in fscache_submit_op()
    203  ostate = object->state;  in fscache_submit_op()
    206  op->state = FSCACHE_OP_ST_PENDING;  in fscache_submit_op()
    [all …]
|
D | object.c |
     54  #define transit_to(state) ({ prefetch(&STATE(state)->work); STATE(state); })  argument
     73  #define TRANSIT_TO(state, emask) \  argument
     74  { .events = (emask), .transit_to = STATE(state) }
    170  const struct fscache_state *state, *new_state;  in fscache_object_sm_dispatcher() local
    177  object->debug_id, object->state->name, object->events);  in fscache_object_sm_dispatcher()
    182  state = object->state;  in fscache_object_sm_dispatcher()
    192  state = t->transit_to;  in fscache_object_sm_dispatcher()
    193  ASSERT(state->work != NULL);  in fscache_object_sm_dispatcher()
    203  if (!state->work) {  in fscache_object_sm_dispatcher()
    205  for (t = state->transitions; t->events; t++) {  in fscache_object_sm_dispatcher()
    [all …]
|
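
The fscache object.c hits belong to a table-driven state machine: each state carries a table of transitions keyed by an event mask, and the dispatcher follows the first transition whose mask overlaps the pending events, then runs the new state's work function. The sketch below shows that dispatch shape with made-up states and events, not the actual fscache definitions.

#include <stdio.h>

struct sm_state;

struct sm_transition {
	unsigned int events;            /* event mask that triggers it; 0 ends the table */
	const struct sm_state *to;
};

struct sm_state {
	const char *name;
	void (*work)(void);             /* what to do on entering this state */
	struct sm_transition transitions[4];
};

static void do_init(void)  { puts("initialising"); }
static void do_ready(void) { puts("ready"); }

#define EV_DONE   0x1

static const struct sm_state S_READY = {
	.name = "READY", .work = do_ready,
	.transitions = { { 0, NULL } },
};
static const struct sm_state S_INIT = {
	.name = "INIT", .work = do_init,
	.transitions = { { EV_DONE, &S_READY }, { 0, NULL } },
};

/* Dispatcher: follow the first transition whose mask matches the events. */
static const struct sm_state *dispatch(const struct sm_state *state,
				       unsigned int events)
{
	const struct sm_transition *t;

	for (t = state->transitions; t->events; t++) {
		if (t->events & events) {
			state = t->to;
			if (state->work)
				state->work();
			break;
		}
	}
	return state;
}

int main(void)
{
	const struct sm_state *cur = &S_INIT;

	cur->work();
	cur = dispatch(cur, EV_DONE);
	printf("now in %s\n", cur->name);
	return 0;
}

Keeping the legal transitions as data is what lets the dispatcher above print generic trace lines about the object's state and pending events.
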
/fs/jfs/ |
D | jfs_mount.c |
    250  if (chkSuper(sb) || (sbi->state != FM_CLEAN))  in jfs_mount_rw()
    371  sbi->state = le32_to_cpu(j_sb->s_state);  in chkSuper()
    409  int updateSuper(struct super_block *sb, uint state)  in updateSuper() argument
    417  if (state == FM_DIRTY) {  in updateSuper()
    418  sbi->p_state = state;  in updateSuper()
    420  } else if (state == FM_MOUNT) {  in updateSuper()
    421  sbi->p_state = sbi->state;  in updateSuper()
    422  state = FM_DIRTY;  in updateSuper()
    423  } else if (state == FM_CLEAN) {  in updateSuper()
    424  state = sbi->p_state;  in updateSuper()
    [all …]
|
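
updateSuper() above keeps two copies of the superblock state: marking FM_MOUNT saves the state found on disk into p_state and writes FM_DIRTY, while marking FM_CLEAN writes back whatever was saved. A small sketch of that save/restore logic, using illustrative FM_* values rather than the real JFS constants:

#include <stdio.h>

/* Illustrative stand-ins for the JFS FM_* superblock states. */
enum fm_state { FM_CLEAN, FM_MOUNT, FM_DIRTY };

struct sb_info {
	enum fm_state state;    /* state as written to the superblock */
	enum fm_state p_state;  /* state saved across a mount */
};

static void update_super(struct sb_info *sbi, enum fm_state state)
{
	if (state == FM_DIRTY) {
		sbi->p_state = state;
	} else if (state == FM_MOUNT) {
		sbi->p_state = sbi->state;   /* remember what was found on disk */
		state = FM_DIRTY;            /* and mark the fs busy */
	} else if (state == FM_CLEAN) {
		state = sbi->p_state;        /* restore the remembered state */
	}
	sbi->state = state;
}

int main(void)
{
	struct sb_info sbi = { .state = FM_CLEAN };

	update_super(&sbi, FM_MOUNT);   /* mount leaves the sb marked dirty */
	printf("after mount:   %d\n", sbi.state);
	update_super(&sbi, FM_CLEAN);   /* unmount puts back the saved state */
	printf("after unmount: %d\n", sbi.state);
	return 0;
}
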
/fs/afs/ |
D | rxrpc.c |
    316  call->state = AFS_CALL_AWAIT_REPLY;  in afs_send_pages()
    397  call->state = AFS_CALL_AWAIT_REPLY;  in afs_make_call()
    465  while ((call->state == AFS_CALL_AWAIT_REPLY ||  in afs_deliver_to_call()
    466  call->state == AFS_CALL_AWAIT_OP_ID ||  in afs_deliver_to_call()
    467  call->state == AFS_CALL_AWAIT_REQUEST ||  in afs_deliver_to_call()
    468  call->state == AFS_CALL_AWAIT_ACK) &&  in afs_deliver_to_call()
    478  call->state == AFS_CALL_AWAIT_REPLY)  in afs_deliver_to_call()
    479  call->state = AFS_CALL_COMPLETE;  in afs_deliver_to_call()
    489  if (call->state != AFS_CALL_AWAIT_REPLY)  in afs_deliver_to_call()
    495  call->state = AFS_CALL_ERROR;  in afs_deliver_to_call()
    [all …]
|
D | vlocation.c |
    178  vl->state = AFS_VL_NEW;  in afs_vlocation_alloc()
    411  vl->state = AFS_VL_CREATING;  in afs_vlocation_lookup()
    419  vl->state = AFS_VL_VALID;  in afs_vlocation_lookup()
    446  while (vl->state != AFS_VL_VALID) {  in afs_vlocation_lookup()
    447  afs_vlocation_state_t state = vl->state;  in afs_vlocation_lookup() local
    449  _debug("invalid [state %d]", state);  in afs_vlocation_lookup()
    451  if (state == AFS_VL_NEW || state == AFS_VL_NO_VOLUME) {  in afs_vlocation_lookup()
    452  vl->state = AFS_VL_CREATING;  in afs_vlocation_lookup()
    463  vl->state == AFS_VL_NEW ||  in afs_vlocation_lookup()
    464  vl->state == AFS_VL_VALID ||  in afs_vlocation_lookup()
    [all …]
|
D | volume.c |
    223  int ret, state, loop;  in afs_volume_pick_fileserver() local
    249  state = server->fs_state;  in afs_volume_pick_fileserver()
    251  _debug("consider %d [%d]", loop, state);  in afs_volume_pick_fileserver()
    253  switch (state) {  in afs_volume_pick_fileserver()
    264  ret = state;  in afs_volume_pick_fileserver()
    270  ret = state;  in afs_volume_pick_fileserver()
    277  ret = state;  in afs_volume_pick_fileserver()
    286  ret = state;  in afs_volume_pick_fileserver()
|
/fs/ceph/ |
D | mdsmap.c |
     25  if (1 == m->m_max_mds && m->m_info[0].state > 0)  in ceph_mdsmap_get_random_mds()
     30  if (m->m_info[i].state > 0)  in ceph_mdsmap_get_random_mds()
     39  while (m->m_info[i].state <= 0)  in ceph_mdsmap_get_random_mds()
     88  s32 mds, inc, state;  in ceph_mdsmap_decode() local
    110  state = ceph_decode_32(p);  in ceph_mdsmap_decode()
    129  ceph_mds_state_name(state));  in ceph_mdsmap_decode()
    131  if (mds < 0 || mds >= m->m_max_mds || state <= 0)  in ceph_mdsmap_decode()
    136  info->state = state;  in ceph_mdsmap_decode()
|
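
ceph_mdsmap_get_random_mds() treats a positive state as "this MDS is usable": it counts the usable entries, picks a random index among them, and then skips over unusable slots to find it. A hedged userspace version of that selection:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Pick a random index whose state is > 0; return -1 if none qualify. */
static int pick_random_active(const int *state, int n)
{
	int active = 0, i, r;

	for (i = 0; i < n; i++)
		if (state[i] > 0)
			active++;
	if (active == 0)
		return -1;

	r = rand() % active;              /* which active slot we want */
	for (i = 0; ; i++) {
		if (state[i] > 0 && r-- == 0)
			return i;
	}
}

int main(void)
{
	int state[] = { 0, 2, -1, 1, 3 }; /* only indexes 1, 3 and 4 are active */

	srand((unsigned)time(NULL));
	printf("picked slot %d\n", pick_random_active(state, 5));
	return 0;
}
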
/fs/ocfs2/dlm/ |
D | dlmthread.c |
     70  if (res->state & flags) {  in __dlm_wait_on_lockres_flags()
    106  if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)  in __dlm_lockres_unused()
    109  if (res->state & DLM_LOCK_RES_RECOVERING)  in __dlm_lockres_unused()
    177  res->state |= DLM_LOCK_RES_DROPPING_REF;  in dlm_purge_lockres()
    217  res->state &= ~DLM_LOCK_RES_DROPPING_REF;  in dlm_purge_lockres()
    262  (lockres->state & DLM_LOCK_RES_MIGRATING) ||  in dlm_run_purge_list()
    268  !unused, lockres->state,  in dlm_run_purge_list()
    302  BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|  in dlm_shuffle_lists()
    464  if (res->state & (DLM_LOCK_RES_MIGRATING |  in __dlm_dirty_lockres()
    472  res->state |= DLM_LOCK_RES_DIRTY;  in __dlm_dirty_lockres()
    [all …]
|
D | dlmdebug.c |
    107  buf, res->owner, res->state);  in __dlm_print_one_lock_resource()
    538  res->owner, res->state, res->last_used,  in dump_lockres()
    701  char *state;  in debug_state_print() local
    709  state = "NEW"; break;  in debug_state_print()
    711  state = "JOINED"; break;  in debug_state_print()
    713  state = "SHUTDOWN"; break;  in debug_state_print()
    715  state = "LEAVING"; break;  in debug_state_print()
    717  state = "UNKNOWN"; break;  in debug_state_print()
    729  task_pid_nr(dlm->dlm_thread_task), dlm->node_num, state);  in debug_state_print()
    807  if (dlm->reco.state == DLM_RECO_STATE_ACTIVE)  in debug_state_print()
    [all …]
|
/fs/ntfs/ |
D | inode.h |
     51  unsigned long state;  /* NTFS specific flags describing this inode.  member
    185  return test_bit(NI_##flag, &(ni)->state); \
    189  set_bit(NI_##flag, &(ni)->state); \
    193  clear_bit(NI_##flag, &(ni)->state); \
    202  return test_and_set_bit(NI_##flag, &(ni)->state); \
    206  return test_and_clear_bit(NI_##flag, &(ni)->state); \
|
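
The inode.h hits are from a macro that stamps out a test/set/clear helper trio per flag, all operating on the single unsigned long state word. The sketch below reproduces the macro pattern with plain (non-atomic) bit operations and invented flag names; the kernel versions use the atomic test_bit()/set_bit()/clear_bit() family instead.

#include <stdio.h>

struct my_inode {
	unsigned long state;   /* per-inode flag bits */
};

/* Generate test/set/clear helpers for one flag bit, NTFS-inode style. */
#define DEFINE_INODE_FLAG(flag, bit)					\
static inline int  Ino##flag(const struct my_inode *ni)		\
{ return !!(ni->state & (1UL << (bit))); }				\
static inline void InoSet##flag(struct my_inode *ni)			\
{ ni->state |= (1UL << (bit)); }					\
static inline void InoClear##flag(struct my_inode *ni)			\
{ ni->state &= ~(1UL << (bit)); }

DEFINE_INODE_FLAG(Dirty, 0)
DEFINE_INODE_FLAG(Compressed, 1)

int main(void)
{
	struct my_inode ino = { 0 };

	InoSetDirty(&ino);
	printf("dirty=%d compressed=%d\n", InoDirty(&ino), InoCompressed(&ino));
	InoClearDirty(&ino);
	printf("dirty=%d\n", InoDirty(&ino));
	return 0;
}
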
/fs/gfs2/ |
D | glock.h |
    185  extern void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
    187  extern void gfs2_holder_reinit(unsigned int state, unsigned flags,
    198  unsigned int state, int flags,
    218  unsigned int state, int flags,  in gfs2_glock_nq_init() argument
    223  gfs2_holder_init(gl, state, flags, gh);  in gfs2_glock_nq_init()
    232  extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
|
/fs/squashfs/ |
D | xz_wrapper.c |
     38  struct xz_dec *state;  member
    106  stream->state = xz_dec_init(XZ_PREALLOC, comp_opts->dict_size);  in squashfs_xz_init()
    107  if (stream->state == NULL) {  in squashfs_xz_init()
    126  xz_dec_end(stream->state);  in squashfs_xz_free()
    141  xz_dec_reset(stream->state);  in squashfs_xz_uncompress()
    174  xz_err = xz_dec_run(stream->state, &stream->buf);  in squashfs_xz_uncompress()
|
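
The squashfs wrapper keeps one decoder state per stream: created once at init, reset before each block is decompressed, and destroyed when the stream is freed. Below is a rough lifecycle sketch with a stand-in decoder object; it is not the xz_dec API, only the allocate/reset/reuse/free shape the hits above trace.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a decoder object such as struct xz_dec. */
struct dec_state { unsigned long blocks_done; };

struct stream {
	struct dec_state *state;   /* reusable decoder state, one per stream */
};

static int stream_init(struct stream *s)
{
	s->state = calloc(1, sizeof(*s->state));   /* plays the role of xz_dec_init() */
	return s->state ? 0 : -1;
}

static void stream_reset(struct stream *s)
{
	/* plays the role of xz_dec_reset(): make the state ready for the next block */
	(void)s;
}

static int stream_uncompress_block(struct stream *s, const char *in, char *out)
{
	stream_reset(s);
	/* real decoding would run here; this toy just counts blocks */
	s->state->blocks_done++;
	(void)in; (void)out;
	return 0;
}

static void stream_free(struct stream *s)
{
	free(s->state);                            /* plays the role of xz_dec_end() */
	s->state = NULL;
}

int main(void)
{
	struct stream s;
	char out[16];

	if (stream_init(&s))
		return 1;
	stream_uncompress_block(&s, "block0", out);
	stream_uncompress_block(&s, "block1", out);
	printf("decoded %lu blocks\n", s.state->blocks_done);
	stream_free(&s);
	return 0;
}
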
/fs/ocfs2/cluster/ |
D | masklog.c |
     38  char *state;  in mlog_mask_show() local
     41  state = "allow";  in mlog_mask_show()
     43  state = "deny";  in mlog_mask_show()
     45  state = "off";  in mlog_mask_show()
     47  return snprintf(buf, PAGE_SIZE, "%s\n", state);  in mlog_mask_show()
|
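
mlog_mask_show() simply renders a three-way mask setting as a string into a sysfs buffer. The same shape as a freestanding sketch, with an illustrative enum standing in for the mask test and PAGE_SIZE:

#include <stdio.h>

enum mask_setting { MASK_OFF, MASK_ALLOW, MASK_DENY };

/* Render a mask setting into buf, mirroring the allow/deny/off strings above. */
static int mask_show(char *buf, size_t len, enum mask_setting setting)
{
	const char *state;

	if (setting == MASK_ALLOW)
		state = "allow";
	else if (setting == MASK_DENY)
		state = "deny";
	else
		state = "off";
	return snprintf(buf, len, "%s\n", state);
}

int main(void)
{
	char buf[16];

	mask_show(buf, sizeof(buf), MASK_DENY);
	fputs(buf, stdout);
	return 0;
}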