/fs/ext4/
D | extents.c |
    195  int depth = path->p_depth;  in ext4_ext_find_goal()  [local]
    215  ex = path[depth].p_ext;  in ext4_ext_find_goal()
    228  if (path[depth].p_bh)  in ext4_ext_find_goal()
    229  return path[depth].p_bh->b_blocknr;  in ext4_ext_find_goal()
    324  ext4_ext_max_entries(struct inode *inode, int depth)  in ext4_ext_max_entries()  [argument]
    328  if (depth == ext_depth(inode)) {  in ext4_ext_max_entries()
    329  if (depth == 0)  in ext4_ext_max_entries()
    334  if (depth == 0)  in ext4_ext_max_entries()
    370  int depth)  in ext4_valid_extent_entries()  [argument]
    381  if (depth == 0) {  in ext4_valid_extent_entries()
    [all …]
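The ext4_ext_find_goal() lines above suggest a simple heuristic for picking an allocation goal from an extent-tree path. Below is a minimal kernel-style sketch of that heuristic, not the actual ext4 implementation: the function name goal_from_path is made up, and the helpers ext4_ext_pblock() and ext4_inode_to_goal_block() are assumed to be available from ext4's headers.

/* Hypothetical sketch: prefer a block near the extent found at the
 * path's leaf depth, else near the index block read to reach it. */
static ext4_fsblk_t goal_from_path(struct inode *inode,
                                   struct ext4_ext_path *path,
                                   ext4_lblk_t block)
{
    int depth = path->p_depth;              /* leaf level of the path */
    struct ext4_extent *ex = path[depth].p_ext;

    if (ex)                                 /* allocate near the nearest extent */
        return ext4_ext_pblock(ex) + (block - le32_to_cpu(ex->ee_block));
    if (path[depth].p_bh)                   /* else near the index block itself */
        return path[depth].p_bh->b_blocknr;
    return ext4_inode_to_goal_block(inode); /* fall back to a per-inode goal */
}
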
D | indirect.c |
    144  static Indirect *ext4_get_branch(struct inode *inode, int depth,  in ext4_get_branch()  [argument]
    159  while (--depth) {  in ext4_get_branch()
    539  int depth;  in ext4_ind_map_blocks()  [local]
    546  depth = ext4_block_to_path(inode, map->m_lblk, offsets,  in ext4_ind_map_blocks()
    549  if (depth == 0)  in ext4_ind_map_blocks()
    552  partial = ext4_get_branch(inode, depth, offsets, chain, &err);  in ext4_ind_map_blocks()
    556  first_block = le32_to_cpu(chain[depth - 1].key);  in ext4_ind_map_blocks()
    562  blk = le32_to_cpu(*(chain[depth-1].p + count));  in ext4_ind_map_blocks()
    584  for (i = partial - chain + 1; i < depth; i++)  in ext4_ind_map_blocks()
    621  indirect_blks = (chain + depth) - partial - 1;  in ext4_ind_map_blocks()
    [all …]
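Here depth is the number of levels in an indirect-block branch: a block_to_path-style helper turns a logical block number into one offset per level, and get_branch then walks that many blocks. The same pattern recurs in the minix, sysv, ufs, and ext2 entries below. The standalone C sketch that follows illustrates only the offset computation; the constants and the function name are invented for illustration and do not match any particular filesystem's geometry.

#include <stdio.h>

#define DIRECT_BLOCKS  12    /* assumption: 12 direct pointers in the inode */
#define PTRS_PER_BLOCK 256   /* assumption: 1 KiB blocks, 4-byte block pointers */

/* Split a logical block number into per-level offsets; the return value
 * is the branch depth (0 meaning the block is out of range). */
static int block_to_path(unsigned long block, unsigned offsets[4])
{
    int depth = 0;
    const unsigned long dind = (unsigned long)PTRS_PER_BLOCK * PTRS_PER_BLOCK;

    if (block < DIRECT_BLOCKS) {
        offsets[depth++] = block;
    } else if ((block -= DIRECT_BLOCKS) < PTRS_PER_BLOCK) {
        offsets[depth++] = DIRECT_BLOCKS;        /* indirect pointer slot */
        offsets[depth++] = block;
    } else if ((block -= PTRS_PER_BLOCK) < dind) {
        offsets[depth++] = DIRECT_BLOCKS + 1;    /* double-indirect slot */
        offsets[depth++] = block / PTRS_PER_BLOCK;
        offsets[depth++] = block % PTRS_PER_BLOCK;
    } else if ((block -= dind) < dind * PTRS_PER_BLOCK) {
        offsets[depth++] = DIRECT_BLOCKS + 2;    /* triple-indirect slot */
        offsets[depth++] = block / dind;
        offsets[depth++] = (block / PTRS_PER_BLOCK) % PTRS_PER_BLOCK;
        offsets[depth++] = block % PTRS_PER_BLOCK;
    }
    return depth;
}

int main(void)
{
    unsigned offsets[4];
    int i, depth = block_to_path(70000, offsets);

    printf("depth=%d offsets:", depth);
    for (i = 0; i < depth; i++)
        printf(" %u", offsets[i]);
    printf("\n");
    return 0;
}
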
/fs/quota/
D | quota_tree.c |
    26  static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)  in __get_index()  [argument]
    30  depth = info->dqi_qtree_depth - depth - 1;  in __get_index()
    31  while (depth--)  in __get_index()
    36  static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)  in get_index()  [argument]
    40  return __get_index(info, id, depth);  in get_index()
    334  uint *treeblk, int depth)  in do_insert_tree()  [argument]
    359  newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);  in do_insert_tree()
    362  if (depth == info->dqi_qtree_depth - 1) {  in do_insert_tree()
    368  dquot->dq_id, depth)]));  in do_insert_tree()
    375  ret = do_insert_tree(info, dquot, &newblk, depth+1);  in do_insert_tree()
    [all …]
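In the quota tree, depth selects which "digit" of the id picks the child slot at that level, with the least-significant digit used at the deepest level. The standalone sketch below reproduces that arithmetic outside the kernel; the entries-per-block value and the tree depth are invented parameters (the real code derives them from qtree_mem_dqinfo), and the function name is illustrative.

#include <stdio.h>

/* Child slot of `id` at tree level `depth` (0 = root), for a radix tree
 * of `tree_depth` levels with `epb` entries per block. */
static int slot_for_id(unsigned int id, int depth, int tree_depth, int epb)
{
    int skip = tree_depth - depth - 1;   /* less-significant digits to drop */

    while (skip--)
        id /= epb;
    return id % epb;
}

int main(void)
{
    const int tree_depth = 4, epb = 256;
    unsigned int id = 123456;
    int depth;

    for (depth = 0; depth < tree_depth; depth++)
        printf("level %d -> slot %d\n", depth,
               slot_for_id(id, depth, tree_depth, epb));
    return 0;
}
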
/fs/reiserfs/
D | lock.c |
    55  int depth;  in reiserfs_write_unlock_nested()  [local]
    61  depth = sb_i->lock_depth;  in reiserfs_write_unlock_nested()
    67  return depth;  in reiserfs_write_unlock_nested()
    70  void reiserfs_write_lock_nested(struct super_block *s, int depth)  in reiserfs_write_lock_nested()  [argument]
    75  if (depth == -1)  in reiserfs_write_lock_nested()
    80  sb_i->lock_depth = depth;  in reiserfs_write_lock_nested()
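Every reiserfs entry that follows repeats the same idiom: reiserfs_write_unlock_nested() drops the per-superblock write lock and returns the saved lock depth, and reiserfs_write_lock_nested() restores it (a depth of -1 means the lock was not held, so nothing is re-taken). A minimal kernel-context sketch of a typical call site, with a made-up function name and wait_on_buffer() standing in for any operation that may sleep:

static void example_wait_without_write_lock(struct super_block *s,
                                            struct buffer_head *bh)
{
    int depth;

    depth = reiserfs_write_unlock_nested(s);   /* remember current nesting */
    wait_on_buffer(bh);                        /* may sleep; lock is not held */
    reiserfs_write_lock_nested(s, depth);      /* restore the saved depth */
}
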
D | resize.c |
    37  int depth;  in reiserfs_resize()  [local]
    47  depth = reiserfs_write_unlock_nested(s);  in reiserfs_resize()
    49  reiserfs_write_lock_nested(s, depth);  in reiserfs_resize()
    143  int depth;  in reiserfs_resize()  [local]
    148  depth = reiserfs_write_unlock_nested(s);  in reiserfs_resize()
    150  reiserfs_write_lock_nested(s, depth);  in reiserfs_resize()
    161  depth = reiserfs_write_unlock_nested(s);  in reiserfs_resize()
    163  reiserfs_write_lock_nested(s, depth);  in reiserfs_resize()
D | stree.c |
    561  int depth = -1;  in search_by_key_reada()  [local]
    580  if (depth == -1)  in search_by_key_reada()
    581  depth = reiserfs_write_unlock_nested(s);  in search_by_key_reada()
    586  return depth;  in search_by_key_reada()
    679  int depth = -1;  in search_by_key()  [local]
    682  depth = search_by_key_reada(sb, reada_bh,  in search_by_key()
    685  if (!buffer_uptodate(bh) && depth == -1)  in search_by_key()
    686  depth = reiserfs_write_unlock_nested(sb);  in search_by_key()
    691  if (depth != -1)  in search_by_key()
    692  reiserfs_write_lock_nested(sb, depth);  in search_by_key()
    [all …]
D | journal.c |
    953  int depth;  in reiserfs_async_progress_wait()  [local]
    955  depth = reiserfs_write_unlock_nested(s);  in reiserfs_async_progress_wait()
    957  reiserfs_write_lock_nested(s, depth);  in reiserfs_async_progress_wait()
    980  int depth;  in flush_commit_list()  [local]
    1031  depth = reiserfs_write_unlock_nested(s);  in flush_commit_list()
    1036  reiserfs_write_lock_nested(s, depth);  in flush_commit_list()
    1056  depth = reiserfs_write_unlock_nested(s);  in flush_commit_list()
    1058  reiserfs_write_lock_nested(s, depth);  in flush_commit_list()
    1070  depth = reiserfs_write_unlock_nested(s);  in flush_commit_list()
    1072  reiserfs_write_lock_nested(s, depth);  in flush_commit_list()
    [all …]
D | bitmap.c |
    461  int depth = reiserfs_write_unlock_nested(s);  in _reiserfs_free_block()  [local]
    463  reiserfs_write_lock_nested(s, depth);  in _reiserfs_free_block()
    1202  int depth;  in blocknrs_and_prealloc_arrays_from_search_start()  [local]
    1212  depth = reiserfs_write_unlock_nested(s);  in blocknrs_and_prealloc_arrays_from_search_start()
    1216  reiserfs_write_lock_nested(s, depth);  in blocknrs_and_prealloc_arrays_from_search_start()
    1231  reiserfs_write_lock_nested(s, depth);  in blocknrs_and_prealloc_arrays_from_search_start()
    1261  depth = reiserfs_write_unlock_nested(s);  in blocknrs_and_prealloc_arrays_from_search_start()
    1265  reiserfs_write_lock_nested(s, depth);  in blocknrs_and_prealloc_arrays_from_search_start()
    1297  depth = reiserfs_write_unlock_nested(s);  in blocknrs_and_prealloc_arrays_from_search_start()
    1302  reiserfs_write_lock_nested(s, depth);  in blocknrs_and_prealloc_arrays_from_search_start()
    [all …]
D | dir.c |
    77  int depth;  in reiserfs_readdir_inode()  [local]
    208  depth = reiserfs_write_unlock_nested(inode->i_sb);  in reiserfs_readdir_inode()
    212  reiserfs_write_lock_nested(inode->i_sb, depth);  in reiserfs_readdir_inode()
    218  reiserfs_write_lock_nested(inode->i_sb, depth);  in reiserfs_readdir_inode()
D | inode.c |
    71  int depth = reiserfs_write_unlock_nested(inode->i_sb);  in reiserfs_evict_inode()  [local]
    73  reiserfs_write_lock_nested(inode->i_sb, depth);  in reiserfs_evict_inode()
    1640  int depth;  in reiserfs_iget()  [local]
    1644  depth = reiserfs_write_unlock_nested(s);  in reiserfs_iget()
    1648  reiserfs_write_lock_nested(s, depth);  in reiserfs_iget()
    1936  int depth;  in reiserfs_new_inode()  [local]
    1940  depth = reiserfs_write_unlock_nested(sb);  in reiserfs_new_inode()
    1942  reiserfs_write_lock_nested(sb, depth);  in reiserfs_new_inode()
    1967  depth = reiserfs_write_unlock_nested(inode->i_sb);  in reiserfs_new_inode()
    1970  reiserfs_write_lock_nested(inode->i_sb, depth);  in reiserfs_new_inode()
    [all …]
D | fix_node.c |
    1110  int depth = reiserfs_write_unlock_nested(tb->tb_sb);  in get_far_parent()  [local]
    1112  reiserfs_write_lock_nested(tb->tb_sb, depth);  in get_far_parent()
    2130  int depth = reiserfs_write_unlock_nested(tb->tb_sb);  in get_direct_parent()  [local]
    2132  reiserfs_write_lock_nested(tb->tb_sb, depth);  in get_direct_parent()
    2158  int depth;  in get_neighbors()  [local]
    2176  depth = reiserfs_write_unlock_nested(tb->tb_sb);  in get_neighbors()
    2178  reiserfs_write_lock_nested(tb->tb_sb, depth);  in get_neighbors()
    2216  depth = reiserfs_write_unlock_nested(tb->tb_sb);  in get_neighbors()
    2218  reiserfs_write_lock_nested(tb->tb_sb, depth);  in get_neighbors()
    2487  int depth;  in wait_tb_buffers_until_unlocked()  [local]
    [all …]
D | ioctl.c |
    188  int depth = reiserfs_write_unlock_nested(inode->i_sb);  in reiserfs_unpack()  [local]
    191  reiserfs_write_lock_nested(inode->i_sb, depth);  in reiserfs_unpack()
D | super.c |
    285  int depth;  in finish_unfinished()  [local]
    343  depth = reiserfs_write_unlock_nested(inode->i_sb);  in finish_unfinished()
    345  reiserfs_write_lock_nested(inode->i_sb, depth);  in finish_unfinished()
    2276  int depth;  in reiserfs_write_dquot()  [local]
    2284  depth = reiserfs_write_unlock_nested(dquot->dq_sb);  in reiserfs_write_dquot()
    2286  reiserfs_write_lock_nested(dquot->dq_sb, depth);  in reiserfs_write_dquot()
    2299  int depth;  in reiserfs_acquire_dquot()  [local]
    2307  depth = reiserfs_write_unlock_nested(dquot->dq_sb);  in reiserfs_acquire_dquot()
    2309  reiserfs_write_lock_nested(dquot->dq_sb, depth);  in reiserfs_acquire_dquot()
    2358  int depth;  in reiserfs_write_info()  [local]
    [all …]
/fs/minix/
D | itree_common.c |
    31  int depth,  in get_branch()  [argument]
    45  while (--depth) {  in get_branch()
    160  int depth = block_to_path(inode, block, offsets);  in get_block()  [local]
    162  if (depth == 0)  in get_block()
    166  partial = get_branch(inode, depth, offsets, chain, &err);  in get_block()
    171  map_bh(bh, inode->i_sb, block_to_cpu(chain[depth-1].key));  in get_block()
    173  partial = chain+depth-1; /* the whole chain */  in get_block()
    196  left = (chain + depth) - partial;  in get_block()
    224  int depth,  in find_shared()  [argument]
    233  for (k = depth; k > 1 && !offsets[k-1]; k--)  in find_shared()
    [all …]
/fs/configfs/
D | symlink.c |
    27  int depth = 0;  in item_depth()  [local]
    28  do { depth++; } while ((p = p->ci_parent) && !configfs_is_root(p));  in item_depth()
    29  return depth;  in item_depth()
    61  int depth, size;  in configfs_get_target_path()  [local]
    64  depth = item_depth(item);  in configfs_get_target_path()
    65  size = item_path_length(target) + depth * 3 - 1;  in configfs_get_target_path()
    69  pr_debug("%s: depth = %d, size = %d\n", __func__, depth, size);  in configfs_get_target_path()
    71  for (s = path; depth--; s += 3)  in configfs_get_target_path()
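Here depth is how many directories deep the symlink's parent item sits, and the target path is made relative by prefixing one "../" per level; the size calculation adds 3 bytes per level and subtracts 1 because the target's leading '/' reuses the final '/' of the last "../". The standalone sketch below illustrates that sizing and fill logic; relative_target() is an invented name, not the configfs API, and it assumes depth >= 1 and an absolute target path.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Build "../" repeated `depth` times followed by `target_path`
 * (which must start with '/'); the caller frees the result. */
static char *relative_target(const char *target_path, int depth)
{
    int size = strlen(target_path) + depth * 3 - 1;
    char *path, *s;

    path = malloc(size + 1);
    if (!path)
        return NULL;
    for (s = path; depth--; s += 3)
        memcpy(s, "../", 3);
    /* the target's leading '/' overwrites the last "../"'s trailing '/' */
    strcpy(s - 1, target_path);
    return path;
}

int main(void)
{
    char *p = relative_target("/targets/port0", 2);

    if (p)
        printf("%s\n", p);   /* prints ../../targets/port0 */
    free(p);
    return 0;
}
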
D | inode.c |
    138  int depth = sd->s_depth;  in configfs_set_inode_lock_class()  [local]
    140  if (depth > 0) {  in configfs_set_inode_lock_class()
    141  if (depth <= ARRAY_SIZE(default_group_class)) {  in configfs_set_inode_lock_class()
    143  &default_group_class[depth - 1]);  in configfs_set_inode_lock_class()
/fs/sysv/
D | itree.c |
    89  int depth,  in get_branch()  [argument]
    102  while (--depth) {  in get_branch()
    215  int depth = block_to_path(inode, iblock, offsets);  in get_block()  [local]
    217  if (depth == 0)  in get_block()
    222  partial = get_branch(inode, depth, offsets, chain, &err);  in get_block()
    229  chain[depth-1].key));  in get_block()
    231  partial = chain+depth-1; /* the whole chain */  in get_block()
    254  left = (chain + depth) - partial;  in get_block()
    282  int depth,  in find_shared()  [argument]
    291  for (k = depth; k > 1 && !offsets[k-1]; k--)  in find_shared()
    [all …]
/fs/ufs/
D | inode.c |
    124  static u64 ufs_frag_map(struct inode *inode, unsigned offsets[4], int depth)  in ufs_frag_map()  [argument]
    140  if (depth == 0)  in ufs_frag_map()
    153  while (--depth) {  in ufs_frag_map()
    177  while (--depth) {  in ufs_frag_map()
    402  int depth = ufs_block_to_path(inode, fragment >> uspi->s_fpbshift, offsets);  in ufs_getfrag_block()  [local]
    406  phys64 = ufs_frag_map(inode, offsets, depth);  in ufs_getfrag_block()
    425  if (unlikely(!depth)) {  in ufs_getfrag_block()
    441  if (depth == 1) {  in ufs_getfrag_block()
    448  for (i = 1; i < depth - 1; i++)  in ufs_getfrag_block()
    451  phys64 = ufs_inode_getblock(inode, phys64, offsets[depth - 1],  in ufs_getfrag_block()
    [all …]
/fs/ext2/
D | inode.c |
    235  int depth,  in ext2_get_branch()  [argument]
    249  while (--depth) {  in ext2_get_branch()
    632  int depth;  in ext2_get_blocks()  [local]
    639  depth = ext2_block_to_path(inode,iblock,offsets,&blocks_to_boundary);  in ext2_get_blocks()
    641  if (depth == 0)  in ext2_get_blocks()
    644  partial = ext2_get_branch(inode, depth, offsets, chain, &err);  in ext2_get_blocks()
    647  first_block = le32_to_cpu(chain[depth - 1].key);  in ext2_get_blocks()
    653  if (!verify_chain(chain, chain + depth - 1)) {  in ext2_get_blocks()
    662  partial = chain + depth - 1;  in ext2_get_blocks()
    665  blk = le32_to_cpu(*(chain[depth-1].p + count));  in ext2_get_blocks()
    [all …]
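The verify_chain() call above is the revalidation step of the lockless branch walk: each element of the chain remembers both the location it was read through and the value it saw there, and before trusting a fully resolved chain the code re-checks that nothing changed underneath it (a concurrent truncate would). A rough kernel-style sketch of that check, with a simplified Indirect definition and a made-up function name:

typedef struct {
    __le32 *p;               /* where the block pointer was read from */
    __le32 key;              /* the value seen there at lookup time */
    struct buffer_head *bh;  /* the indirect block holding *p, if any */
} Indirect;

/* True only if every cached key still matches what its pointer holds,
 * i.e. no level of the branch was changed after it was read. */
static inline int chain_still_valid(Indirect *from, Indirect *to)
{
    while (from <= to && from->key == *from->p)
        from++;
    return from > to;
}
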
/fs/
D | eventpoll.c |
    685  void *priv, int depth, bool ep_locked)  in ep_scan_ready_list()  [argument]
    699  mutex_lock_nested(&ep->mtx, depth);  in ep_scan_ready_list()
    889  int depth)  in ep_item_poll()  [argument]
    903  ep_read_events_proc, &depth, depth,  in ep_item_poll()
    912  int depth = *(int *)priv;  in ep_read_events_proc()  [local]
    915  depth++;  in ep_read_events_proc()
    918  if (ep_item_poll(epi, &pt, depth)) {  in ep_read_events_proc()
    937  int depth = 0;  in ep_eventpoll_poll()  [local]
    947  &depth, depth, false);  in ep_eventpoll_poll()
    2145  static inline int epoll_mutex_lock(struct mutex *mutex, int depth,  in epoll_mutex_lock()  [argument]
    [all …]
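In eventpoll, depth tracks how deeply one epoll file is being polled from inside another: it is bumped each time ep_read_events_proc() descends into a nested epoll descriptor and is also used as the lockdep subclass for ep->mtx, so nested acquisitions do not trigger false deadlock warnings. A simplified kernel-context sketch of that shape, not the actual eventpoll code; the nesting limit, the helper scan_ready_list(), and the function name are all invented:

#define MAX_POLL_NESTING 4   /* assumed small bound on nested epoll instances */

static __poll_t poll_one_level(struct eventpoll *ep, poll_table *pt, int depth)
{
    __poll_t events;

    if (depth > MAX_POLL_NESTING)        /* refuse to recurse any deeper */
        return 0;

    mutex_lock_nested(&ep->mtx, depth);  /* distinct lockdep subclass per level */
    events = scan_ready_list(ep, pt, depth + 1);  /* hypothetical helper that may
                                                   * re-enter here for nested
                                                   * epoll files */
    mutex_unlock(&ep->mtx);
    return events;
}
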
/fs/overlayfs/
D | inode.c |
    572  int depth = inode->i_sb->s_stack_depth - 1;  in ovl_lockdep_annotate_inode_mutex_key()  [local]
    574  if (WARN_ON_ONCE(depth < 0 || depth >= OVL_MAX_NESTING))  in ovl_lockdep_annotate_inode_mutex_key()
    575  depth = 0;  in ovl_lockdep_annotate_inode_mutex_key()
    578  lockdep_set_class(&inode->i_rwsem, &ovl_i_mutex_dir_key[depth]);  in ovl_lockdep_annotate_inode_mutex_key()
    580  lockdep_set_class(&inode->i_rwsem, &ovl_i_mutex_key[depth]);  in ovl_lockdep_annotate_inode_mutex_key()
    582  lockdep_set_class(&OVL_I(inode)->lock, &ovl_i_lock_key[depth]);  in ovl_lockdep_annotate_inode_mutex_key()
/fs/hfs/
D | btree.c |
    93  tree->depth = be16_to_cpu(head->depth);  in hfs_btree_open()
    181  head->depth = cpu_to_be16(tree->depth);  in hfs_btree_write()
D | btree.h |
    41  unsigned int depth;  [member]
    148  __be16 depth; /* (V) The number of levels in this B-tree */  [member]
/fs/hfsplus/
D | btree.c |
    177  tree->depth = be16_to_cpu(head->depth);  in hfs_btree_open()
    304  head->depth = cpu_to_be16(tree->depth);  in hfs_btree_write()
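In both hfs and hfsplus, depth appears twice: as a native-endian member of the in-memory tree and as a big-endian __be16 field in the on-disk B-tree header, converted in each direction when the header is read and written. A minimal kernel-style sketch of that pairing, with illustrative struct and function names:

struct disk_btree_header {
    __be16 depth;            /* number of levels, on-disk byte order */
};

struct mem_btree {
    unsigned int depth;      /* native-endian working copy */
};

static void btree_read_header(struct mem_btree *tree,
                              const struct disk_btree_header *head)
{
    tree->depth = be16_to_cpu(head->depth);
}

static void btree_write_header(struct disk_btree_header *head,
                               const struct mem_btree *tree)
{
    head->depth = cpu_to_be16(tree->depth);
}
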
/fs/ocfs2/
D | ocfs2_trace.h |
    424  TP_PROTO(unsigned long long owner, int depth),
    425  TP_ARGS(owner, depth),
    428  __field(int, depth)
    432  __entry->depth = depth;
    434  TP_printk("%llu %d", __entry->owner, __entry->depth)
    439  int depth),
    440  TP_ARGS(subtree_root, blkno, depth),
    444  __field(int, depth)
    449  __entry->depth = depth;
    452  __entry->blkno, __entry->depth)
    [all …]
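These fragments are pieces of tracepoint definitions that record an extent-tree depth. Below is a hedged reconstruction of the overall shape visible in the excerpt, using the standard TRACE_EVENT() building blocks; the event name is illustrative, and the code assumes it lives inside a trace header with the usual boilerplate around it.

TRACE_EVENT(ocfs2_example_depth,
    TP_PROTO(unsigned long long owner, int depth),
    TP_ARGS(owner, depth),
    TP_STRUCT__entry(
        __field(unsigned long long, owner)
        __field(int, depth)
    ),
    TP_fast_assign(
        __entry->owner = owner;
        __entry->depth = depth;
    ),
    TP_printk("%llu %d", __entry->owner, __entry->depth)
);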