/fs/ubifs/

D | commit.c |
    513  struct ubifs_idx_node idx __aligned(8);
    528  struct ubifs_idx_node *idx;  in dbg_old_index_check_init() local
    537  idx = kmalloc(c->max_idx_node_sz, GFP_NOFS);  in dbg_old_index_check_init()
    538  if (!idx)  in dbg_old_index_check_init()
    541  err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs);  in dbg_old_index_check_init()
    545  d->old_zroot_level = le16_to_cpu(idx->level);  in dbg_old_index_check_init()
    546  d->old_zroot_sqnum = le64_to_cpu(idx->ch.sqnum);  in dbg_old_index_check_init()
    548  kfree(idx);  in dbg_old_index_check_init()
    572  struct ubifs_idx_node *idx;  in dbg_check_old_index() local
    608  idx = &i->idx;  in dbg_check_old_index()
    [all …]

D | tnc_misc.c |
    277  struct ubifs_idx_node *idx;  in read_znode() local
    279  idx = kmalloc(c->max_idx_node_sz, GFP_NOFS);  in read_znode()
    280  if (!idx)  in read_znode()
    283  err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs);  in read_znode()
    285  kfree(idx);  in read_znode()
    289  znode->child_cnt = le16_to_cpu(idx->child_cnt);  in read_znode()
    290  znode->level = le16_to_cpu(idx->level);  in read_znode()
    305  const struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);  in read_znode()
    385  kfree(idx);  in read_znode()
    390  ubifs_dump_node(c, idx);  in read_znode()
    [all …]

D | tnc_commit.c |
    37   static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx,  in make_idx_node() argument
    44   idx->ch.node_type = UBIFS_IDX_NODE;  in make_idx_node()
    45   idx->child_cnt = cpu_to_le16(znode->child_cnt);  in make_idx_node()
    46   idx->level = cpu_to_le16(znode->level);  in make_idx_node()
    48   struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);  in make_idx_node()
    62   ubifs_prepare_node(c, idx, len, 0);  in make_idx_node()
    257  struct ubifs_idx_node *idx;  in layout_leb_in_gaps() local
    261  idx = snod->node;  in layout_leb_in_gaps()
    262  key_read(c, ubifs_idx_key(c, idx), &snod->key);  in layout_leb_in_gaps()
    263  level = le16_to_cpu(idx->level);  in layout_leb_in_gaps()
    [all …]

D | misc.h |
    209  const struct ubifs_idx_node *idx,  in ubifs_idx_branch() argument
    212  return (struct ubifs_branch *)((void *)idx->branches +  in ubifs_idx_branch()
    222  const struct ubifs_idx_node *idx)  in ubifs_idx_key() argument
    224  return (void *)((struct ubifs_branch *)idx->branches)->key;  in ubifs_idx_key()

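The two misc.h helpers above are the interesting part: an index node's branch array has a runtime element size (a fixed branch header plus a key of per-filesystem length), so plain C indexing cannot be used and the stride is computed by hand. A minimal standalone sketch of the same technique, with illustrative type names rather than the real UBIFS structures:

#include <stddef.h>
#include <stdint.h>

/* Stand-ins for struct ubifs_branch and struct ubifs_idx_node. */
struct branch {
	uint32_t lnum;		/* LEB number of the child node */
	uint32_t offs;		/* offset of the child within the LEB */
	uint32_t len;		/* length of the child node */
	uint8_t  key[];		/* key_len bytes follow the header */
};

struct idx_node {
	uint16_t child_cnt;
	uint16_t level;
	uint8_t  branches[];	/* child_cnt variable-size branches */
};

/*
 * Stride = header size + runtime key length, mirroring the
 * UBIFS_BRANCH_SZ + c->key_len arithmetic in ubifs_idx_branch().
 */
static inline struct branch *idx_branch(const struct idx_node *idx,
					size_t key_len, int bnum)
{
	return (struct branch *)((uint8_t *)idx->branches +
				 (sizeof(struct branch) + key_len) * bnum);
}

ubifs_idx_key() is then just the degenerate case: the key of branch 0 sits at a fixed offset right behind the first branch header.
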
/fs/xfs/libxfs/

D | xfs_inode_fork.c |
    884  xfs_extnum_t idx) /* index of target extent */  in xfs_iext_get_ext() argument
    886  ASSERT(idx >= 0);  in xfs_iext_get_ext()
    887  ASSERT(idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t));  in xfs_iext_get_ext()
    889  if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {  in xfs_iext_get_ext()
    894  xfs_extnum_t page_idx = idx; /* ext index in target list */  in xfs_iext_get_ext()
    899  return &ifp->if_u1.if_extents[idx];  in xfs_iext_get_ext()
    912  xfs_extnum_t idx, /* starting index of new items */  in xfs_iext_insert() argument
    920  trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_);  in xfs_iext_insert()
    923  xfs_iext_add(ifp, idx, count);  in xfs_iext_insert()
    924  for (i = idx; i < idx + count; i++, new++)  in xfs_iext_insert()
    [all …]

D | xfs_bmap.c |
    501   xfs_extnum_t idx; /* extent record index */  in xfs_bmap_trace_exlist() local
    510   for (idx = 0; idx < cnt; idx++)  in xfs_bmap_trace_exlist()
    511   trace_xfs_extlist(ip, idx, whichfork, caller_ip);  in xfs_bmap_trace_exlist()
    1481  int idx; /* extent record index */  in xfs_bmap_first_unused() local
    1502  for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {  in xfs_bmap_first_unused()
    1503  xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, idx);  in xfs_bmap_first_unused()
    1733  ASSERT(bma->idx >= 0);  in xfs_bmap_add_extent_delay_real()
    1734  ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));  in xfs_bmap_add_extent_delay_real()
    1748  ep = xfs_iext_get_ext(ifp, bma->idx);  in xfs_bmap_add_extent_delay_real()
    1770  if (bma->idx > 0) {  in xfs_bmap_add_extent_delay_real()
    [all …]

D | xfs_da_format.h |
    808  xfs_attr3_leaf_name(xfs_attr_leafblock_t *leafp, int idx)  in xfs_attr3_leaf_name() argument
    812  return &((char *)leafp)[be16_to_cpu(entries[idx].nameidx)];  in xfs_attr3_leaf_name()
    816  xfs_attr3_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx)  in xfs_attr3_leaf_name_remote() argument
    818  return (xfs_attr_leaf_name_remote_t *)xfs_attr3_leaf_name(leafp, idx);  in xfs_attr3_leaf_name_remote()
    822  xfs_attr3_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx)  in xfs_attr3_leaf_name_local() argument
    824  return (xfs_attr_leaf_name_local_t *)xfs_attr3_leaf_name(leafp, idx);  in xfs_attr3_leaf_name_local()

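xfs_attr3_leaf_name() shows offset-table addressing inside a disk block: the entry table stores a big-endian byte offset (nameidx) from the start of the block, and the _local/_remote variants are just casts of the resolved pointer. A rough standalone sketch with a hypothetical layout, not the on-disk xfs_attr_leafblock_t:

#include <stdint.h>
#include <arpa/inet.h>	/* ntohs() standing in for be16_to_cpu() */

struct leaf_entry {
	uint16_t nameidx;	/* big-endian byte offset of the name record */
	uint8_t  flags;		/* local vs. remote form, etc. */
	uint8_t  pad;
};

struct leaf_block {
	uint16_t count;			/* entries grow up, names grow down */
	struct leaf_entry entries[];
};

/* One endian conversion plus pointer arithmetic, as in the helpers above. */
static inline char *leaf_name(struct leaf_block *leafp, int idx)
{
	return (char *)leafp + ntohs(leafp->entries[idx].nameidx);
}
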
/fs/nfs/blocklayout/

D | dev.c |
    189  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask);
    194  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)  in bl_parse_simple() argument
    196  struct pnfs_block_volume *v = &volumes[idx];  in bl_parse_simple()
    221  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)  in bl_parse_slice() argument
    223  struct pnfs_block_volume *v = &volumes[idx];  in bl_parse_slice()
    237  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)  in bl_parse_concat() argument
    239  struct pnfs_block_volume *v = &volumes[idx];  in bl_parse_concat()
    266  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)  in bl_parse_stripe() argument
    268  struct pnfs_block_volume *v = &volumes[idx];  in bl_parse_stripe()
    295  struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)  in bl_parse_deviceid() argument
    [all …]

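Every bl_parse_* helper takes the whole decoded volume array plus an index because compound volume types (slice, concat, stripe) refer to their children by array position. A hedged sketch of that flat-array shape and the recursive dispatch it enables; the types below are hypothetical and far simpler than the real pnfs_block_volume:

enum vol_type { VOL_SIMPLE, VOL_SLICE, VOL_CONCAT, VOL_STRIPE };

struct volume {
	enum vol_type type;
	int nsubvols;		/* children for CONCAT/STRIPE */
	int subvols[8];		/* indices of child volumes in the array */
};

/* Compound entries hold only indices, so parsing walks the flat array. */
static int parse_volume(struct volume *volumes, int idx)
{
	struct volume *v = &volumes[idx];
	int i, err = 0;

	switch (v->type) {
	case VOL_SIMPLE:
		return 0;	/* leaf: open the underlying device here */
	case VOL_SLICE:
		return parse_volume(volumes, v->subvols[0]);
	case VOL_CONCAT:
	case VOL_STRIPE:
		for (i = 0; i < v->nsubvols && !err; i++)
			err = parse_volume(volumes, v->subvols[i]);
		return err;
	}
	return -1;
}
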
/fs/coda/

D | inode.c |
    117  int idx;  in get_device_index() local
    138  idx = iminor(inode);  in get_device_index()
    141  if (idx < 0 || idx >= MAX_CODADEVS) {  in get_device_index()
    146  return idx;  in get_device_index()
    158  int idx;  in coda_fill_super() local
    163  idx = get_device_index((struct coda_mount_data *) data);  in coda_fill_super()
    166  if(idx == -1)  in coda_fill_super()
    167  idx = 0;  in coda_fill_super()
    169  pr_info("%s: device index: %i\n", __func__, idx);  in coda_fill_super()
    171  vc = &coda_comms[idx];  in coda_fill_super()

/fs/nfs/flexfilelayout/

D | flexfilelayout.h |
    120  FF_LAYOUT_DEVID_NODE(struct pnfs_layout_segment *lseg, u32 idx)  in FF_LAYOUT_DEVID_NODE() argument
    122  if (idx >= FF_LAYOUT_LSEG(lseg)->mirror_array_cnt ||  in FF_LAYOUT_DEVID_NODE()
    123  FF_LAYOUT_LSEG(lseg)->mirror_array[idx] == NULL ||  in FF_LAYOUT_DEVID_NODE()
    124  FF_LAYOUT_LSEG(lseg)->mirror_array[idx]->mirror_ds == NULL)  in FF_LAYOUT_DEVID_NODE()
    126  return &FF_LAYOUT_LSEG(lseg)->mirror_array[idx]->mirror_ds->id_node;  in FF_LAYOUT_DEVID_NODE()
    136  FF_LAYOUT_COMP(struct pnfs_layout_segment *lseg, u32 idx)  in FF_LAYOUT_COMP()
    138  if (idx >= FF_LAYOUT_LSEG(lseg)->mirror_array_cnt)  in FF_LAYOUT_COMP()
    140  return FF_LAYOUT_LSEG(lseg)->mirror_array[idx];  in FF_LAYOUT_COMP()

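FF_LAYOUT_DEVID_NODE() is a defensive accessor: every link on the path to the result is validated, and a missing one yields NULL instead of an oops. A condensed sketch of the pattern with stand-in types (the real helper returns the embedded id_node, elided here):

struct devid_node;		/* opaque for the sketch */

struct mirror {
	struct devid_node *mirror_ds;
};

struct layout_seg {
	unsigned int   mirror_array_cnt;
	struct mirror *mirror_array[4];
};

static struct devid_node *devid_node(struct layout_seg *lseg, unsigned int idx)
{
	if (idx >= lseg->mirror_array_cnt ||
	    lseg->mirror_array[idx] == NULL ||
	    lseg->mirror_array[idx]->mirror_ds == NULL)
		return NULL;	/* any broken link means "no device" */
	return lseg->mirror_array[idx]->mirror_ds;
}
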
D | flexfilelayout.c |
    765   int idx;  in ff_layout_choose_best_ds_for_read() local
    768   for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {  in ff_layout_choose_best_ds_for_read()
    769   ds = nfs4_ff_layout_prepare_ds(lseg, idx, false);  in ff_layout_choose_best_ds_for_read()
    771   *best_idx = idx;  in ff_layout_choose_best_ds_for_read()
    989   int idx)  in ff_layout_async_handle_error_v4() argument
    995   struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);  in ff_layout_async_handle_error_v4()
    1099  int idx)  in ff_layout_async_handle_error_v3() argument
    1101  struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);  in ff_layout_async_handle_error_v3()
    1136  int idx)  in ff_layout_async_handle_error() argument
    1142  return ff_layout_async_handle_error_v3(task, lseg, idx);  in ff_layout_async_handle_error()
    [all …]

D | flexfilelayoutdev.c |
    551  u32 idx;  in ff_read_layout_has_available_ds() local
    553  for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {  in ff_read_layout_has_available_ds()
    554  mirror = FF_LAYOUT_COMP(lseg, idx);  in ff_read_layout_has_available_ds()
    569  u32 idx;  in ff_rw_layout_has_available_ds() local
    571  for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {  in ff_rw_layout_has_available_ds()
    572  mirror = FF_LAYOUT_COMP(lseg, idx);  in ff_rw_layout_has_available_ds()

/fs/ntfs/

D | logfile.c |
    277  u16 nr_clients, idx;  in ntfs_check_log_client_array() local
    293  idx = le16_to_cpu(ra->client_free_list);  in ntfs_check_log_client_array()
    296  for (idx_is_first = true; idx != LOGFILE_NO_CLIENT_CPU; nr_clients--,  in ntfs_check_log_client_array()
    297  idx = le16_to_cpu(cr->next_client)) {  in ntfs_check_log_client_array()
    298  if (!nr_clients || idx >= le16_to_cpu(ra->log_clients))  in ntfs_check_log_client_array()
    301  cr = ca + idx;  in ntfs_check_log_client_array()
    312  idx = le16_to_cpu(ra->client_in_use_list);  in ntfs_check_log_client_array()
    388  pgoff_t idx;  in ntfs_check_and_load_restart_page() local
    397  idx = (pos + size) >> PAGE_CACHE_SHIFT;  in ntfs_check_and_load_restart_page()
    400  page = ntfs_map_page(vi->i_mapping, idx);  in ntfs_check_and_load_restart_page()
    [all …]

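The loop in ntfs_check_log_client_array() walks a list whose links are array indices read from an untrusted on-disk image, so it has to defend against both wild indices and cycles; counting down nr_clients bounds the walk. A standalone sketch of that bounded traversal (illustrative names, not the real restart-area layout):

#include <stdbool.h>
#include <stdint.h>

#define NO_CLIENT 0xffff	/* list terminator, like LOGFILE_NO_CLIENT_CPU */

struct client_rec {
	uint16_t next_client;	/* index of the next record, or NO_CLIENT */
};

static bool check_client_list(const struct client_rec *ca,
			      uint16_t log_clients,	/* array size */
			      uint16_t first,		/* list head index */
			      uint16_t nr_clients)	/* max list length */
{
	uint16_t idx;

	for (idx = first; idx != NO_CLIENT; idx = ca[idx].next_client) {
		/* A cycle exhausts the counter; corruption trips the range check. */
		if (!nr_clients || idx >= log_clients)
			return false;
		nr_clients--;
	}
	return true;
}
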
/fs/hpfs/

D | alloc.c |
    488  static unsigned find_run(__le32 *bmp, unsigned *idx)  in find_run() argument
    491  while (tstbits(bmp, *idx, 1)) {  in find_run()
    492  (*idx)++;  in find_run()
    493  if (unlikely(*idx >= 0x4000))  in find_run()
    497  while (!tstbits(bmp, *idx + len, 1))  in find_run()
    528  unsigned idx, len, start_bmp, end_bmp;  in hpfs_trim_fs() local
    549  idx = 0;  in hpfs_trim_fs()
    550  while ((len = find_run(bmp, &idx)) && !err) {  in hpfs_trim_fs()
    551  err = do_trim(s, sbi->sb_dirband_start + idx * 4, len * 4, start, end, minlen, result);  in hpfs_trim_fs()
    552  idx += len;  in hpfs_trim_fs()
    [all …]

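find_run() is a classic bitmap run scanner: skip to the start of the next run, measure its length, and let the caller advance idx by len between calls, which is exactly how the hpfs_trim_fs() loop above iterates. A userspace sketch of the same shape; note that the real code uses the HPFS tstbits() helper and HPFS's own bit sense, both simplified away here:

#include <stdbool.h>
#include <stdint.h>

#define BITMAP_BITS 0x4000	/* 16384 bits per HPFS bitmap, as in find_run() */

static bool bit_is_used(const uint32_t *bmp, unsigned bit)
{
	return bmp[bit >> 5] & (1u << (bit & 31));
}

/* Returns the length of the next run starting at or after *idx,
 * leaving *idx at the start of the run; 0 means end of bitmap. */
static unsigned find_run(const uint32_t *bmp, unsigned *idx)
{
	unsigned len = 0;

	while (*idx < BITMAP_BITS && bit_is_used(bmp, *idx))
		(*idx)++;
	if (*idx >= BITMAP_BITS)
		return 0;
	while (*idx + len < BITMAP_BITS && !bit_is_used(bmp, *idx + len))
		len++;
	return len;
}
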
/fs/hfs/

D | bfind.c |
    177  int idx, res = 0;  in hfs_brec_goto() local
    188  idx = bnode->prev;  in hfs_brec_goto()
    189  if (!idx) {  in hfs_brec_goto()
    194  bnode = hfs_bnode_find(tree, idx);  in hfs_brec_goto()
    206  idx = bnode->next;  in hfs_brec_goto()
    207  if (!idx) {  in hfs_brec_goto()
    212  bnode = hfs_bnode_find(tree, idx);  in hfs_brec_goto()

D | btree.c |
    187  static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)  in hfs_bmap_new_bmap() argument
    194  node = hfs_bnode_create(tree, idx);  in hfs_bmap_new_bmap()
    201  prev->next = idx;  in hfs_bmap_new_bmap()
    202  cnid = cpu_to_be32(idx);  in hfs_bmap_new_bmap()
    250  u32 nidx, idx;  in hfs_bmap_alloc() local
    272  idx = 0;  in hfs_bmap_alloc()
    280  idx += i;  in hfs_bmap_alloc()
    287  return hfs_bnode_create(tree, idx);  in hfs_bmap_alloc()
    296  idx += 8;  in hfs_bmap_alloc()
    303  next_node = hfs_bmap_new_bmap(node, idx);  in hfs_bmap_alloc()

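hfs_bmap_alloc() scans the B-tree allocation bitmap a byte at a time, stepping idx by 8 for each full byte and by the bit position within the first byte that has a free slot, which is what the idx += 8 / idx += i lines above do. A simplified sketch of just that scan (the real function also chains to a fresh map node via hfs_bmap_new_bmap() when every byte is full):

#include <stdint.h>

/* Find, claim, and return the index of the first clear bit, or -1. */
static int bmap_alloc_bit(uint8_t *bitmap, unsigned size_bytes)
{
	unsigned idx = 0, off, i;

	for (off = 0; off < size_bytes; off++) {
		uint8_t byte = bitmap[off];

		if (byte != 0xff) {
			/* MSB-first bit order, as in the HFS on-disk bitmap */
			for (i = 0; i < 8; i++)
				if (!(byte & (0x80 >> i)))
					break;
			bitmap[off] = byte | (0x80 >> i);
			return idx + i;
		}
		idx += 8;
	}
	return -1;	/* bitmap full: caller must extend the map */
}
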
/fs/hfsplus/

D | btree.c |
    311  static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)  in hfs_bmap_new_bmap() argument
    318  node = hfs_bnode_create(tree, idx);  in hfs_bmap_new_bmap()
    323  prev->next = idx;  in hfs_bmap_new_bmap()
    324  cnid = cpu_to_be32(idx);  in hfs_bmap_new_bmap()
    376  u32 nidx, idx;  in hfs_bmap_alloc() local
    398  idx = 0;  in hfs_bmap_alloc()
    406  idx += i;  in hfs_bmap_alloc()
    414  idx);  in hfs_bmap_alloc()
    423  idx += 8;  in hfs_bmap_alloc()
    430  next_node = hfs_bmap_new_bmap(node, idx);  in hfs_bmap_alloc()

D | bfind.c |
    234  int idx, res = 0;  in hfs_brec_goto() local
    245  idx = bnode->prev;  in hfs_brec_goto()
    246  if (!idx) {  in hfs_brec_goto()
    251  bnode = hfs_bnode_find(tree, idx);  in hfs_brec_goto()
    263  idx = bnode->next;  in hfs_brec_goto()
    264  if (!idx) {  in hfs_brec_goto()
    269  bnode = hfs_bnode_find(tree, idx);  in hfs_brec_goto()

/fs/f2fs/

D | trace.c |
    161  unsigned idx;  in f2fs_destroy_trace_ios() local
    164  for (idx = 0; idx < found; idx++)  in f2fs_destroy_trace_ios()
    165  radix_tree_delete(&pids, pid[idx]);  in f2fs_destroy_trace_ios()

D | node.c |
    855  int idx = depth - 2;  in truncate_partial_nodes() local
    862  for (i = 0; i < idx + 1; i++) {  in truncate_partial_nodes()
    867  idx = i - 1;  in truncate_partial_nodes()
    873  ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);  in truncate_partial_nodes()
    876  for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {  in truncate_partial_nodes()
    877  child_nid = get_nid(pages[idx], i, false);  in truncate_partial_nodes()
    884  if (set_nid(pages[idx], i, 0, false))  in truncate_partial_nodes()
    888  if (offset[idx + 1] == 0) {  in truncate_partial_nodes()
    889  dn->node_page = pages[idx];  in truncate_partial_nodes()
    890  dn->nid = nid[idx];  in truncate_partial_nodes()
    [all …]

/fs/cifs/

D | winucase.c |
    642  unsigned char idx;  in cifs_toupper() local
    647  idx = (in & 0xff00) >> 8;  in cifs_toupper()
    650  tbl = toplevel[idx];  in cifs_toupper()
    655  idx = in & 0xff;  in cifs_toupper()
    658  out = tbl[idx];  in cifs_toupper()

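cifs_toupper() is a two-level lookup: the high byte of a 16-bit code point selects one of 256 leaf tables, the low byte selects the entry, and a NULL leaf or a zero entry means "no uppercase mapping", so the sparse 64K space costs almost nothing. A sketch with stub tables; the real generated data lives in winucase.c:

#include <stdint.h>

typedef uint16_t wchar16;

/* Illustrative leaf table for code points 0x0000-0x00ff. */
static const wchar16 t2_00[256] = {
	['a'] = 'A', ['b'] = 'B', ['c'] = 'C',	/* ... and so on */
};

/* Most of the 256 slots stay NULL: whole ranges with no mappings. */
static const wchar16 *const toplevel[256] = {
	[0x00] = t2_00,
};

static wchar16 toupper16(wchar16 in)
{
	const wchar16 *tbl;
	wchar16 out;

	tbl = toplevel[(in >> 8) & 0xff];	/* high byte: pick leaf table */
	if (!tbl)
		return in;			/* no mappings in this range */
	out = tbl[in & 0xff];			/* low byte: pick the entry */
	return out ? out : in;			/* zero entry means identity */
}
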
/fs/btrfs/

D | compression.c |
    780  int idx = type - 1;  in find_workspace() local
    782  struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws;  in find_workspace()
    783  spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock;  in find_workspace()
    784  atomic_t *alloc_ws = &btrfs_comp_ws[idx].alloc_ws;  in find_workspace()
    785  wait_queue_head_t *ws_wait = &btrfs_comp_ws[idx].ws_wait;  in find_workspace()
    786  int *num_ws = &btrfs_comp_ws[idx].num_ws;  in find_workspace()
    810  workspace = btrfs_compress_op[idx]->alloc_workspace();  in find_workspace()
    824  int idx = type - 1;  in free_workspace() local
    825  struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws;  in free_workspace()
    826  spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock;  in free_workspace()
    [all …]

/fs/nfsd/

D | nfs4layouts.c |
    40   static inline u32 devid_hashfn(u64 idx)  in devid_hashfn() argument
    42   return jhash_2words(idx, idx >> 32, 0) & DEVID_HASH_MASK;  in devid_hashfn()
    77   map->idx = nfsd_devid_seq++;  in nfsd4_alloc_devid_map()
    78   list_add_tail_rcu(&map->hash, &nfsd_devid_hash[devid_hashfn(map->idx)]);  in nfsd4_alloc_devid_map()
    88   nfsd4_find_devid_map(int idx)  in nfsd4_find_devid_map() argument
    93   list_for_each_entry_rcu(map, &nfsd_devid_hash[devid_hashfn(idx)], hash)  in nfsd4_find_devid_map()
    94   if (map->idx == idx)  in nfsd4_find_devid_map()
    111  id->fsid_idx = fhp->fh_export->ex_devid_map->idx;  in nfsd4_set_deviceid()

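devid_hashfn() folds a 64-bit device-ID index into a hash bucket by splitting it into two 32-bit words, which the kernel then mixes with jhash_2words(). A standalone sketch of the same idea with a toy mixer standing in for jhash; the table size below is an assumption for illustration:

#include <stdint.h>

#define DEVID_HASH_BITS 8			/* assumed table size */
#define DEVID_HASH_MASK ((1u << DEVID_HASH_BITS) - 1)

static inline uint32_t devid_hashfn(uint64_t idx)
{
	uint32_t lo = (uint32_t)idx;
	uint32_t hi = (uint32_t)(idx >> 32);
	/* Toy multiplicative mixer; the kernel uses jhash_2words(lo, hi, 0). */
	uint32_t h = lo * 0x9e3779b9u ^ hi * 0x85ebca6bu;

	return (h ^ (h >> 16)) & DEVID_HASH_MASK;
}

Because the IDs come from a monotonically increasing sequence (nfsd_devid_seq above), almost any mixer spreads them evenly; the masked result indexes the nfsd_devid_hash bucket array.
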
/fs/ocfs2/dlm/

D | dlmcommon.h |
    405  static inline char *dlm_list_in_text(enum dlm_lockres_list idx)  in dlm_list_in_text() argument
    407  if (idx == DLM_GRANTED_LIST)  in dlm_list_in_text()
    409  else if (idx == DLM_CONVERTING_LIST)  in dlm_list_in_text()
    411  else if (idx == DLM_BLOCKED_LIST)  in dlm_list_in_text()
    418  dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx)  in dlm_list_idx_to_ptr() argument
    421  if (idx == DLM_GRANTED_LIST)  in dlm_list_idx_to_ptr()
    423  else if (idx == DLM_CONVERTING_LIST)  in dlm_list_idx_to_ptr()
    425  else if (idx == DLM_BLOCKED_LIST)  in dlm_list_idx_to_ptr()
    964  void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data);
    965  void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data);
    [all …]

D | dlmrecovery.c |
    2407  static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)  in __dlm_hb_node_down() argument
    2411  if (dlm->reco.new_master == idx) {  in __dlm_hb_node_down()
    2413  dlm->name, idx);  in __dlm_hb_node_down()
    2419  "finalize1 state, clearing\n", dlm->name, idx);  in __dlm_hb_node_down()
    2426  if (dlm->joining_node == idx) {  in __dlm_hb_node_down()
    2427  mlog(0, "Clearing join state for node %u\n", idx);  in __dlm_hb_node_down()
    2432  if (!test_bit(idx, dlm->live_nodes_map)) {  in __dlm_hb_node_down()
    2435  dlm->name, idx);  in __dlm_hb_node_down()
    2440  if (!test_bit(idx, dlm->domain_map)) {  in __dlm_hb_node_down()
    2443  mlog(0, "node %u already removed from domain!\n", idx);  in __dlm_hb_node_down()
    [all …]

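__dlm_hb_node_down() consults per-domain node bitmaps (live_nodes_map, domain_map) indexed by node number, so membership questions cost a single bit test. A reduced userspace sketch of those checks, with stand-ins for the kernel's test_bit() and O2NM_MAX_NODES:

#include <stdbool.h>
#include <limits.h>

#define MAX_NODES 255			/* stand-in for O2NM_MAX_NODES */
#define LBITS (sizeof(unsigned long) * CHAR_BIT)

struct dlm_state {
	unsigned long live_nodes_map[(MAX_NODES + LBITS - 1) / LBITS];
	unsigned long domain_map[(MAX_NODES + LBITS - 1) / LBITS];
};

static bool node_bit(const unsigned long *map, int idx)
{
	return map[idx / LBITS] & (1UL << (idx % LBITS));
}

static void hb_node_down(struct dlm_state *dlm, int idx)
{
	if (!node_bit(dlm->live_nodes_map, idx))
		return;		/* already known dead: nothing to do */
	if (!node_bit(dlm->domain_map, idx))
		return;		/* never joined this domain */
	/* ...otherwise clear the bits and kick recovery for node idx... */
}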