
Searched refs:idx (Results 1 – 25 of 81) sorted by relevance


/fs/ubifs/
commit.c
501 struct ubifs_idx_node idx __aligned(8);
516 struct ubifs_idx_node *idx; in dbg_old_index_check_init() local
525 idx = kmalloc(c->max_idx_node_sz, GFP_NOFS); in dbg_old_index_check_init()
526 if (!idx) in dbg_old_index_check_init()
529 err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs); in dbg_old_index_check_init()
533 d->old_zroot_level = le16_to_cpu(idx->level); in dbg_old_index_check_init()
534 d->old_zroot_sqnum = le64_to_cpu(idx->ch.sqnum); in dbg_old_index_check_init()
536 kfree(idx); in dbg_old_index_check_init()
560 struct ubifs_idx_node *idx; in dbg_check_old_index() local
596 idx = &i->idx; in dbg_check_old_index()
[all …]
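
A minimal userspace sketch of the decode pattern the dbg_old_index_check_init() lines above use: on-disk fields are little-endian and converted exactly once at the read boundary. le16toh()/le64toh() stand in for the kernel's le16_to_cpu()/le64_to_cpu(), and the struct layout is invented, not the real ubifs_idx_node.

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative on-disk layout; NOT the real struct ubifs_idx_node. */
struct disk_idx_node {
	uint64_t sqnum;   /* stored little-endian on the medium */
	uint16_t level;   /* stored little-endian on the medium */
};

int main(void)
{
	/* Pretend this buffer was read straight off flash. */
	unsigned char raw[10] = { 0x2a, 0, 0, 0, 0, 0, 0, 0, 0x03, 0 };
	struct disk_idx_node node;

	memcpy(&node.sqnum, raw, 8);
	memcpy(&node.level, raw + 8, 2);

	/* Convert to host order exactly once, at the read boundary. */
	printf("sqnum=%llu level=%u\n",
	       (unsigned long long)le64toh(node.sqnum),
	       (unsigned)le16toh(node.level));
	return 0;
}
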
tnc_misc.c
272 struct ubifs_idx_node *idx; in read_znode() local
274 idx = kmalloc(c->max_idx_node_sz, GFP_NOFS); in read_znode()
275 if (!idx) in read_znode()
278 err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs); in read_znode()
280 kfree(idx); in read_znode()
284 err = ubifs_node_check_hash(c, idx, zzbr->hash); in read_znode()
286 ubifs_bad_hash(c, idx, zzbr->hash, lnum, offs); in read_znode()
287 kfree(idx); in read_znode()
291 znode->child_cnt = le16_to_cpu(idx->child_cnt); in read_znode()
292 znode->level = le16_to_cpu(idx->level); in read_znode()
[all …]
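
read_znode() above follows the kernel's allocate / read / verify / free shape, releasing the buffer on every early exit. A compilable userspace sketch of the same flow, with read_blob()/verify_blob() as hypothetical stand-ins for ubifs_read_node() and ubifs_node_check_hash():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for ubifs_read_node()/ubifs_node_check_hash(). */
static int read_blob(void *buf, size_t len)
{
	memset(buf, 0, len);      /* pretend we read from the medium */
	return 0;
}

static int verify_blob(const void *buf, size_t len)
{
	(void)buf; (void)len;
	return 0;                 /* pretend the hash matched */
}

static int load_node(size_t max_sz)
{
	void *buf = malloc(max_sz);
	int err;

	if (!buf)
		return -1;        /* -ENOMEM in the kernel */

	err = read_blob(buf, max_sz);
	if (err) {
		free(buf);        /* free on every early exit, as read_znode() does */
		return err;
	}

	err = verify_blob(buf, max_sz);
	if (err) {
		free(buf);
		return err;
	}

	free(buf);                /* buf is only a decode staging area */
	return 0;
}

int main(void)
{
	printf("load_node: %d\n", load_node(4096));
	return 0;
}
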
tnc_commit.c
25 static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx, in make_idx_node() argument
33 idx->ch.node_type = UBIFS_IDX_NODE; in make_idx_node()
34 idx->child_cnt = cpu_to_le16(znode->child_cnt); in make_idx_node()
35 idx->level = cpu_to_le16(znode->level); in make_idx_node()
37 struct ubifs_branch *br = ubifs_idx_branch(c, idx, i); in make_idx_node()
54 ubifs_prepare_node(c, idx, len, 0); in make_idx_node()
55 ubifs_node_calc_hash(c, idx, hash); in make_idx_node()
252 struct ubifs_idx_node *idx; in layout_leb_in_gaps() local
256 idx = snod->node; in layout_leb_in_gaps()
257 key_read(c, ubifs_idx_key(c, idx), &snod->key); in layout_leb_in_gaps()
[all …]
misc.h
200 const struct ubifs_idx_node *idx, in ubifs_idx_branch() argument
203 return (struct ubifs_branch *)((void *)idx->branches + in ubifs_idx_branch()
213 const struct ubifs_idx_node *idx) in ubifs_idx_key() argument
215 return (void *)((struct ubifs_branch *)idx->branches)->key; in ubifs_idx_key()
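
ubifs_idx_branch() above indexes a packed array whose element size is only known at runtime (branch header plus per-filesystem key and hash lengths), so it multiplies by a computed stride instead of using plain array indexing. A sketch under invented sizes:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct branch {
	uint32_t lnum;
	uint32_t offs;
	uint8_t key[];            /* key_len bytes follow; stride varies */
};

/* Computed-stride indexing, as in ubifs_idx_branch(). */
static struct branch *idx_branch(void *branches, size_t key_len, int i)
{
	size_t stride = sizeof(struct branch) + key_len;
	return (struct branch *)((char *)branches + stride * i);
}

int main(void)
{
	size_t key_len = 8;       /* illustrative; c->key_len in UBIFS */
	char *buf = calloc(4, sizeof(struct branch) + key_len);

	if (!buf)
		return 1;
	idx_branch(buf, key_len, 2)->lnum = 42;
	printf("branch 2 lnum=%u\n", idx_branch(buf, key_len, 2)->lnum);
	free(buf);
	return 0;
}
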
/fs/coda/
inode.c
112 int idx; in get_device_index() local
133 idx = iminor(inode); in get_device_index()
136 if (idx < 0 || idx >= MAX_CODADEVS) { in get_device_index()
141 return idx; in get_device_index()
153 int idx; in coda_fill_super() local
158 idx = get_device_index((struct coda_mount_data *) data); in coda_fill_super()
161 if(idx == -1) in coda_fill_super()
162 idx = 0; in coda_fill_super()
164 pr_info("%s: device index: %i\n", __func__, idx); in coda_fill_super()
166 vc = &coda_comms[idx]; in coda_fill_super()
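
get_device_index() above turns the minor number of the opened device into an index and rejects anything outside the fixed coda_comms[] array before use. A sketch of the same validate-then-index pattern; MAX_CODADEVS's real value is not shown in the snippet, so 5 here is illustrative:

#include <stdio.h>

#define MAX_CODADEVS 5            /* illustrative array size */

static int comms[MAX_CODADEVS];

static int get_device_index(int minor)
{
	if (minor < 0 || minor >= MAX_CODADEVS)
		return -1;        /* reject before indexing the array */
	return minor;
}

int main(void)
{
	int idx = get_device_index(7);

	if (idx == -1)
		idx = 0;          /* fall back to device 0, as coda_fill_super() does */
	printf("using device index %d (%d)\n", idx, comms[idx]);
	return 0;
}
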
/fs/udf/
unicode.c
158 int idx, len; in udf_name_from_CS0() local
203 for (idx = ocu_len - u_ch, ext_i_len = 0; in udf_name_from_CS0()
204 (idx >= 0) && (ext_i_len < EXT_SIZE); in udf_name_from_CS0()
205 idx -= u_ch, ext_i_len++) { in udf_name_from_CS0()
206 c = ocu[idx]; in udf_name_from_CS0()
208 c = (c << 8) | ocu[idx + 1]; in udf_name_from_CS0()
212 i_ext = idx; in udf_name_from_CS0()
220 idx = i_ext + u_ch; in udf_name_from_CS0()
222 ocu, ocu_len, &idx, in udf_name_from_CS0()
231 idx = 0; in udf_name_from_CS0()
[all …]
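
udf_name_from_CS0() above scans backwards from the end of the name, at most EXT_SIZE code units, looking for the extension separator. An ASCII-only sketch of that backward scan (the real code walks u_ch-sized units and handles 16-bit CS0 characters):

#include <stdio.h>
#include <string.h>

#define EXT_SIZE 5                /* taken from the scan bound above */

static int find_ext(const char *name, int len)
{
	int idx, ext_i_len;

	/* Walk back at most EXT_SIZE characters looking for '.'. */
	for (idx = len - 1, ext_i_len = 0;
	     idx >= 0 && ext_i_len < EXT_SIZE;
	     idx--, ext_i_len++) {
		if (name[idx] == '.')
			return idx;   /* extension starts here */
	}
	return -1;                    /* no extension within range */
}

int main(void)
{
	const char *n = "document.txt";
	printf("ext at %d\n", find_ext(n, (int)strlen(n)));
	return 0;
}
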
/fs/nfs/blocklayout/
dev.c
231 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask);
236 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_simple() argument
238 struct pnfs_block_volume *v = &volumes[idx]; in bl_parse_simple()
353 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_scsi() argument
355 struct pnfs_block_volume *v = &volumes[idx]; in bl_parse_scsi()
402 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_slice() argument
404 struct pnfs_block_volume *v = &volumes[idx]; in bl_parse_slice()
418 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_concat() argument
420 struct pnfs_block_volume *v = &volumes[idx]; in bl_parse_concat()
447 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_stripe() argument
[all …]
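
The bl_parse_* helpers above all share one signature: they receive the whole volumes array plus an index, because compound volume types (slice, concat, stripe) recurse into sub-volumes by index. A sketch of that dispatch-and-recurse shape with hypothetical types:

#include <stdio.h>

enum vol_type { VOL_SIMPLE, VOL_SLICE };

struct volume {
	enum vol_type type;
	int sub;                          /* index of an underlying volume */
};

static int parse_volume(struct volume *vols, int idx);

static int parse_simple(struct volume *vols, int idx)
{
	(void)vols;
	printf("simple volume %d\n", idx);
	return 0;
}

static int parse_slice(struct volume *vols, int idx)
{
	/* a slice sits on top of another volume: recurse by index */
	return parse_volume(vols, vols[idx].sub);
}

static int parse_volume(struct volume *vols, int idx)
{
	switch (vols[idx].type) {
	case VOL_SIMPLE: return parse_simple(vols, idx);
	case VOL_SLICE:  return parse_slice(vols, idx);
	default:         return -1;
	}
}

int main(void)
{
	struct volume vols[] = {
		{ .type = VOL_SIMPLE, .sub = -1 },
		{ .type = VOL_SLICE,  .sub = 0 },
	};
	return parse_volume(vols, 1);
}
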
/fs/ntfs/
logfile.c
264 u16 nr_clients, idx; in ntfs_check_log_client_array() local
280 idx = le16_to_cpu(ra->client_free_list); in ntfs_check_log_client_array()
283 for (idx_is_first = true; idx != LOGFILE_NO_CLIENT_CPU; nr_clients--, in ntfs_check_log_client_array()
284 idx = le16_to_cpu(cr->next_client)) { in ntfs_check_log_client_array()
285 if (!nr_clients || idx >= le16_to_cpu(ra->log_clients)) in ntfs_check_log_client_array()
288 cr = ca + idx; in ntfs_check_log_client_array()
299 idx = le16_to_cpu(ra->client_in_use_list); in ntfs_check_log_client_array()
375 pgoff_t idx; in ntfs_check_and_load_restart_page() local
384 idx = (pos + size) >> PAGE_SHIFT; in ntfs_check_and_load_restart_page()
387 page = ntfs_map_page(vi->i_mapping, idx); in ntfs_check_and_load_restart_page()
[all …]
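
ntfs_check_log_client_array() above walks client records chained by 16-bit indices rather than pointers, so it both bounds-checks every index and decrements a counter to catch cycles. A sketch of that guarded walk:

#include <stdint.h>
#include <stdio.h>

#define NO_CLIENT 0xffff          /* stand-in for LOGFILE_NO_CLIENT_CPU */

struct client_rec {
	uint16_t next;            /* index of next record, or NO_CLIENT */
};

static int check_chain(const struct client_rec *ca, uint16_t nr,
		       uint16_t head)
{
	uint16_t idx = head;
	uint16_t budget = nr;     /* at most nr hops; more means a cycle */

	while (idx != NO_CLIENT) {
		if (!budget || idx >= nr)
			return -1;        /* cycle or corrupt index */
		budget--;
		idx = ca[idx].next;
	}
	return 0;
}

int main(void)
{
	struct client_rec ca[3] = { { 1 }, { 2 }, { NO_CLIENT } };

	printf("ok=%d\n", check_chain(ca, 3, 0));
	ca[2].next = 0;           /* introduce a cycle */
	printf("cycle=%d\n", check_chain(ca, 3, 0));
	return 0;
}
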
/fs/overlayfs/
export.c
94 return oe->lowerstack[0].layer->idx; in ovl_connectable_layer()
115 origin_layer = OVL_E(dentry)->lowerstack[0].layer->idx; in ovl_connect_layer()
346 static struct dentry *ovl_dentry_real_at(struct dentry *dentry, int idx) in ovl_dentry_real_at() argument
351 if (!idx) in ovl_dentry_real_at()
355 if (oe->lowerstack[i].layer->idx == idx) in ovl_dentry_real_at()
387 if (ovl_dentry_real_at(connected, layer->idx) != parent) in ovl_lookup_real_one()
405 } else if (ovl_dentry_real_at(this, layer->idx) != real) { in ovl_lookup_real_one()
419 real, layer->idx, connected, err); in ovl_lookup_real_one()
445 inode = ovl_lookup_inode(sb, real, !layer->idx); in ovl_lookup_real_inode()
457 if (!this && layer->idx && ofs->indexdir && !WARN_ON(!d_is_dir(real))) { in ovl_lookup_real_inode()
[all …]
namei.c
776 int ovl_path_next(int idx, struct dentry *dentry, struct path *path) in ovl_path_next() argument
780 BUG_ON(idx < 0); in ovl_path_next()
781 if (idx == 0) { in ovl_path_next()
785 idx++; in ovl_path_next()
787 BUG_ON(idx > oe->numlower); in ovl_path_next()
788 path->dentry = oe->lowerstack[idx - 1].dentry; in ovl_path_next()
789 path->mnt = oe->lowerstack[idx - 1].layer->mnt; in ovl_path_next()
791 return (idx < oe->numlower) ? idx + 1 : -1; in ovl_path_next()
908 d.last = lower.layer->idx == roe->numlower; in ovl_lookup()
989 i = lower.layer->idx - 1; in ovl_lookup()
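
ovl_path_next() above implements a cursor protocol: the caller feeds the returned value back in, 0 selects the upper layer, 1..numlower index the lower stack, and -1 marks the end. A sketch with the layer contents stubbed out:

#include <stdio.h>

#define NUMLOWER 3

static int path_next(int idx, const char **out)
{
	static const char *layers[] = { "upper", "lower0", "lower1", "lower2" };

	*out = layers[idx];       /* 0 is upper; idx-1 indexes the lower stack */
	return idx < NUMLOWER ? idx + 1 : -1;
}

int main(void)
{
	const char *name;
	int idx = 0;

	while (idx != -1) {        /* -1 ends the iteration */
		idx = path_next(idx, &name);
		printf("%s\n", name);
	}
	return 0;
}
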
/fs/hpfs/
alloc.c
489 static unsigned find_run(__le32 *bmp, unsigned *idx) in find_run() argument
492 while (tstbits(bmp, *idx, 1)) { in find_run()
493 (*idx)++; in find_run()
494 if (unlikely(*idx >= 0x4000)) in find_run()
498 while (!tstbits(bmp, *idx + len, 1)) in find_run()
529 unsigned idx, len, start_bmp, end_bmp; in hpfs_trim_fs() local
550 idx = 0; in hpfs_trim_fs()
551 while ((len = find_run(bmp, &idx)) && !err) { in hpfs_trim_fs()
552 err = do_trim(s, sbi->sb_dirband_start + idx * 4, len * 4, start, end, minlen, result); in hpfs_trim_fs()
553 idx += len; in hpfs_trim_fs()
[all …]
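
find_run() above is a classic bitmap run scanner: skip bits in one state, then count how long the opposite-state run is, leaving *idx at the run's start so the caller can advance by the returned length. A userspace sketch (the bitmap polarity and size here are illustrative):

#include <stdio.h>

#define NBITS 64

static int tstbit(const unsigned char *bmp, unsigned bit)
{
	return (bmp[bit / 8] >> (bit % 8)) & 1;
}

static unsigned find_run(const unsigned char *bmp, unsigned *idx)
{
	unsigned len;

	while (*idx < NBITS && tstbit(bmp, *idx))  /* skip set bits */
		(*idx)++;
	if (*idx >= NBITS)
		return 0;                          /* no run left */
	for (len = 0; *idx + len < NBITS && !tstbit(bmp, *idx + len); len++)
		;                                  /* count the clear run */
	return len;
}

int main(void)
{
	unsigned char bmp[NBITS / 8] = { 0x0f };   /* bits 0-3 set */
	unsigned idx = 0, len;

	while ((len = find_run(bmp, &idx))) {
		printf("run at %u, length %u\n", idx, len);
		idx += len;       /* advance past the run, as hpfs_trim_fs() does */
	}
	return 0;
}
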
/fs/hfsplus/
btree.c
312 static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx) in hfs_bmap_new_bmap() argument
319 node = hfs_bnode_create(tree, idx); in hfs_bmap_new_bmap()
324 prev->next = idx; in hfs_bmap_new_bmap()
325 cnid = cpu_to_be32(idx); in hfs_bmap_new_bmap()
377 u32 nidx, idx; in hfs_bmap_alloc() local
399 idx = 0; in hfs_bmap_alloc()
407 idx += i; in hfs_bmap_alloc()
415 idx); in hfs_bmap_alloc()
424 idx += 8; in hfs_bmap_alloc()
431 next_node = hfs_bmap_new_bmap(node, idx); in hfs_bmap_alloc()
bfind.c
235 int idx, res = 0; in hfs_brec_goto() local
246 idx = bnode->prev; in hfs_brec_goto()
247 if (!idx) { in hfs_brec_goto()
252 bnode = hfs_bnode_find(tree, idx); in hfs_brec_goto()
264 idx = bnode->next; in hfs_brec_goto()
265 if (!idx) { in hfs_brec_goto()
270 bnode = hfs_bnode_find(tree, idx); in hfs_brec_goto()
/fs/hfs/
btree.c
188 static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx) in hfs_bmap_new_bmap() argument
195 node = hfs_bnode_create(tree, idx); in hfs_bmap_new_bmap()
202 prev->next = idx; in hfs_bmap_new_bmap()
203 cnid = cpu_to_be32(idx); in hfs_bmap_new_bmap()
251 u32 nidx, idx; in hfs_bmap_alloc() local
273 idx = 0; in hfs_bmap_alloc()
281 idx += i; in hfs_bmap_alloc()
288 return hfs_bnode_create(tree, idx); in hfs_bmap_alloc()
297 idx += 8; in hfs_bmap_alloc()
304 next_node = hfs_bmap_new_bmap(node, idx); in hfs_bmap_alloc()
bfind.c
166 int idx, res = 0; in hfs_brec_goto() local
177 idx = bnode->prev; in hfs_brec_goto()
178 if (!idx) { in hfs_brec_goto()
183 bnode = hfs_bnode_find(tree, idx); in hfs_brec_goto()
195 idx = bnode->next; in hfs_brec_goto()
196 if (!idx) { in hfs_brec_goto()
201 bnode = hfs_bnode_find(tree, idx); in hfs_brec_goto()
/fs/erofs/
xattr.h
47 static inline const struct xattr_handler *erofs_xattr_handler(unsigned int idx) in erofs_xattr_handler() argument
62 return idx && idx < ARRAY_SIZE(xattr_handler_map) ? in erofs_xattr_handler()
63 xattr_handler_map[idx] : NULL; in erofs_xattr_handler()
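
erofs_xattr_handler() is a bounded table lookup: index 0 is deliberately invalid and anything past ARRAY_SIZE(xattr_handler_map) yields NULL instead of a stray pointer. A sketch with stand-in handlers:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct xattr_handler { const char *prefix; };

static const struct xattr_handler user_handler = { "user." };
static const struct xattr_handler trusted_handler = { "trusted." };

static const struct xattr_handler *handler_map[] = {
	[1] = &user_handler,      /* on-disk indices start at 1 */
	[2] = &trusted_handler,
};

static const struct xattr_handler *get_handler(unsigned int idx)
{
	/* 0 and out-of-range indices both yield NULL, never a stray pointer */
	return idx && idx < ARRAY_SIZE(handler_map) ? handler_map[idx] : NULL;
}

int main(void)
{
	printf("%s\n", get_handler(1)->prefix);
	printf("%p\n", (void *)get_handler(9));   /* out of range: NULL */
	return 0;
}
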
/fs/f2fs/
node.c
979 int idx = depth - 2; in truncate_partial_nodes() local
986 for (i = 0; i < idx + 1; i++) { in truncate_partial_nodes()
991 idx = i - 1; in truncate_partial_nodes()
997 f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK); in truncate_partial_nodes()
1000 for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) { in truncate_partial_nodes()
1001 child_nid = get_nid(pages[idx], i, false); in truncate_partial_nodes()
1008 if (set_nid(pages[idx], i, 0, false)) in truncate_partial_nodes()
1012 if (offset[idx + 1] == 0) { in truncate_partial_nodes()
1013 dn->node_page = pages[idx]; in truncate_partial_nodes()
1014 dn->nid = nid[idx]; in truncate_partial_nodes()
[all …]
trace.c
158 unsigned idx; in f2fs_destroy_trace_ios() local
161 for (idx = 0; idx < found; idx++) in f2fs_destroy_trace_ios()
162 radix_tree_delete(&pids, pid[idx]); in f2fs_destroy_trace_ios()
/fs/nfs/flexfilelayout/
flexfilelayout.h
142 FF_LAYOUT_COMP(struct pnfs_layout_segment *lseg, u32 idx) in FF_LAYOUT_COMP() argument
146 if (idx < fls->mirror_array_cnt) in FF_LAYOUT_COMP()
147 return fls->mirror_array[idx]; in FF_LAYOUT_COMP()
152 FF_LAYOUT_DEVID_NODE(struct pnfs_layout_segment *lseg, u32 idx) in FF_LAYOUT_DEVID_NODE() argument
154 struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, idx); in FF_LAYOUT_DEVID_NODE()
flexfilelayoutdev.c
554 u32 idx; in ff_read_layout_has_available_ds() local
556 for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) { in ff_read_layout_has_available_ds()
557 mirror = FF_LAYOUT_COMP(lseg, idx); in ff_read_layout_has_available_ds()
576 u32 idx; in ff_rw_layout_has_available_ds() local
578 for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) { in ff_rw_layout_has_available_ds()
579 mirror = FF_LAYOUT_COMP(lseg, idx); in ff_rw_layout_has_available_ds()
flexfilelayout.c
796 ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, int idx) in ff_layout_mark_ds_unreachable() argument
798 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); in ff_layout_mark_ds_unreachable()
805 ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, int idx) in ff_layout_mark_ds_reachable() argument
807 struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); in ff_layout_mark_ds_reachable()
822 int idx; in ff_layout_choose_ds_for_read() local
825 for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) { in ff_layout_choose_ds_for_read()
826 if (idx+1 == fls->mirror_array_cnt) in ff_layout_choose_ds_for_read()
829 mirror = FF_LAYOUT_COMP(lseg, idx); in ff_layout_choose_ds_for_read()
838 *best_idx = idx; in ff_layout_choose_ds_for_read()
1132 int idx) in ff_layout_async_handle_error_v4() argument
[all …]
/fs/cifs/
winucase.c
629 unsigned char idx; in cifs_toupper() local
634 idx = (in & 0xff00) >> 8; in cifs_toupper()
637 tbl = toplevel[idx]; in cifs_toupper()
642 idx = in & 0xff; in cifs_toupper()
645 out = tbl[idx]; in cifs_toupper()
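
cifs_toupper() above is a two-stage lookup: the high byte of the character selects a second-level table (NULL meaning "nothing in this range maps"), the low byte indexes into it, and a zero entry means the character uppercases to itself. An ASCII-only sketch:

#include <stdint.h>
#include <stdio.h>

static const uint16_t ascii_upper[256] = {
	['a'] = 'A', ['b'] = 'B', ['c'] = 'C',    /* ...and so on... */
};

static const uint16_t *toplevel[256] = {
	[0x00] = ascii_upper,     /* only the 0x00xx range has mappings here */
};

static uint16_t to_upper(uint16_t in)
{
	const uint16_t *tbl = toplevel[(in & 0xff00) >> 8];
	uint16_t out;

	if (!tbl)
		return in;        /* no table: nothing in this range maps */
	out = tbl[in & 0xff];
	return out ? out : in;    /* zero entry: maps to itself */
}

int main(void)
{
	printf("%c %c\n", (char)to_upper('b'), (char)to_upper('Z'));
	return 0;
}
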
/fs/ocfs2/dlm/
dlmcommon.h
380 static inline char *dlm_list_in_text(enum dlm_lockres_list idx) in dlm_list_in_text() argument
382 if (idx == DLM_GRANTED_LIST) in dlm_list_in_text()
384 else if (idx == DLM_CONVERTING_LIST) in dlm_list_in_text()
386 else if (idx == DLM_BLOCKED_LIST) in dlm_list_in_text()
393 dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx) in dlm_list_idx_to_ptr() argument
396 if (idx == DLM_GRANTED_LIST) in dlm_list_idx_to_ptr()
398 else if (idx == DLM_CONVERTING_LIST) in dlm_list_idx_to_ptr()
400 else if (idx == DLM_BLOCKED_LIST) in dlm_list_idx_to_ptr()
952 void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data);
953 void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data);
[all …]
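
dlm_list_in_text() and dlm_list_idx_to_ptr() above keep the enum-to-queue correspondence in exactly two helpers so callers never hard-code which list is which. A sketch with the list heads stubbed down to ints:

#include <stdio.h>

enum lockres_list { GRANTED_LIST, CONVERTING_LIST, BLOCKED_LIST };

struct lock_resource {
	int granted, converting, blocked;   /* stand-ins for list heads */
};

static const char *list_in_text(enum lockres_list idx)
{
	if (idx == GRANTED_LIST)
		return "granted";
	else if (idx == CONVERTING_LIST)
		return "converting";
	else if (idx == BLOCKED_LIST)
		return "blocked";
	return "unknown";
}

static int *list_idx_to_ptr(struct lock_resource *res, enum lockres_list idx)
{
	if (idx == GRANTED_LIST)
		return &res->granted;
	else if (idx == CONVERTING_LIST)
		return &res->converting;
	return &res->blocked;
}

int main(void)
{
	struct lock_resource res = { 0 };

	*list_idx_to_ptr(&res, BLOCKED_LIST) = 1;
	printf("%s: %d\n", list_in_text(BLOCKED_LIST), res.blocked);
	return 0;
}
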
/fs/nfsd/
nfs4layouts.c
50 static inline u32 devid_hashfn(u64 idx) in devid_hashfn() argument
52 return jhash_2words(idx, idx >> 32, 0) & DEVID_HASH_MASK; in devid_hashfn()
87 map->idx = nfsd_devid_seq++; in nfsd4_alloc_devid_map()
88 list_add_tail_rcu(&map->hash, &nfsd_devid_hash[devid_hashfn(map->idx)]); in nfsd4_alloc_devid_map()
98 nfsd4_find_devid_map(int idx) in nfsd4_find_devid_map() argument
103 list_for_each_entry_rcu(map, &nfsd_devid_hash[devid_hashfn(idx)], hash) in nfsd4_find_devid_map()
104 if (map->idx == idx) in nfsd4_find_devid_map()
121 id->fsid_idx = fhp->fh_export->ex_devid_map->idx; in nfsd4_set_deviceid()
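
devid_hashfn() above folds a 64-bit device id into a bucket index by hashing its two 32-bit halves and masking to the table size. The sketch below uses a cheap multiplicative mix as a stand-in for the kernel's jhash_2words(), and the hash-size constants are illustrative:

#include <stdint.h>
#include <stdio.h>

#define DEVID_HASH_BITS 8                     /* illustrative */
#define DEVID_HASH_SIZE (1u << DEVID_HASH_BITS)
#define DEVID_HASH_MASK (DEVID_HASH_SIZE - 1)

static uint32_t devid_hashfn(uint64_t idx)
{
	uint32_t lo = (uint32_t)idx, hi = (uint32_t)(idx >> 32);

	/* cheap multiplicative mix; the kernel uses jhash_2words() */
	return ((lo ^ hi) * 2654435761u) & DEVID_HASH_MASK;
}

int main(void)
{
	printf("bucket=%u\n", devid_hashfn(0x123456789abcdefULL));
	return 0;
}
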
/fs/xfs/libxfs/
xfs_da_format.h
794 xfs_attr3_leaf_name(xfs_attr_leafblock_t *leafp, int idx) in xfs_attr3_leaf_name() argument
798 return &((char *)leafp)[be16_to_cpu(entries[idx].nameidx)]; in xfs_attr3_leaf_name()
802 xfs_attr3_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx) in xfs_attr3_leaf_name_remote() argument
804 return (xfs_attr_leaf_name_remote_t *)xfs_attr3_leaf_name(leafp, idx); in xfs_attr3_leaf_name_remote()
808 xfs_attr3_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx) in xfs_attr3_leaf_name_local() argument
810 return (xfs_attr_leaf_name_local_t *)xfs_attr3_leaf_name(leafp, idx); in xfs_attr3_leaf_name_local()
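
xfs_attr3_leaf_name() above addresses names by stored offset: each leaf entry carries a big-endian byte offset (nameidx) from the start of the block, so lookup is base pointer plus decoded offset. A sketch with an invented block layout, using ntohs() in place of be16_to_cpu():

#include <arpa/inet.h>   /* ntohs()/htons() as be16 conversion stand-ins */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct leaf_entry {
	uint16_t nameidx;         /* big-endian offset within the block */
};

struct leaf_block {
	struct leaf_entry entries[4];
	char names[64];           /* names live later in the same block */
};

static char *leaf_name(struct leaf_block *leafp, int idx)
{
	/* base pointer plus decoded per-entry offset */
	return &((char *)leafp)[ntohs(leafp->entries[idx].nameidx)];
}

int main(void)
{
	struct leaf_block blk = { 0 };
	uint16_t off = (uint16_t)offsetof(struct leaf_block, names);

	strcpy(blk.names, "user.comment");
	blk.entries[0].nameidx = htons(off);
	printf("%s\n", leaf_name(&blk, 0));
	return 0;
}
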
