Searched refs:idx (Results 1 – 25 of 47) sorted by relevance

/fs/ubifs/
commit.c
456 struct ubifs_idx_node idx __attribute__((aligned(8))); member
471 struct ubifs_idx_node *idx; in dbg_old_index_check_init() local
480 idx = kmalloc(c->max_idx_node_sz, GFP_NOFS); in dbg_old_index_check_init()
481 if (!idx) in dbg_old_index_check_init()
484 err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs); in dbg_old_index_check_init()
488 d->old_zroot_level = le16_to_cpu(idx->level); in dbg_old_index_check_init()
489 d->old_zroot_sqnum = le64_to_cpu(idx->ch.sqnum); in dbg_old_index_check_init()
491 kfree(idx); in dbg_old_index_check_init()
515 struct ubifs_idx_node *idx; in dbg_check_old_index() local
551 idx = &i->idx; in dbg_check_old_index()
[all …]
tnc_misc.c
277 struct ubifs_idx_node *idx; in read_znode() local
279 idx = kmalloc(c->max_idx_node_sz, GFP_NOFS); in read_znode()
280 if (!idx) in read_znode()
283 err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs); in read_znode()
285 kfree(idx); in read_znode()
289 znode->child_cnt = le16_to_cpu(idx->child_cnt); in read_znode()
290 znode->level = le16_to_cpu(idx->level); in read_znode()
305 const struct ubifs_branch *br = ubifs_idx_branch(c, idx, i); in read_znode()
385 kfree(idx); in read_znode()
390 dbg_dump_node(c, idx); in read_znode()
[all …]
tnc_commit.c
36 static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx, in make_idx_node() argument
43 idx->ch.node_type = UBIFS_IDX_NODE; in make_idx_node()
44 idx->child_cnt = cpu_to_le16(znode->child_cnt); in make_idx_node()
45 idx->level = cpu_to_le16(znode->level); in make_idx_node()
47 struct ubifs_branch *br = ubifs_idx_branch(c, idx, i); in make_idx_node()
61 ubifs_prepare_node(c, idx, len, 0); in make_idx_node()
254 struct ubifs_idx_node *idx; in layout_leb_in_gaps() local
258 idx = snod->node; in layout_leb_in_gaps()
259 key_read(c, ubifs_idx_key(c, idx), &snod->key); in layout_leb_in_gaps()
260 level = le16_to_cpu(idx->level); in layout_leb_in_gaps()
[all …]
misc.h
268 const struct ubifs_idx_node *idx, in ubifs_idx_branch() argument
271 return (struct ubifs_branch *)((void *)idx->branches + in ubifs_idx_branch()
281 const struct ubifs_idx_node *idx) in ubifs_idx_key() argument
283 return (void *)((struct ubifs_branch *)idx->branches)->key; in ubifs_idx_key()
sb.c
76 struct ubifs_idx_node *idx; in create_default_filesystem() local
269 idx = kzalloc(ALIGN(tmp, c->min_io_size), GFP_KERNEL); in create_default_filesystem()
270 if (!idx) in create_default_filesystem()
276 idx->ch.node_type = UBIFS_IDX_NODE; in create_default_filesystem()
277 idx->child_cnt = cpu_to_le16(1); in create_default_filesystem()
279 br = ubifs_idx_branch(c, idx, 0); in create_default_filesystem()
283 err = ubifs_write_node(c, idx, tmp, main_first + DEFAULT_IDX_LEB, 0, in create_default_filesystem()
285 kfree(idx); in create_default_filesystem()
debug.c
520 const struct ubifs_idx_node *idx = node; in dbg_dump_node() local
522 n = le16_to_cpu(idx->child_cnt); in dbg_dump_node()
525 (int)le16_to_cpu(idx->level)); in dbg_dump_node()
531 br = ubifs_idx_branch(c, idx, i); in dbg_dump_node()
1461 int idx; in dbg_walk_index() local
1475 for (idx = 0; idx < znode->child_cnt; idx++) { in dbg_walk_index()
1476 zbr = &znode->zbranch[idx]; in dbg_walk_index()
1491 idx = znode->iip + 1; in dbg_walk_index()
1493 if (idx < znode->child_cnt) { in dbg_walk_index()
1495 zbr = &znode->zbranch[idx]; in dbg_walk_index()
[all …]
/fs/xfs/
xfs_bmap.c
100 xfs_extnum_t idx, /* extent number to update/insert */
117 xfs_extnum_t idx, /* extent number to update/insert */
134 xfs_extnum_t idx, /* extent number to update/insert */
147 xfs_extnum_t idx, /* extent number to update/insert */
161 xfs_extnum_t idx, /* extent number to update/insert */
197 xfs_extnum_t idx, /* extent number to update/insert */
284 xfs_extnum_t idx, /* index of entry(entries) deleted */
297 xfs_extnum_t idx, /* index of entry(entries) inserted */
311 xfs_extnum_t idx, /* index of entry updated */
322 xfs_extnum_t idx, /* index of entry to be updated */
[all …]
xfs_attr_leaf.h
155 xfs_attr_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx) in xfs_attr_leaf_name_remote() argument
158 &((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)]; in xfs_attr_leaf_name_remote()
162 xfs_attr_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx) in xfs_attr_leaf_name_local() argument
165 &((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)]; in xfs_attr_leaf_name_local()
168 static inline char *xfs_attr_leaf_name(xfs_attr_leafblock_t *leafp, int idx) in xfs_attr_leaf_name() argument
170 return &((char *)leafp)[be16_to_cpu(leafp->entries[idx].nameidx)]; in xfs_attr_leaf_name()
xfs_inode.c
3267 xfs_extnum_t idx) /* index of target extent */ in xfs_iext_get_ext() argument
3269 ASSERT(idx >= 0); in xfs_iext_get_ext()
3270 if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) { in xfs_iext_get_ext()
3275 xfs_extnum_t page_idx = idx; /* ext index in target list */ in xfs_iext_get_ext()
3280 return &ifp->if_u1.if_extents[idx]; in xfs_iext_get_ext()
3293 xfs_extnum_t idx, /* starting index of new items */ in xfs_iext_insert() argument
3300 xfs_iext_add(ifp, idx, count); in xfs_iext_insert()
3301 for (i = idx; i < idx + count; i++, new++) in xfs_iext_insert()
3320 xfs_extnum_t idx, /* index to begin adding exts */ in xfs_iext_add() argument
3328 ASSERT((idx >= 0) && (idx <= nextents)); in xfs_iext_add()
[all …]
xfs_trans_item.c
452 xfs_trans_add_busy(xfs_trans_t *tp, xfs_agnumber_t ag, xfs_extlen_t idx) in xfs_trans_add_busy() argument
491 lbsp->lbc_idx = idx; in xfs_trans_add_busy()
525 lbsp->lbc_idx = idx; in xfs_trans_add_busy()
/fs/coda/
inode.c
105 int idx; in get_device_index() local
131 idx = iminor(inode); in get_device_index()
134 if(idx < 0 || idx >= MAX_CODADEVS) { in get_device_index()
139 return idx; in get_device_index()
148 int idx; in coda_fill_super() local
150 idx = get_device_index((struct coda_mount_data *) data); in coda_fill_super()
153 if(idx == -1) in coda_fill_super()
154 idx = 0; in coda_fill_super()
156 printk(KERN_INFO "coda_read_super: device index: %i\n", idx); in coda_fill_super()
158 vc = &coda_comms[idx]; in coda_fill_super()
psdev.c
273 int idx, err; in coda_psdev_open() local
275 idx = iminor(inode); in coda_psdev_open()
276 if (idx < 0 || idx >= MAX_CODADEVS) in coda_psdev_open()
282 vcp = &coda_comms[idx]; in coda_psdev_open()
/fs/ntfs/
logfile.c
276 u16 nr_clients, idx; in ntfs_check_log_client_array() local
292 idx = le16_to_cpu(ra->client_free_list); in ntfs_check_log_client_array()
295 for (idx_is_first = true; idx != LOGFILE_NO_CLIENT_CPU; nr_clients--, in ntfs_check_log_client_array()
296 idx = le16_to_cpu(cr->next_client)) { in ntfs_check_log_client_array()
297 if (!nr_clients || idx >= le16_to_cpu(ra->log_clients)) in ntfs_check_log_client_array()
300 cr = ca + idx; in ntfs_check_log_client_array()
311 idx = le16_to_cpu(ra->client_in_use_list); in ntfs_check_log_client_array()
387 pgoff_t idx; in ntfs_check_and_load_restart_page() local
396 idx = (pos + size) >> PAGE_CACHE_SHIFT; in ntfs_check_and_load_restart_page()
399 page = ntfs_map_page(vi->i_mapping, idx); in ntfs_check_and_load_restart_page()
[all …]
attrib.c
2499 pgoff_t idx, end; in ntfs_attr_set() local
2516 idx = ofs >> PAGE_CACHE_SHIFT; in ntfs_attr_set()
2529 page = read_mapping_page(mapping, idx, NULL); in ntfs_attr_set()
2532 "page (error, index 0x%lx).", idx); in ntfs_attr_set()
2540 if (idx == end) in ntfs_attr_set()
2550 if (idx == end) in ntfs_attr_set()
2552 idx++; in ntfs_attr_set()
2555 for (; idx < end; idx++) { in ntfs_attr_set()
2557 page = grab_cache_page(mapping, idx); in ntfs_attr_set()
2560 "page (index 0x%lx).", idx); in ntfs_attr_set()
[all …]
/fs/hfsplus/
bfind.c
154 int idx, res = 0; in hfs_brec_goto() local
165 idx = bnode->prev; in hfs_brec_goto()
166 if (!idx) { in hfs_brec_goto()
171 bnode = hfs_bnode_find(tree, idx); in hfs_brec_goto()
183 idx = bnode->next; in hfs_brec_goto()
184 if (!idx) { in hfs_brec_goto()
189 bnode = hfs_bnode_find(tree, idx); in hfs_brec_goto()
btree.c
149 static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx) in hfs_bmap_new_bmap() argument
156 node = hfs_bnode_create(tree, idx); in hfs_bmap_new_bmap()
161 prev->next = idx; in hfs_bmap_new_bmap()
162 cnid = cpu_to_be32(idx); in hfs_bmap_new_bmap()
186 u32 nidx, idx; in hfs_bmap_alloc() local
223 idx = 0; in hfs_bmap_alloc()
231 idx += i; in hfs_bmap_alloc()
238 return hfs_bnode_create(tree, idx); in hfs_bmap_alloc()
247 idx += 8; in hfs_bmap_alloc()
254 next_node = hfs_bmap_new_bmap(node, idx); in hfs_bmap_alloc()
/fs/hfs/
btree.c
171 static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx) in hfs_bmap_new_bmap() argument
178 node = hfs_bnode_create(tree, idx); in hfs_bmap_new_bmap()
185 prev->next = idx; in hfs_bmap_new_bmap()
186 cnid = cpu_to_be32(idx); in hfs_bmap_new_bmap()
210 u32 nidx, idx; in hfs_bmap_alloc() local
247 idx = 0; in hfs_bmap_alloc()
255 idx += i; in hfs_bmap_alloc()
262 return hfs_bnode_create(tree, idx); in hfs_bmap_alloc()
271 idx += 8; in hfs_bmap_alloc()
278 next_node = hfs_bmap_new_bmap(node, idx); in hfs_bmap_alloc()
bfind.c
163 int idx, res = 0; in hfs_brec_goto() local
174 idx = bnode->prev; in hfs_brec_goto()
175 if (!idx) { in hfs_brec_goto()
180 bnode = hfs_bnode_find(tree, idx); in hfs_brec_goto()
192 idx = bnode->next; in hfs_brec_goto()
193 if (!idx) { in hfs_brec_goto()
198 bnode = hfs_bnode_find(tree, idx); in hfs_brec_goto()
/fs/
bio.c
154 unsigned int bvec_nr_vecs(unsigned short idx) in bvec_nr_vecs() argument
156 return bvec_slabs[idx].nr_vecs; in bvec_nr_vecs()
159 void bvec_free_bs(struct bio_set *bs, struct bio_vec *bv, unsigned int idx) in bvec_free_bs() argument
161 BIO_BUG_ON(idx >= BIOVEC_NR_POOLS); in bvec_free_bs()
163 if (idx == BIOVEC_MAX_IDX) in bvec_free_bs()
166 struct biovec_slab *bvs = bvec_slabs + idx; in bvec_free_bs()
172 struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, in bvec_alloc_bs() argument
190 *idx = 0; in bvec_alloc_bs()
193 *idx = 1; in bvec_alloc_bs()
196 *idx = 2; in bvec_alloc_bs()
[all …]
/fs/smbfs/
cache.c
171 if (ctl.idx >= SMB_DIRCACHE_SIZE) { in smb_fill_cache()
179 ctl.idx -= SMB_DIRCACHE_SIZE; in smb_fill_cache()
186 ctl.cache->dentry[ctl.idx] = newdent; in smb_fill_cache()
205 ctl.idx += 1; in smb_fill_cache()
dir.c
153 ctl.idx = ctl.fpos % SMB_DIRCACHE_SIZE; in smb_readdir()
164 while (ctl.idx < SMB_DIRCACHE_SIZE) { in smb_readdir()
168 dent = smb_dget_fpos(ctl.cache->dentry[ctl.idx], in smb_readdir()
180 ctl.idx += 1; in smb_readdir()
191 ctl.idx = 0; in smb_readdir()
208 ctl.idx = SMB_DIRCACHE_START; in smb_readdir()
215 if (ctl.idx == -1) in smb_readdir()
/fs/adfs/
dir_f.c
70 #define dir_u8(idx) \ argument
71 ({ int _buf = idx >> blocksize_bits; \
72 int _off = idx - (_buf << blocksize_bits);\
76 #define dir_u32(idx) \ argument
77 ({ int _buf = idx >> blocksize_bits; \
78 int _off = idx - (_buf << blocksize_bits);\
/fs/ocfs2/dlm/
dlmrecovery.c
2292 static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx) in __dlm_hb_node_down() argument
2296 if (dlm->reco.new_master == idx) { in __dlm_hb_node_down()
2298 dlm->name, idx); in __dlm_hb_node_down()
2304 "finalize1 state, clearing\n", dlm->name, idx); in __dlm_hb_node_down()
2311 if (dlm->joining_node == idx) { in __dlm_hb_node_down()
2312 mlog(0, "Clearing join state for node %u\n", idx); in __dlm_hb_node_down()
2317 if (!test_bit(idx, dlm->live_nodes_map)) { in __dlm_hb_node_down()
2320 dlm->name, idx); in __dlm_hb_node_down()
2325 if (!test_bit(idx, dlm->domain_map)) { in __dlm_hb_node_down()
2328 mlog(0, "node %u already removed from domain!\n", idx); in __dlm_hb_node_down()
[all …]
dlmcommon.h
401 dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx) in dlm_list_idx_to_ptr() argument
404 if (idx == DLM_GRANTED_LIST) in dlm_list_idx_to_ptr()
406 else if (idx == DLM_CONVERTING_LIST) in dlm_list_idx_to_ptr()
408 else if (idx == DLM_BLOCKED_LIST) in dlm_list_idx_to_ptr()
948 void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data);
949 void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data);
1021 void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up);
/fs/ncpfs/
dir.c
464 ctl.idx = ctl.fpos % NCP_DIRCACHE_SIZE; in ncp_readdir()
475 while (ctl.idx < NCP_DIRCACHE_SIZE) { in ncp_readdir()
479 dent = ncp_dget_fpos(ctl.cache->dentry[ctl.idx], in ncp_readdir()
490 ctl.idx += 1; in ncp_readdir()
501 ctl.idx = 0; in ncp_readdir()
523 ctl.idx = NCP_DIRCACHE_START; in ncp_readdir()
610 if (ctl.idx >= NCP_DIRCACHE_SIZE) { in ncp_fill_cache()
618 ctl.idx -= NCP_DIRCACHE_SIZE; in ncp_fill_cache()
625 ctl.cache->dentry[ctl.idx] = newdent; in ncp_fill_cache()
643 ctl.idx += 1; in ncp_fill_cache()
