/fs/ubifs/

  D | commit.c
        501: struct ubifs_idx_node idx __aligned(8);
        516: struct ubifs_idx_node *idx;    in dbg_old_index_check_init() (local)
        525: idx = kmalloc(c->max_idx_node_sz, GFP_NOFS);    in dbg_old_index_check_init()
        526: if (!idx)    in dbg_old_index_check_init()
        529: err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs);    in dbg_old_index_check_init()
        533: d->old_zroot_level = le16_to_cpu(idx->level);    in dbg_old_index_check_init()
        534: d->old_zroot_sqnum = le64_to_cpu(idx->ch.sqnum);    in dbg_old_index_check_init()
        536: kfree(idx);    in dbg_old_index_check_init()
        560: struct ubifs_idx_node *idx;    in dbg_check_old_index() (local)
        596: idx = &i->idx;    in dbg_check_old_index()
        [all …]

  D | tnc_misc.c
        272: struct ubifs_idx_node *idx;    in read_znode() (local)
        274: idx = kmalloc(c->max_idx_node_sz, GFP_NOFS);    in read_znode()
        275: if (!idx)    in read_znode()
        278: err = ubifs_read_node(c, idx, UBIFS_IDX_NODE, len, lnum, offs);    in read_znode()
        280: kfree(idx);    in read_znode()
        284: err = ubifs_node_check_hash(c, idx, zzbr->hash);    in read_znode()
        286: ubifs_bad_hash(c, idx, zzbr->hash, lnum, offs);    in read_znode()
        287: kfree(idx);    in read_znode()
        291: znode->child_cnt = le16_to_cpu(idx->child_cnt);    in read_znode()
        292: znode->level = le16_to_cpu(idx->level);    in read_znode()
        [all …]

  D | tnc_commit.c
        25: static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx,    in make_idx_node() (argument)
        33: idx->ch.node_type = UBIFS_IDX_NODE;    in make_idx_node()
        34: idx->child_cnt = cpu_to_le16(znode->child_cnt);    in make_idx_node()
        35: idx->level = cpu_to_le16(znode->level);    in make_idx_node()
        37: struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);    in make_idx_node()
        54: ubifs_prepare_node(c, idx, len, 0);    in make_idx_node()
        55: ubifs_node_calc_hash(c, idx, hash);    in make_idx_node()
        252: struct ubifs_idx_node *idx;    in layout_leb_in_gaps() (local)
        256: idx = snod->node;    in layout_leb_in_gaps()
        257: key_read(c, ubifs_idx_key(c, idx), &snod->key);    in layout_leb_in_gaps()
        [all …]

  D | misc.h
        200: const struct ubifs_idx_node *idx,    in ubifs_idx_branch() (argument)
        203: return (struct ubifs_branch *)((void *)idx->branches +    in ubifs_idx_branch()
        213: const struct ubifs_idx_node *idx)    in ubifs_idx_key() (argument)
        215: return (void *)((struct ubifs_branch *)idx->branches)->key;    in ubifs_idx_key()
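The ubifs_idx_branch()/ubifs_idx_key() helpers in misc.h above locate a branch by raw pointer arithmetic, because the branches of an index node are packed back to back and their size depends on the key length configured at runtime. A minimal standalone sketch of that variable-stride access pattern; the demo_* types and the stride formula are illustrative stand-ins, not the real UBIFS on-flash layout:

    /* Variable-stride record array, in the style of ubifs_idx_branch(). */
    #include <stddef.h>
    #include <stdint.h>

    struct demo_branch {
        uint32_t lnum;          /* location of the child node (illustrative) */
        uint32_t offs;
        uint32_t len;
        uint8_t  key[];         /* variable-length key follows the fixed part */
    };

    struct demo_idx_node {
        uint16_t child_cnt;
        uint16_t level;
        uint8_t  branches[];    /* child_cnt packed branches, all with the same stride */
    };

    /* Branch bnum starts stride * bnum bytes into the packed area. */
    static struct demo_branch *demo_idx_branch(struct demo_idx_node *idx,
                                               size_t key_len, int bnum)
    {
        size_t stride = sizeof(struct demo_branch) + key_len;

        return (struct demo_branch *)(idx->branches + stride * bnum);
    }

    /* Analogue of ubifs_idx_key(): the key of the very first branch. */
    static void *demo_idx_key(struct demo_idx_node *idx, size_t key_len)
    {
        return demo_idx_branch(idx, key_len, 0)->key;
    }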
/fs/f2fs/

  D | iostat.c    (see the sketch after this listing)
        90: int io, idx = 0;    in __record_iostat_latency() (local)
        97: for (idx = 0; idx < MAX_IO_TYPE; idx++) {    in __record_iostat_latency()
        99: cnt = io_lat->bio_cnt[idx][io];    in __record_iostat_latency()
        100: iostat_lat[idx][io].peak_lat =    in __record_iostat_latency()
        101: jiffies_to_msecs(io_lat->peak_lat[idx][io]);    in __record_iostat_latency()
        102: iostat_lat[idx][io].cnt = cnt;    in __record_iostat_latency()
        103: iostat_lat[idx][io].avg_lat = cnt ?    in __record_iostat_latency()
        104: jiffies_to_msecs(io_lat->sum_lat[idx][io]) / cnt : 0;    in __record_iostat_latency()
        105: io_lat->sum_lat[idx][io] = 0;    in __record_iostat_latency()
        106: io_lat->peak_lat[idx][io] = 0;    in __record_iostat_latency()
        [all …]

  D | node.c
        1038: int idx = depth - 2;    in truncate_partial_nodes() (local)
        1045: for (i = 0; i < idx + 1; i++) {    in truncate_partial_nodes()
        1050: idx = i - 1;    in truncate_partial_nodes()
        1056: f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);    in truncate_partial_nodes()
        1059: for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {    in truncate_partial_nodes()
        1060: child_nid = get_nid(pages[idx], i, false);    in truncate_partial_nodes()
        1067: if (set_nid(pages[idx], i, 0, false))    in truncate_partial_nodes()
        1071: if (offset[idx + 1] == 0) {    in truncate_partial_nodes()
        1072: dn->node_page = pages[idx];    in truncate_partial_nodes()
        1073: dn->nid = nid[idx];    in truncate_partial_nodes()
        [all …]
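The __record_iostat_latency() lines from iostat.c above show a simple aggregation pattern: per-type sums, counts and peaks are turned into a report (with the average division guarded against a zero count) and then reset for the next window. A small userspace sketch of just that pattern, with illustrative demo_* names and plain millisecond counters instead of jiffies:

    /* Fold per-type latency sums into averages, then reset the window. */
    #include <stdio.h>

    #define DEMO_IO_TYPES 3              /* stand-in for MAX_IO_TYPE */

    struct demo_io_lat {
        unsigned long sum_ms[DEMO_IO_TYPES];
        unsigned long peak_ms[DEMO_IO_TYPES];
        unsigned long cnt[DEMO_IO_TYPES];
    };

    static void demo_record_latency(struct demo_io_lat *lat)
    {
        for (int idx = 0; idx < DEMO_IO_TYPES; idx++) {
            unsigned long cnt = lat->cnt[idx];
            /* guard the division: no requests means an average of 0 */
            unsigned long avg = cnt ? lat->sum_ms[idx] / cnt : 0;

            printf("type %d: cnt=%lu avg=%lums peak=%lums\n",
                   idx, cnt, avg, lat->peak_ms[idx]);

            /* reset the counters for the next sampling window */
            lat->sum_ms[idx] = 0;
            lat->peak_ms[idx] = 0;
            lat->cnt[idx] = 0;
        }
    }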
/fs/btrfs/

  D | struct-funcs.c
        60: const unsigned long idx = member_offset >> PAGE_SHIFT; \
        73: token->kaddr = page_address(token->eb->pages[idx]); \
        74: token->offset = idx << PAGE_SHIFT; \
        79: token->kaddr = page_address(token->eb->pages[idx + 1]); \
        80: token->offset = (idx + 1) << PAGE_SHIFT; \
        89: const unsigned long idx = member_offset >> PAGE_SHIFT; \
        90: char *kaddr = page_address(eb->pages[idx]); \
        100: kaddr = page_address(eb->pages[idx + 1]); \
        109: const unsigned long idx = member_offset >> PAGE_SHIFT; \
        123: token->kaddr = page_address(token->eb->pages[idx]); \
        [all …]
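The struct-funcs.c macros above derive a page index from a byte offset with member_offset >> PAGE_SHIFT and keep a second path for values whose bytes spill over into pages[idx + 1]. A standalone sketch of that page-split arithmetic, using a fake 4 KiB page size and demo_* names; the real helpers additionally deal with tokens and little-endian conversion:

    /* Split a byte offset into (page index, in-page offset); handle straddle. */
    #include <stdint.h>
    #include <string.h>

    #define DEMO_PAGE_SHIFT 12
    #define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)

    /* Read a u32 (host byte order here) that may cross a page boundary. */
    static uint32_t demo_read_u32(uint8_t *pages[], unsigned long member_offset)
    {
        const unsigned long idx = member_offset >> DEMO_PAGE_SHIFT;
        const unsigned long oip = member_offset & (DEMO_PAGE_SIZE - 1);
        const unsigned long part = DEMO_PAGE_SIZE - oip;
        uint32_t val;

        if (part >= sizeof(val)) {
            /* fast path: the whole value sits inside pages[idx] */
            memcpy(&val, pages[idx] + oip, sizeof(val));
        } else {
            /* slow path: stitch it together from pages[idx] and pages[idx + 1] */
            memcpy(&val, pages[idx] + oip, part);
            memcpy((uint8_t *)&val + part, pages[idx + 1], sizeof(val) - part);
        }
        return val;
    }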
/fs/coda/

  D | inode.c
        112: int idx;    in get_device_index() (local)
        133: idx = iminor(inode);    in get_device_index()
        136: if (idx < 0 || idx >= MAX_CODADEVS) {    in get_device_index()
        141: return idx;    in get_device_index()
        153: int idx;    in coda_fill_super() (local)
        158: idx = get_device_index((struct coda_mount_data *) data);    in coda_fill_super()
        161: if(idx == -1)    in coda_fill_super()
        162: idx = 0;    in coda_fill_super()
        164: pr_info("%s: device index: %i\n", __func__, idx);    in coda_fill_super()
        166: vc = &coda_comms[idx];    in coda_fill_super()
/fs/nfs/blocklayout/

  D | dev.c
        231: struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask);
        236: struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)    in bl_parse_simple() (argument)
        238: struct pnfs_block_volume *v = &volumes[idx];    in bl_parse_simple()
        353: struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)    in bl_parse_scsi() (argument)
        355: struct pnfs_block_volume *v = &volumes[idx];    in bl_parse_scsi()
        402: struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)    in bl_parse_slice() (argument)
        404: struct pnfs_block_volume *v = &volumes[idx];    in bl_parse_slice()
        418: struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)    in bl_parse_concat() (argument)
        420: struct pnfs_block_volume *v = &volumes[idx];    in bl_parse_concat()
        447: struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask)    in bl_parse_stripe() (argument)
        [all …]
/fs/udf/

  D | unicode.c
        158: int idx, len;    in udf_name_from_CS0() (local)
        203: for (idx = ocu_len - u_ch, ext_i_len = 0;    in udf_name_from_CS0()
        204: (idx >= 0) && (ext_i_len < EXT_SIZE);    in udf_name_from_CS0()
        205: idx -= u_ch, ext_i_len++) {    in udf_name_from_CS0()
        206: c = ocu[idx];    in udf_name_from_CS0()
        208: c = (c << 8) | ocu[idx + 1];    in udf_name_from_CS0()
        212: i_ext = idx;    in udf_name_from_CS0()
        220: idx = i_ext + u_ch;    in udf_name_from_CS0()
        222: ocu, ocu_len, &idx,    in udf_name_from_CS0()
        231: idx = 0;    in udf_name_from_CS0()
        [all …]
/fs/ntfs/

  D | logfile.c
        264: u16 nr_clients, idx;    in ntfs_check_log_client_array() (local)
        280: idx = le16_to_cpu(ra->client_free_list);    in ntfs_check_log_client_array()
        283: for (idx_is_first = true; idx != LOGFILE_NO_CLIENT_CPU; nr_clients--,    in ntfs_check_log_client_array()
        284: idx = le16_to_cpu(cr->next_client)) {    in ntfs_check_log_client_array()
        285: if (!nr_clients || idx >= le16_to_cpu(ra->log_clients))    in ntfs_check_log_client_array()
        288: cr = ca + idx;    in ntfs_check_log_client_array()
        299: idx = le16_to_cpu(ra->client_in_use_list);    in ntfs_check_log_client_array()
        375: pgoff_t idx;    in ntfs_check_and_load_restart_page() (local)
        384: idx = (pos + size) >> PAGE_SHIFT;    in ntfs_check_and_load_restart_page()
        387: page = ntfs_map_page(vi->i_mapping, idx);    in ntfs_check_and_load_restart_page()
        [all …]
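ntfs_check_log_client_array() above walks log client records that are chained through array indices, bounding the walk by the client count so a corrupted next index cannot loop forever. A simplified standalone sketch of that bounded traversal; the record layout and constants are illustrative, not the on-disk NTFS format:

    /* Walk an index-linked list stored in an array, bounded against cycles. */
    #include <stdbool.h>
    #include <stdint.h>

    #define DEMO_NO_CLIENT 0xffff    /* stand-in for LOGFILE_NO_CLIENT_CPU */

    struct demo_client {
        uint16_t next;               /* index of the next client, or DEMO_NO_CLIENT */
    };

    static bool demo_check_client_list(const struct demo_client *ca,
                                       uint16_t log_clients, uint16_t first)
    {
        uint16_t nr = log_clients;   /* never follow more links than clients exist */
        uint16_t idx = first;

        while (idx != DEMO_NO_CLIENT) {
            /* reject over-long chains and out-of-range indices */
            if (!nr || idx >= log_clients)
                return false;
            nr--;
            idx = ca[idx].next;
        }
        return true;
    }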
/fs/hpfs/

  D | alloc.c
        489: static unsigned find_run(__le32 *bmp, unsigned *idx)    in find_run() (argument)
        492: while (tstbits(bmp, *idx, 1)) {    in find_run()
        493: (*idx)++;    in find_run()
        494: if (unlikely(*idx >= 0x4000))    in find_run()
        498: while (!tstbits(bmp, *idx + len, 1))    in find_run()
        529: unsigned idx, len, start_bmp, end_bmp;    in hpfs_trim_fs() (local)
        550: idx = 0;    in hpfs_trim_fs()
        551: while ((len = find_run(bmp, &idx)) && !err) {    in hpfs_trim_fs()
        552: err = do_trim(s, sbi->sb_dirband_start + idx * 4, len * 4, start, end, minlen, result);    in hpfs_trim_fs()
        553: idx += len;    in hpfs_trim_fs()
        [all …]
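find_run() above scans an HPFS allocation bitmap: it first skips bits that are not in the wanted state, then counts how long the following run is, leaving *idx at the start of the run so hpfs_trim_fs() can advance with idx += len. A standalone sketch of the same run-finding loop over a plain bit array (in HPFS a set bitmap bit marks a free sector; the demo_* helpers are stand-ins for tstbits()):

    /* Find the next run of set bits at or after *idx; returns its length, 0 if none. */
    #include <stdbool.h>

    #define DEMO_BMP_BITS 0x4000     /* 16384 bits, as in the hpfs snippet */

    static bool demo_test_bit(const unsigned *bmp, unsigned b)
    {
        return bmp[b / 32] & (1U << (b % 32));
    }

    static unsigned demo_find_run(const unsigned *bmp, unsigned *idx)
    {
        unsigned len;

        /* skip bits that are not in the wanted (set) state */
        while (!demo_test_bit(bmp, *idx)) {
            (*idx)++;
            if (*idx >= DEMO_BMP_BITS)
                return 0;
        }
        /* count how long the run of set bits is */
        len = 1;
        while (*idx + len < DEMO_BMP_BITS && demo_test_bit(bmp, *idx + len))
            len++;
        return len;
    }

A caller would advance *idx past the returned run and call the function again, which is the loop hpfs_trim_fs() shows above.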
/fs/overlayfs/

  D | export.c
        94: return oe->lowerstack[0].layer->idx;    in ovl_connectable_layer()
        115: origin_layer = OVL_E(dentry)->lowerstack[0].layer->idx;    in ovl_connect_layer()
        340: static struct dentry *ovl_dentry_real_at(struct dentry *dentry, int idx)    in ovl_dentry_real_at() (argument)
        345: if (!idx)    in ovl_dentry_real_at()
        349: if (oe->lowerstack[i].layer->idx == idx)    in ovl_dentry_real_at()
        381: if (ovl_dentry_real_at(connected, layer->idx) != parent)    in ovl_lookup_real_one()
        400: } else if (ovl_dentry_real_at(this, layer->idx) != real) {    in ovl_lookup_real_one()
        413: real, layer->idx, connected, err);    in ovl_lookup_real_one()
        438: inode = ovl_lookup_inode(sb, real, !layer->idx);    in ovl_lookup_real_inode()
        450: if (!this && layer->idx && ofs->indexdir && !WARN_ON(!d_is_dir(real))) {    in ovl_lookup_real_inode()
        [all …]
/fs/hfsplus/

  D | btree.c
        312: static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)    in hfs_bmap_new_bmap() (argument)
        319: node = hfs_bnode_create(tree, idx);    in hfs_bmap_new_bmap()
        324: prev->next = idx;    in hfs_bmap_new_bmap()
        325: cnid = cpu_to_be32(idx);    in hfs_bmap_new_bmap()
        377: u32 nidx, idx;    in hfs_bmap_alloc() (local)
        399: idx = 0;    in hfs_bmap_alloc()
        407: idx += i;    in hfs_bmap_alloc()
        415: idx);    in hfs_bmap_alloc()
        424: idx += 8;    in hfs_bmap_alloc()
        431: next_node = hfs_bmap_new_bmap(node, idx);    in hfs_bmap_alloc()

  D | bfind.c
        235: int idx, res = 0;    in hfs_brec_goto() (local)
        246: idx = bnode->prev;    in hfs_brec_goto()
        247: if (!idx) {    in hfs_brec_goto()
        252: bnode = hfs_bnode_find(tree, idx);    in hfs_brec_goto()
        264: idx = bnode->next;    in hfs_brec_goto()
        265: if (!idx) {    in hfs_brec_goto()
        270: bnode = hfs_bnode_find(tree, idx);    in hfs_brec_goto()
/fs/hfs/

  D | btree.c
        188: static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)    in hfs_bmap_new_bmap() (argument)
        195: node = hfs_bnode_create(tree, idx);    in hfs_bmap_new_bmap()
        202: prev->next = idx;    in hfs_bmap_new_bmap()
        203: cnid = cpu_to_be32(idx);    in hfs_bmap_new_bmap()
        251: u32 nidx, idx;    in hfs_bmap_alloc() (local)
        273: idx = 0;    in hfs_bmap_alloc()
        281: idx += i;    in hfs_bmap_alloc()
        288: return hfs_bnode_create(tree, idx);    in hfs_bmap_alloc()
        297: idx += 8;    in hfs_bmap_alloc()
        304: next_node = hfs_bmap_new_bmap(node, idx);    in hfs_bmap_alloc()

  D | bfind.c
        178: int idx, res = 0;    in hfs_brec_goto() (local)
        189: idx = bnode->prev;    in hfs_brec_goto()
        190: if (!idx) {    in hfs_brec_goto()
        195: bnode = hfs_bnode_find(tree, idx);    in hfs_brec_goto()
        207: idx = bnode->next;    in hfs_brec_goto()
        208: if (!idx) {    in hfs_brec_goto()
        213: bnode = hfs_bnode_find(tree, idx);    in hfs_brec_goto()
/fs/erofs/

  D | xattr.h
        46: static inline const struct xattr_handler *erofs_xattr_handler(unsigned int idx)    in erofs_xattr_handler() (argument)
        62: return idx && idx < ARRAY_SIZE(xattr_handler_map) ?    in erofs_xattr_handler()
        63: xattr_handler_map[idx] : NULL;    in erofs_xattr_handler()
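erofs_xattr_handler() above is a bounds-checked table lookup: an on-disk prefix index selects an entry from xattr_handler_map[], and index 0 or anything past the end of the array yields NULL. A minimal sketch of the same idiom with stand-in handler types and table contents:

    /* Bounds-checked handler lookup: index 0 and out-of-range indices yield NULL. */
    #include <stddef.h>

    struct demo_xattr_handler {
        const char *prefix;
    };

    static const struct demo_xattr_handler demo_user_handler    = { .prefix = "user." };
    static const struct demo_xattr_handler demo_trusted_handler = { .prefix = "trusted." };

    /* sparse, index-keyed map; unset slots stay NULL */
    static const struct demo_xattr_handler *demo_handler_map[] = {
        [1] = &demo_user_handler,
        [4] = &demo_trusted_handler,
    };

    #define DEMO_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const struct demo_xattr_handler *demo_xattr_handler(unsigned int idx)
    {
        return idx && idx < DEMO_ARRAY_SIZE(demo_handler_map) ?
               demo_handler_map[idx] : NULL;
    }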
/fs/nfs/flexfilelayout/

  D | flexfilelayout.c
        718: ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx)    in ff_layout_mark_ds_unreachable() (argument)
        720: struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);    in ff_layout_mark_ds_unreachable()
        727: ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx)    in ff_layout_mark_ds_reachable() (argument)
        729: struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);    in ff_layout_mark_ds_reachable()
        744: u32 idx;    in ff_layout_choose_ds_for_read() (local)
        747: for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {    in ff_layout_choose_ds_for_read()
        748: if (idx+1 == fls->mirror_array_cnt)    in ff_layout_choose_ds_for_read()
        751: mirror = FF_LAYOUT_COMP(lseg, idx);    in ff_layout_choose_ds_for_read()
        760: *best_idx = idx;    in ff_layout_choose_ds_for_read()
        987: ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)    in ff_layout_pg_set_mirror_write() (argument)
        [all …]

  D | flexfilelayout.h
        142: FF_LAYOUT_COMP(struct pnfs_layout_segment *lseg, u32 idx)    in FF_LAYOUT_COMP() (argument)
        146: if (idx < fls->mirror_array_cnt)    in FF_LAYOUT_COMP()
        147: return fls->mirror_array[idx];    in FF_LAYOUT_COMP()
        152: FF_LAYOUT_DEVID_NODE(struct pnfs_layout_segment *lseg, u32 idx)    in FF_LAYOUT_DEVID_NODE() (argument)
        154: struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, idx);    in FF_LAYOUT_DEVID_NODE()

  D | flexfilelayoutdev.c
        554: u32 idx;    in ff_read_layout_has_available_ds() (local)
        556: for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {    in ff_read_layout_has_available_ds()
        557: mirror = FF_LAYOUT_COMP(lseg, idx);    in ff_read_layout_has_available_ds()
        576: u32 idx;    in ff_rw_layout_has_available_ds() (local)
        578: for (idx = 0; idx < FF_LAYOUT_MIRROR_COUNT(lseg); idx++) {    in ff_rw_layout_has_available_ds()
        579: mirror = FF_LAYOUT_COMP(lseg, idx);    in ff_rw_layout_has_available_ds()
/fs/cifs/

  D | winucase.c
        629: unsigned char idx;    in cifs_toupper() (local)
        634: idx = (in & 0xff00) >> 8;    in cifs_toupper()
        637: tbl = toplevel[idx];    in cifs_toupper()
        642: idx = in & 0xff;    in cifs_toupper()
        645: out = tbl[idx];    in cifs_toupper()
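cifs_toupper() above uses a two-level table: the high byte of the UTF-16 code unit picks a second-level table from toplevel[] (NULL when no character in that row has an uppercase mapping), the low byte indexes into it, and a zero entry means the character maps to itself. A compact sketch of that lookup with tiny stand-in tables covering only a few ASCII letters:

    /* Two-level uppercase lookup: high byte picks a row table, low byte picks the entry. */
    #include <stdint.h>

    /* second-level table for code points 0x0000-0x00ff; only a few entries filled in */
    static const uint16_t demo_row_00[256] = {
        ['a'] = 'A', ['b'] = 'B', ['c'] = 'C', ['z'] = 'Z',
    };

    /* rows with no uppercase mappings stay NULL */
    static const uint16_t *demo_toplevel[256] = {
        [0x00] = demo_row_00,
    };

    static uint16_t demo_toupper(uint16_t in)
    {
        const uint16_t *tbl = demo_toplevel[(in & 0xff00) >> 8];
        uint16_t out;

        if (!tbl)
            return in;            /* no mappings anywhere in this row */
        out = tbl[in & 0xff];
        return out ? out : in;    /* a zero entry means "maps to itself" */
    }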
/fs/adfs/

  D | dir_f.c
        46: #define dir_u8(idx) \    (argument)
        47: ({ int _buf = idx >> blocksize_bits; \
        48: int _off = idx - (_buf << blocksize_bits);\
        52: #define dir_u32(idx) \    (argument)
        53: ({ int _buf = idx >> blocksize_bits; \
        54: int _off = idx - (_buf << blocksize_bits);\
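The dir_u8()/dir_u32() macros above split a flat directory byte index into a buffer number (idx >> blocksize_bits) and an offset inside that buffer. A small sketch of the same arithmetic written as a function, with an illustrative demo_dir structure standing in for the adfs buffer array:

    /* Split a flat directory index into (buffer number, offset within buffer). */
    #include <stdint.h>

    struct demo_dir {
        unsigned int blocksize_bits;   /* log2 of the per-buffer size */
        uint8_t **bufs;                /* one pointer per block-sized buffer */
    };

    static uint8_t demo_dir_u8(const struct demo_dir *dir, unsigned int idx)
    {
        unsigned int buf = idx >> dir->blocksize_bits;
        unsigned int off = idx - (buf << dir->blocksize_bits);

        return dir->bufs[buf][off];
    }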
/fs/xfs/libxfs/

  D | xfs_da_format.h
        723: xfs_attr3_leaf_name(xfs_attr_leafblock_t *leafp, int idx)    in xfs_attr3_leaf_name() (argument)
        727: return &((char *)leafp)[be16_to_cpu(entries[idx].nameidx)];    in xfs_attr3_leaf_name()
        731: xfs_attr3_leaf_name_remote(xfs_attr_leafblock_t *leafp, int idx)    in xfs_attr3_leaf_name_remote() (argument)
        733: return (xfs_attr_leaf_name_remote_t *)xfs_attr3_leaf_name(leafp, idx);    in xfs_attr3_leaf_name_remote()
        737: xfs_attr3_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx)    in xfs_attr3_leaf_name_local() (argument)
        739: return (xfs_attr_leaf_name_local_t *)xfs_attr3_leaf_name(leafp, idx);    in xfs_attr3_leaf_name_local()
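xfs_attr3_leaf_name() above reaches a name by adding the entry's stored byte offset (nameidx, byte-swapped from big-endian) to the start of the leaf block; the _local and _remote variants just cast the result. A simplified sketch of that offset-table access with stand-in structures rather than the real on-disk attr leaf format:

    /* Reach a record through a per-entry byte offset stored in the block itself. */
    #include <stdint.h>

    struct demo_leaf_entry {
        uint16_t nameidx;              /* byte offset of the name within the block */
    };

    struct demo_leaf_block {
        uint16_t count;                /* number of entries in use */
        struct demo_leaf_entry entries[];
    };

    static char *demo_leaf_name(struct demo_leaf_block *leafp, int idx)
    {
        /* the real code byte-swaps nameidx with be16_to_cpu() first */
        return &((char *)leafp)[leafp->entries[idx].nameidx];
    }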
/fs/ocfs2/dlm/

  D | dlmcommon.h
        380: static inline char *dlm_list_in_text(enum dlm_lockres_list idx)    in dlm_list_in_text() (argument)
        382: if (idx == DLM_GRANTED_LIST)    in dlm_list_in_text()
        384: else if (idx == DLM_CONVERTING_LIST)    in dlm_list_in_text()
        386: else if (idx == DLM_BLOCKED_LIST)    in dlm_list_in_text()
        393: dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx)    in dlm_list_idx_to_ptr() (argument)
        396: if (idx == DLM_GRANTED_LIST)    in dlm_list_idx_to_ptr()
        398: else if (idx == DLM_CONVERTING_LIST)    in dlm_list_idx_to_ptr()
        400: else if (idx == DLM_BLOCKED_LIST)    in dlm_list_idx_to_ptr()
        948: void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data);
        949: void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data);
        [all …]
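dlm_list_in_text() and dlm_list_idx_to_ptr() above map the same small enum either to a printable name or to the matching list head on a resource. A trivial sketch of the naming half, with illustrative demo_* names (the kernel code uses an if/else chain rather than a switch):

    /* Map a small enum to a printable name; a sibling helper maps it to a list head. */
    enum demo_lockres_list {
        DEMO_GRANTED_LIST = 0,
        DEMO_CONVERTING_LIST,
        DEMO_BLOCKED_LIST,
    };

    static const char *demo_list_in_text(enum demo_lockres_list idx)
    {
        switch (idx) {
        case DEMO_GRANTED_LIST:
            return "granted";
        case DEMO_CONVERTING_LIST:
            return "converting";
        case DEMO_BLOCKED_LIST:
            return "blocked";
        default:
            return "unknown";
        }
    }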