/fs/squashfs/ |
D | cache.c |
      61  for (i = cache->curr_blk, n = 0; n < cache->entries; n++) {  in squashfs_cache_get()
      66  i = (i + 1) % cache->entries;  in squashfs_cache_get()
      69  if (n == cache->entries) {  in squashfs_cache_get()
      89  for (n = 0; n < cache->entries; n++) {  in squashfs_cache_get()
      92  i = (i + 1) % cache->entries;  in squashfs_cache_get()
      95  cache->next_blk = (i + 1) % cache->entries;  in squashfs_cache_get()
     204  for (i = 0; i < cache->entries; i++) {  in squashfs_cache_delete()
     223  struct squashfs_cache *squashfs_cache_init(char *name, int entries,  in squashfs_cache_init()  argument
     234  cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL);  in squashfs_cache_init()
     242  cache->unused = entries;  in squashfs_cache_init()
     [all …]
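Taken together, the squashfs_cache_get() hits above trace a single pattern: the cache is a fixed array of 'entries' slots walked as a ring, wrapping with a modulo and resuming near where the previous lookup left off. Below is a minimal userspace sketch of that scan shape, not the squashfs code itself; struct slot, struct cache and cache_find() are made-up stand-ins for the real types.

    /* Hypothetical stand-ins for the squashfs cache types. */
    struct slot { long long block; int refcount; };
    struct cache { struct slot *entry; int entries; int curr_blk; };

    /*
     * Walk every slot as a ring, starting at the slot that satisfied the
     * previous lookup and wrapping with a modulo -- the same shape as the
     * loops flagged above in squashfs_cache_get().
     */
    static int cache_find(struct cache *cache, long long block)
    {
        int i, n;

        for (i = cache->curr_blk, n = 0; n < cache->entries; n++) {
            if (cache->entry[i].block == block)
                break;
            i = (i + 1) % cache->entries;
        }

        if (n == cache->entries)
            return -1;          /* miss: every slot was examined */

        cache->curr_blk = i;    /* resume the next scan here */
        return i;
    }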
|
D | file.c |
    138  meta->entries = 0;  in empty_meta_index()
    253  offset = index < meta->offset + meta->entries ? index :  in fill_meta_index()
    254  meta->offset + meta->entries - 1;  in fill_meta_index()
    262  meta->entries);  in fill_meta_index()
    273  for (i = meta->offset + meta->entries; i <= index &&  in fill_meta_index()
    280  if (meta->entries == 0)  in fill_meta_index()
    296  meta->entries++;  in fill_meta_index()
    301  meta->offset, meta->entries);  in fill_meta_index()
|
D | squashfs_fs_sb.h | 17 int entries; member
|
/fs/nfs_common/ |
D | nfsacl.c |
     95  int entries = (acl && acl->a_count) ? max_t(int, acl->a_count, 4) : 0;  in nfsacl_encode()  local
     99  .array_len = encode_entries ? entries : 0,  in nfsacl_encode()
    110  if (entries > NFS_ACL_MAX_ENTRIES ||  in nfsacl_encode()
    111  xdr_encode_word(buf, base, entries))  in nfsacl_encode()
    274  u32 entries;  in nfsacl_decode()  local
    277  if (xdr_decode_word(buf, base, &entries) ||  in nfsacl_decode()
    278  entries > NFS_ACL_MAX_ENTRIES)  in nfsacl_decode()
    280  nfsacl_desc.desc.array_maxlen = entries;  in nfsacl_decode()
    285  if (entries != nfsacl_desc.desc.array_len ||  in nfsacl_decode()
    293  *aclcnt = entries;  in nfsacl_decode()
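Both the encode and decode paths above bound the on-the-wire entry count against NFS_ACL_MAX_ENTRIES before trusting it. A rough userspace illustration of that decode-then-clamp step, assuming plain ntohl() in place of the kernel's xdr_decode_word() and an arbitrary ACL_MAX_ENTRIES constant invented for the sketch:

    #include <arpa/inet.h>          /* ntohl */
    #include <stdint.h>
    #include <string.h>

    #define ACL_MAX_ENTRIES 1024    /* stand-in for NFS_ACL_MAX_ENTRIES */

    /*
     * Read a 32-bit big-endian entry count from a received buffer and
     * reject anything above the fixed maximum, mirroring the
     * xdr_decode_word()/NFS_ACL_MAX_ENTRIES check in nfsacl_decode().
     */
    static int decode_entry_count(const unsigned char *buf, size_t len,
                                  uint32_t *entries)
    {
        uint32_t raw;

        if (len < sizeof(raw))
            return -1;
        memcpy(&raw, buf, sizeof(raw));
        *entries = ntohl(raw);

        return *entries > ACL_MAX_ENTRIES ? -1 : 0;
    }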
|
/fs/ext4/ |
D | namei.c |
    229  struct dx_entry entries[0];  member
    235  struct dx_entry entries[0];  member
    242  struct dx_entry *entries;  member
    265  static unsigned dx_get_count(struct dx_entry *entries);
    266  static unsigned dx_get_limit(struct dx_entry *entries);
    267  static void dx_set_count(struct dx_entry *entries, unsigned value);
    268  static void dx_set_limit(struct dx_entry *entries, unsigned value);
    552  static inline unsigned dx_get_count(struct dx_entry *entries)  in dx_get_count()  argument
    554  return le16_to_cpu(((struct dx_countlimit *) entries)->count);  in dx_get_count()
    557  static inline unsigned dx_get_limit(struct dx_entry *entries)  in dx_get_limit()  argument
    [all …]
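The dx_get_count()/dx_get_limit() lines show the htree convention of overlaying a count/limit header on the first dx_entry slot and reading it through a cast plus le16_to_cpu(). A compact userspace model of that overlay, assuming glibc's le16toh() in place of le16_to_cpu() and simplified struct layouts for the sketch:

    #include <endian.h>             /* le16toh */
    #include <stdint.h>

    /* Simplified shapes of the on-disk structures referenced above. */
    struct dx_entry { uint32_t hash; uint32_t block; };
    struct dx_countlimit { uint16_t limit; uint16_t count; };

    /*
     * The first dx_entry slot of an index block doubles as a count/limit
     * header, so the accessors reinterpret the pointer and convert the
     * little-endian on-disk fields, as dx_get_count()/dx_get_limit() do.
     */
    static inline unsigned dx_count(struct dx_entry *entries)
    {
        return le16toh(((struct dx_countlimit *)entries)->count);
    }

    static inline unsigned dx_limit(struct dx_entry *entries)
    {
        return le16toh(((struct dx_countlimit *)entries)->limit);
    }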
|
/fs/reiserfs/ |
D | item_ops.c |
    529  int entries = 0;  in direntry_check_left()  local
    538  entries++;  in direntry_check_left()
    541  if (entries == dir_u->entry_count) {  in direntry_check_left()
    549  && entries < 2)  in direntry_check_left()
    550  entries = 0;  in direntry_check_left()
    552  return entries ? : -1;  in direntry_check_left()
    558  int entries = 0;  in direntry_check_right()  local
    567  entries++;  in direntry_check_right()
    569  BUG_ON(entries == dir_u->entry_count);  in direntry_check_right()
    573  && entries > dir_u->entry_count - 2)  in direntry_check_right()
    [all …]
|
/fs/nfs/ |
D | mount_clnt.c |
    424  u32 entries, i;  in decode_auth_flavors()  local
    433  entries = be32_to_cpup(p);  in decode_auth_flavors()
    434  dprintk("NFS: received %u auth flavors\n", entries);  in decode_auth_flavors()
    435  if (entries > NFS_MAX_SECFLAVORS)  in decode_auth_flavors()
    436  entries = NFS_MAX_SECFLAVORS;  in decode_auth_flavors()
    438  p = xdr_inline_decode(xdr, 4 * entries);  in decode_auth_flavors()
    442  if (entries > *count)  in decode_auth_flavors()
    443  entries = *count;  in decode_auth_flavors()
    445  for (i = 0; i < entries; i++) {  in decode_auth_flavors()
|
/fs/xfs/ |
D | xfs_attr_list.c |
    306  struct xfs_attr_leaf_entry *entries;  in xfs_attr_node_list()  local
    321  entries = xfs_attr3_leaf_entryp(leaf);  in xfs_attr_node_list()
    323  entries[leafhdr.count - 1].hashval)) {  in xfs_attr_node_list()
    328  entries[0].hashval)) {  in xfs_attr_node_list()
    386  struct xfs_attr_leaf_entry *entries;  in xfs_attr3_leaf_list_int()  local
    395  entries = xfs_attr3_leaf_entryp(leaf);  in xfs_attr3_leaf_list_int()
    404  entry = &entries[0];  in xfs_attr3_leaf_list_int()
    423  entry = &entries[0];  in xfs_attr3_leaf_list_int()
|
/fs/xfs/libxfs/ |
D | xfs_da_format.h |
    668  xfs_attr_leaf_entry_t entries[1];  /* sorted on key, not name */  member
    700  struct xfs_attr_leaf_entry entries[1];  member
    786  return &((struct xfs_attr3_leafblock *)leafp)->entries[0];  in xfs_attr3_leaf_entryp()
    787  return &leafp->entries[0];  in xfs_attr3_leaf_entryp()
    796  struct xfs_attr_leaf_entry *entries = xfs_attr3_leaf_entryp(leafp);  in xfs_attr3_leaf_name()  local
    798  return &((char *)leafp)[be16_to_cpu(entries[idx].nameidx)];  in xfs_attr3_leaf_name()
|
D | xfs_attr_leaf.c |
     242  struct xfs_attr_leaf_entry *entries;  in xfs_attr3_leaf_verify()  local
     271  entries = xfs_attr3_leaf_entryp(bp->b_addr);  in xfs_attr3_leaf_verify()
     272  if ((char *)&entries[ichdr.count] >  in xfs_attr3_leaf_verify()
    1082  struct xfs_attr_leaf_entry *entries;  in xfs_attr3_leaf_to_node()  local
    1128  entries = xfs_attr3_leaf_entryp(leaf);  in xfs_attr3_leaf_to_node()
    1131  btree[0].hashval = entries[icleafhdr.count - 1].hashval;  in xfs_attr3_leaf_to_node()
    2272  struct xfs_attr_leaf_entry *entries;  in xfs_attr3_leaf_lookup_int()  local
    2283  entries = xfs_attr3_leaf_entryp(leaf);  in xfs_attr3_leaf_lookup_int()
    2292  for (entry = &entries[probe]; span > 4; entry = &entries[probe]) {  in xfs_attr3_leaf_lookup_int()
    2576  struct xfs_attr_leaf_entry *entries;  in xfs_attr_leaf_lasthash()  local
    [all …]
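The xfs_attr3_leaf_lookup_int() hit at line 2292 sits at the top of a hash lookup over the sorted entry array: the window is halved while it is still wide, and the last few slots are finished linearly. The sketch below reproduces that two-phase idea as an ordinary lower-bound binary search with a short linear tail; it is a stand-alone illustration over a plain array, not the xfs probe/span code or the on-disk structures.

    #include <stdint.h>

    struct leaf_entry { uint32_t hashval; };    /* hash-sorted, as above */

    /*
     * Narrow a window over the sorted hashvals by halving, then walk the
     * few remaining candidates linearly -- the same two-phase shape as
     * the span > 4 loop in xfs_attr3_leaf_lookup_int().
     */
    static int leaf_lookup(const struct leaf_entry *entries, int count,
                           uint32_t hashval)
    {
        int lo = 0, hi = count;

        while (hi - lo > 4) {
            int mid = lo + (hi - lo) / 2;

            if (entries[mid].hashval < hashval)
                lo = mid + 1;
            else
                hi = mid;
        }

        for (; lo < count; lo++) {
            if (entries[lo].hashval == hashval)
                return lo;
            if (entries[lo].hashval > hashval)
                break;
        }
        return -1;                  /* not found */
    }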
|
/fs/overlayfs/ |
D | readdir.c |
     34  struct list_head entries;  member
    233  ovl_cache_free(&cache->entries);  in ovl_dir_cache_free()
    248  ovl_cache_free(&cache->entries);  in ovl_cache_put()
    396  list_for_each(p, &od->cache->entries) {  in ovl_seek_cursor()
    423  INIT_LIST_HEAD(&cache->entries);  in ovl_cache_get()
    426  res = ovl_dir_read_merged(dentry, &cache->entries, &cache->root);  in ovl_cache_get()
    428  ovl_cache_free(&cache->entries);  in ovl_cache_get()
    608  res = ovl_dir_read_impure(path, &cache->entries, &cache->root);  in ovl_cache_get_impure()
    610  ovl_cache_free(&cache->entries);  in ovl_cache_get_impure()
    614  if (list_empty(&cache->entries)) {  in ovl_cache_get_impure()
    [all …]
|
/fs/gfs2/ |
D | dir.c |
    1269  struct gfs2_dirent **darr, u32 entries,  in do_filldir_main()  argument
    1277  if (sort_start < entries)  in do_filldir_main()
    1278  sort(&darr[sort_start], entries - sort_start,  in do_filldir_main()
    1284  for (x = 0, y = 1; x < entries; x++, y++) {  in do_filldir_main()
    1288  if (y < entries) {  in do_filldir_main()
    1340  unsigned entries)  in gfs2_set_cookies()  argument
    1345  for (i = 0; i < entries; i++) {  in gfs2_set_cookies()
    1379  unsigned entries = 0, entries2 = 0;  in gfs2_dir_read_leaf()  local
    1394  entries += be16_to_cpu(lf->lf_entries);  in gfs2_dir_read_leaf()
    1405  if (!entries)  in gfs2_dir_read_leaf()
    [all …]
|
/fs/xfs/scrub/ |
D | agheader.c |
    603  xfs_agblock_t *entries;  member
    635  sai->entries[sai->nr_entries++] = agbno;  in xchk_agfl_block()
    723  sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount,  in xchk_agfl()
    725  if (!sai.entries) {  in xchk_agfl()
    746  sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),  in xchk_agfl()
    749  if (sai.entries[i] == sai.entries[i - 1]) {  in xchk_agfl()
    756  kmem_free(sai.entries);  in xchk_agfl()
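xchk_agfl() collects the free-list block numbers into sai.entries, sorts them, and flags any value equal to its predecessor. The same sort-then-compare-neighbours duplicate check, sketched in userspace with qsort(); agblock_t and has_duplicates() are stand-ins invented for the sketch.

    #include <stdint.h>
    #include <stdlib.h>

    typedef uint32_t agblock_t;     /* stand-in for xfs_agblock_t */

    static int cmp_block(const void *a, const void *b)
    {
        agblock_t x = *(const agblock_t *)a;
        agblock_t y = *(const agblock_t *)b;

        return (x > y) - (x < y);
    }

    /*
     * Sort the collected block numbers, then compare neighbours: any
     * duplicate ends up adjacent, which is the check xchk_agfl() applies
     * to sai.entries after its sort() call.
     */
    static int has_duplicates(agblock_t *entries, size_t nr)
    {
        qsort(entries, nr, sizeof(entries[0]), cmp_block);
        for (size_t i = 1; i < nr; i++)
            if (entries[i] == entries[i - 1])
                return 1;
        return 0;
    }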
|
D | attr.c |
    313  struct xfs_attr_leaf_entry *entries;  in xchk_xattr_block()  local
    365  entries = xfs_attr3_leaf_entryp(leaf);  in xchk_xattr_block()
    366  if ((char *)&entries[leafhdr.count] > (char *)leaf + leafhdr.firstused)  in xchk_xattr_block()
    370  for (i = 0, ent = entries; i < leafhdr.count; ent++, i++) {  in xchk_xattr_block()
|
/fs/nfsd/ |
D | nfscache.c |
    344  unsigned int entries = 0;  in nfsd_cache_insert()  local
    348  ++entries;  in nfsd_cache_insert()
    366  if (entries > nn->longest_chain) {  in nfsd_cache_insert()
    367  nn->longest_chain = entries;  in nfsd_cache_insert()
    369  } else if (entries == nn->longest_chain) {  in nfsd_cache_insert()
|
D | acl.h | 42 int nfs4_acl_bytes(int entries);
|
/fs/ |
D | select.c |
     100  struct poll_table_entry entries[0];  member
     152  } while (entry > p->entries);  in poll_freewait()
     175  new_table->entry = new_table->entries;  in poll_get_entry()
     829  struct pollfd entries[0];  member
     897  pfd = walk->entries;  in do_poll()
     983  if (copy_from_user(walk->entries, ufds + nfds-todo,  in do_sys_poll()
     992  walk = walk->next = kmalloc(struct_size(walk, entries, len),  in do_sys_poll()
    1005  struct pollfd *fds = walk->entries;  in do_sys_poll()
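Both structures in select.c end in a zero-length (today: flexible) array member, so a single allocation carries the header plus a variable number of slots; the kmalloc() at line 992 sizes it with struct_size(). A small userspace sketch of the same layout, using a hypothetical poll_batch struct and plain malloc() in place of the kernel helpers:

    #include <poll.h>
    #include <stdlib.h>
    #include <string.h>

    /*
     * A trailing flexible array member lets one allocation hold the
     * header and 'len' pollfd slots, the layout behind walk->entries in
     * do_sys_poll(); struct_size() computes the equivalent total size.
     */
    struct poll_batch {
        struct poll_batch *next;
        int len;
        struct pollfd entries[];    /* declared as entries[0] in older code */
    };

    static struct poll_batch *poll_batch_alloc(int len)
    {
        struct poll_batch *walk;

        walk = malloc(sizeof(*walk) + (size_t)len * sizeof(walk->entries[0]));
        if (!walk)
            return NULL;

        walk->next = NULL;
        walk->len = len;
        memset(walk->entries, 0, (size_t)len * sizeof(walk->entries[0]));
        return walk;
    }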
|
D | binfmt_misc.c |
     43  static LIST_HEAD(entries);
     96  list_for_each(l, &entries) {  in check_file()
    749  list_add(&e->list, &entries);  in bm_register_write()
    800  while (!list_empty(&entries))  in bm_status_write()
    801  kill_node(list_first_entry(&entries, Node, list));  in bm_status_write()
|
/fs/btrfs/ |
D | free-space-cache.c |
     923  int *entries, int *bitmaps,  in write_cache_extent_entries()  argument
     951  *entries += 1;  in write_cache_extent_entries()
     986  *entries += 1;  in write_cache_extent_entries()
    1001  int entries, int bitmaps)  in update_cache_item()  argument
    1037  btrfs_set_free_space_entries(leaf, header, entries);  in update_cache_item()
    1052  int *entries)  in write_pinned_extent_entries()  argument
    1089  *entries += 1;  in write_pinned_extent_entries()
    1167  io_ctl->entries, io_ctl->bitmaps);  in __btrfs_wait_cache_io()
    1246  int entries = 0;  in __btrfs_write_out_cache()  local
    1288  block_group, &entries, &bitmaps,  in __btrfs_write_out_cache()
    [all …]
|
D | free-space-cache.h | 48 int entries; member
|
/fs/incfs/ |
D | format.c |
    513  struct incfs_blockmap_entry *entries,  in incfs_read_blockmap_entries()  argument
    523  if (!bfc || !entries)  in incfs_read_blockmap_entries()
    529  result = incfs_kread(bfc->bc_file, entries, bytes_to_read,  in incfs_read_blockmap_entries()
|
D | format.h | 339 struct incfs_blockmap_entry *entries,
|
/fs/cramfs/ |
D | README |
     6  swapped around (though it does care that directory entries (inodes) in
    27  a directory's entries before recursing down its subdirectories: the
    32  Beginning in 2.4.7, directory entries are sorted. This optimization
|
/fs/f2fs/ |
D | segment.c |
    1846  int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);  in add_discard_addrs()  local
    1870  for (i = 0; i < entries; i++)  in add_discard_addrs()
    2351  memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);  in write_current_sum_page()
    2533  int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);  in __next_free_blkoff()  local
    2539  for (i = 0; i < entries; i++)  in __next_free_blkoff()
    3500  seg_i->sum_blk->entries[j] = *s;  in read_compacted_summaries()
    3559  struct f2fs_summary *ns = &sum->entries[0];  in read_normal_summaries()
    3581  memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);  in read_normal_summaries()
    3675  *summary = seg_i->sum_blk->entries[j];  in write_compacted_summaries()
    3938  &raw_sit->entries[sit_offset]);  in f2fs_flush_sit_entries()
    [all …]
|
/fs/proc/ |
D | base.c |
    156  static unsigned int __init pid_entry_nlink(const struct pid_entry *entries,  in pid_entry_nlink()  argument
    164  if (S_ISDIR(entries[i].mode))  in pid_entry_nlink()
    429  unsigned long *entries;  in proc_pid_stack()  local
    446  entries = kmalloc_array(MAX_STACK_TRACE_DEPTH, sizeof(*entries),  in proc_pid_stack()
    448  if (!entries)  in proc_pid_stack()
    455  nr_entries = stack_trace_save_tsk(task, entries,  in proc_pid_stack()
    459  seq_printf(m, "[<0>] %pB\n", (void *)entries[i]);  in proc_pid_stack()
    464  kfree(entries);  in proc_pid_stack()
|