
Searched for refs:entry (results 1 – 25 of 103), sorted by relevance


/fs/squashfs/
cache.c
69 struct squashfs_cache_entry *entry; in squashfs_cache_get() local
75 if (cache->entry[i].block == block) { in squashfs_cache_get()
103 if (cache->entry[i].refcount == 0) in squashfs_cache_get()
109 entry = &cache->entry[i]; in squashfs_cache_get()
116 entry->block = block; in squashfs_cache_get()
117 entry->refcount = 1; in squashfs_cache_get()
118 entry->pending = 1; in squashfs_cache_get()
119 entry->num_waiters = 0; in squashfs_cache_get()
120 entry->error = 0; in squashfs_cache_get()
123 entry->length = squashfs_read_data(sb, block, length, in squashfs_cache_get()
[all …]
/fs/
mbcache2.c
54 struct mb2_cache_entry *entry, *dup; in mb2_cache_entry_create() local
58 entry = kmem_cache_alloc(mb2_entry_cache, mask); in mb2_cache_entry_create()
59 if (!entry) in mb2_cache_entry_create()
62 INIT_LIST_HEAD(&entry->e_lru_list); in mb2_cache_entry_create()
64 atomic_set(&entry->e_refcnt, 1); in mb2_cache_entry_create()
65 entry->e_key = key; in mb2_cache_entry_create()
66 entry->e_block = block; in mb2_cache_entry_create()
68 entry->e_hash_list_head = head; in mb2_cache_entry_create()
73 kmem_cache_free(mb2_entry_cache, entry); in mb2_cache_entry_create()
77 hlist_bl_add_head(&entry->e_hash_list, head); in mb2_cache_entry_create()
[all …]
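The mbcache2 excerpt above allocates an entry, initialises its key, block and refcount, and only links it into the hash list if no duplicate is found. Below is a minimal user-space sketch of that create path, assuming a plain array of singly linked buckets in place of the kernel's hlist_bl and kmem_cache machinery; names and return values here are illustrative, not mbcache2's.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS 64

struct cache_entry {
	struct cache_entry *next;   /* hash-chain link */
	uint32_t key;
	uint64_t block;
	int refcnt;
};

static struct cache_entry *buckets[NBUCKETS];

/* Allocate and initialise an entry, then add it to its hash bucket
 * unless an entry with the same key and block is already present. */
static int entry_create(uint32_t key, uint64_t block)
{
	struct cache_entry *entry = calloc(1, sizeof(*entry));
	struct cache_entry *dup;

	if (!entry)
		return -1;                       /* allocation failed */
	entry->key = key;
	entry->block = block;
	entry->refcnt = 1;

	for (dup = buckets[key % NBUCKETS]; dup; dup = dup->next) {
		if (dup->key == key && dup->block == block) {
			free(entry);             /* duplicate found: drop the new entry */
			return -2;
		}
	}
	entry->next = buckets[key % NBUCKETS];   /* add at the head of the chain */
	buckets[key % NBUCKETS] = entry;
	return 0;
}

int main(void)
{
	printf("%d\n", entry_create(1, 100));    /* 0: first insert succeeds */
	printf("%d\n", entry_create(2, 200));    /* 0: different key */
	printf("%d\n", entry_create(1, 100));    /* -2: duplicate rejected */
	return 0;
}
```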
/fs/btrfs/
ordered-data.c
31 static u64 entry_end(struct btrfs_ordered_extent *entry) in entry_end() argument
33 if (entry->file_offset + entry->len < entry->file_offset) in entry_end()
35 return entry->file_offset + entry->len; in entry_end()
46 struct btrfs_ordered_extent *entry; in tree_insert() local
50 entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node); in tree_insert()
52 if (file_offset < entry->file_offset) in tree_insert()
54 else if (file_offset >= entry_end(entry)) in tree_insert()
83 struct btrfs_ordered_extent *entry; in __tree_search() local
87 entry = rb_entry(n, struct btrfs_ordered_extent, rb_node); in __tree_search()
89 prev_entry = entry; in __tree_search()
[all …]
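The ordered-data.c hits show tree_insert() walking a tree keyed by file_offset, with entry_end() saturating rather than wrapping on overflow. The sketch below reproduces just that ordering logic in user space, assuming a plain sorted linked list in place of the kernel rbtree; struct and function names are illustrative.

```c
#include <stdint.h>
#include <stdio.h>

struct extent {
	uint64_t file_offset;
	uint64_t len;
	struct extent *next;
};

/* Saturate instead of wrapping, as the overflow check in entry_end() does. */
static uint64_t entry_end(const struct extent *e)
{
	if (e->file_offset + e->len < e->file_offset)
		return UINT64_MAX;
	return e->file_offset + e->len;
}

/* Keep the list sorted by file_offset; refuse an extent whose start
 * falls inside an existing one, mirroring the tree_insert() comparison. */
static int extent_insert(struct extent **head, struct extent *new)
{
	struct extent **p = head;

	while (*p) {
		if (new->file_offset < (*p)->file_offset)
			break;                   /* new goes before *p */
		if (new->file_offset >= entry_end(*p)) {
			p = &(*p)->next;         /* new goes after *p, keep walking */
			continue;
		}
		return -1;                       /* start lies inside *p */
	}
	new->next = *p;
	*p = new;
	return 0;
}

int main(void)
{
	struct extent a = { 0, 4096, NULL };
	struct extent b = { 8192, 4096, NULL };
	struct extent c = { 2048, 1024, NULL };
	struct extent *head = NULL;

	printf("insert a: %d\n", extent_insert(&head, &a));  /* 0 */
	printf("insert b: %d\n", extent_insert(&head, &b));  /* 0 */
	printf("insert c: %d\n", extent_insert(&head, &c));  /* -1, starts inside a */

	for (struct extent *e = head; e; e = e->next)
		printf("[%llu, %llu)\n",
		       (unsigned long long)e->file_offset,
		       (unsigned long long)entry_end(e));
	return 0;
}
```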
free-space-cache.c
527 struct btrfs_free_space_entry *entry; in io_ctl_add_entry() local
532 entry = io_ctl->cur; in io_ctl_add_entry()
533 entry->offset = cpu_to_le64(offset); in io_ctl_add_entry()
534 entry->bytes = cpu_to_le64(bytes); in io_ctl_add_entry()
535 entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP : in io_ctl_add_entry()
595 struct btrfs_free_space *entry, u8 *type) in io_ctl_read_entry() argument
607 entry->offset = le64_to_cpu(e->offset); in io_ctl_read_entry()
608 entry->bytes = le64_to_cpu(e->bytes); in io_ctl_read_entry()
622 struct btrfs_free_space *entry) in io_ctl_read_bitmap() argument
630 memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE); in io_ctl_read_bitmap()
[all …]
extent_map.c
96 struct extent_map *entry = NULL; in tree_insert() local
102 entry = rb_entry(parent, struct extent_map, rb_node); in tree_insert()
104 if (em->start < entry->start) in tree_insert()
106 else if (em->start >= extent_map_end(entry)) in tree_insert()
113 while (parent && em->start >= extent_map_end(entry)) { in tree_insert()
115 entry = rb_entry(parent, struct extent_map, rb_node); in tree_insert()
118 if (end > entry->start && em->start < extent_map_end(entry)) in tree_insert()
122 entry = rb_entry(parent, struct extent_map, rb_node); in tree_insert()
123 while (parent && em->start < entry->start) { in tree_insert()
125 entry = rb_entry(parent, struct extent_map, rb_node); in tree_insert()
[all …]
/fs/nilfs2/
dat.c
91 struct nilfs_dat_entry *entry; in nilfs_dat_commit_alloc() local
95 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, in nilfs_dat_commit_alloc()
97 entry->de_start = cpu_to_le64(NILFS_CNO_MIN); in nilfs_dat_commit_alloc()
98 entry->de_end = cpu_to_le64(NILFS_CNO_MAX); in nilfs_dat_commit_alloc()
99 entry->de_blocknr = cpu_to_le64(0); in nilfs_dat_commit_alloc()
115 struct nilfs_dat_entry *entry; in nilfs_dat_commit_free() local
119 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, in nilfs_dat_commit_free()
121 entry->de_start = cpu_to_le64(NILFS_CNO_MIN); in nilfs_dat_commit_free()
122 entry->de_end = cpu_to_le64(NILFS_CNO_MIN); in nilfs_dat_commit_free()
123 entry->de_blocknr = cpu_to_le64(0); in nilfs_dat_commit_free()
[all …]
/fs/afs/
vlclient.c
64 struct afs_cache_vlocation *entry; in afs_deliver_vl_get_entry_by_xxx() local
79 entry = call->reply; in afs_deliver_vl_get_entry_by_xxx()
83 entry->name[loop] = ntohl(*bp++); in afs_deliver_vl_get_entry_by_xxx()
84 entry->name[loop] = 0; in afs_deliver_vl_get_entry_by_xxx()
88 entry->nservers = ntohl(*bp++); in afs_deliver_vl_get_entry_by_xxx()
91 entry->servers[loop].s_addr = *bp++; in afs_deliver_vl_get_entry_by_xxx()
97 entry->srvtmask[loop] = 0; in afs_deliver_vl_get_entry_by_xxx()
99 entry->srvtmask[loop] |= AFS_VOL_VTM_RW; in afs_deliver_vl_get_entry_by_xxx()
101 entry->srvtmask[loop] |= AFS_VOL_VTM_RO; in afs_deliver_vl_get_entry_by_xxx()
103 entry->srvtmask[loop] |= AFS_VOL_VTM_BAK; in afs_deliver_vl_get_entry_by_xxx()
[all …]
/fs/proc/
proc_sysctl.c
106 struct ctl_table *entry; in find_entry() local
117 entry = &head->ctl_table[ctl_node - head->node]; in find_entry()
118 procname = entry->procname; in find_entry()
127 return entry; in find_entry()
133 static int insert_entry(struct ctl_table_header *head, struct ctl_table *entry) in insert_entry() argument
135 struct rb_node *node = &head->node[entry - head->ctl_table].node; in insert_entry()
138 const char *name = entry->procname; in insert_entry()
162 pr_cont("/%s\n", entry->procname); in insert_entry()
172 static void erase_entry(struct ctl_table_header *head, struct ctl_table *entry) in erase_entry() argument
174 struct rb_node *node = &head->node[entry - head->ctl_table].node; in erase_entry()
[all …]
namespaces.c
109 const struct proc_ns_operations **entry, **last; in proc_ns_dir_readdir() local
118 entry = ns_entries + (ctx->pos - 2); in proc_ns_dir_readdir()
120 while (entry <= last) { in proc_ns_dir_readdir()
121 const struct proc_ns_operations *ops = *entry; in proc_ns_dir_readdir()
126 entry++; in proc_ns_dir_readdir()
143 const struct proc_ns_operations **entry, **last; in proc_ns_dir_lookup() local
152 for (entry = ns_entries; entry < last; entry++) { in proc_ns_dir_lookup()
153 if (strlen((*entry)->name) != len) in proc_ns_dir_lookup()
155 if (!memcmp(dentry->d_name.name, (*entry)->name, len)) in proc_ns_dir_lookup()
158 if (entry == last) in proc_ns_dir_lookup()
[all …]
uid.c
36 struct uid_hash_entry *entry; in uid_hash_entry_exists_locked() local
38 hash_for_each_possible(proc_uid_hash_table, entry, hash, uid) { in uid_hash_entry_exists_locked()
39 if (entry->uid == uid) in uid_hash_entry_exists_locked()
47 struct uid_hash_entry *entry; in proc_register_uid() local
57 entry = kzalloc(sizeof(struct uid_hash_entry), GFP_KERNEL); in proc_register_uid()
58 if (!entry) in proc_register_uid()
60 entry->uid = uid; in proc_register_uid()
64 kfree(entry); in proc_register_uid()
66 hash_add(proc_uid_hash_table, &entry->hash, uid); in proc_register_uid()
231 struct uid_hash_entry *entry; in proc_uid_readdir() local
[all …]
/fs/ext4/
block_validity.c
62 struct ext4_system_zone *new_entry, *entry; in add_system_zone() local
68 entry = rb_entry(parent, struct ext4_system_zone, node); in add_system_zone()
69 if (start_blk < entry->start_blk) in add_system_zone()
71 else if (start_blk >= (entry->start_blk + entry->count)) in add_system_zone()
92 entry = rb_entry(node, struct ext4_system_zone, node); in add_system_zone()
93 if (can_merge(entry, new_entry)) { in add_system_zone()
94 new_entry->start_blk = entry->start_blk; in add_system_zone()
95 new_entry->count += entry->count; in add_system_zone()
97 kmem_cache_free(ext4_system_zone_cachep, entry); in add_system_zone()
104 entry = rb_entry(node, struct ext4_system_zone, node); in add_system_zone()
[all …]
xattr.c
186 ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end, in ext4_xattr_check_names() argument
189 struct ext4_xattr_entry *e = entry; in ext4_xattr_check_names()
200 while (!IS_LAST_ENTRY(entry)) { in ext4_xattr_check_names()
201 if (entry->e_value_size != 0 && in ext4_xattr_check_names()
202 (value_start + le16_to_cpu(entry->e_value_offs) < in ext4_xattr_check_names()
204 value_start + le16_to_cpu(entry->e_value_offs) + in ext4_xattr_check_names()
205 le32_to_cpu(entry->e_value_size) > end)) in ext4_xattr_check_names()
207 entry = EXT4_XATTR_NEXT(entry); in ext4_xattr_check_names()
237 struct ext4_xattr_entry *entry = IFIRST(header); in __xattr_check_inode() local
243 error = ext4_xattr_check_names(entry, end, entry); in __xattr_check_inode()
[all …]
/fs/f2fs/
recovery.c
62 struct fsync_inode_entry *entry; in get_fsync_inode() local
64 list_for_each_entry(entry, head, list) in get_fsync_inode()
65 if (entry->inode->i_ino == ino) in get_fsync_inode()
66 return entry; in get_fsync_inode()
75 struct fsync_inode_entry *entry; in add_fsync_inode() local
92 entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO); in add_fsync_inode()
93 entry->inode = inode; in add_fsync_inode()
94 list_add_tail(&entry->list, head); in add_fsync_inode()
96 return entry; in add_fsync_inode()
102 static void del_fsync_inode(struct fsync_inode_entry *entry) in del_fsync_inode() argument
[all …]
xattr.h
63 #define ENTRY_SIZE(entry) (XATTR_ALIGN(sizeof(struct f2fs_xattr_entry) + \ argument
64 (entry)->e_name_len + le16_to_cpu((entry)->e_value_size)))
66 #define XATTR_NEXT_ENTRY(entry) ((struct f2fs_xattr_entry *)((char *)(entry) +\ argument
67 ENTRY_SIZE(entry)))
69 #define IS_XATTR_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0) argument
71 #define list_for_each_xattr(entry, addr) \ argument
72 for (entry = XATTR_FIRST_ENTRY(addr);\
73 !IS_XATTR_LAST_ENTRY(entry);\
74 entry = XATTR_NEXT_ENTRY(entry))
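The xattr.h macros above walk a packed array of variable-length entries: each entry's size is its header plus name plus value, rounded up to 4 bytes, and the walk stops at a zeroed 32-bit marker. Below is a minimal user-space sketch of the same walk, assuming simplified host-endian fields (the kernel stores the value size little-endian) and made-up struct names.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define XA_ALIGN(len)	(((len) + 3) & ~3u)	/* 4-byte alignment, as XATTR_ALIGN does */

struct xa_entry {
	uint8_t  name_index;
	uint8_t  name_len;
	uint16_t value_size;	/* host-endian here for simplicity */
	char     name[];	/* name bytes; the value follows the name */
};

static size_t xa_entry_size(const struct xa_entry *e)
{
	return XA_ALIGN(sizeof(*e) + e->name_len + e->value_size);
}

#define xa_is_last(e)	(*(const uint32_t *)(e) == 0)
#define xa_next(e)	((const struct xa_entry *)((const char *)(e) + xa_entry_size(e)))

int main(void)
{
	_Alignas(uint32_t) unsigned char buf[64] = { 0 };
	struct xa_entry *e = (struct xa_entry *)buf;

	/* Pack two entries by hand; the zero bytes left in buf act as the
	 * terminating marker that the is-last test looks for. */
	e->name_index = 1; e->name_len = 2; e->value_size = 3;
	memcpy(e->name, "ab", 2);
	e = (struct xa_entry *)((char *)e + xa_entry_size(e));
	e->name_index = 2; e->name_len = 3; e->value_size = 1;
	memcpy(e->name, "xyz", 3);

	for (const struct xa_entry *it = (const struct xa_entry *)buf;
	     !xa_is_last(it); it = xa_next(it))
		printf("index=%u name=%.*s value_size=%u\n",
		       (unsigned)it->name_index, it->name_len, it->name,
		       (unsigned)it->value_size);
	return 0;
}
```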
acl.c
53 struct f2fs_acl_entry *entry = (struct f2fs_acl_entry *)(hdr + 1); in f2fs_acl_from_disk() local
71 if ((char *)entry > end) in f2fs_acl_from_disk()
74 acl->a_entries[i].e_tag = le16_to_cpu(entry->e_tag); in f2fs_acl_from_disk()
75 acl->a_entries[i].e_perm = le16_to_cpu(entry->e_perm); in f2fs_acl_from_disk()
82 entry = (struct f2fs_acl_entry *)((char *)entry + in f2fs_acl_from_disk()
89 le32_to_cpu(entry->e_id)); in f2fs_acl_from_disk()
90 entry = (struct f2fs_acl_entry *)((char *)entry + in f2fs_acl_from_disk()
96 le32_to_cpu(entry->e_id)); in f2fs_acl_from_disk()
97 entry = (struct f2fs_acl_entry *)((char *)entry + in f2fs_acl_from_disk()
104 if ((char *)entry != end) in f2fs_acl_from_disk()
[all …]
xattr.c
254 struct f2fs_xattr_entry *entry; in __find_xattr() local
256 list_for_each_xattr(entry, base_addr) { in __find_xattr()
257 if (entry->e_name_index != index) in __find_xattr()
259 if (entry->e_name_len != len) in __find_xattr()
261 if (!memcmp(entry->e_name, name, len)) in __find_xattr()
264 return entry; in __find_xattr()
271 struct f2fs_xattr_entry *entry; in __find_inline_xattr() local
274 list_for_each_xattr(entry, base_addr) { in __find_inline_xattr()
275 if ((void *)entry + sizeof(__u32) > base_addr + inline_size || in __find_inline_xattr()
276 (void *)XATTR_NEXT_ENTRY(entry) + sizeof(__u32) > in __find_inline_xattr()
[all …]
/fs/efivarfs/
super.c
116 struct efivar_entry *entry; in efivarfs_callback() local
125 entry = kzalloc(sizeof(*entry), GFP_KERNEL); in efivarfs_callback()
126 if (!entry) in efivarfs_callback()
129 memcpy(entry->var.VariableName, name16, name_size); in efivarfs_callback()
130 memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t)); in efivarfs_callback()
132 len = ucs2_utf8size(entry->var.VariableName); in efivarfs_callback()
139 ucs2_as_utf8(name, entry->var.VariableName, len); in efivarfs_callback()
141 if (efivar_variable_is_removable(entry->var.VendorGuid, name, len)) in efivarfs_callback()
146 efi_guid_to_str(&entry->var.VendorGuid, name + len + 1); in efivarfs_callback()
167 efivar_entry_size(entry, &size); in efivarfs_callback()
[all …]
/fs/fuse/
dir.c
41 static inline void fuse_dentry_settime(struct dentry *entry, u64 time) in fuse_dentry_settime() argument
43 entry->d_time = time; in fuse_dentry_settime()
46 static inline u64 fuse_dentry_time(struct dentry *entry) in fuse_dentry_time() argument
48 return entry->d_time; in fuse_dentry_time()
54 static void fuse_dentry_settime(struct dentry *entry, u64 time) in fuse_dentry_settime() argument
56 entry->d_time = time; in fuse_dentry_settime()
57 entry->d_fsdata = (void *) (unsigned long) (time >> 32); in fuse_dentry_settime()
60 static u64 fuse_dentry_time(struct dentry *entry) in fuse_dentry_time() argument
62 return (u64) entry->d_time + in fuse_dentry_time()
63 ((u64) (unsigned long) entry->d_fsdata << 32); in fuse_dentry_time()
[all …]
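The fuse dir.c hits show two variants of fuse_dentry_settime(): where d_time cannot hold a full 64-bit value, the timeout is split between d_time and d_fsdata and recombined on read. Here is a small user-space sketch of that split, assuming a stand-in struct for the two dentry fields involved.

```c
#include <stdint.h>
#include <stdio.h>

struct fake_dentry {
	uint32_t d_time;	/* low 32 bits of the timeout */
	void    *d_fsdata;	/* high 32 bits, stashed as a pointer-sized value */
};

static void settime(struct fake_dentry *d, uint64_t time)
{
	d->d_time = (uint32_t)time;
	d->d_fsdata = (void *)(unsigned long)(time >> 32);
}

static uint64_t gettime(const struct fake_dentry *d)
{
	return (uint64_t)d->d_time +
	       ((uint64_t)(unsigned long)d->d_fsdata << 32);
}

int main(void)
{
	struct fake_dentry d;
	uint64_t t = 0x123456789abcdef0ULL;

	settime(&d, t);
	printf("round trip %s\n", gettime(&d) == t ? "ok" : "mismatch");
	return 0;
}
```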
/fs/nfs_common/
nfsacl.c
54 struct posix_acl_entry *entry = in xdr_nfsace_encode() local
57 *p++ = htonl(entry->e_tag | nfsacl_desc->typeflag); in xdr_nfsace_encode()
58 switch(entry->e_tag) { in xdr_nfsace_encode()
66 *p++ = htonl(from_kuid(&init_user_ns, entry->e_uid)); in xdr_nfsace_encode()
69 *p++ = htonl(from_kgid(&init_user_ns, entry->e_gid)); in xdr_nfsace_encode()
75 *p++ = htonl(entry->e_perm & S_IRWXO); in xdr_nfsace_encode()
150 struct posix_acl_entry *entry; in xdr_nfsace_decode() local
162 entry = &nfsacl_desc->acl->a_entries[nfsacl_desc->count++]; in xdr_nfsace_decode()
163 entry->e_tag = ntohl(*p++) & ~NFS_ACL_DEFAULT; in xdr_nfsace_decode()
165 entry->e_perm = ntohl(*p++); in xdr_nfsace_decode()
[all …]
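The nfsacl.c hits encode and decode each POSIX ACL entry as 32-bit big-endian words via htonl()/ntohl(), masking permission bits with S_IRWXO. A tiny user-space sketch of that wire-format pattern follows, with simplified field names and without the tag/typeflag and id-selection details shown in the excerpt.

```c
#include <arpa/inet.h>	/* htonl / ntohl */
#include <stdint.h>
#include <stdio.h>

struct acl_entry { uint32_t tag, id, perm; };

/* Write one entry as three 32-bit big-endian words; return the advanced
 * write pointer, much as the *p++ sequence in the excerpt does. */
static uint32_t *encode_entry(uint32_t *p, const struct acl_entry *e)
{
	*p++ = htonl(e->tag);
	*p++ = htonl(e->id);
	*p++ = htonl(e->perm & 07);	/* keep only rwx bits, like the S_IRWXO mask */
	return p;
}

int main(void)
{
	uint32_t buf[3];
	struct acl_entry e = { .tag = 2, .id = 1000, .perm = 06 };

	encode_entry(buf, &e);
	printf("decoded: tag=%u id=%u perm=%o\n",
	       (unsigned)ntohl(buf[0]), (unsigned)ntohl(buf[1]),
	       (unsigned)ntohl(buf[2]));
	return 0;
}
```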
/fs/fat/
fatent.c
21 static void fat12_ent_blocknr(struct super_block *sb, int entry, in fat12_ent_blocknr() argument
25 int bytes = entry + (entry >> 1); in fat12_ent_blocknr()
26 WARN_ON(!fat_valid_entry(sbi, entry)); in fat12_ent_blocknr()
31 static void fat_ent_blocknr(struct super_block *sb, int entry, in fat_ent_blocknr() argument
35 int bytes = (entry << sbi->fatent_shift); in fat_ent_blocknr()
36 WARN_ON(!fat_valid_entry(sbi, entry)); in fat_ent_blocknr()
123 if (fatent->entry & 1) in fat12_ent_get()
161 if (fatent->entry & 1) { in fat12_ent_put()
196 u8 *nextp = ent12_p[1] + 1 + (fatent->entry & 1); in fat12_ent_next()
198 fatent->entry++; in fat12_ent_next()
[all …]
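The fatent.c hits show the FAT12 arithmetic: each 12-bit entry starts at byte offset entry + entry/2, and odd-numbered entries sit in the upper 12 bits of the 16-bit little-endian word at that offset. Below is a user-space sketch of the lookup side of that packing, using a small hand-packed table purely for illustration.

```c
#include <stdint.h>
#include <stdio.h>

/* Read FAT12 entry N from a packed table: 1.5 bytes per entry, so the
 * entry starts at byte N + N/2; odd entries use the high 12 bits of the
 * little-endian 16-bit word found there, even entries the low 12 bits. */
static unsigned fat12_get(const uint8_t *fat, unsigned entry)
{
	unsigned bytes = entry + (entry >> 1);
	unsigned word  = fat[bytes] | (fat[bytes + 1] << 8);

	return (entry & 1) ? (word >> 4) : (word & 0x0fff);
}

int main(void)
{
	/* Entries 0..3 packed by hand: 0x123, 0x456, 0x789, 0xabc */
	const uint8_t fat[] = { 0x23, 0x61, 0x45, 0x89, 0xc7, 0xab };

	for (unsigned i = 0; i < 4; i++)
		printf("entry %u -> 0x%03x\n", i, fat12_get(fat, i));
	return 0;
}
```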
/fs/xfs/libxfs/
xfs_attr_leaf.c
834 struct xfs_attr_leaf_entry *entry; in xfs_attr_shortform_allfit() local
843 entry = xfs_attr3_leaf_entryp(leaf); in xfs_attr_shortform_allfit()
846 for (i = 0; i < leafhdr.count; entry++, i++) { in xfs_attr_shortform_allfit()
847 if (entry->flags & XFS_ATTR_INCOMPLETE) in xfs_attr_shortform_allfit()
849 if (!(entry->flags & XFS_ATTR_LOCAL)) in xfs_attr_shortform_allfit()
878 struct xfs_attr_leaf_entry *entry; in xfs_attr3_leaf_to_shortform() local
896 entry = xfs_attr3_leaf_entryp(leaf); in xfs_attr3_leaf_to_shortform()
930 for (i = 0; i < ichdr.count; entry++, i++) { in xfs_attr3_leaf_to_shortform()
931 if (entry->flags & XFS_ATTR_INCOMPLETE) in xfs_attr3_leaf_to_shortform()
933 if (!entry->nameidx) in xfs_attr3_leaf_to_shortform()
[all …]
/fs/omfs/
file.c
35 struct omfs_extent_entry *entry; in omfs_shrink_inode() local
71 entry = &oe->e_entry; in omfs_shrink_inode()
76 start = be64_to_cpu(entry->e_cluster); in omfs_shrink_inode()
77 count = be64_to_cpu(entry->e_blocks); in omfs_shrink_inode()
80 entry++; in omfs_shrink_inode()
120 struct omfs_extent_entry *entry = &oe->e_entry; in omfs_grow_extent() local
142 terminator = entry + extent_count - 1; in omfs_grow_extent()
144 entry = terminator-1; in omfs_grow_extent()
145 new_block = be64_to_cpu(entry->e_cluster) + in omfs_grow_extent()
146 be64_to_cpu(entry->e_blocks); in omfs_grow_extent()
[all …]
/fs/ext2/
xattr.c
70 #define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0) argument
151 struct ext2_xattr_entry *entry; in ext2_xattr_get() local
188 entry = FIRST_ENTRY(bh); in ext2_xattr_get()
189 while (!IS_LAST_ENTRY(entry)) { in ext2_xattr_get()
191 EXT2_XATTR_NEXT(entry); in ext2_xattr_get()
194 if (name_index == entry->e_name_index && in ext2_xattr_get()
195 name_len == entry->e_name_len && in ext2_xattr_get()
196 memcmp(name, entry->e_name, name_len) == 0) in ext2_xattr_get()
198 entry = next; in ext2_xattr_get()
206 if (entry->e_value_block != 0) in ext2_xattr_get()
[all …]
/fs/nfs/
dir.c
242 int nfs_readdir_add_to_array(struct nfs_entry *entry, struct page *page) in nfs_readdir_add_to_array() argument
258 cache_entry->cookie = entry->prev_cookie; in nfs_readdir_add_to_array()
259 cache_entry->ino = entry->ino; in nfs_readdir_add_to_array()
260 cache_entry->d_type = entry->d_type; in nfs_readdir_add_to_array()
261 ret = nfs_readdir_make_qstr(&cache_entry->string, entry->name, entry->len); in nfs_readdir_add_to_array()
264 array->last_cookie = entry->cookie; in nfs_readdir_add_to_array()
266 if (entry->eof != 0) in nfs_readdir_add_to_array()
382 struct nfs_entry *entry, struct file *file, struct inode *inode) in nfs_readdir_xdr_filler() argument
392 error = NFS_PROTO(inode)->readdir(file_dentry(file), cred, entry->cookie, pages, in nfs_readdir_xdr_filler()
411 struct nfs_entry *entry, struct xdr_stream *xdr) in xdr_decode() argument
[all …]
/fs/jffs2/
acl.c
59 struct jffs2_acl_entry *entry; in jffs2_acl_from_medium() local
86 entry = value; in jffs2_acl_from_medium()
89 acl->a_entries[i].e_tag = je16_to_cpu(entry->e_tag); in jffs2_acl_from_medium()
90 acl->a_entries[i].e_perm = je16_to_cpu(entry->e_perm); in jffs2_acl_from_medium()
105 je32_to_cpu(entry->e_id)); in jffs2_acl_from_medium()
113 je32_to_cpu(entry->e_id)); in jffs2_acl_from_medium()
131 struct jffs2_acl_entry *entry; in jffs2_acl_to_medium() local
136 header = kmalloc(sizeof(*header) + acl->a_count * sizeof(*entry), GFP_KERNEL); in jffs2_acl_to_medium()
143 entry = e; in jffs2_acl_to_medium()
144 entry->e_tag = cpu_to_je16(acl_e->e_tag); in jffs2_acl_to_medium()
[all …]
