/fs/notify/dnotify/
  dnotify.c
      53  struct dnotify_struct *dn;    (member)
      67  struct dnotify_struct *dn;    (in dnotify_recalc_inode_mask(), local)
      74  for (dn = dn_mark->dn; dn != NULL; dn = dn->dn_next)    (in dnotify_recalc_inode_mask())
      75  new_mask |= (dn->dn_mask & ~FS_DN_MULTISHOT);    (in dnotify_recalc_inode_mask())
      96  struct dnotify_struct *dn;    (in dnotify_handle_event(), local)
     108  prev = &dn_mark->dn;    (in dnotify_handle_event())
     109  while ((dn = *prev) != NULL) {    (in dnotify_handle_event())
     110  if ((dn->dn_mask & test_mask) == 0) {    (in dnotify_handle_event())
     111  prev = &dn->dn_next;    (in dnotify_handle_event())
     114  fown = &dn->dn_filp->f_owner;    (in dnotify_handle_event())
    [all …]
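The dnotify.c hits above show the per-mark list walk in dnotify_recalc_inode_mask(): every registered dnotify_struct contributes its dn_mask, with FS_DN_MULTISHOT stripped, to the recomputed inode mask. Below is a minimal userspace sketch of that walk; the struct layout and the flag value are simplified stand-ins for illustration, not the kernel definitions.

/*
 * Minimal userspace sketch of the mask-recalculation walk seen in
 * dnotify_recalc_inode_mask() above.  The struct layout and the
 * FS_DN_MULTISHOT value are simplified stand-ins, not the kernel's.
 */
#include <stdio.h>

#define FS_DN_MULTISHOT 0x20000000u     /* stand-in flag bit */

struct dnotify_struct {
        struct dnotify_struct *dn_next; /* singly linked per-mark list */
        unsigned int dn_mask;           /* events this registration wants */
};

/* OR together every entry's mask, ignoring the multishot flag. */
static unsigned int recalc_mask(const struct dnotify_struct *dn)
{
        unsigned int new_mask = 0;

        for (; dn != NULL; dn = dn->dn_next)
                new_mask |= (dn->dn_mask & ~FS_DN_MULTISHOT);
        return new_mask;
}

int main(void)
{
        struct dnotify_struct b = { NULL, 0x2u | FS_DN_MULTISHOT };
        struct dnotify_struct a = { &b, 0x1u };

        printf("combined mask: 0x%x\n", recalc_mask(&a));      /* prints 0x3 */
        return 0;
}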
/fs/f2fs/
  inline.c
     127  int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)    (in f2fs_convert_inline_page(), argument)
     130  .sbi = F2FS_I_SB(dn->inode),    (in f2fs_convert_inline_page())
     131  .ino = dn->inode->i_ino,    (in f2fs_convert_inline_page())
     142  if (!f2fs_exist_data(dn->inode))    (in f2fs_convert_inline_page())
     145  err = f2fs_reserve_block(dn, 0);    (in f2fs_convert_inline_page())
     149  err = f2fs_get_node_info(fio.sbi, dn->nid, &ni, false);    (in f2fs_convert_inline_page())
     151  f2fs_truncate_data_blocks_range(dn, 1);    (in f2fs_convert_inline_page())
     152  f2fs_put_dnode(dn);    (in f2fs_convert_inline_page())
     158  if (unlikely(dn->data_blkaddr != NEW_ADDR)) {    (in f2fs_convert_inline_page())
     159  f2fs_put_dnode(dn);    (in f2fs_convert_inline_page())
    [all …]
  recovery.c
     466  block_t blkaddr, struct dnode_of_data *dn)    (in check_index_in_prev_nodes(), argument)
     474  struct dnode_of_data tdn = *dn;    (in check_index_in_prev_nodes())
     506  max_addrs = ADDRS_PER_PAGE(dn->node_page, dn->inode);    (in check_index_in_prev_nodes())
     509  ofs_in_node, dn->inode->i_ino, nid, max_addrs);    (in check_index_in_prev_nodes())
     514  if (dn->inode->i_ino == nid) {    (in check_index_in_prev_nodes())
     516  if (!dn->inode_page_locked)    (in check_index_in_prev_nodes())
     517  lock_page(dn->inode_page);    (in check_index_in_prev_nodes())
     518  tdn.node_page = dn->inode_page;    (in check_index_in_prev_nodes())
     521  } else if (dn->nid == nid) {    (in check_index_in_prev_nodes())
     535  if (ino != dn->inode->i_ino) {    (in check_index_in_prev_nodes())
    [all …]
  data.c
    1185  static void __set_data_blkaddr(struct dnode_of_data *dn)    (in __set_data_blkaddr(), argument)
    1187  struct f2fs_node *rn = F2FS_NODE(dn->node_page);    (in __set_data_blkaddr())
    1191  if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))    (in __set_data_blkaddr())
    1192  base = get_extra_isize(dn->inode);    (in __set_data_blkaddr())
    1196  addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);    (in __set_data_blkaddr())
    1205  void f2fs_set_data_blkaddr(struct dnode_of_data *dn)    (in f2fs_set_data_blkaddr(), argument)
    1207  f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);    (in f2fs_set_data_blkaddr())
    1208  __set_data_blkaddr(dn);    (in f2fs_set_data_blkaddr())
    1209  if (set_page_dirty(dn->node_page))    (in f2fs_set_data_blkaddr())
    1210  dn->node_changed = true;    (in f2fs_set_data_blkaddr())
    [all …]
  node.c
     645  pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)    (in f2fs_get_next_page_offset(), argument)
     647  const long direct_index = ADDRS_PER_INODE(dn->inode);    (in f2fs_get_next_page_offset())
     648  const long direct_blks = ADDRS_PER_BLOCK(dn->inode);    (in f2fs_get_next_page_offset())
     649  const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;    (in f2fs_get_next_page_offset())
     650  unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);    (in f2fs_get_next_page_offset())
     651  int cur_level = dn->cur_level;    (in f2fs_get_next_page_offset())
     652  int max_level = dn->max_level;    (in f2fs_get_next_page_offset())
     655  if (!dn->max_level)    (in f2fs_get_next_page_offset())
     661  switch (dn->max_level) {    (in f2fs_get_next_page_offset())
     672  f2fs_bug_on(F2FS_I_SB(dn->inode), 1);    (in f2fs_get_next_page_offset())
    [all …]
  file.c
      59  struct dnode_of_data dn;    (in f2fs_vm_page_mkwrite(), local)
     116  set_new_dnode(&dn, inode, NULL, NULL, 0);    (in f2fs_vm_page_mkwrite())
     117  err = f2fs_get_block_locked(&dn, page->index);    (in f2fs_vm_page_mkwrite())
     122  set_new_dnode(&dn, inode, NULL, NULL, 0);    (in f2fs_vm_page_mkwrite())
     123  err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);    (in f2fs_vm_page_mkwrite())
     124  f2fs_put_dnode(&dn);    (in f2fs_vm_page_mkwrite())
     135  f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);    (in f2fs_vm_page_mkwrite())
     417  struct dnode_of_data dn;    (in f2fs_seek_block(), local)
     443  set_new_dnode(&dn, inode, NULL, NULL, 0);    (in f2fs_seek_block())
     444  err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);    (in f2fs_seek_block())
    [all …]
  compress.c
     899  bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)    (in f2fs_sanity_check_cluster(), argument)
     901  struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);    (in f2fs_sanity_check_cluster())
     902  unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;    (in f2fs_sanity_check_cluster())
     903  bool compressed = dn->data_blkaddr == COMPRESS_ADDR;    (in f2fs_sanity_check_cluster())
     912  if (dn->ofs_in_node % cluster_size) {    (in f2fs_sanity_check_cluster())
     918  block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,    (in f2fs_sanity_check_cluster())
     919  dn->ofs_in_node + i);    (in f2fs_sanity_check_cluster())
     940  dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason);    (in f2fs_sanity_check_cluster())
     948  struct dnode_of_data dn;    (in __f2fs_cluster_blocks(), local)
     954  set_new_dnode(&dn, inode, NULL, NULL, 0);    (in __f2fs_cluster_blocks())
    [all …]
  extent_cache.c
     873  static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type type)    (in __update_extent_cache(), argument)
     877  if (!__may_extent_tree(dn->inode, type))    (in __update_extent_cache())
     880  ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +    (in __update_extent_cache())
     881  dn->ofs_in_node;    (in __update_extent_cache())
     885  if (dn->data_blkaddr == NEW_ADDR)    (in __update_extent_cache())
     888  ei.blk = dn->data_blkaddr;    (in __update_extent_cache())
     890  if (__get_new_block_age(dn->inode, &ei, dn->data_blkaddr))    (in __update_extent_cache())
     893  __update_extent_tree_range(dn->inode, &ei, type);    (in __update_extent_cache())
     992  void f2fs_update_read_extent_cache(struct dnode_of_data *dn)    (in f2fs_update_read_extent_cache(), argument)
     994  return __update_extent_cache(dn, EX_READ);    (in f2fs_update_read_extent_cache())
    [all …]
  f2fs.h
     972  static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,    (in set_new_dnode(), argument)
     975  memset(dn, 0, sizeof(*dn));    (in set_new_dnode())
     976  dn->inode = inode;    (in set_new_dnode())
     977  dn->inode_page = ipage;    (in set_new_dnode())
     978  dn->node_page = npage;    (in set_new_dnode())
     979  dn->nid = nid;    (in set_new_dnode())
    2758  static inline void f2fs_put_dnode(struct dnode_of_data *dn)    (in f2fs_put_dnode(), argument)
    2760  if (dn->node_page)    (in f2fs_put_dnode())
    2761  f2fs_put_page(dn->node_page, 1);    (in f2fs_put_dnode())
    2762  if (dn->inode_page && dn->node_page != dn->inode_page)    (in f2fs_put_dnode())
    [all …]
  gc.c
    1176  struct dnode_of_data dn;    (in ra_data_block(), local)
    1196  &dn.data_blkaddr)) {    (in ra_data_block())
    1197  if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,    (in ra_data_block())
    1206  set_new_dnode(&dn, inode, NULL, NULL, 0);    (in ra_data_block())
    1207  err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);    (in ra_data_block())
    1210  f2fs_put_dnode(&dn);    (in ra_data_block())
    1212  if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {    (in ra_data_block())
    1216  if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,    (in ra_data_block())
    1225  fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;    (in ra_data_block())
    1233  f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);    (in ra_data_block())
    [all …]
  segment.c
     216  struct dnode_of_data dn;    (in __replace_atomic_write_block(), local)
     221  set_new_dnode(&dn, inode, NULL, NULL, 0);    (in __replace_atomic_write_block())
     222  err = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);    (in __replace_atomic_write_block())
     231  err = f2fs_get_node_info(sbi, dn.nid, &ni, false);    (in __replace_atomic_write_block())
     233  f2fs_put_dnode(&dn);    (in __replace_atomic_write_block())
     242  f2fs_invalidate_blocks(sbi, dn.data_blkaddr);    (in __replace_atomic_write_block())
     243  f2fs_update_data_blkaddr(&dn, new_addr);    (in __replace_atomic_write_block())
     245  f2fs_replace_block(sbi, &dn, dn.data_blkaddr,    (in __replace_atomic_write_block())
     253  f2fs_put_dnode(&dn);    (in __replace_atomic_write_block())
     257  *old_addr = dn.data_blkaddr;    (in __replace_atomic_write_block())
    [all …]
  xattr.c
     486  struct dnode_of_data dn;    (in write_all_xattrs(), local)
     488  set_new_dnode(&dn, inode, NULL, NULL, new_nid);    (in write_all_xattrs())
     489  xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);    (in write_all_xattrs())
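Across the f2fs hits above (f2fs.h, file.c, gc.c, segment.c) the same struct dnode_of_data cursor pattern repeats: set_new_dnode() zeroes the cursor and attaches the inode, f2fs_get_dnode_of_data() walks the node tree to the dnode covering a page index, the caller reads dn.data_blkaddr, and f2fs_put_dnode() releases the node/inode page references. The sketch below strings that sequence together using only the calls visible in these hits; it assumes the in-tree fs/f2fs/ build context and is an illustration, not a function copied from the tree.

/* Sketch of the recurring init -> lookup -> use -> put sequence.
 * Assumes the fs/f2fs/ build context ("f2fs.h"); error handling trimmed. */
#include "f2fs.h"

static int lookup_data_blkaddr(struct inode *inode, pgoff_t index,
                               block_t *blkaddr)
{
        struct dnode_of_data dn;
        int err;

        /* zero the cursor and attach the inode (see set_new_dnode() above) */
        set_new_dnode(&dn, inode, NULL, NULL, 0);

        /* walk the node tree to the dnode that covers @index */
        err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
        if (err)
                return err;

        /* the lookup leaves the data block address in the cursor */
        *blkaddr = dn.data_blkaddr;

        /* drop the node/inode page references taken by the lookup */
        f2fs_put_dnode(&dn);
        return 0;
}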
/fs/ceph/
  inode.c
    1212  struct dentry *dn = *pdn;    (in splice_dentry(), local)
    1215  BUG_ON(d_inode(dn));    (in splice_dentry())
    1239  if (!d_unhashed(dn))    (in splice_dentry())
    1240  d_drop(dn);    (in splice_dentry())
    1241  realdn = d_splice_alias(in, dn);    (in splice_dentry())
    1244  PTR_ERR(realdn), dn, in, ceph_vinop(in));    (in splice_dentry())
    1251  dn, d_count(dn),    (in splice_dentry())
    1254  dput(dn);    (in splice_dentry())
    1257  BUG_ON(!ceph_dentry(dn));    (in splice_dentry())
    1259  dn, d_inode(dn), ceph_vinop(d_inode(dn)));    (in splice_dentry())
    [all …]
  export.c
     365  struct dentry *dn;    (in ceph_get_parent(), local)
     372  dn = ERR_PTR(-EINVAL);    (in ceph_get_parent())
     377  dn = ERR_CAST(dir);    (in ceph_get_parent())
     388  dn = ERR_CAST(snapdir);    (in ceph_get_parent())
     397  dn = d_obtain_root(dir);    (in ceph_get_parent())
     399  dn = d_obtain_alias(dir);    (in ceph_get_parent())
     401  dn = __get_parent(child->d_sb, child, 0);    (in ceph_get_parent())
     405  child, ceph_vinop(inode), (long)PTR_ERR_OR_ZERO(dn));    (in ceph_get_parent())
     406  return dn;    (in ceph_get_parent())
  dir.c
    1346  struct dentry *dn = di->dentry;    (in __ceph_dentry_lease_touch(), local)
    1349  dout("dentry_lease_touch %p %p '%pd'\n", di, dn, dn);    (in __ceph_dentry_lease_touch())
    1357  mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;    (in __ceph_dentry_lease_touch())
    1380  struct dentry *dn = di->dentry;    (in __ceph_dentry_dir_lease_touch(), local)
    1384  di, dn, dn, di->offset);    (in __ceph_dentry_dir_lease_touch())
    1404  mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;    (in __ceph_dentry_dir_lease_touch())
    1991  unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)    (in ceph_dentry_hash(), argument)
    1999  return dn->d_name.hash;    (in ceph_dentry_hash())
    2002  spin_lock(&dn->d_lock);    (in ceph_dentry_hash())
    2004  dn->d_name.name, dn->d_name.len);    (in ceph_dentry_hash())
    [all …]
  file.c
     687  struct dentry *dn;    (in ceph_finish_async_create(), local)
     706  dn = d_splice_alias(inode, dentry);    (in ceph_finish_async_create())
     707  WARN_ON_ONCE(dn && dn != dentry);    (in ceph_finish_async_create())
     731  struct dentry *dn;    (in ceph_atomic_open(), local)
     841  dn = ceph_finish_lookup(req, dentry, err);    (in ceph_atomic_open())
     842  if (IS_ERR(dn))    (in ceph_atomic_open())
     843  err = PTR_ERR(dn);    (in ceph_atomic_open())
     846  dn = NULL;    (in ceph_atomic_open())
     850  if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {    (in ceph_atomic_open())
     852  dout("atomic_open finish_no_open on dn %p\n", dn);    (in ceph_atomic_open())
    [all …]
  super.h
    1235  extern int ceph_encode_dentry_release(void **p, struct dentry *dn,
    1296  extern unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn);
  caps.c
    3281  struct dentry *dn, *prev = NULL;    (in invalidate_aliases(), local)
    3294  while ((dn = d_find_alias(inode))) {    (in invalidate_aliases())
    3295  if (dn == prev) {    (in invalidate_aliases())
    3296  dput(dn);    (in invalidate_aliases())
    3299  d_invalidate(dn);    (in invalidate_aliases())
    3302  prev = dn;    (in invalidate_aliases())
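The splice_dentry() and ceph_finish_async_create() hits above revolve around d_splice_alias(): a NULL return means the passed dentry was bound to the inode, an ERR_PTR aborts the splice, and any other dentry is an existing alias that should replace the one passed in, which is then dput(). The sketch below condenses that contract using only generic VFS calls; it is not ceph's splice_dentry(), and the reference handling shown is the simplified common case.

/* Sketch of the d_splice_alias() handling pattern seen in splice_dentry().
 * Generic VFS illustration only; not the ceph implementation. */
#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>

static struct dentry *splice_sketch(struct inode *in, struct dentry *dn)
{
        struct dentry *realdn;

        if (!d_unhashed(dn))
                d_drop(dn);     /* d_splice_alias() expects an unhashed dentry */

        realdn = d_splice_alias(in, dn);
        if (IS_ERR(realdn)) {
                dput(dn);       /* splice failed; drop our reference and bail */
                return realdn;
        }
        if (realdn) {
                dput(dn);       /* an existing alias won; use it instead */
                return realdn;
        }
        return dn;              /* dn is now attached to the inode */
}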
/fs/ubifs/
  crypto.c
      38  int ubifs_encrypt(const struct inode *inode, struct ubifs_data_node *dn,    (in ubifs_encrypt(), argument)
      42  void *p = &dn->data;    (in ubifs_encrypt())
      47  dn->compr_size = cpu_to_le16(in_len);    (in ubifs_encrypt())
      64  int ubifs_decrypt(const struct inode *inode, struct ubifs_data_node *dn,    (in ubifs_decrypt(), argument)
      69  unsigned int clen = le16_to_cpu(dn->compr_size);    (in ubifs_decrypt())
      78  err = fscrypt_decrypt_block_inplace(inode, virt_to_page(&dn->data),    (in ubifs_decrypt())
      79  dlen, offset_in_page(&dn->data),    (in ubifs_decrypt())
  file.c
      46  struct ubifs_data_node *dn)    (in read_block(), argument)
      54  err = ubifs_tnc_lookup(c, &key, dn);    (in read_block())
      62  ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) >    (in read_block())
      64  len = le32_to_cpu(dn->size);    (in read_block())
      68  dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;    (in read_block())
      71  err = ubifs_decrypt(inode, dn, &dlen, block);    (in read_block())
      77  err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,    (in read_block())
      78  le16_to_cpu(dn->compr_type));    (in read_block())
      95  ubifs_dump_node(c, dn, UBIFS_MAX_DATA_NODE_SZ);    (in read_block())
     104  struct ubifs_data_node *dn;    (in do_readpage(), local)
    [all …]
  journal.c
    1481  unsigned int block, struct ubifs_data_node *dn,    (in truncate_data_node(), argument)
    1487  out_len = le32_to_cpu(dn->size);    (in truncate_data_node())
    1492  dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;    (in truncate_data_node())
    1494  compr_type = le16_to_cpu(dn->compr_type);    (in truncate_data_node())
    1497  err = ubifs_decrypt(inode, dn, &dlen, block);    (in truncate_data_node())
    1505  err = ubifs_decompress(c, &dn->data, dlen, buf, &out_len, compr_type);    (in truncate_data_node())
    1509  ubifs_compress(c, buf, *new_len, &dn->data, &out_len, &compr_type);    (in truncate_data_node())
    1513  err = ubifs_encrypt(inode, dn, out_len, &data_size, block);    (in truncate_data_node())
    1519  dn->compr_size = 0;    (in truncate_data_node())
    1523  dn->compr_type = cpu_to_le16(compr_type);    (in truncate_data_node())
    [all …]
  debug.c
     500  const struct ubifs_data_node *dn = node;    (in ubifs_dump_node(), local)
     502  key_read(c, &dn->key, &key);    (in ubifs_dump_node())
     505  pr_err("\tsize %u\n", le32_to_cpu(dn->size));    (in ubifs_dump_node())
     507  (int)le16_to_cpu(dn->compr_type));    (in ubifs_dump_node())
     513  (void *)&dn->data,    (in ubifs_dump_node())
    2052  struct ubifs_data_node *dn = node;    (in check_leaf(), local)
    2060  inum = key_inum_flash(c, &dn->key);    (in check_leaf())
    2070  blk_offs = key_block_flash(c, &dn->key);    (in check_leaf())
    2072  blk_offs += le32_to_cpu(dn->size);    (in check_leaf())
  ubifs.h
    2121  struct ubifs_data_node *dn,    (in ubifs_encrypt(), argument)
    2130  struct ubifs_data_node *dn,    (in ubifs_decrypt(), argument)
    2139  int ubifs_encrypt(const struct inode *inode, struct ubifs_data_node *dn,
    2141  int ubifs_decrypt(const struct inode *inode, struct ubifs_data_node *dn,
  replay.c
     752  struct ubifs_data_node *dn = snod->node;    (in replay_bud(), local)
     753  loff_t new_size = le32_to_cpu(dn->size) +    (in replay_bud())
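The read_block() and truncate_data_node() hits above show how a ubifs_data_node payload is consumed: look the node up in the TNC, take the payload length as ch.len minus UBIFS_DATA_NODE_SZ, decrypt in place for encrypted inodes, then decompress dn->data according to compr_type. The sketch below strings those calls together in the same order; key construction and most error reporting are left out, and it assumes the ubifs internal headers rather than being a function lifted from the tree.

/* Sketch of the data-node read path outlined by the read_block() hits:
 * TNC lookup, optional in-place decryption, then decompression.
 * Assumes the fs/ubifs/ build context ("ubifs.h"); @key must already
 * identify the wanted data node. */
#include "ubifs.h"

static int read_data_node_sketch(struct ubifs_info *c, struct inode *inode,
                                 unsigned int block, union ubifs_key *key,
                                 struct ubifs_data_node *dn, void *addr)
{
        unsigned int dlen;
        int out_len = UBIFS_BLOCK_SIZE, err;

        err = ubifs_tnc_lookup(c, key, dn);     /* fetch the node body for @key */
        if (err)
                return err;

        /* payload length = on-flash node length minus the common header */
        dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;

        if (IS_ENCRYPTED(inode)) {
                err = ubifs_decrypt(inode, dn, &dlen, block);
                if (err)
                        return err;
        }

        /* inflate dn->data into the caller's page-sized buffer */
        return ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
                                le16_to_cpu(dn->compr_type));
}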
/fs/hpfs/
  dnode.c
     797  struct quad_buffer_head *qbh, struct dnode **dn)    (in map_nth_dirent(), argument)
     804  if (dn) *dn=dnode;    (in map_nth_dirent())