
Searched refs:dn (Results 1 – 25 of 26) sorted by relevance

/fs/notify/dnotify/
dnotify.c
35 struct dnotify_struct *dn; member
49 struct dnotify_struct *dn; in dnotify_recalc_inode_mask() local
56 for (dn = dn_mark->dn; dn != NULL; dn = dn->dn_next) in dnotify_recalc_inode_mask()
57 new_mask |= (dn->dn_mask & ~FS_DN_MULTISHOT); in dnotify_recalc_inode_mask()
78 struct dnotify_struct *dn; in dnotify_handle_event() local
90 prev = &dn_mark->dn; in dnotify_handle_event()
91 while ((dn = *prev) != NULL) { in dnotify_handle_event()
92 if ((dn->dn_mask & test_mask) == 0) { in dnotify_handle_event()
93 prev = &dn->dn_next; in dnotify_handle_event()
96 fown = &dn->dn_filp->f_owner; in dnotify_handle_event()
[all …]
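
The dnotify entries above are the kernel side of the fcntl(F_NOTIFY) directory-notification API: each watcher is a dnotify_struct chained off the mark, and matching events raise a signal on the owning file. A minimal userspace sketch of that interface, for illustration only (not taken from the tree; assumes glibc with _GNU_SOURCE):

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_event;

static void on_dnotify(int sig)
{
	(void)sig;
	got_event = 1;
}

int main(void)
{
	int dirfd = open(".", O_RDONLY | O_DIRECTORY);

	if (dirfd < 0)
		return 1;

	/* Route dnotify events to SIGRTMIN instead of the default SIGIO. */
	signal(SIGRTMIN, on_dnotify);
	fcntl(dirfd, F_SETSIG, SIGRTMIN);

	/* Watch for creates and writes; DN_MULTISHOT keeps the watch armed. */
	fcntl(dirfd, F_NOTIFY, DN_CREATE | DN_MODIFY | DN_MULTISHOT);

	pause();		/* wait for the first notification */
	if (got_event)
		printf("directory changed\n");
	close(dirfd);
	return 0;
}
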
/fs/f2fs/
inline.c
145 int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page) in f2fs_convert_inline_page() argument
148 .sbi = F2FS_I_SB(dn->inode), in f2fs_convert_inline_page()
149 .ino = dn->inode->i_ino, in f2fs_convert_inline_page()
160 if (!f2fs_exist_data(dn->inode)) in f2fs_convert_inline_page()
163 err = f2fs_reserve_block(dn, 0); in f2fs_convert_inline_page()
167 err = f2fs_get_node_info(fio.sbi, dn->nid, &ni, false); in f2fs_convert_inline_page()
169 f2fs_truncate_data_blocks_range(dn, 1); in f2fs_convert_inline_page()
170 f2fs_put_dnode(dn); in f2fs_convert_inline_page()
176 if (unlikely(dn->data_blkaddr != NEW_ADDR)) { in f2fs_convert_inline_page()
177 f2fs_put_dnode(dn); in f2fs_convert_inline_page()
[all …]
recovery.c
443 block_t blkaddr, struct dnode_of_data *dn) in check_index_in_prev_nodes() argument
451 struct dnode_of_data tdn = *dn; in check_index_in_prev_nodes()
483 max_addrs = ADDRS_PER_PAGE(dn->node_page, dn->inode); in check_index_in_prev_nodes()
486 ofs_in_node, dn->inode->i_ino, nid, max_addrs); in check_index_in_prev_nodes()
490 if (dn->inode->i_ino == nid) { in check_index_in_prev_nodes()
492 if (!dn->inode_page_locked) in check_index_in_prev_nodes()
493 lock_page(dn->inode_page); in check_index_in_prev_nodes()
494 tdn.node_page = dn->inode_page; in check_index_in_prev_nodes()
497 } else if (dn->nid == nid) { in check_index_in_prev_nodes()
511 if (ino != dn->inode->i_ino) { in check_index_in_prev_nodes()
[all …]
file.c
58 struct dnode_of_data dn; in f2fs_vm_page_mkwrite() local
116 set_new_dnode(&dn, inode, NULL, NULL, 0); in f2fs_vm_page_mkwrite()
117 err = f2fs_get_block(&dn, page->index); in f2fs_vm_page_mkwrite()
123 set_new_dnode(&dn, inode, NULL, NULL, 0); in f2fs_vm_page_mkwrite()
124 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE); in f2fs_vm_page_mkwrite()
125 f2fs_put_dnode(&dn); in f2fs_vm_page_mkwrite()
136 f2fs_wait_on_block_writeback(inode, dn.data_blkaddr); in f2fs_vm_page_mkwrite()
408 struct dnode_of_data dn; in f2fs_seek_block() local
434 set_new_dnode(&dn, inode, NULL, NULL, 0); in f2fs_seek_block()
435 err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE); in f2fs_seek_block()
[all …]
node.c
646 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs) in f2fs_get_next_page_offset() argument
648 const long direct_index = ADDRS_PER_INODE(dn->inode); in f2fs_get_next_page_offset()
649 const long direct_blks = ADDRS_PER_BLOCK(dn->inode); in f2fs_get_next_page_offset()
650 const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK; in f2fs_get_next_page_offset()
651 unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode); in f2fs_get_next_page_offset()
652 int cur_level = dn->cur_level; in f2fs_get_next_page_offset()
653 int max_level = dn->max_level; in f2fs_get_next_page_offset()
656 if (!dn->max_level) in f2fs_get_next_page_offset()
662 switch (dn->max_level) { in f2fs_get_next_page_offset()
673 f2fs_bug_on(F2FS_I_SB(dn->inode), 1); in f2fs_get_next_page_offset()
[all …]
data.c
1074 static void __set_data_blkaddr(struct dnode_of_data *dn) in __set_data_blkaddr() argument
1076 struct f2fs_node *rn = F2FS_NODE(dn->node_page); in __set_data_blkaddr()
1080 if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode)) in __set_data_blkaddr()
1081 base = get_extra_isize(dn->inode); in __set_data_blkaddr()
1085 addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr); in __set_data_blkaddr()
1094 void f2fs_set_data_blkaddr(struct dnode_of_data *dn) in f2fs_set_data_blkaddr() argument
1096 f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true); in f2fs_set_data_blkaddr()
1097 __set_data_blkaddr(dn); in f2fs_set_data_blkaddr()
1098 if (set_page_dirty(dn->node_page)) in f2fs_set_data_blkaddr()
1099 dn->node_changed = true; in f2fs_set_data_blkaddr()
[all …]
compress.c
873 struct dnode_of_data dn; in __f2fs_cluster_blocks() local
879 set_new_dnode(&dn, inode, NULL, NULL, 0); in __f2fs_cluster_blocks()
880 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE); in __f2fs_cluster_blocks()
887 if (dn.data_blkaddr == COMPRESS_ADDR) { in __f2fs_cluster_blocks()
894 blkaddr = data_blkaddr(dn.inode, in __f2fs_cluster_blocks()
895 dn.node_page, dn.ofs_in_node + i); in __f2fs_cluster_blocks()
910 f2fs_put_dnode(&dn); in __f2fs_cluster_blocks()
1155 struct dnode_of_data dn; in f2fs_write_compressed_pages() local
1180 set_new_dnode(&dn, cc->inode, NULL, NULL, 0); in f2fs_write_compressed_pages()
1182 err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE); in f2fs_write_compressed_pages()
[all …]
extent_cache.c
938 static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type type) in __update_extent_cache() argument
942 if (!__may_extent_tree(dn->inode, type)) in __update_extent_cache()
945 ei.fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) + in __update_extent_cache()
946 dn->ofs_in_node; in __update_extent_cache()
950 if (dn->data_blkaddr == NEW_ADDR) in __update_extent_cache()
953 ei.blk = dn->data_blkaddr; in __update_extent_cache()
955 if (__get_new_block_age(dn->inode, &ei, dn->data_blkaddr)) in __update_extent_cache()
958 __update_extent_tree_range(dn->inode, &ei, type); in __update_extent_cache()
1046 void f2fs_update_read_extent_cache(struct dnode_of_data *dn) in f2fs_update_read_extent_cache() argument
1048 return __update_extent_cache(dn, EX_READ); in f2fs_update_read_extent_cache()
[all …]
gc.c
1123 struct dnode_of_data dn; in ra_data_block() local
1144 dn.data_blkaddr = ei.blk + index - ei.fofs; in ra_data_block()
1145 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr, in ra_data_block()
1153 set_new_dnode(&dn, inode, NULL, NULL, 0); in ra_data_block()
1154 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE); in ra_data_block()
1157 f2fs_put_dnode(&dn); in ra_data_block()
1159 if (!__is_valid_data_blkaddr(dn.data_blkaddr)) { in ra_data_block()
1163 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr, in ra_data_block()
1171 fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr; in ra_data_block()
1179 f2fs_wait_on_block_writeback(inode, dn.data_blkaddr); in ra_data_block()
[all …]
f2fs.h
952 static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode, in set_new_dnode() argument
955 memset(dn, 0, sizeof(*dn)); in set_new_dnode()
956 dn->inode = inode; in set_new_dnode()
957 dn->inode_page = ipage; in set_new_dnode()
958 dn->node_page = npage; in set_new_dnode()
959 dn->nid = nid; in set_new_dnode()
2704 static inline void f2fs_put_dnode(struct dnode_of_data *dn) in f2fs_put_dnode() argument
2706 if (dn->node_page) in f2fs_put_dnode()
2707 f2fs_put_page(dn->node_page, 1); in f2fs_put_dnode()
2708 if (dn->inode_page && dn->node_page != dn->inode_page) in f2fs_put_dnode()
[all …]
segment.c
235 struct dnode_of_data dn; in __revoke_inmem_pages() local
240 set_new_dnode(&dn, inode, NULL, NULL, 0); in __revoke_inmem_pages()
241 err = f2fs_get_dnode_of_data(&dn, page->index, in __revoke_inmem_pages()
254 err = f2fs_get_node_info(sbi, dn.nid, &ni, false); in __revoke_inmem_pages()
256 f2fs_put_dnode(&dn); in __revoke_inmem_pages()
261 f2fs_invalidate_blocks(sbi, dn.data_blkaddr); in __revoke_inmem_pages()
262 f2fs_update_data_blkaddr(&dn, NEW_ADDR); in __revoke_inmem_pages()
264 f2fs_replace_block(sbi, &dn, dn.data_blkaddr, in __revoke_inmem_pages()
266 f2fs_put_dnode(&dn); in __revoke_inmem_pages()
3570 void f2fs_outplace_write_data(struct dnode_of_data *dn, in f2fs_outplace_write_data() argument
[all …]
xattr.c
488 struct dnode_of_data dn; in write_all_xattrs() local
490 set_new_dnode(&dn, inode, NULL, NULL, new_nid); in write_all_xattrs()
491 xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET); in write_all_xattrs()
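
Most of the f2fs entries above follow one pattern: zero-fill a dnode_of_data cursor with set_new_dnode(), resolve it with f2fs_get_dnode_of_data(), read dn.data_blkaddr, then release the pinned pages with f2fs_put_dnode(). A sketch of that shape, assuming fs/f2fs/f2fs.h is in scope (example_lookup_blkaddr is a hypothetical helper, not a function in the tree):

/* Hypothetical helper illustrating the common dnode_of_data lookup shape. */
static int example_lookup_blkaddr(struct inode *inode, pgoff_t index,
				  block_t *blkaddr)
{
	struct dnode_of_data dn;
	int err;

	/* Zero the cursor and attach the inode; no pages pinned yet. */
	set_new_dnode(&dn, inode, NULL, NULL, 0);

	/* Walk the node tree to the direct node that maps @index. */
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return err;

	/* Block address recorded for this index (may be NULL_ADDR or NEW_ADDR). */
	*blkaddr = dn.data_blkaddr;

	/* Drop the node/inode page references taken by the lookup. */
	f2fs_put_dnode(&dn);
	return 0;
}
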
/fs/ceph/
inode.c
1174 struct dentry *dn = *pdn; in splice_dentry() local
1177 BUG_ON(d_inode(dn)); in splice_dentry()
1201 if (!d_unhashed(dn)) in splice_dentry()
1202 d_drop(dn); in splice_dentry()
1203 realdn = d_splice_alias(in, dn); in splice_dentry()
1206 PTR_ERR(realdn), dn, in, ceph_vinop(in)); in splice_dentry()
1213 dn, d_count(dn), in splice_dentry()
1216 dput(dn); in splice_dentry()
1219 BUG_ON(!ceph_dentry(dn)); in splice_dentry()
1221 dn, d_inode(dn), ceph_vinop(d_inode(dn))); in splice_dentry()
[all …]
export.c
353 struct dentry *dn; in ceph_get_parent() local
360 dn = ERR_PTR(-EINVAL); in ceph_get_parent()
365 dn = ERR_CAST(dir); in ceph_get_parent()
376 dn = ERR_CAST(snapdir); in ceph_get_parent()
385 dn = d_obtain_root(dir); in ceph_get_parent()
387 dn = d_obtain_alias(dir); in ceph_get_parent()
389 dn = __get_parent(child->d_sb, child, 0); in ceph_get_parent()
393 child, ceph_vinop(inode), (long)PTR_ERR_OR_ZERO(dn)); in ceph_get_parent()
394 return dn; in ceph_get_parent()
dir.c
1265 struct dentry *dn = di->dentry; in __ceph_dentry_lease_touch() local
1268 dout("dentry_lease_touch %p %p '%pd'\n", di, dn, dn); in __ceph_dentry_lease_touch()
1276 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc; in __ceph_dentry_lease_touch()
1299 struct dentry *dn = di->dentry; in __ceph_dentry_dir_lease_touch() local
1303 di, dn, dn, di->offset); in __ceph_dentry_dir_lease_touch()
1323 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc; in __ceph_dentry_dir_lease_touch()
1911 unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn) in ceph_dentry_hash() argument
1919 return dn->d_name.hash; in ceph_dentry_hash()
1922 spin_lock(&dn->d_lock); in ceph_dentry_hash()
1924 dn->d_name.name, dn->d_name.len); in ceph_dentry_hash()
[all …]
file.c
655 struct dentry *dn; in ceph_finish_async_create() local
674 dn = d_splice_alias(inode, dentry); in ceph_finish_async_create()
675 WARN_ON_ONCE(dn && dn != dentry); in ceph_finish_async_create()
693 struct dentry *dn; in ceph_atomic_open() local
787 dn = ceph_finish_lookup(req, dentry, err); in ceph_atomic_open()
788 if (IS_ERR(dn)) in ceph_atomic_open()
789 err = PTR_ERR(dn); in ceph_atomic_open()
792 dn = NULL; in ceph_atomic_open()
796 if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) { in ceph_atomic_open()
798 dout("atomic_open finish_no_open on dn %p\n", dn); in ceph_atomic_open()
[all …]
super.h
1149 extern int ceph_encode_dentry_release(void **p, struct dentry *dn,
1200 extern unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn);
caps.c
3302 struct dentry *dn, *prev = NULL; in invalidate_aliases() local
3315 while ((dn = d_find_alias(inode))) { in invalidate_aliases()
3316 if (dn == prev) { in invalidate_aliases()
3317 dput(dn); in invalidate_aliases()
3320 d_invalidate(dn); in invalidate_aliases()
3323 prev = dn; in invalidate_aliases()
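
The ceph entries above revolve around the VFS d_splice_alias() contract seen in splice_dentry() and ceph_finish_async_create(): pass an inode (possibly NULL or an ERR_PTR) and a dentry, and either the dentry is bound to the inode or a preexisting alias comes back that must be used instead. A sketch of the usual ->lookup usage under that contract (example_lookup and example_iget are hypothetical, not ceph code):

/* Hypothetical ->lookup illustrating the d_splice_alias() idiom used above. */
static struct dentry *example_lookup(struct inode *dir, struct dentry *dentry,
				     unsigned int flags)
{
	/*
	 * example_iget(): hypothetical; returns the target inode, NULL for a
	 * negative entry, or an ERR_PTR() on failure.
	 */
	struct inode *inode = example_iget(dir, &dentry->d_name);

	/*
	 * d_splice_alias() tolerates NULL and ERR_PTR() inodes, binds the
	 * dentry on success, and may instead return an existing alias
	 * (notably for directories), which is what ->lookup should return.
	 */
	return d_splice_alias(inode, dentry);
}
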
/fs/ubifs/
crypto.c
27 int ubifs_encrypt(const struct inode *inode, struct ubifs_data_node *dn, in ubifs_encrypt() argument
31 void *p = &dn->data; in ubifs_encrypt()
36 dn->compr_size = cpu_to_le16(in_len); in ubifs_encrypt()
53 int ubifs_decrypt(const struct inode *inode, struct ubifs_data_node *dn, in ubifs_decrypt() argument
58 unsigned int clen = le16_to_cpu(dn->compr_size); in ubifs_decrypt()
67 err = fscrypt_decrypt_block_inplace(inode, virt_to_page(&dn->data), in ubifs_decrypt()
68 dlen, offset_in_page(&dn->data), in ubifs_decrypt()
file.c
46 struct ubifs_data_node *dn) in read_block() argument
54 err = ubifs_tnc_lookup(c, &key, dn); in read_block()
62 ubifs_assert(c, le64_to_cpu(dn->ch.sqnum) > in read_block()
64 len = le32_to_cpu(dn->size); in read_block()
68 dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ; in read_block()
71 err = ubifs_decrypt(inode, dn, &dlen, block); in read_block()
77 err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len, in read_block()
78 le16_to_cpu(dn->compr_type)); in read_block()
95 ubifs_dump_node(c, dn); in read_block()
104 struct ubifs_data_node *dn; in do_readpage() local
[all …]
journal.c
1444 unsigned int block, struct ubifs_data_node *dn, in truncate_data_node() argument
1450 out_len = le32_to_cpu(dn->size); in truncate_data_node()
1455 dlen = old_dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ; in truncate_data_node()
1456 compr_type = le16_to_cpu(dn->compr_type); in truncate_data_node()
1459 err = ubifs_decrypt(inode, dn, &dlen, block); in truncate_data_node()
1467 err = ubifs_decompress(c, &dn->data, dlen, buf, &out_len, compr_type); in truncate_data_node()
1471 ubifs_compress(c, buf, *new_len, &dn->data, &out_len, &compr_type); in truncate_data_node()
1475 err = ubifs_encrypt(inode, dn, out_len, &old_dlen, block); in truncate_data_node()
1481 dn->compr_size = 0; in truncate_data_node()
1485 dn->compr_type = cpu_to_le16(compr_type); in truncate_data_node()
[all …]
debug.c
469 const struct ubifs_data_node *dn = node; in ubifs_dump_node() local
472 key_read(c, &dn->key, &key); in ubifs_dump_node()
475 pr_err("\tsize %u\n", le32_to_cpu(dn->size)); in ubifs_dump_node()
477 (int)le16_to_cpu(dn->compr_type)); in ubifs_dump_node()
481 (void *)&dn->data, dlen, 0); in ubifs_dump_node()
2031 struct ubifs_data_node *dn = node; in check_leaf() local
2039 inum = key_inum_flash(c, &dn->key); in check_leaf()
2049 blk_offs = key_block_flash(c, &dn->key); in check_leaf()
2051 blk_offs += le32_to_cpu(dn->size); in check_leaf()
ubifs.h
2080 struct ubifs_data_node *dn, in ubifs_encrypt() argument
2089 struct ubifs_data_node *dn, in ubifs_decrypt() argument
2098 int ubifs_encrypt(const struct inode *inode, struct ubifs_data_node *dn,
2100 int ubifs_decrypt(const struct inode *inode, struct ubifs_data_node *dn,
replay.c
752 struct ubifs_data_node *dn = snod->node; in replay_bud() local
753 loff_t new_size = le32_to_cpu(dn->size) + in replay_bud()
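
The ubifs entries above share the data-node decode sequence visible in read_block() and truncate_data_node(): the payload length is the node length minus the UBIFS_DATA_NODE_SZ header, encrypted payloads are unwrapped in place with ubifs_decrypt(), and the result is expanded with ubifs_decompress() using the recorded compr_type. A sketch of that sequence, assuming fs/ubifs/ubifs.h is in scope (example_decode_data_node is hypothetical):

/* Hypothetical helper showing the decode path for one data node payload. */
static int example_decode_data_node(struct ubifs_info *c, struct inode *inode,
				    struct ubifs_data_node *dn,
				    unsigned int block, void *out)
{
	unsigned int dlen;
	int out_len;

	/* On-flash payload size: whole node minus the fixed data-node header. */
	dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
	out_len = UBIFS_BLOCK_SIZE;

	if (IS_ENCRYPTED(inode)) {
		/* Decrypts in place and updates dlen to the plaintext length. */
		int err = ubifs_decrypt(inode, dn, &dlen, block);

		if (err)
			return err;
	}

	/* Expand into the caller's UBIFS_BLOCK_SIZE buffer. */
	return ubifs_decompress(c, &dn->data, dlen, out, &out_len,
				le16_to_cpu(dn->compr_type));
}
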
/fs/hpfs/
dnode.c
797 struct quad_buffer_head *qbh, struct dnode **dn) in map_nth_dirent() argument
804 if (dn) *dn=dnode; in map_nth_dirent()