/fs/f2fs/

D | node.c |
      33  int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)  in f2fs_check_nid_range() argument
      35  if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {  in f2fs_check_nid_range()
      38  __func__, nid);  in f2fs_check_nid_range()
     133  static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)  in get_current_nat_page() argument
     135  return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));  in get_current_nat_page()
     138  static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)  in get_next_nat_page() argument
     147  dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));  in get_next_nat_page()
     150  src_page = get_current_nat_page(sbi, nid);  in get_next_nat_page()
     162  set_to_next_nat(nm_i, nid);  in get_next_nat_page()
     168  nid_t nid, bool no_fail)  in __alloc_nat_entry() argument
    [all …]

D | node.h |
       9  #define START_NID(nid) (((nid) / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)  argument
      60  nid_t nid;  /* node id */  member
      72  #define nat_get_nid(nat) ((nat)->ni.nid)
      73  #define nat_set_nid(nat, n) ((nat)->ni.nid = (n))
      86  dst->nid = src->nid;  in copy_node_info()
     163  nid_t nid;  /* node id */  member
     167  static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid)  in next_free_nid() argument
     178  *nid = fnid->nid;  in next_free_nid()
     249  return le32_to_cpu(rn->footer.nid);  in nid_of_node()
     271  static inline void fill_node_footer(struct page *page, nid_t nid,  in fill_node_footer() argument
    [all …]

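The node.h hits above are the NAT (node address table) indexing helpers: START_NID() rounds a node id down to the first nid covered by its NAT block, and NAT_BLOCK_OFFSET() (visible in the gc.c hits below) picks the block itself. A minimal user-space sketch of that arithmetic follows; the NAT_ENTRY_PER_BLOCK value here is an assumed illustrative constant, not taken from the listing.

#include <stdio.h>

typedef unsigned int nid_t;

/* assumed value for illustration: entries packed into one NAT block */
#define NAT_ENTRY_PER_BLOCK 455

/* first nid described by the NAT block that contains @nid */
#define START_NID(nid) (((nid) / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)

/* index of that NAT block, mirroring NAT_BLOCK_OFFSET() in the gc.c hits */
#define NAT_BLOCK_OFFSET(nid) ((nid) / NAT_ENTRY_PER_BLOCK)

int main(void)
{
	nid_t nid = 1000;

	printf("nid %u sits in NAT block %u, which starts at nid %u\n",
	       nid, NAT_BLOCK_OFFSET(nid), START_NID(nid));
	return 0;
}
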
D | recovery.c |
     475  nid_t ino, nid;  in check_index_in_prev_nodes() local
     503  nid = le32_to_cpu(sum.nid);  in check_index_in_prev_nodes()
     509  ofs_in_node, dn->inode->i_ino, nid, max_addrs);  in check_index_in_prev_nodes()
     514  if (dn->inode->i_ino == nid) {  in check_index_in_prev_nodes()
     515  tdn.nid = nid;  in check_index_in_prev_nodes()
     521  } else if (dn->nid == nid) {  in check_index_in_prev_nodes()
     527  node_page = f2fs_get_node_page(sbi, nid);  in check_index_in_prev_nodes()
     580  if (dn->inode->i_ino == nid && !dn->inode_page_locked)  in check_index_in_prev_nodes()
     630  err = f2fs_get_node_info(sbi, dn.nid, &ni, false);  in do_recover_data()
     743  fill_node_footer(dn.node_page, dn.nid, ni.ino,  in do_recover_data()

D | gc.c |
    1017  nid_t nid = le32_to_cpu(entry->nid);  in gc_node_segment() local
    1030  f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,  in gc_node_segment()
    1036  f2fs_ra_node_page(sbi, nid);  in gc_node_segment()
    1041  node_page = f2fs_get_node_page(sbi, nid);  in gc_node_segment()
    1051  if (f2fs_get_node_info(sbi, nid, &ni, false)) {  in gc_node_segment()
    1108  nid_t nid;  in is_alive() local
    1112  nid = le32_to_cpu(sum->nid);  in is_alive()
    1115  node_page = f2fs_get_node_page(sbi, nid);  in is_alive()
    1119  if (f2fs_get_node_info(sbi, nid, dni, false)) {  in is_alive()
    1145  base, ofs_in_node, max_addrs, dni->ino, dni->nid);  in is_alive()
    [all …]

D | f2fs.h |
     431  #define nid_in_journal(jnl, i) ((jnl)->nat_j.entries[i].nid)
     963  nid_t nid;  /* node id of the direct node block */  member
     973  struct page *ipage, struct page *npage, nid_t nid)  in set_new_dnode() argument
     979  dn->nid = nid;  in set_new_dnode()
    2841  #define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino)
    3579  int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
    3585  int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
    3586  bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
    3588  int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
    3600  void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
    [all …]

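Several of the f2fs.h hits describe the node footer convention: every node block records its own nid together with the owning inode's ino, and RAW_IS_INODE() treats a block as an inode block exactly when the two match. Below is a stripped-down user-space model of that check; the two-field footer struct and the fill helper are simplifications, since the real fill_node_footer() in node.h writes additional fields.

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int nid_t;

/* only the two footer fields needed for the identity check are modeled */
struct node_footer {
	nid_t nid;	/* id of this node block */
	nid_t ino;	/* id of the inode that owns it */
};

static void fill_footer(struct node_footer *footer, nid_t nid, nid_t ino)
{
	footer->nid = nid;
	footer->ino = ino;
}

/* an inode block is the node block whose own nid is the inode number */
static bool raw_is_inode(const struct node_footer *footer)
{
	return footer->nid == footer->ino;
}

int main(void)
{
	struct node_footer inode_blk, dnode_blk;

	fill_footer(&inode_blk, 5, 5);	/* inode block: nid == ino */
	fill_footer(&dnode_blk, 42, 5);	/* direct node owned by inode 5 */

	printf("%d %d\n", raw_is_inode(&inode_blk), raw_is_inode(&dnode_blk));
	return 0;
}
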
D | segment.h |
     877  static inline void set_summary(struct f2fs_summary *sum, nid_t nid,  in set_summary() argument
     880  sum->nid = cpu_to_le32(nid);  in set_summary()

D | inline.c |
     149  err = f2fs_get_node_info(fio.sbi, dn->nid, &ni, false);  in f2fs_convert_inline_page()

D | data.c |
    1235  trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,  in f2fs_reserve_new_blocks()
    1484  err = f2fs_get_node_info(sbi, dn->nid, &ni, false);  in __allocate_data_block()
    1495  set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);  in __allocate_data_block()
    2807  err = f2fs_get_node_info(fio->sbi, dn.nid, &ni, false);  in f2fs_do_write_data_page()

D | compress.c |
     940  dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason);  in f2fs_sanity_check_cluster()
    1279  err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);  in f2fs_write_compressed_pages()

D | segment.c |
     231  err = f2fs_get_node_info(sbi, dn.nid, &ni, false);  in __replace_atomic_write_block()
    3509  void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio)  in f2fs_do_write_node_page() argument
    3513  set_summary(&sum, nid, 0, 0);  in f2fs_do_write_node_page()
    3528  set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);  in f2fs_outplace_write_data()
    3698  set_summary(&sum, dn->nid, dn->ofs_in_node, version);  in f2fs_replace_block()

D | file.c |
     634  trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,  in f2fs_truncate_data_blocks_range()
    1284  ret = f2fs_get_node_info(sbi, dn.nid, &ni, false);  in __clone_blkaddrs()

/fs/erofs/

D | inode.c |
      30  __func__, vi->nid, *ofs, blkaddr);  in erofs_read_inode()
      35  vi->nid, PTR_ERR(kaddr));  in erofs_read_inode()
      44  ifmt, vi->nid);  in erofs_read_inode()
      52  vi->datalayout, vi->nid);  in erofs_read_inode()
      77  vi->nid, PTR_ERR(kaddr));  in erofs_read_inode()
     166  erofs_inode_version(ifmt), vi->nid);  in erofs_read_inode()
     175  vi->chunkformat, vi->nid);  in erofs_read_inode()
     202  inode->i_mode, vi->nid);  in erofs_read_inode()
     235  vi->nid);  in erofs_fill_symlink()
     323  const erofs_nid_t nid = *(erofs_nid_t *)opaque;  in erofs_ilookup_test_actor() local
    [all …]

D | namei.c |
     114  mid, EROFS_I(dir)->nid);  in erofs_find_target_block()
     160  int erofs_namei(struct inode *dir, const struct qstr *name, erofs_nid_t *nid,  in erofs_namei() argument
     184  *nid = le64_to_cpu(de->nid);  in erofs_namei()
     195  erofs_nid_t nid;  in erofs_lookup() local
     204  err = erofs_namei(dir, &dentry->d_name, &nid, &d_type);  in erofs_lookup()
     213  dentry, nid, d_type);  in erofs_lookup()
     214  inode = erofs_iget(dir->i_sb, nid);  in erofs_lookup()

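The namei.c hits trace the lookup path: erofs_namei() scans the directory and pulls the 64-bit nid out of the matching on-disk dirent with le64_to_cpu(), then erofs_lookup() passes that nid to erofs_iget() to instantiate the inode. The following is a toy user-space model of that name-to-nid-to-inode flow; the dirent and inode tables, names, and numbers are all invented for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint64_t erofs_nid_t;

struct toy_dirent {
	const char *name;
	erofs_nid_t nid;	/* key into the inode table, like de->nid */
};

struct toy_inode {
	erofs_nid_t nid;
	unsigned int mode;
};

static const struct toy_dirent dirents[] = {
	{ "lost+found", 36 },
	{ "README",     41 },
};

static const struct toy_inode inodes[] = {
	{ 36, 040755 },
	{ 41, 0100644 },
};

/* resolve a name to its nid, standing in for erofs_namei() */
static int toy_namei(const char *name, erofs_nid_t *nid)
{
	size_t i;

	for (i = 0; i < sizeof(dirents) / sizeof(dirents[0]); i++) {
		if (!strcmp(dirents[i].name, name)) {
			*nid = dirents[i].nid;
			return 0;
		}
	}
	return -1;	/* not found */
}

/* fetch the inode for a nid, standing in for erofs_iget() */
static const struct toy_inode *toy_iget(erofs_nid_t nid)
{
	size_t i;

	for (i = 0; i < sizeof(inodes) / sizeof(inodes[0]); i++)
		if (inodes[i].nid == nid)
			return &inodes[i];
	return NULL;
}

int main(void)
{
	erofs_nid_t nid;
	const struct toy_inode *inode;

	if (!toy_namei("README", &nid) && (inode = toy_iget(nid)))
		printf("README -> nid %llu, mode %o\n",
		       (unsigned long long)nid, inode->mode);
	return 0;
}
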
D | internal.h |
     289  erofs_nid_t nid;  member
     334  (EROFS_I(inode)->nid << sbi->islotbits);  in erofs_iloc()
     470  static inline unsigned long erofs_inode_hash(erofs_nid_t nid)  in erofs_inode_hash() argument
     473  return (nid >> 32) ^ (nid & 0xffffffff);  in erofs_inode_hash()
     475  return nid;  in erofs_inode_hash()
     483  struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid);
     492  erofs_nid_t *nid, unsigned int *d_type);

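The internal.h hits show the two places erofs turns a nid into something else: erofs_iloc() computes the on-disk byte position of the inode (the nid shifted by the per-superblock inode slot bits, added to the metadata base), and erofs_inode_hash() folds the 64-bit nid into an unsigned long for the inode hash table on 32-bit builds. A self-contained sketch of both calculations; the block size, metadata block address, and slot bits below are assumptions chosen for illustration.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t erofs_nid_t;

/* assumed layout parameters, only for illustration */
#define BLKSIZE		4096u
#define META_BLKADDR	1u	/* block holding the first inode slot */
#define ISLOTBITS	5u	/* 32-byte inode slots */

/* byte offset of the on-disk inode identified by @nid */
static uint64_t inode_location(erofs_nid_t nid)
{
	return (uint64_t)META_BLKADDR * BLKSIZE + (nid << ISLOTBITS);
}

/* fold a 64-bit nid into an unsigned long for hashing on 32-bit hosts */
static unsigned long inode_hash(erofs_nid_t nid)
{
	if (sizeof(unsigned long) < sizeof(erofs_nid_t))
		return (unsigned long)((nid >> 32) ^ (nid & 0xffffffff));
	return (unsigned long)nid;
}

int main(void)
{
	erofs_nid_t nid = 0x100000002ULL;

	printf("nid %llu -> offset %llu, hash %lu\n",
	       (unsigned long long)nid,
	       (unsigned long long)inode_location(nid),
	       inode_hash(nid));
	return 0;
}
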
D | dir.c |
      50  EROFS_I(dir)->nid);  in erofs_fill_dentries()
      57  le64_to_cpu(de->nid), d_type))  in erofs_fill_dentries()
      85  i, EROFS_I(dir)->nid);  in erofs_readdir()
      93  nameoff, EROFS_I(dir)->nid);  in erofs_readdir()

D | zmap.c |
     329  vi->nid);  in z_erofs_extent_lookback()
     344  m->type, lcn, vi->nid);  in z_erofs_extent_lookback()
     351  vi->nid);  in z_erofs_extent_lookback()
     417  lcn, vi->nid);  in z_erofs_get_extent_compressedlen()
     427  lcn, vi->nid);  in z_erofs_get_extent_compressedlen()
     464  m->type, lcn, vi->nid);  in z_erofs_get_extent_decompressedlen()
     526  vi->nid);  in z_erofs_do_map_blocks()
     543  m.type, ofs, vi->nid);  in z_erofs_do_map_blocks()
     585  afmt, vi->nid);  in z_erofs_do_map_blocks()
     661  headnr + 1, vi->z_algorithmtype[headnr], vi->nid);  in z_erofs_fill_inode_lazy()
    [all …]

D | data.c |
     110  vi->nid);  in erofs_map_blocks_flatmode()
     117  vi->nid, inode->i_size, map->m_la);  in erofs_map_blocks_flatmode()

D | super.c |
     570  erofs_nid_t nid;  in erofs_get_parent() local
     574  err = erofs_namei(d_inode(child), &dotdot_name, &nid, &d_type);  in erofs_get_parent()
     577  return d_obtain_alias(erofs_iget(child->d_sb, nid));  in erofs_get_parent()

D | erofs_fs.h |
     410  __le64 nid;  /* node number */  member

D | xattr.c |
      56  vi->xattr_isize, vi->nid);  in init_inode_xattrs()
      61  erofs_err(sb, "bogus xattr ibody @ nid %llu", vi->nid);  in init_inode_xattrs()

D | zdata.c |
    1822  index, EROFS_I(inode)->nid);  in z_erofs_pcluster_readmore()
    1890  page->index, EROFS_I(inode)->nid);  in z_erofs_readahead()

/fs/proc/

D | task_mmu.c |
    1825  int nid;  in can_gather_numa_stats() local
    1837  nid = page_to_nid(page);  in can_gather_numa_stats()
    1838  if (!node_isset(nid, node_states[N_MEMORY]))  in can_gather_numa_stats()
    1850  int nid;  in can_gather_numa_stats_pmd() local
    1862  nid = page_to_nid(page);  in can_gather_numa_stats_pmd()
    1863  if (!node_isset(nid, node_states[N_MEMORY]))  in can_gather_numa_stats_pmd()
    1952  int nid;  in show_numa_map() local
    2009  for_each_node_state(nid, N_MEMORY)  in show_numa_map()
    2010  if (md->node[nid])  in show_numa_map()
    2011  seq_printf(m, " N%d=%lu", nid, md->node[nid]);  in show_numa_map()

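In task_mmu.c above, nid is a NUMA node id rather than a filesystem node id: each mapped page is attributed to its node with page_to_nid(), skipped unless that node is in node_states[N_MEMORY], and show_numa_map() then prints one " N<id>=<count>" field per node with a nonzero tally. A small user-space model of that tallying loop; the page-to-node array and the memory-node mask below are stand-ins for page_to_nid() and N_MEMORY.

#include <stdio.h>

#define MAX_NUMNODES 4

int main(void)
{
	/* stand-in for page_to_nid() over a VMA's pages */
	int page_node[] = { 0, 0, 1, 0, 3, 1, 0 };
	int has_memory[MAX_NUMNODES] = { 1, 1, 0, 1 };	/* models N_MEMORY */
	unsigned long node_pages[MAX_NUMNODES] = { 0 };
	size_t i;
	int nid;

	for (i = 0; i < sizeof(page_node) / sizeof(page_node[0]); i++) {
		nid = page_node[i];
		if (!has_memory[nid])	/* mirrors the node_isset() check */
			continue;
		node_pages[nid]++;
	}

	/* mirrors the for_each_node_state() printing loop in show_numa_map() */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (has_memory[nid] && node_pages[nid])
			printf(" N%d=%lu", nid, node_pages[nid]);
	printf("\n");
	return 0;
}
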
D | kcore.c |
     236  int nid, ret;  in kcore_ram_list() local
     242  for_each_node_state(nid, N_MEMORY) {  in kcore_ram_list()
     244  node_end = node_end_pfn(nid);  in kcore_ram_list()