Searched refs: NAT_ENTRY_PER_BLOCK (Results 1–6 of 6), sorted by relevance
48   blocks_for_nat = ALIGN(total_valid_blks_available, NAT_ENTRY_PER_BLOCK);  in get_new_sb()
264  new_max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;  in shrink_nats()
266  for (nid = nm_i->max_nid - 1; nid > new_max_nid; nid -= NAT_ENTRY_PER_BLOCK) {  in shrink_nats()
267  block_off = nid / NAT_ENTRY_PER_BLOCK;  in shrink_nats()
309  for (nid = nm_i->max_nid - 1; nid >= 0; nid -= NAT_ENTRY_PER_BLOCK) {  in migrate_nat()
310  block_off = nid / NAT_ENTRY_PER_BLOCK;  in migrate_nat()
334  new_max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;  in migrate_nat()
342  nid += NAT_ENTRY_PER_BLOCK) {  in migrate_nat()
343  block_off = nid / NAT_ENTRY_PER_BLOCK;  in migrate_nat()
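The shrink_nats() and migrate_nat() hits above all follow one pattern: step through the nid space in NAT-block-sized strides, converting each nid to the offset of the NAT block that holds it. A minimal standalone sketch of that stride; the 455 value assumes a 4096-byte block and 9-byte packed entry, and max_nid here is a hypothetical stand-in for nm_i->max_nid:

    #include <stdio.h>

    /* Assumed: 4096-byte block / 9-byte packed f2fs_nat_entry = 455. */
    #define NAT_ENTRY_PER_BLOCK 455

    int main(void)
    {
        long max_nid = 4 * NAT_ENTRY_PER_BLOCK;  /* hypothetical: 4 NAT blocks */

        /* Walk from the highest nid downward, one NAT block per stride,
         * mirroring the shrink_nats()/migrate_nat() loops above. A signed
         * nid keeps the `>= 0` termination test well defined. */
        for (long nid = max_nid - 1; nid >= 0; nid -= NAT_ENTRY_PER_BLOCK) {
            long block_off = nid / NAT_ENTRY_PER_BLOCK;
            printf("visit NAT block %ld (covers nids %ld..%ld)\n",
                   block_off, block_off * NAT_ENTRY_PER_BLOCK, nid);
        }
        return 0;
    }

Stepping by NAT_ENTRY_PER_BLOCK visits each NAT block exactly once, which is why both resize paths iterate on nids rather than on block numbers directly.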
491   sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;  in init_sb_info()
738   if (!(nid % NAT_ENTRY_PER_BLOCK)) {  in f2fs_init_nid_bitmap()
746   if (nat_block.entries[nid % NAT_ENTRY_PER_BLOCK].block_addr)  in f2fs_init_nid_bitmap()
820   for (j = 0; j < NAT_ENTRY_PER_BLOCK; j++) {  in write_nat_bits()
827   else if (valid == NAT_ENTRY_PER_BLOCK)  in write_nat_bits()
860   nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;  in init_node_manager()
1330  entry_off = nid % NAT_ENTRY_PER_BLOCK;  in get_nat_entry()
1402  entry_off = nid % NAT_ENTRY_PER_BLOCK;  in update_nat_blkaddr()
1634  entry_off = nid % NAT_ENTRY_PER_BLOCK;  in flush_nat_journal_entries()
1855  entry_off = nid % NAT_ENTRY_PER_BLOCK;  in nullify_nat_entry()
[all …]
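These mount-path hits all reduce a nid to a slot inside a single NAT block: f2fs_init_nid_bitmap() reads a fresh block whenever nid % NAT_ENTRY_PER_BLOCK wraps to zero, and get_nat_entry(), update_nat_blkaddr(), flush_nat_journal_entries(), and nullify_nat_entry() index entries with the same remainder. A hedged sketch of that arithmetic (the 455 figure again assumes a 4 KiB block and 9-byte packed entry):

    #include <stdio.h>

    #define NAT_ENTRY_PER_BLOCK 455  /* assumed: 4096 / 9-byte packed entry */

    int main(void)
    {
        for (unsigned int nid = 453; nid <= 457; nid++) {
            unsigned int block_off = nid / NAT_ENTRY_PER_BLOCK; /* which NAT block */
            unsigned int entry_off = nid % NAT_ENTRY_PER_BLOCK; /* slot inside it  */

            /* First nid of a block: time to read the next NAT block,
             * as in the f2fs_init_nid_bitmap() hit above. */
            if (!(nid % NAT_ENTRY_PER_BLOCK))
                printf("nid %u: load NAT block %u\n", nid, block_off);

            printf("nid %u -> entries[%u] of block %u\n", nid, entry_off, block_off);
        }
        return 0;
    }

Running this across the 454-to-455 boundary shows the remainder wrap to zero exactly where a new NAT block would be loaded.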
65  nid = block_off * NAT_ENTRY_PER_BLOCK;  in nat_dump()
66  for (i = 0; i < NAT_ENTRY_PER_BLOCK; i++) {  in nat_dump()
361  return (nid <= (NAT_ENTRY_PER_BLOCK *  in IS_VALID_NID()
696  #define NAT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_nat_entry))  macro
697  #define NAT_BLOCK_OFFSET(start_nid) (start_nid / NAT_ENTRY_PER_BLOCK)
706  struct f2fs_nat_entry entries[NAT_ENTRY_PER_BLOCK];
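These three lines are the heart of the result set: the macro itself, the nid-to-block-offset helper, and the on-disk NAT block that packs as many f2fs_nat_entry records as fit in one page. A self-contained sketch of the arithmetic, assuming PAGE_CACHE_SIZE is 4096 and the packed 9-byte entry layout (a version byte plus two little-endian 32-bit fields) used by f2fs:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_CACHE_SIZE 4096  /* assumed 4 KiB block size */

    /* On-disk NAT entry; __le32 fields are simplified to uint32_t for
     * this host-endian sketch. Packing makes the entry exactly 9 bytes. */
    struct f2fs_nat_entry {
        uint8_t  version;
        uint32_t ino;
        uint32_t block_addr;
    } __attribute__((packed));

    #define NAT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_nat_entry))
    #define NAT_BLOCK_OFFSET(start_nid) ((start_nid) / NAT_ENTRY_PER_BLOCK)

    int main(void)
    {
        printf("entry size:        %zu bytes\n", sizeof(struct f2fs_nat_entry));
        printf("entries per block: %zu\n", (size_t)NAT_ENTRY_PER_BLOCK);
        printf("nid 1000 -> block: %zu\n", (size_t)NAT_BLOCK_OFFSET(1000));
        return 0;
    }

With those assumed sizes the division yields 455 entries per block (455 * 9 = 4095), leaving one byte of each 4 KiB NAT block unused.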
261  NAT_ENTRY_PER_BLOCK);  in f2fs_prepare_super_block()