
Lines matching refs: nm_i in fs/f2fs/node.c

23 #define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nm_i)->build_lock)
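
Note that the macro body dereferences nm_i rather than its nmi parameter, so it expands correctly only because every call site has a local named nm_i in scope (which is also why this line shows up in an nm_i cross-reference at all). A minimal userspace sketch of the same "is a build in flight?" probe, with the parameter actually used and pthreads standing in for the kernel mutex (hypothetical names throughout):

#include <pthread.h>
#include <stdbool.h>

struct nm_info {
        pthread_mutex_t build_lock;    /* serializes free-nid building */
};

/* pthreads has no mutex_is_locked(), so probe with trylock instead. */
static bool on_build_free_nids(struct nm_info *nmi)
{
        if (pthread_mutex_trylock(&nmi->build_lock) == 0) {
                pthread_mutex_unlock(&nmi->build_lock);
                return false;          /* lock was free */
        }
        return true;                   /* someone is building */
}
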
47 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_available_free_memory() local
54 if (!nm_i) in f2fs_available_free_memory()
66 mem_size = (nm_i->nid_cnt[FREE_NID] * in f2fs_available_free_memory()
68 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); in f2fs_available_free_memory()
70 mem_size = (nm_i->nat_cnt[TOTAL_NAT] * in f2fs_available_free_memory()
72 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); in f2fs_available_free_memory()
79 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); in f2fs_available_free_memory()
87 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); in f2fs_available_free_memory()
97 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); in f2fs_available_free_memory()
101 res = mem_size < (avail_ram * nm_i->ram_thresh / 100); in f2fs_available_free_memory()
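
Every res computation above follows one pattern: estimate a cache's footprint in pages, then compare it against available RAM scaled by ram_thresh (a percentage), shifted right to allow a half or a quarter of that budget. A standalone sketch of the FREE_NID branch's arithmetic, with made-up geometry and an illustrative 10% threshold:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* 4 KiB pages */

/* FREE_NID branch: the free-nid cache may occupy at most a quarter of
 * (avail_ram * ram_thresh / 100) pages. */
static bool free_nid_cache_fits(unsigned long nid_cnt, size_t entry_size,
                                unsigned long avail_ram_pages,
                                unsigned int ram_thresh)
{
        unsigned long mem_pages = (nid_cnt * entry_size) >> PAGE_SHIFT;

        return mem_pages < ((avail_ram_pages * ram_thresh / 100) >> 2);
}

int main(void)
{
        /* 100k cached free nids of ~32 bytes each vs. 1 GiB of RAM
         * counted in 4 KiB pages */
        printf("%d\n", free_nid_cache_fits(100000, 32, 262144, 10));
        return 0;
}
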
145 struct f2fs_nm_info *nm_i = NM_I(sbi); in get_next_nat_page() local
162 set_to_next_nat(nm_i, nid); in get_next_nat_page()
187 static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i, in __init_nat_entry() argument
191 f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne); in __init_nat_entry()
192 else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne)) in __init_nat_entry()
198 spin_lock(&nm_i->nat_list_lock); in __init_nat_entry()
199 list_add_tail(&ne->list, &nm_i->nat_entries); in __init_nat_entry()
200 spin_unlock(&nm_i->nat_list_lock); in __init_nat_entry()
202 nm_i->nat_cnt[TOTAL_NAT]++; in __init_nat_entry()
203 nm_i->nat_cnt[RECLAIMABLE_NAT]++; in __init_nat_entry()
207 static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n) in __lookup_nat_cache() argument
211 ne = radix_tree_lookup(&nm_i->nat_root, n); in __lookup_nat_cache()
215 spin_lock(&nm_i->nat_list_lock); in __lookup_nat_cache()
217 list_move_tail(&ne->list, &nm_i->nat_entries); in __lookup_nat_cache()
218 spin_unlock(&nm_i->nat_list_lock); in __lookup_nat_cache()
224 static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i, in __gang_lookup_nat_cache() argument
227 return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr); in __gang_lookup_nat_cache()
230 static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e) in __del_from_nat_cache() argument
232 radix_tree_delete(&nm_i->nat_root, nat_get_nid(e)); in __del_from_nat_cache()
233 nm_i->nat_cnt[TOTAL_NAT]--; in __del_from_nat_cache()
234 nm_i->nat_cnt[RECLAIMABLE_NAT]--; in __del_from_nat_cache()
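
Taken together, __init_nat_entry, __lookup_nat_cache, and __del_from_nat_cache implement a classic cache: entries are indexed in a radix tree for lookup, linked on nat_entries in recency order for eviction, and counted in nat_cnt[] for the shrinker. A simplified userspace sketch of the same shape, substituting a linear list walk for the radix tree and omitting nat_list_lock:

#include <stddef.h>

struct nat_entry {
        unsigned int nid;
        struct nat_entry *prev, *next;    /* LRU list: tail = most recent */
};

struct nat_cache {
        struct nat_entry head;            /* list sentinel */
        unsigned int total, reclaimable;  /* nat_cnt[] counterparts */
};

static void cache_init(struct nat_cache *c)
{
        c->head.prev = c->head.next = &c->head;
        c->total = c->reclaimable = 0;
}

static void list_add_tail_(struct nat_entry *e, struct nat_entry *h)
{
        e->prev = h->prev;
        e->next = h;
        h->prev->next = e;
        h->prev = e;
}

static void list_del_(struct nat_entry *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
}

/* __init_nat_entry analogue: index + append at the recent end. */
static void cache_insert(struct nat_cache *c, struct nat_entry *e)
{
        list_add_tail_(e, &c->head);
        c->total++;
        c->reclaimable++;
}

/* __lookup_nat_cache analogue: a hit is re-queued at the tail so the
 * shrinker, which eats from the head, sees cold entries first. */
static struct nat_entry *cache_lookup(struct nat_cache *c, unsigned int nid)
{
        for (struct nat_entry *e = c->head.next; e != &c->head; e = e->next) {
                if (e->nid == nid) {
                        list_del_(e);
                        list_add_tail_(e, &c->head);
                        return e;
                }
        }
        return NULL;
}
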
238 static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i, in __grab_nat_entry_set() argument
244 head = radix_tree_lookup(&nm_i->nat_set_root, set); in __grab_nat_entry_set()
253 f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head); in __grab_nat_entry_set()
258 static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i, in __set_nat_cache_dirty() argument
265 head = __grab_nat_entry_set(nm_i, ne); in __set_nat_cache_dirty()
281 nm_i->nat_cnt[DIRTY_NAT]++; in __set_nat_cache_dirty()
282 nm_i->nat_cnt[RECLAIMABLE_NAT]--; in __set_nat_cache_dirty()
285 spin_lock(&nm_i->nat_list_lock); in __set_nat_cache_dirty()
290 spin_unlock(&nm_i->nat_list_lock); in __set_nat_cache_dirty()
293 static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i, in __clear_nat_cache_dirty() argument
296 spin_lock(&nm_i->nat_list_lock); in __clear_nat_cache_dirty()
297 list_move_tail(&ne->list, &nm_i->nat_entries); in __clear_nat_cache_dirty()
298 spin_unlock(&nm_i->nat_list_lock); in __clear_nat_cache_dirty()
302 nm_i->nat_cnt[DIRTY_NAT]--; in __clear_nat_cache_dirty()
303 nm_i->nat_cnt[RECLAIMABLE_NAT]++; in __clear_nat_cache_dirty()
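
The dirty/clean transitions only ever flip one counter pair: marking an entry dirty pins it (DIRTY_NAT++, RECLAIMABLE_NAT--) and parks it on a per-set list, while clearing returns it to the reclaimable LRU. A minimal sketch of that bookkeeping:

enum { TOTAL_NAT, RECLAIMABLE_NAT, DIRTY_NAT, NR_CNT };

/* Dirty entries are pinned: they leave the reclaimable pool until the
 * next checkpoint writes them back. */
static void mark_dirty(unsigned int cnt[NR_CNT])
{
        cnt[DIRTY_NAT]++;
        cnt[RECLAIMABLE_NAT]--;
}

static void mark_clean(unsigned int cnt[NR_CNT])
{
        cnt[DIRTY_NAT]--;
        cnt[RECLAIMABLE_NAT]++;
}
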
306 static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i, in __gang_lookup_nat_set() argument
309 return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep, in __gang_lookup_nat_set()
382 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_need_dentry_mark() local
386 f2fs_down_read(&nm_i->nat_tree_lock); in f2fs_need_dentry_mark()
387 e = __lookup_nat_cache(nm_i, nid); in f2fs_need_dentry_mark()
393 f2fs_up_read(&nm_i->nat_tree_lock); in f2fs_need_dentry_mark()
399 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_is_checkpointed_node() local
403 f2fs_down_read(&nm_i->nat_tree_lock); in f2fs_is_checkpointed_node()
404 e = __lookup_nat_cache(nm_i, nid); in f2fs_is_checkpointed_node()
407 f2fs_up_read(&nm_i->nat_tree_lock); in f2fs_is_checkpointed_node()
413 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_need_inode_block_update() local
417 f2fs_down_read(&nm_i->nat_tree_lock); in f2fs_need_inode_block_update()
418 e = __lookup_nat_cache(nm_i, ino); in f2fs_need_inode_block_update()
423 f2fs_up_read(&nm_i->nat_tree_lock); in f2fs_need_inode_block_update()
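
f2fs_need_dentry_mark, f2fs_is_checkpointed_node, and f2fs_need_inode_block_update all share one read-side shape: take nat_tree_lock shared, probe the cache, read a flag from the hit, release. A sketch with a pthread rwlock in place of the f2fs rwsem and a hypothetical lookup callback:

#include <pthread.h>
#include <stdbool.h>

struct nm {
        pthread_rwlock_t nat_tree_lock;   /* guards the NAT cache */
};

/* Shared-mode query: safe to run concurrently with other readers. */
static bool query_cached_flag(struct nm *nmi, unsigned int nid,
                              bool (*lookup_flag)(unsigned int))
{
        bool ret;

        pthread_rwlock_rdlock(&nmi->nat_tree_lock);
        ret = lookup_flag(nid);           /* e.g. "is nid checkpointed?" */
        pthread_rwlock_unlock(&nmi->nat_tree_lock);
        return ret;
}
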
431 struct f2fs_nm_info *nm_i = NM_I(sbi); in cache_nat_entry() local
442 f2fs_down_write(&nm_i->nat_tree_lock); in cache_nat_entry()
443 e = __lookup_nat_cache(nm_i, nid); in cache_nat_entry()
445 e = __init_nat_entry(nm_i, new, ne, false); in cache_nat_entry()
451 f2fs_up_write(&nm_i->nat_tree_lock); in cache_nat_entry()
459 struct f2fs_nm_info *nm_i = NM_I(sbi); in set_node_addr() local
463 f2fs_down_write(&nm_i->nat_tree_lock); in set_node_addr()
464 e = __lookup_nat_cache(nm_i, ni->nid); in set_node_addr()
466 e = __init_nat_entry(nm_i, new, NULL, true); in set_node_addr()
502 __set_nat_cache_dirty(nm_i, e); in set_node_addr()
506 e = __lookup_nat_cache(nm_i, ni->ino); in set_node_addr()
512 f2fs_up_write(&nm_i->nat_tree_lock); in set_node_addr()
517 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_try_to_free_nats() local
520 if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock)) in f2fs_try_to_free_nats()
523 spin_lock(&nm_i->nat_list_lock); in f2fs_try_to_free_nats()
527 if (list_empty(&nm_i->nat_entries)) in f2fs_try_to_free_nats()
530 ne = list_first_entry(&nm_i->nat_entries, in f2fs_try_to_free_nats()
533 spin_unlock(&nm_i->nat_list_lock); in f2fs_try_to_free_nats()
535 __del_from_nat_cache(nm_i, ne); in f2fs_try_to_free_nats()
538 spin_lock(&nm_i->nat_list_lock); in f2fs_try_to_free_nats()
540 spin_unlock(&nm_i->nat_list_lock); in f2fs_try_to_free_nats()
542 f2fs_up_write(&nm_i->nat_tree_lock); in f2fs_try_to_free_nats()
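
f2fs_try_to_free_nats is the matching shrinker: it bails out if the write lock is contended (trylock), then repeatedly detaches the oldest entry from the head of nat_entries under nat_list_lock and frees it via __del_from_nat_cache. A compressed sketch extending the nat_cache structures from the sketch after __del_from_nat_cache above:

#include <stdlib.h>        /* free() */

/* Evict up to nr_shrink of the oldest entries; returns how many went.
 * Reuses struct nat_cache and list_del_() from the sketch above; the
 * kernel version additionally holds nat_tree_lock for writing and
 * nat_list_lock around each list operation. */
static int cache_shrink(struct nat_cache *c, int nr_shrink)
{
        int freed = 0;

        while (nr_shrink > 0 && c->head.next != &c->head) {
                struct nat_entry *oldest = c->head.next;  /* head = LRU */

                list_del_(oldest);
                c->total--;
                c->reclaimable--;
                free(oldest);
                nr_shrink--;
                freed++;
        }
        return freed;
}
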
549 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_get_node_info() local
564 f2fs_down_read(&nm_i->nat_tree_lock); in f2fs_get_node_info()
565 e = __lookup_nat_cache(nm_i, nid); in f2fs_get_node_info()
570 f2fs_up_read(&nm_i->nat_tree_lock); in f2fs_get_node_info()
582 } else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) || in f2fs_get_node_info()
584 f2fs_up_read(&nm_i->nat_tree_lock); in f2fs_get_node_info()
595 f2fs_up_read(&nm_i->nat_tree_lock); in f2fs_get_node_info()
601 f2fs_up_read(&nm_i->nat_tree_lock); in f2fs_get_node_info()
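
Lines 582-601 show the interesting wrinkle in f2fs_get_node_info: on a cache miss it checks f2fs_rwsem_is_contended() and, if writers are queued, drops the shared lock early and pays a NAT-block read from disk rather than scanning the journal while holding writers off. pthreads has no contention probe, so this sketch approximates one with a shared waiter counter (an assumption, not the kernel mechanism):

#include <pthread.h>
#include <stdbool.h>

struct nm {
        pthread_rwlock_t nat_tree_lock;
        _Atomic int writers_waiting;     /* stand-in contention probe */
};

static bool get_info(struct nm *nmi, bool (*cache_hit)(void),
                     bool (*journal_hit)(void), bool (*read_disk)(void))
{
        bool found;

        pthread_rwlock_rdlock(&nmi->nat_tree_lock);
        if (cache_hit()) {                   /* fast path: NAT cache */
                pthread_rwlock_unlock(&nmi->nat_tree_lock);
                return true;
        }
        if (nmi->writers_waiting > 0) {      /* writers queued: get out */
                pthread_rwlock_unlock(&nmi->nat_tree_lock);
                return read_disk();          /* reread the NAT block */
        }
        found = journal_hit();               /* slow path under the lock */
        pthread_rwlock_unlock(&nmi->nat_tree_lock);
        return found ? true : read_disk();
}
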
2175 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i, in __lookup_free_nid_list() argument
2178 return radix_tree_lookup(&nm_i->free_nid_root, n); in __lookup_free_nid_list()
2184 struct f2fs_nm_info *nm_i = NM_I(sbi); in __insert_free_nid() local
2185 int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i); in __insert_free_nid()
2190 nm_i->nid_cnt[FREE_NID]++; in __insert_free_nid()
2191 list_add_tail(&i->list, &nm_i->free_nid_list); in __insert_free_nid()
2198 struct f2fs_nm_info *nm_i = NM_I(sbi); in __remove_free_nid() local
2201 nm_i->nid_cnt[state]--; in __remove_free_nid()
2204 radix_tree_delete(&nm_i->free_nid_root, i->nid); in __remove_free_nid()
2210 struct f2fs_nm_info *nm_i = NM_I(sbi); in __move_free_nid() local
2214 nm_i->nid_cnt[org_state]--; in __move_free_nid()
2215 nm_i->nid_cnt[dst_state]++; in __move_free_nid()
2222 list_add_tail(&i->list, &nm_i->free_nid_list); in __move_free_nid()
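
__insert_free_nid, __remove_free_nid, and __move_free_nid form a small state machine over nid_cnt[]: a nid is either FREE_NID (counted and on free_nid_list) or PREALLOC_NID (counted but off-list). A minimal sketch of the counter discipline, with hypothetical names:

#include <assert.h>

enum nid_state { FREE_NID, PREALLOC_NID, MAX_NID_STATE };

struct nid_counts {
        unsigned int cnt[MAX_NID_STATE];
};

/* Mirror of __move_free_nid's bookkeeping: only counters and list
 * membership change; the free_nid_root index keeps the entry either way. */
static void move_nid(struct nid_counts *nc,
                     enum nid_state org, enum nid_state dst)
{
        assert(nc->cnt[org] > 0);
        nc->cnt[org]--;
        nc->cnt[dst]++;
        /* FREE_NID entries also sit on free_nid_list; PREALLOC_NID
         * entries stay off-list until released or committed. */
}
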
2231 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_nat_bitmap_enabled() local
2235 f2fs_down_read(&nm_i->nat_tree_lock); in f2fs_nat_bitmap_enabled()
2236 for (i = 0; i < nm_i->nat_blocks; i++) { in f2fs_nat_bitmap_enabled()
2237 if (!test_bit_le(i, nm_i->nat_block_bitmap)) { in f2fs_nat_bitmap_enabled()
2242 f2fs_up_read(&nm_i->nat_tree_lock); in f2fs_nat_bitmap_enabled()
2250 struct f2fs_nm_info *nm_i = NM_I(sbi); in update_free_nid_bitmap() local
2254 if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap)) in update_free_nid_bitmap()
2258 if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs])) in update_free_nid_bitmap()
2260 __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); in update_free_nid_bitmap()
2261 nm_i->free_nid_count[nat_ofs]++; in update_free_nid_bitmap()
2263 if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs])) in update_free_nid_bitmap()
2265 __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); in update_free_nid_bitmap()
2267 nm_i->free_nid_count[nat_ofs]--; in update_free_nid_bitmap()
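
update_free_nid_bitmap keeps a second, per-NAT-block view of the same information: one bit per nid offset within its block, plus free_nid_count[] so scan_free_nid_bits can skip blocks with nothing free. A self-contained sketch of that two-level bookkeeping (455 entries per NAT block, i.e. a 4096-byte block over 9-byte f2fs_nat_entry records):

#include <stdbool.h>

#define NAT_ENTRY_PER_BLOCK 455   /* 4096-byte block / 9-byte entry */

struct blk_bits {
        unsigned char map[(NAT_ENTRY_PER_BLOCK + 7) / 8];
        unsigned short free_count;    /* mirrors free_nid_count[nat_ofs] */
};

static void set_nid_free(struct blk_bits *b, unsigned int nid_ofs,
                         bool is_free)
{
        unsigned char bit = 1u << (nid_ofs % 8);

        if (is_free) {
                if (b->map[nid_ofs / 8] & bit)
                        return;                /* already marked free */
                b->map[nid_ofs / 8] |= bit;
                b->free_count++;
        } else {
                if (!(b->map[nid_ofs / 8] & bit))
                        return;                /* already in use */
                b->map[nid_ofs / 8] &= ~bit;
                b->free_count--;
        }
}
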
2275 struct f2fs_nm_info *nm_i = NM_I(sbi); in add_free_nid() local
2294 spin_lock(&nm_i->nid_list_lock); in add_free_nid()
2318 ne = __lookup_nat_cache(nm_i, nid); in add_free_nid()
2323 e = __lookup_free_nid_list(nm_i, nid); in add_free_nid()
2336 nm_i->available_nids++; in add_free_nid()
2338 spin_unlock(&nm_i->nid_list_lock); in add_free_nid()
2348 struct f2fs_nm_info *nm_i = NM_I(sbi); in remove_free_nid() local
2352 spin_lock(&nm_i->nid_list_lock); in remove_free_nid()
2353 i = __lookup_free_nid_list(nm_i, nid); in remove_free_nid()
2358 spin_unlock(&nm_i->nid_list_lock); in remove_free_nid()
2367 struct f2fs_nm_info *nm_i = NM_I(sbi); in scan_nat_page() local
2373 __set_bit_le(nat_ofs, nm_i->nat_block_bitmap); in scan_nat_page()
2378 if (unlikely(start_nid >= nm_i->max_nid)) in scan_nat_page()
2421 struct f2fs_nm_info *nm_i = NM_I(sbi); in scan_free_nid_bits() local
2425 f2fs_down_read(&nm_i->nat_tree_lock); in scan_free_nid_bits()
2427 for (i = 0; i < nm_i->nat_blocks; i++) { in scan_free_nid_bits()
2428 if (!test_bit_le(i, nm_i->nat_block_bitmap)) in scan_free_nid_bits()
2430 if (!nm_i->free_nid_count[i]) in scan_free_nid_bits()
2433 idx = find_next_bit_le(nm_i->free_nid_bitmap[i], in scan_free_nid_bits()
2441 if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS) in scan_free_nid_bits()
2448 f2fs_up_read(&nm_i->nat_tree_lock); in scan_free_nid_bits()
2454 struct f2fs_nm_info *nm_i = NM_I(sbi); in __f2fs_build_free_nids() local
2456 nid_t nid = nm_i->next_scan_nid; in __f2fs_build_free_nids()
2458 if (unlikely(nid >= nm_i->max_nid)) in __f2fs_build_free_nids()
2465 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK) in __f2fs_build_free_nids()
2475 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK) in __f2fs_build_free_nids()
2483 f2fs_down_read(&nm_i->nat_tree_lock); in __f2fs_build_free_nids()
2487 nm_i->nat_block_bitmap)) { in __f2fs_build_free_nids()
2498 f2fs_up_read(&nm_i->nat_tree_lock); in __f2fs_build_free_nids()
2505 if (unlikely(nid >= nm_i->max_nid)) in __f2fs_build_free_nids()
2513 nm_i->next_scan_nid = nid; in __f2fs_build_free_nids()
2518 f2fs_up_read(&nm_i->nat_tree_lock); in __f2fs_build_free_nids()
2520 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid), in __f2fs_build_free_nids()
2521 nm_i->ra_nid_pages, META_NAT, false); in __f2fs_build_free_nids()
2544 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_alloc_nid() local
2550 spin_lock(&nm_i->nid_list_lock); in f2fs_alloc_nid()
2552 if (unlikely(nm_i->available_nids == 0)) { in f2fs_alloc_nid()
2553 spin_unlock(&nm_i->nid_list_lock); in f2fs_alloc_nid()
2558 if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) { in f2fs_alloc_nid()
2559 f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list)); in f2fs_alloc_nid()
2560 i = list_first_entry(&nm_i->free_nid_list, in f2fs_alloc_nid()
2565 nm_i->available_nids--; in f2fs_alloc_nid()
2569 spin_unlock(&nm_i->nid_list_lock); in f2fs_alloc_nid()
2572 spin_unlock(&nm_i->nid_list_lock); in f2fs_alloc_nid()
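
f2fs_alloc_nid pops the head of free_nid_list unless available_nids is exhausted or a background build holds build_lock (the on_f2fs_build_free_nids macro from line 23); on an empty list it drops the lock, builds more free nids, and retries. A sketch of that loop reusing struct nid_counts and move_nid() from the sketch above, with pop_first_free() and build_more() as hypothetical stand-ins:

#include <stdbool.h>

static bool alloc_nid(struct nid_counts *nc, unsigned int *available,
                      unsigned int (*pop_first_free)(void),
                      bool (*build_more)(void), unsigned int *nid)
{
retry:
        if (*available == 0)
                return false;                 /* nid space exhausted */

        if (nc->cnt[FREE_NID] > 0) {
                *nid = pop_first_free();      /* head of free_nid_list */
                move_nid(nc, FREE_NID, PREALLOC_NID);
                (*available)--;
                return true;
        }

        if (!build_more())                    /* scan NAT pages for nids */
                return false;
        goto retry;
}
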
2585 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_alloc_nid_done() local
2588 spin_lock(&nm_i->nid_list_lock); in f2fs_alloc_nid_done()
2589 i = __lookup_free_nid_list(nm_i, nid); in f2fs_alloc_nid_done()
2592 spin_unlock(&nm_i->nid_list_lock); in f2fs_alloc_nid_done()
2602 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_alloc_nid_failed() local
2609 spin_lock(&nm_i->nid_list_lock); in f2fs_alloc_nid_failed()
2610 i = __lookup_free_nid_list(nm_i, nid); in f2fs_alloc_nid_failed()
2620 nm_i->available_nids++; in f2fs_alloc_nid_failed()
2624 spin_unlock(&nm_i->nid_list_lock); in f2fs_alloc_nid_failed()
2632 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_try_to_free_nids() local
2635 if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS) in f2fs_try_to_free_nids()
2638 if (!mutex_trylock(&nm_i->build_lock)) in f2fs_try_to_free_nids()
2641 while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) { in f2fs_try_to_free_nids()
2645 spin_lock(&nm_i->nid_list_lock); in f2fs_try_to_free_nids()
2646 list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) { in f2fs_try_to_free_nids()
2648 nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS) in f2fs_try_to_free_nids()
2655 spin_unlock(&nm_i->nid_list_lock); in f2fs_try_to_free_nids()
2658 mutex_unlock(&nm_i->build_lock); in f2fs_try_to_free_nids()
2860 struct f2fs_nm_info *nm_i = NM_I(sbi); in remove_nats_in_journal() local
2876 ne = __lookup_nat_cache(nm_i, nid); in remove_nats_in_journal()
2879 __init_nat_entry(nm_i, ne, &raw_ne, true); in remove_nats_in_journal()
2889 spin_lock(&nm_i->nid_list_lock); in remove_nats_in_journal()
2890 nm_i->available_nids--; in remove_nats_in_journal()
2891 spin_unlock(&nm_i->nid_list_lock); in remove_nats_in_journal()
2894 __set_nat_cache_dirty(nm_i, ne); in remove_nats_in_journal()
2918 static void __update_nat_bits(struct f2fs_nm_info *nm_i, unsigned int nat_ofs, in __update_nat_bits() argument
2922 __set_bit_le(nat_ofs, nm_i->empty_nat_bits); in __update_nat_bits()
2923 __clear_bit_le(nat_ofs, nm_i->full_nat_bits); in __update_nat_bits()
2927 __clear_bit_le(nat_ofs, nm_i->empty_nat_bits); in __update_nat_bits()
2929 __set_bit_le(nat_ofs, nm_i->full_nat_bits); in __update_nat_bits()
2931 __clear_bit_le(nat_ofs, nm_i->full_nat_bits); in __update_nat_bits()
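
__update_nat_bits classifies each NAT block into exactly one of three states: empty (no valid entries), full (all entries valid), or neither, clearing both bits for partially used blocks so a later mount knows it still has to scan them. A direct sketch of that classification over flat byte arrays:

#define NAT_ENTRY_PER_BLOCK 455   /* 4096-byte block / 9-byte entry */

/* Classify one NAT block by its count of valid entries. */
static void update_block_bits(unsigned char *full, unsigned char *empty,
                              unsigned int ofs, unsigned int valid)
{
        unsigned char bit = 1u << (ofs % 8);

        if (valid == 0) {
                empty[ofs / 8] |= bit;
                full[ofs / 8] &= ~bit;
        } else {
                empty[ofs / 8] &= ~bit;
                if (valid == NAT_ENTRY_PER_BLOCK)
                        full[ofs / 8] |= bit;
                else
                        full[ofs / 8] &= ~bit;
        }
}
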
2937 struct f2fs_nm_info *nm_i = NM_I(sbi); in update_nat_bits() local
2955 __update_nat_bits(nm_i, nat_index, valid); in update_nat_bits()
2960 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_enable_nat_bits() local
2963 f2fs_down_read(&nm_i->nat_tree_lock); in f2fs_enable_nat_bits()
2965 for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) { in f2fs_enable_nat_bits()
2976 nm_i->free_nid_bitmap[nat_ofs])) in f2fs_enable_nat_bits()
2980 __update_nat_bits(nm_i, nat_ofs, valid); in f2fs_enable_nat_bits()
2983 f2fs_up_read(&nm_i->nat_tree_lock); in f2fs_enable_nat_bits()
3066 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_flush_nat_entries() local
3081 f2fs_down_write(&nm_i->nat_tree_lock); in f2fs_flush_nat_entries()
3083 f2fs_up_write(&nm_i->nat_tree_lock); in f2fs_flush_nat_entries()
3086 if (!nm_i->nat_cnt[DIRTY_NAT]) in f2fs_flush_nat_entries()
3089 f2fs_down_write(&nm_i->nat_tree_lock); in f2fs_flush_nat_entries()
3098 nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL)) in f2fs_flush_nat_entries()
3101 while ((found = __gang_lookup_nat_set(nm_i, in f2fs_flush_nat_entries()
3118 f2fs_up_write(&nm_i->nat_tree_lock); in f2fs_flush_nat_entries()
3127 struct f2fs_nm_info *nm_i = NM_I(sbi); in __get_nat_bitmaps() local
3128 unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE; in __get_nat_bitmaps()
3133 nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8); in __get_nat_bitmaps()
3134 nm_i->nat_bits = f2fs_kvzalloc(sbi, in __get_nat_bitmaps()
3135 nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL); in __get_nat_bitmaps()
3136 if (!nm_i->nat_bits) in __get_nat_bitmaps()
3139 nm_i->full_nat_bits = nm_i->nat_bits + 8; in __get_nat_bitmaps()
3140 nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes; in __get_nat_bitmaps()
3146 nm_i->nat_bits_blocks; in __get_nat_bitmaps()
3147 for (i = 0; i < nm_i->nat_bits_blocks; i++) { in __get_nat_bitmaps()
3154 memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS), in __get_nat_bitmaps()
3160 if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) { in __get_nat_bitmaps()
3163 cp_ver, le64_to_cpu(*(__le64 *)nm_i->nat_bits)); in __get_nat_bitmaps()
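
__get_nat_bitmaps reads a block-aligned region laid out as an 8-byte checkpoint version stamp, then the full bitmap, then the empty bitmap, each nat_blocks / 8 bytes; that layout is where nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8) and the full_nat_bits = nat_bits + 8 offset come from. The layout math, with an example geometry:

#include <stdio.h>

#define BLKSIZE 4096

int main(void)
{
        unsigned int nat_blocks = 8192;             /* example geometry */
        unsigned int nat_bits_bytes = nat_blocks / 8;
        unsigned int total = (nat_bits_bytes << 1) + 8;
        unsigned int nat_bits_blocks = (total + BLKSIZE - 1) / BLKSIZE;

        /* full bitmap starts right after the 8-byte version stamp,
         * empty bitmap right after the full one */
        printf("full@8 empty@%u size=%u blocks=%u\n",
               8 + nat_bits_bytes, total, nat_bits_blocks);
        return 0;
}
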
3173 struct f2fs_nm_info *nm_i = NM_I(sbi); in load_free_nid_bitmap() local
3180 for (i = 0; i < nm_i->nat_blocks; i++) { in load_free_nid_bitmap()
3181 i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i); in load_free_nid_bitmap()
3182 if (i >= nm_i->nat_blocks) in load_free_nid_bitmap()
3185 __set_bit_le(i, nm_i->nat_block_bitmap); in load_free_nid_bitmap()
3196 for (i = 0; i < nm_i->nat_blocks; i++) { in load_free_nid_bitmap()
3197 i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i); in load_free_nid_bitmap()
3198 if (i >= nm_i->nat_blocks) in load_free_nid_bitmap()
3201 __set_bit_le(i, nm_i->nat_block_bitmap); in load_free_nid_bitmap()
3208 struct f2fs_nm_info *nm_i = NM_I(sbi); in init_node_manager() local
3213 nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr); in init_node_manager()
3217 nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg); in init_node_manager()
3218 nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks; in init_node_manager()
3221 nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count - in init_node_manager()
3223 nm_i->nid_cnt[FREE_NID] = 0; in init_node_manager()
3224 nm_i->nid_cnt[PREALLOC_NID] = 0; in init_node_manager()
3225 nm_i->ram_thresh = DEF_RAM_THRESHOLD; in init_node_manager()
3226 nm_i->ra_nid_pages = DEF_RA_NID_PAGES; in init_node_manager()
3227 nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD; in init_node_manager()
3228 nm_i->max_rf_node_blocks = DEF_RF_NODE_BLOCKS; in init_node_manager()
3230 INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC); in init_node_manager()
3231 INIT_LIST_HEAD(&nm_i->free_nid_list); in init_node_manager()
3232 INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO); in init_node_manager()
3233 INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO); in init_node_manager()
3234 INIT_LIST_HEAD(&nm_i->nat_entries); in init_node_manager()
3235 spin_lock_init(&nm_i->nat_list_lock); in init_node_manager()
3237 mutex_init(&nm_i->build_lock); in init_node_manager()
3238 spin_lock_init(&nm_i->nid_list_lock); in init_node_manager()
3239 init_f2fs_rwsem(&nm_i->nat_tree_lock); in init_node_manager()
3241 nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid); in init_node_manager()
3242 nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP); in init_node_manager()
3244 nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size, in init_node_manager()
3246 if (!nm_i->nat_bitmap) in init_node_manager()
3254 nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size, in init_node_manager()
3256 if (!nm_i->nat_bitmap_mir) in init_node_manager()
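
init_node_manager derives its geometry from the superblock: the segments devoted to the NAT give nat_blocks, each NAT block indexes NAT_ENTRY_PER_BLOCK nids, and available_nids subtracts already-valid nodes and the reserved range from max_nid. A worked example of the arithmetic (segment count, block sizes, and node counts are illustrative):

#include <stdio.h>

int main(void)
{
        unsigned int nat_segs = 4;              /* illustrative */
        unsigned int log_blocks_per_seg = 9;    /* 512 blocks per segment */
        unsigned int entries_per_block = 455;   /* 4096 / 9-byte entry */
        unsigned int valid_nodes = 1000, reserved = 8;

        unsigned int nat_blocks = nat_segs << log_blocks_per_seg;
        unsigned int max_nid = entries_per_block * nat_blocks;
        unsigned int available = max_nid - valid_nodes - reserved;

        printf("nat_blocks=%u max_nid=%u available_nids=%u\n",
               nat_blocks, max_nid, available);
        return 0;
}
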
3265 struct f2fs_nm_info *nm_i = NM_I(sbi); in init_free_nid_cache() local
3268 nm_i->free_nid_bitmap = in init_free_nid_cache()
3270 nm_i->nat_blocks), in init_free_nid_cache()
3272 if (!nm_i->free_nid_bitmap) in init_free_nid_cache()
3275 for (i = 0; i < nm_i->nat_blocks; i++) { in init_free_nid_cache()
3276 nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi, in init_free_nid_cache()
3278 if (!nm_i->free_nid_bitmap[i]) in init_free_nid_cache()
3282 nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8, in init_free_nid_cache()
3284 if (!nm_i->nat_block_bitmap) in init_free_nid_cache()
3287 nm_i->free_nid_count = in init_free_nid_cache()
3289 nm_i->nat_blocks), in init_free_nid_cache()
3291 if (!nm_i->free_nid_count) in init_free_nid_cache()
3321 struct f2fs_nm_info *nm_i = NM_I(sbi); in f2fs_destroy_node_manager() local
3328 if (!nm_i) in f2fs_destroy_node_manager()
3332 spin_lock(&nm_i->nid_list_lock); in f2fs_destroy_node_manager()
3333 list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) { in f2fs_destroy_node_manager()
3335 spin_unlock(&nm_i->nid_list_lock); in f2fs_destroy_node_manager()
3337 spin_lock(&nm_i->nid_list_lock); in f2fs_destroy_node_manager()
3339 f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]); in f2fs_destroy_node_manager()
3340 f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]); in f2fs_destroy_node_manager()
3341 f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list)); in f2fs_destroy_node_manager()
3342 spin_unlock(&nm_i->nid_list_lock); in f2fs_destroy_node_manager()
3345 f2fs_down_write(&nm_i->nat_tree_lock); in f2fs_destroy_node_manager()
3346 while ((found = __gang_lookup_nat_cache(nm_i, in f2fs_destroy_node_manager()
3352 spin_lock(&nm_i->nat_list_lock); in f2fs_destroy_node_manager()
3354 spin_unlock(&nm_i->nat_list_lock); in f2fs_destroy_node_manager()
3356 __del_from_nat_cache(nm_i, natvec[idx]); in f2fs_destroy_node_manager()
3359 f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]); in f2fs_destroy_node_manager()
3363 while ((found = __gang_lookup_nat_set(nm_i, in f2fs_destroy_node_manager()
3371 radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set); in f2fs_destroy_node_manager()
3375 f2fs_up_write(&nm_i->nat_tree_lock); in f2fs_destroy_node_manager()
3377 kvfree(nm_i->nat_block_bitmap); in f2fs_destroy_node_manager()
3378 if (nm_i->free_nid_bitmap) { in f2fs_destroy_node_manager()
3381 for (i = 0; i < nm_i->nat_blocks; i++) in f2fs_destroy_node_manager()
3382 kvfree(nm_i->free_nid_bitmap[i]); in f2fs_destroy_node_manager()
3383 kvfree(nm_i->free_nid_bitmap); in f2fs_destroy_node_manager()
3385 kvfree(nm_i->free_nid_count); in f2fs_destroy_node_manager()
3387 kvfree(nm_i->nat_bitmap); in f2fs_destroy_node_manager()
3388 kvfree(nm_i->nat_bits); in f2fs_destroy_node_manager()
3390 kvfree(nm_i->nat_bitmap_mir); in f2fs_destroy_node_manager()
3393 kfree(nm_i); in f2fs_destroy_node_manager()