
Lines matching refs:sbi (fs/f2fs/gc.c)

28 struct f2fs_sb_info *sbi = data; in gc_thread_func() local
29 struct f2fs_gc_kthread *gc_th = sbi->gc_thread; in gc_thread_func()
30 wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head; in gc_thread_func()
51 if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) { in gc_thread_func()
57 if (time_to_inject(sbi, FAULT_CHECKPOINT)) { in gc_thread_func()
59 f2fs_stop_checkpoint(sbi, false); in gc_thread_func()
63 if (!sb_start_write_trylock(sbi->sb)) in gc_thread_func()
81 mutex_lock(&sbi->gc_mutex); in gc_thread_func()
85 if (!mutex_trylock(&sbi->gc_mutex)) in gc_thread_func()
88 if (!is_idle(sbi)) { in gc_thread_func()
90 mutex_unlock(&sbi->gc_mutex); in gc_thread_func()
94 if (has_enough_invalid_blocks(sbi)) in gc_thread_func()
99 stat_inc_bggc_count(sbi); in gc_thread_func()
102 if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO)) in gc_thread_func()
105 trace_f2fs_background_gc(sbi->sb, wait_ms, in gc_thread_func()
106 prefree_segments(sbi), free_segments(sbi)); in gc_thread_func()
109 f2fs_balance_fs_bg(sbi); in gc_thread_func()
111 sb_end_write(sbi->sb); in gc_thread_func()
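
The lines above trace gc_thread_func(), the background GC kthread: it sleeps on gc_wait_queue_head, skips a round while the superblock is write-frozen or the filesystem is not idle, takes gc_mutex (only a trylock in the normal background case), and stretches or shrinks its sleep interval depending on whether has_enough_invalid_blocks() says cheap cleaning is available. A minimal userspace sketch of that control flow, with the adaptive sleep reduced to doubling/halving and the kernel predicates replaced by hypothetical stubs:

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Hypothetical stand-ins for the predicates gc_thread_func() consults. */
    static bool fs_frozen(void)                 { return false; }
    static bool fs_idle(void)                   { return true;  }
    static bool has_enough_invalid_blocks(void) { return true;  }
    static bool run_gc(void)                    { return false; } /* false: progress made */

    int main(void)
    {
        unsigned int wait_ms = 100;   /* scaled down for the demo */
        int rounds = 3;               /* the real kthread loops until kthread_stop() */

        while (rounds--) {
            usleep(wait_ms * 1000);   /* models waiting on gc_wait_queue_head */

            if (fs_frozen())          /* models the SB_FREEZE_WRITE check */
                continue;

            if (!fs_idle()) {         /* models is_idle(): back off and retry */
                wait_ms *= 2;
                continue;
            }

            /* models has_enough_invalid_blocks(): cheap GC -> sleep less */
            wait_ms = has_enough_invalid_blocks() ? wait_ms / 2 : wait_ms * 2;

            if (run_gc())             /* models f2fs_gc(): no progress -> sleep more */
                wait_ms *= 2;

            printf("next background GC round in %u ms\n", wait_ms);
        }
        return 0;
    }
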
117 int start_gc_thread(struct f2fs_sb_info *sbi) in start_gc_thread() argument
120 dev_t dev = sbi->sb->s_bdev->bd_dev; in start_gc_thread()
123 gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL); in start_gc_thread()
138 sbi->gc_thread = gc_th; in start_gc_thread()
139 init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head); in start_gc_thread()
140 sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi, in start_gc_thread()
145 sbi->gc_thread = NULL; in start_gc_thread()
151 void stop_gc_thread(struct f2fs_sb_info *sbi) in stop_gc_thread() argument
153 struct f2fs_gc_kthread *gc_th = sbi->gc_thread; in stop_gc_thread()
158 sbi->gc_thread = NULL; in stop_gc_thread()
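
start_gc_thread() allocates the f2fs_gc_kthread state with f2fs_kmalloc(), publishes it through sbi->gc_thread, initializes the wait queue, and launches the kthread with kthread_run(), resetting sbi->gc_thread to NULL when thread creation fails; stop_gc_thread() reverses this so later code can test the pointer for NULL. A rough pthread analogue of that lifecycle (the names mirror the listing, but this is not the kernel API):

    /* build: cc -pthread lifecycle.c */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct gc_kthread {
        pthread_t task;         /* plays the role of f2fs_gc_task */
        volatile bool stop;     /* kthread_stop() analogue */
    };

    static struct gc_kthread *gc_thread;   /* plays the role of sbi->gc_thread */

    static void *gc_thread_func(void *data)
    {
        struct gc_kthread *gc_th = data;
        while (!gc_th->stop)
            ;   /* background GC work would go here */
        return NULL;
    }

    static int start_gc_thread(void)
    {
        struct gc_kthread *gc_th = malloc(sizeof(*gc_th));
        if (!gc_th)
            return -1;                      /* -ENOMEM in the kernel version */
        gc_th->stop = false;
        gc_thread = gc_th;                  /* publish before the thread runs */
        if (pthread_create(&gc_th->task, NULL, gc_thread_func, gc_th)) {
            free(gc_th);
            gc_thread = NULL;               /* mirrors sbi->gc_thread = NULL on failure */
            return -1;
        }
        return 0;
    }

    static void stop_gc_thread(void)
    {
        struct gc_kthread *gc_th = gc_thread;
        if (!gc_th)
            return;
        gc_th->stop = true;                 /* kthread_stop() analogue */
        pthread_join(gc_th->task, NULL);
        free(gc_th);
        gc_thread = NULL;
    }

    int main(void)
    {
        if (start_gc_thread() == 0) {
            stop_gc_thread();
            puts("gc thread started and stopped");
        }
        return 0;
    }
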
179 static void select_policy(struct f2fs_sb_info *sbi, int gc_type, in select_policy() argument
182 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in select_policy()
190 p->gc_mode = select_gc_type(sbi->gc_thread, gc_type); in select_policy()
193 p->ofs_unit = sbi->segs_per_sec; in select_policy()
198 (sbi->gc_thread && !sbi->gc_thread->gc_urgent) && in select_policy()
199 p->max_search > sbi->max_victim_search) in select_policy()
200 p->max_search = sbi->max_victim_search; in select_policy()
203 if (test_opt(sbi, NOHEAP) && in select_policy()
207 p->offset = SIT_I(sbi)->last_victim[p->gc_mode]; in select_policy()
210 static unsigned int get_max_cost(struct f2fs_sb_info *sbi, in get_max_cost() argument
215 return sbi->blocks_per_seg; in get_max_cost()
217 return 2 * sbi->blocks_per_seg * p->ofs_unit; in get_max_cost()
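
select_policy() picks the scoring mode: foreground GC always goes greedy (fewest valid blocks wins), while background GC asks select_gc_type() and normally gets cost-benefit; ofs_unit is set to segs_per_sec so multi-segment sections are chosen whole, and max_search is capped at sbi->max_victim_search unless the thread is marked gc_urgent. get_max_cost() then supplies the starting min_cost; the listing shows only the two sbi-touching branches (blocks_per_seg for SSR allocation, 2 * blocks_per_seg * ofs_unit for greedy), and I am assuming the elided cost-benefit branch tops out at UINT_MAX. A condensed model of both helpers, with simplified types:

    #include <limits.h>
    #include <stdio.h>

    enum { GC_CB, GC_GREEDY };   /* cost-benefit vs. greedy scoring */
    enum { BG_GC, FG_GC };
    enum { LFS, SSR };           /* allocation modes */

    struct policy {
        int alloc_mode;
        int gc_mode;
        unsigned int ofs_unit;   /* segments per selection unit (a section) */
        unsigned int max_search;
    };

    static void select_policy(int gc_type, unsigned int segs_per_sec,
                              unsigned int max_victim_search, struct policy *p)
    {
        /* FG_GC must free space now, so greedy; BG_GC can afford cost-benefit */
        p->gc_mode = (gc_type == FG_GC) ? GC_GREEDY : GC_CB;
        p->ofs_unit = segs_per_sec;
        if (p->max_search > max_victim_search)  /* bound the scan when not urgent */
            p->max_search = max_victim_search;
    }

    static unsigned int get_max_cost(unsigned int blocks_per_seg,
                                     const struct policy *p)
    {
        if (p->alloc_mode == SSR)               /* SSR allocates one segment */
            return blocks_per_seg;
        if (p->gc_mode == GC_GREEDY)
            return 2 * blocks_per_seg * p->ofs_unit;
        return UINT_MAX;                        /* GC_CB: any cost can win */
    }

    int main(void)
    {
        struct policy p = { .alloc_mode = LFS, .max_search = 4096 };
        select_policy(FG_GC, 1, 20, &p);
        printf("FG_GC: mode %s, max_search %u, max_cost %u\n",
               p.gc_mode == GC_GREEDY ? "greedy" : "cb",
               p.max_search, get_max_cost(512, &p));   /* greedy, 20, 1024 */
        return 0;
    }
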
224 static unsigned int check_bg_victims(struct f2fs_sb_info *sbi) in check_bg_victims() argument
226 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in check_bg_victims()
234 for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) { in check_bg_victims()
235 if (sec_usage_check(sbi, secno)) in check_bg_victims()
238 if (no_fggc_candidate(sbi, secno)) in check_bg_victims()
242 return GET_SEG_FROM_SEC(sbi, secno); in check_bg_victims()
247 static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno) in get_cb_cost() argument
249 struct sit_info *sit_i = SIT_I(sbi); in get_cb_cost()
250 unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); in get_cb_cost()
251 unsigned int start = GET_SEG_FROM_SEC(sbi, secno); in get_cb_cost()
258 for (i = 0; i < sbi->segs_per_sec; i++) in get_cb_cost()
259 mtime += get_seg_entry(sbi, start + i)->mtime; in get_cb_cost()
260 vblocks = get_valid_blocks(sbi, segno, true); in get_cb_cost()
262 mtime = div_u64(mtime, sbi->segs_per_sec); in get_cb_cost()
263 vblocks = div_u64(vblocks, sbi->segs_per_sec); in get_cb_cost()
265 u = (vblocks * 100) >> sbi->log_blocks_per_seg; in get_cb_cost()
279 static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi, in get_gc_cost() argument
283 return get_seg_entry(sbi, segno)->ckpt_valid_blocks; in get_gc_cost()
287 return get_valid_blocks(sbi, segno, true); in get_gc_cost()
289 return get_cb_cost(sbi, segno); in get_gc_cost()
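
get_cb_cost() implements the classic cost-benefit score: it averages mtime and valid-block counts across the section, converts valid blocks to a utilization percentage u (the listing's (vblocks * 100) >> log_blocks_per_seg is just vblocks * 100 / blocks_per_seg), derives a 0..100 age from where the section's mtime falls between the SIT's min and max mtimes, and returns UINT_MAX - (100 * (100 - u) * age) / (100 + u), so old, mostly-invalid sections get the smallest cost. get_gc_cost() simply routes there for GC_CB, to the raw valid-block count for greedy, and to ckpt_valid_blocks when selecting for SSR. A self-contained rendering of the formula with a worked comparison; the age normalization is my reconstruction of the lines the listing omits:

    #include <limits.h>
    #include <stdio.h>

    /*
     * Cost-benefit score: u is utilization in percent; age is 100 for the
     * coldest section and 0 for the hottest. Smaller result == better victim.
     */
    static unsigned int cb_cost(unsigned long long mtime,
                                unsigned long long min_mtime,
                                unsigned long long max_mtime,
                                unsigned int vblocks,
                                unsigned int blocks_per_seg)
    {
        unsigned int u = vblocks * 100 / blocks_per_seg;
        unsigned int age = 0;

        if (max_mtime != min_mtime)
            age = 100 - (unsigned int)(100 * (mtime - min_mtime) /
                                       (max_mtime - min_mtime));

        return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
    }

    int main(void)
    {
        /* old, 25%-utilized segment vs. young, 75%-utilized segment */
        unsigned int old_sparse = cb_cost(1000, 1000, 2000, 128, 512);
        unsigned int young_full = cb_cost(1900, 1000, 2000, 384, 512);

        printf("old/sparse : cost %u\n", old_sparse);  /* UINT_MAX - 6000 */
        printf("young/full : cost %u\n", young_full);  /* UINT_MAX - 142  */
        /* the old, sparsely valid segment scores lower, so it is cleaned first */
        return 0;
    }
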
312 static int get_victim_by_default(struct f2fs_sb_info *sbi, in get_victim_by_default() argument
315 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); in get_victim_by_default()
316 struct sit_info *sm = SIT_I(sbi); in get_victim_by_default()
319 unsigned int last_segment = MAIN_SEGS(sbi); in get_victim_by_default()
325 select_policy(sbi, gc_type, type, &p); in get_victim_by_default()
328 p.min_cost = get_max_cost(sbi, &p); in get_victim_by_default()
331 if (IS_DATASEG(get_seg_entry(sbi, *result)->type) && in get_victim_by_default()
332 get_valid_blocks(sbi, *result, false) && in get_victim_by_default()
333 !sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result))) in get_victim_by_default()
343 p.min_segno = check_bg_victims(sbi); in get_victim_by_default()
374 secno = GET_SEC_FROM_SEG(sbi, segno); in get_victim_by_default()
376 if (sec_usage_check(sbi, secno)) in get_victim_by_default()
381 no_fggc_candidate(sbi, secno)) in get_victim_by_default()
384 cost = get_gc_cost(sbi, segno, &p); in get_victim_by_default()
396 sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi); in get_victim_by_default()
403 secno = GET_SEC_FROM_SEG(sbi, p.min_segno); in get_victim_by_default()
405 sbi->cur_victim_sec = secno; in get_victim_by_default()
411 trace_f2fs_get_victim(sbi->sb, type, gc_type, &p, in get_victim_by_default()
412 sbi->cur_victim_sec, in get_victim_by_default()
413 prefree_segments(sbi), free_segments(sbi)); in get_victim_by_default()
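
get_victim_by_default() ties the pieces together: it seeds p.min_cost from get_max_cost(), optionally reuses a section already marked in victim_secmap via check_bg_victims(), then walks the dirty-segment bitmap starting at last_victim, skipping sections that are already being cleaned (sec_usage_check) and, for foreground GC, sections too full to be worth cleaning (no_fggc_candidate), keeping the cheapest get_gc_cost() result until max_search candidates have been examined. A stripped-down model of that scan over plain arrays (dirty[], in_use[], and seg_cost[] are invented stand-ins for the bitmap, sec_usage_check(), and get_gc_cost()):

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NSEGS 8
    #define NO_VICTIM UINT_MAX

    static bool dirty[NSEGS]  = { 0, 1, 1, 0, 1, 0, 1, 0 };
    static bool in_use[NSEGS] = { 0, 0, 1, 0, 0, 0, 0, 0 };
    static unsigned int seg_cost[NSEGS] = { 0, 40, 5, 0, 12, 0, 7, 0 };

    static unsigned int get_victim(unsigned int last_victim,
                                   unsigned int max_search,
                                   unsigned int max_cost)
    {
        unsigned int min_segno = NO_VICTIM, min_cost = max_cost;
        unsigned int segno = last_victim, searched = 0;

        for (unsigned int steps = 0;
             steps < NSEGS && searched < max_search; steps++) {
            segno = (segno + 1) % NSEGS;  /* wrap like last_victim %= MAIN_SEGS */
            if (!dirty[segno])
                continue;
            searched++;
            if (in_use[segno])            /* already someone's cur_victim_sec */
                continue;
            if (seg_cost[segno] < min_cost) {
                min_cost = seg_cost[segno];
                min_segno = segno;        /* cheapest candidate so far */
            }
        }
        return min_segno;
    }

    int main(void)
    {
        unsigned int v = get_victim(0, 4, UINT_MAX);
        if (v != NO_VICTIM)
            printf("victim segment: %u (cost %u)\n", v, seg_cost[v]);
        return 0;
    }
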
461 static int check_valid_map(struct f2fs_sb_info *sbi, in check_valid_map() argument
464 struct sit_info *sit_i = SIT_I(sbi); in check_valid_map()
469 sentry = get_seg_entry(sbi, segno); in check_valid_map()
480 static void gc_node_segment(struct f2fs_sb_info *sbi, in gc_node_segment() argument
488 start_addr = START_BLOCK(sbi, segno); in gc_node_segment()
493 for (off = 0; off < sbi->blocks_per_seg; off++, entry++) { in gc_node_segment()
499 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) in gc_node_segment()
502 if (check_valid_map(sbi, segno, off) == 0) in gc_node_segment()
506 ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1, in gc_node_segment()
512 ra_node_page(sbi, nid); in gc_node_segment()
517 node_page = get_node_page(sbi, nid); in gc_node_segment()
522 if (check_valid_map(sbi, segno, off) == 0) { in gc_node_segment()
527 get_node_info(sbi, nid, &ni); in gc_node_segment()
534 stat_inc_node_blk_count(sbi, 1, gc_type); in gc_node_segment()
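
gc_node_segment() makes several passes over the segment's summary entries, driven by a phase counter: first readahead of the NAT blocks covering each nid, then readahead of the node pages themselves, and finally the move, where each node page is re-fetched and the valid map is re-checked (the block may have been freed while GC slept) before the page is rewritten. A sketch of that phased iteration pattern (do_one() and the phase names are illustrative):

    #include <stdio.h>

    enum phase { RA_NAT, RA_NODE, MOVE_NODE, NPHASES };

    #define BLOCKS_PER_SEG 4

    /* Illustrative per-phase work; the kernel does readahead/readahead/move. */
    static void do_one(enum phase ph, unsigned int off)
    {
        static const char *name[] = { "ra_meta_pages(NAT)", "ra_node_page",
                                      "move_node_page" };
        printf("phase %s: block %u\n", name[ph], off);
    }

    int main(void)
    {
        /* 1 == block still valid per check_valid_map(); 0 == skip it */
        int valid[BLOCKS_PER_SEG] = { 1, 0, 1, 1 };

        for (enum phase ph = RA_NAT; ph < NPHASES; ph++)
            for (unsigned int off = 0; off < BLOCKS_PER_SEG; off++) {
                if (!valid[off])   /* re-checked each pass: it can change */
                    continue;
                do_one(ph, off);
            }
        return 0;
    }
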
568 static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, in is_alive() argument
579 node_page = get_node_page(sbi, nid); in is_alive()
583 get_node_info(sbi, nid, dni); in is_alive()
586 f2fs_msg(sbi->sb, KERN_WARNING, in is_alive()
589 set_sbi_flag(sbi, SBI_NEED_FSCK); in is_alive()
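
is_alive() guards against stale summary entries: it loads the node page named by the summary, fetches its node info, and, in the lines shown, warns and sets SBI_NEED_FSCK when the on-disk SSA and the node footer disagree; a data block only counts as live if its owning node still maps this exact block address. A toy model of that liveness test (the array-backed node table is invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy on-disk state: for each node id, the block address it maps. */
    #define NNODES 4
    static unsigned int node_maps_addr[NNODES] = { 100, 101, 0, 103 };

    /*
     * Modeled after is_alive(): a data block at blkaddr, recorded in the
     * summary as belonging to node nid, is live only if that node still
     * points at blkaddr. Stale summary entries fail this test.
     */
    static bool is_alive(unsigned int nid, unsigned int blkaddr)
    {
        if (nid >= NNODES)
            return false;               /* node gone: block is dead */
        return node_maps_addr[nid] == blkaddr;
    }

    int main(void)
    {
        printf("nid 0 @100: %s\n", is_alive(0, 100) ? "alive" : "dead");
        printf("nid 2 @102: %s\n", is_alive(2, 102) ? "alive" : "dead");
        return 0;
    }
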
609 .sbi = F2FS_I_SB(inode), in move_data_block()
657 get_node_info(fio.sbi, dn.nid, &ni); in move_data_block()
664 allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr, in move_data_block()
667 fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi), in move_data_block()
681 if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) { in move_data_block()
693 dec_page_count(fio.sbi, F2FS_DIRTY_META); in move_data_block()
710 f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE); in move_data_block()
720 __f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr, in move_data_block()
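
move_data_block() handles data that cannot take the ordinary page-cache rewrite path (encrypted inodes): it pins the dnode, reads the node info, allocates a replacement block with allocate_data_block(), stages the payload in a page from the meta mapping, writes it to the new address, and publishes the move with __f2fs_replace_block(), rolling the allocation back on failure. A toy array-backed model of just the allocate/copy/replace core:

    #include <stdio.h>
    #include <string.h>

    /* Toy "disk": block address -> 16-byte payload. */
    static char disk[8][16];
    static unsigned int next_free = 4;        /* first unallocated block */
    static unsigned int node_maps_addr = 2;   /* owning node's current pointer */

    /*
     * Modeled on move_data_block(): allocate a new address, copy the
     * payload (f2fs stages it through the meta mapping), then swing the
     * owner's pointer to the new address, invalidating the old block.
     */
    static void move_data_block(unsigned int old_addr)
    {
        unsigned int new_addr = next_free++;        /* allocate_data_block() */
        memcpy(disk[new_addr], disk[old_addr], 16); /* copy via staging page */
        node_maps_addr = new_addr;                  /* __f2fs_replace_block() */
    }

    int main(void)
    {
        strcpy(disk[2], "payload");
        move_data_block(2);
        printf("data now at block %u: %s\n", node_maps_addr,
               disk[node_maps_addr]);
        return 0;
    }
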
755 .sbi = F2FS_I_SB(inode), in move_data_page()
797 static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, in gc_data_segment() argument
800 struct super_block *sb = sbi->sb; in gc_data_segment()
806 start_addr = START_BLOCK(sbi, segno); in gc_data_segment()
811 for (off = 0; off < sbi->blocks_per_seg; off++, entry++) { in gc_data_segment()
820 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) in gc_data_segment()
823 if (check_valid_map(sbi, segno, off) == 0) in gc_data_segment()
827 ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1, in gc_data_segment()
833 ra_node_page(sbi, nid); in gc_data_segment()
838 if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs)) in gc_data_segment()
842 ra_node_page(sbi, dni.ino); in gc_data_segment()
913 stat_inc_data_blk_count(sbi, 1, gc_type); in gc_data_segment()
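
gc_data_segment() runs the same multi-pass shape as gc_node_segment(), but its move phase filters harder before touching a block: the valid map is re-checked, is_alive() vets the summary entry, the owning inode is looked up (with readahead of its node page), and only then is the block moved, via move_data_block() for encrypted inodes or move_data_page() otherwise. The per-block decision chain of that final phase reduces to roughly this (the stubs encode an arbitrary toy state):

    #include <stdbool.h>
    #include <stdio.h>

    #define BLOCKS_PER_SEG 4

    /* Stubs standing in for the helpers named in the listing. */
    static bool check_valid_map(unsigned int off) { return off != 1; }
    static bool is_alive(unsigned int off)        { return off != 3; }
    static bool inode_encrypted(unsigned int off) { return off == 2; }

    static void move_data_block(unsigned int off)
    {
        printf("block %u: move_data_block (encrypted path)\n", off);
    }

    static void move_data_page(unsigned int off)
    {
        printf("block %u: move_data_page\n", off);
    }

    int main(void)
    {
        /* the move phase of gc_data_segment(), one decision chain per block */
        for (unsigned int off = 0; off < BLOCKS_PER_SEG; off++) {
            if (!check_valid_map(off))   /* block freed since the scan began */
                continue;
            if (!is_alive(off))          /* summary entry is stale */
                continue;
            if (inode_encrypted(off))
                move_data_block(off);    /* copy through meta mapping */
            else
                move_data_page(off);     /* rewrite via the page cache */
        }
        return 0;
    }
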
921 static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim, in __get_victim() argument
924 struct sit_info *sit_i = SIT_I(sbi); in __get_victim()
928 ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, in __get_victim()
934 static int do_garbage_collect(struct f2fs_sb_info *sbi, in do_garbage_collect() argument
942 unsigned int end_segno = start_segno + sbi->segs_per_sec; in do_garbage_collect()
944 unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ? in do_garbage_collect()
948 if (sbi->segs_per_sec > 1) in do_garbage_collect()
949 ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), in do_garbage_collect()
950 sbi->segs_per_sec, META_SSA, true); in do_garbage_collect()
954 sum_page = get_sum_page(sbi, segno++); in do_garbage_collect()
963 sum_page = find_get_page(META_MAPPING(sbi), in do_garbage_collect()
964 GET_SUM_BLOCK(sbi, segno)); in do_garbage_collect()
967 if (get_valid_blocks(sbi, segno, false) == 0 || in do_garbage_collect()
969 unlikely(f2fs_cp_error(sbi))) in do_garbage_collect()
973 f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer))); in do_garbage_collect()
983 gc_node_segment(sbi, sum->entries, segno, gc_type); in do_garbage_collect()
985 gc_data_segment(sbi, sum->entries, gc_list, segno, in do_garbage_collect()
988 stat_inc_seg_count(sbi, type, gc_type); in do_garbage_collect()
991 get_valid_blocks(sbi, segno, false) == 0) in do_garbage_collect()
998 f2fs_submit_merged_write(sbi, in do_garbage_collect()
1003 stat_inc_call_count(sbi->stat_info); in do_garbage_collect()
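
do_garbage_collect() operates on a whole section: when a section spans multiple segments it readaheads all the SSA summary blocks up front, pins each summary page, then walks the segments, dispatching to gc_node_segment() or gc_data_segment() according to the summary footer's type, and counts a segment as freed only when foreground GC leaves it with zero valid blocks. The section-wide iteration reduces to something like this (types and helpers are simplified stand-ins):

    #include <stdio.h>

    #define SEGS_PER_SEC 2

    enum seg_type { SUM_TYPE_NODE, SUM_TYPE_DATA };

    static enum seg_type summary_type(unsigned int segno)
    {
        return segno % 2 ? SUM_TYPE_DATA : SUM_TYPE_NODE;   /* toy layout */
    }

    static unsigned int valid_blocks_after_gc(unsigned int segno)
    {
        (void)segno;
        return 0;               /* pretend FG_GC emptied every segment */
    }

    static int do_garbage_collect(unsigned int start_segno, int fg_gc)
    {
        unsigned int end_segno = start_segno + SEGS_PER_SEC;
        int seg_freed = 0;

        /* readahead of the section's SSA pages would happen here */

        for (unsigned int segno = start_segno; segno < end_segno; segno++) {
            if (summary_type(segno) == SUM_TYPE_NODE)
                printf("segno %u: gc_node_segment\n", segno);
            else
                printf("segno %u: gc_data_segment\n", segno);

            /* FG_GC counts a segment only once it is completely empty */
            if (fg_gc && valid_blocks_after_gc(segno) == 0)
                seg_freed++;
        }
        return seg_freed;
    }

    int main(void)
    {
        printf("segments freed: %d\n", do_garbage_collect(0, 1));
        return 0;
    }
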
1008 int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, in f2fs_gc() argument
1021 trace_f2fs_gc_begin(sbi->sb, sync, background, in f2fs_gc()
1022 get_pages(sbi, F2FS_DIRTY_NODES), in f2fs_gc()
1023 get_pages(sbi, F2FS_DIRTY_DENTS), in f2fs_gc()
1024 get_pages(sbi, F2FS_DIRTY_IMETA), in f2fs_gc()
1025 free_sections(sbi), in f2fs_gc()
1026 free_segments(sbi), in f2fs_gc()
1027 reserved_segments(sbi), in f2fs_gc()
1028 prefree_segments(sbi)); in f2fs_gc()
1030 cpc.reason = __get_cp_reason(sbi); in f2fs_gc()
1032 if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE))) { in f2fs_gc()
1036 if (unlikely(f2fs_cp_error(sbi))) { in f2fs_gc()
1041 if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) { in f2fs_gc()
1047 if (prefree_segments(sbi)) { in f2fs_gc()
1048 ret = write_checkpoint(sbi, &cpc); in f2fs_gc()
1052 if (has_not_enough_free_secs(sbi, 0, 0)) in f2fs_gc()
1061 if (!__get_victim(sbi, &segno, gc_type)) { in f2fs_gc()
1066 seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type); in f2fs_gc()
1067 if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec) in f2fs_gc()
1072 sbi->cur_victim_sec = NULL_SEGNO; in f2fs_gc()
1075 if (has_not_enough_free_secs(sbi, sec_freed, 0)) { in f2fs_gc()
1081 ret = write_checkpoint(sbi, &cpc); in f2fs_gc()
1084 SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0; in f2fs_gc()
1085 SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno; in f2fs_gc()
1087 trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed, in f2fs_gc()
1088 get_pages(sbi, F2FS_DIRTY_NODES), in f2fs_gc()
1089 get_pages(sbi, F2FS_DIRTY_DENTS), in f2fs_gc()
1090 get_pages(sbi, F2FS_DIRTY_IMETA), in f2fs_gc()
1091 free_sections(sbi), in f2fs_gc()
1092 free_segments(sbi), in f2fs_gc()
1093 reserved_segments(sbi), in f2fs_gc()
1094 prefree_segments(sbi)); in f2fs_gc()
1096 mutex_unlock(&sbi->gc_mutex); in f2fs_gc()
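
f2fs_gc() is the entry point both paths share: foreground GC first writes a checkpoint when prefree segments exist (reclaiming them may already yield enough space), then the function loops picking a victim with __get_victim() and cleaning it with do_garbage_collect(), escalating from BG_GC to FG_GC while free sections remain short, and finishes foreground rounds with another checkpoint so the cleaned segments become truly free before last_victim and cur_victim_sec are reset. Schematically (the predicates and counters are stand-ins for the kernel helpers):

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int free_secs = 1, needed_secs = 3;

    static bool has_not_enough_free_secs(void)  { return free_secs < needed_secs; }
    static void write_checkpoint(void)          { puts("checkpoint"); }
    static bool get_victim(unsigned int *segno) { *segno = free_secs; return true; }

    static int f2fs_gc(bool fg_gc)
    {
        unsigned int segno;
        int sec_freed = 0;

    gc_more:
        if (!get_victim(&segno))    /* __get_victim(): no candidate left */
            goto stop;

        printf("cleaning victim segment %u\n", segno);
        free_secs++;                /* pretend do_garbage_collect() freed it */
        if (fg_gc)
            sec_freed++;

        if (has_not_enough_free_secs()) {
            fg_gc = true;           /* BG_GC escalates to FG_GC when tight */
            goto gc_more;
        }

        if (fg_gc)
            write_checkpoint();     /* makes cleaned segments truly free */
    stop:
        return sec_freed;
    }

    int main(void)
    {
        printf("sections freed: %d\n", f2fs_gc(false));
        return 0;
    }
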
1105 void build_gc_manager(struct f2fs_sb_info *sbi) in build_gc_manager() argument
1109 DIRTY_I(sbi)->v_ops = &default_v_ops; in build_gc_manager()
1112 main_count = SM_I(sbi)->main_segments << sbi->log_blocks_per_seg; in build_gc_manager()
1113 resv_count = SM_I(sbi)->reserved_segments << sbi->log_blocks_per_seg; in build_gc_manager()
1114 ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg; in build_gc_manager()
1116 sbi->fggc_threshold = div64_u64((main_count - ovp_count) * in build_gc_manager()
1117 BLKS_PER_SEC(sbi), (main_count - resv_count)); in build_gc_manager()
1118 sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES; in build_gc_manager()
1121 if (sbi->s_ndevs && sbi->segs_per_sec == 1) in build_gc_manager()
1122 SIT_I(sbi)->last_victim[ALLOC_NEXT] = in build_gc_manager()
1123 GET_SEGNO(sbi, FDEV(0).end_blk) + 1; in build_gc_manager()
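
build_gc_manager() wires in the default victim operations and precomputes sbi->fggc_threshold, the per-section valid-block count above which a section is not worth foreground-cleaning: with all counts converted to blocks, it is (main - overprovision) * blocks_per_section / (main - reserved). On multi-device setups with one segment per section it also seeds last_victim[ALLOC_NEXT] just past the first device so allocation moves on. A worked run of the threshold arithmetic (the geometry below is illustrative, not real device numbers):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long blocks_per_seg = 512, segs_per_sec = 1;
        unsigned long long main_segs = 10000, ovp_segs = 1024, resv_segs = 512;

        unsigned long long main_count = main_segs * blocks_per_seg;
        unsigned long long ovp_count  = ovp_segs  * blocks_per_seg;
        unsigned long long resv_count = resv_segs * blocks_per_seg;
        unsigned long long blks_per_sec = blocks_per_seg * segs_per_sec;

        /* the sbi->fggc_threshold computation from build_gc_manager() */
        unsigned long long fggc_threshold =
            (main_count - ovp_count) * blks_per_sec / (main_count - resv_count);

        /* 8976 * 512 / 9488 ~= 484 of 512 blocks */
        printf("fggc_threshold = %llu valid blocks per section\n",
               fggc_threshold);
        return 0;
    }
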