// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/random.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *victim_entry_slab;

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len);

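/*
 * Background GC worker. It sleeps between rounds (the interval adapts to
 * how many invalid blocks exist and how busy the device is) and wakes up
 * early in urgent mode or for foreground waiters on fggc_wq when the
 * GC_MERGE mount option routes foreground GC requests through this thread.
 */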
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		bool sync_mode, foreground = false;

		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				waitqueue_active(fggc_wq) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
			foreground = true;

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze()) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false,
					STOP_CP_REASON_FAULT_INJECT);
		}

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too frequently,
		 * because some segments may be invalidated soon afterwards
		 * by user update or deletion. So we'd like to wait a while
		 * to collect more dirty segments first.
		 */
		if (sbi->gc_mode == GC_URGENT_HIGH ||
				sbi->gc_mode == GC_URGENT_MID) {
			wait_ms = gc_th->urgent_sleep_time;
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		}

		if (foreground) {
			f2fs_down_write(&sbi->gc_lock);
			goto do_gc;
		} else if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			f2fs_up_write(&sbi->gc_lock);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		if (!foreground)
			stat_inc_bggc_count(sbi->stat_info);

		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;

		/* foreground GC was triggered via f2fs_balance_fs() */
		if (foreground)
			sync_mode = false;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, sync_mode, !foreground, false, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		if (foreground)
			wake_up_all(&gc_th->fggc_wq);

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi, true);
next:
		if (sbi->gc_mode == GC_URGENT_HIGH) {
			spin_lock(&sbi->gc_urgent_high_lock);
			if (sbi->gc_urgent_high_remaining) {
				sbi->gc_urgent_high_remaining--;
				if (!sbi->gc_urgent_high_remaining)
					sbi->gc_mode = GC_NORMAL;
			}
			spin_unlock(&sbi->gc_urgent_high_lock);
		}
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

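/*
 * Allocate the GC control structure, seed the sleep times with their
 * defaults, and spawn the "f2fs_gc-major:minor" kthread for this mount.
 */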
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	wake_up_all(&gc_th->fggc_wq);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

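/*
 * Pick the victim-selection algorithm: background GC prefers age-aware
 * ATGC when enabled (cost-benefit otherwise), foreground GC uses greedy,
 * and an explicit sbi->gc_mode (e.g. set through sysfs) overrides both.
 */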
static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode;

	if (gc_type == BG_GC) {
		if (sbi->am.atgc_enabled)
			gc_mode = GC_AT;
		else
			gc_mode = GC_CB;
	} else {
		gc_mode = GC_GREEDY;
	}

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT_HIGH:
		gc_mode = GC_GREEDY;
		break;
	case GC_IDLE_AT:
		gc_mode = GC_AT;
		break;
	}

	return gc_mode;
}

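/*
 * Fill in the victim-selection policy: the SSR/AT_SSR allocators scan
 * dirty segments one segment at a time with the greedy cost, while LFS
 * cleaning scans one section at a time using the mode chosen by
 * select_gc_type().
 */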
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else if (p->alloc_mode == AT_SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->ofs_unit = sbi->segs_per_sec;
		if (__is_large_section(sbi)) {
			p->dirty_bitmap = dirty_i->dirty_secmap;
			p->max_search = count_bits(p->dirty_bitmap,
						0, MAIN_SECS(sbi));
		} else {
			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
		}
	}

	/*
	 * adjust the candidate range: all dirty segments should be
	 * searchable for foreground GC, urgent GC and age-based modes.
	 */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT_HIGH) &&
			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select the beginning hot/small space first in no_heap mode */
	if (f2fs_need_rand_seg(sbi))
		p->offset = prandom_u32() % (MAIN_SECS(sbi) * sbi->segs_per_sec);
	else if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	else if (p->alloc_mode == AT_SSR)
		return UINT_MAX;

	/* LFS */
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else if (p->gc_mode == GC_AT)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections that
	 * background GC selected before. Those sections are guaranteed
	 * to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

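/*
 * Classic LFS cost-benefit policy: with utilization u (percentage of
 * valid blocks) and a normalized age, the benefit is age * (100 - u) and
 * the cost (100 + u) models one read plus one write of the live data.
 * The result is inverted (UINT_MAX - benefit/cost) so that a smaller
 * value is a better victim, matching the minimum search in
 * get_victim_by_default().
 */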
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;
	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);

	for (i = 0; i < usable_segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, usable_segs_per_sec);
	vblocks = div_u64(vblocks, usable_segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else if (p->gc_mode == GC_CB)
		return get_cb_cost(sbi, segno);

	f2fs_bug_on(sbi, 1);
	return 0;
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

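/*
 * ATGC candidates live in two structures at once: an rb-tree ordered by
 * section mtime (for age lookups) and a list used to free all entries in
 * one pass once a victim has been chosen.
 */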
static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno,
				struct rb_node *parent, struct rb_node **p,
				bool left_most)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;

	ve = f2fs_kmem_cache_alloc(victim_entry_slab,
				GFP_NOFS, true, NULL);

	ve->mtime = mtime;
	ve->segno = segno;

	rb_link_node(&ve->rb_node, parent, p);
	rb_insert_color_cached(&ve->rb_node, &am->root, left_most);

	list_add_tail(&ve->list, &am->victim_list);

	am->victim_count++;

	return ve;
}

static void insert_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	bool left_most = true;

	p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most);
	attach_victim_entry(sbi, mtime, segno, parent, p, left_most);
}

static void add_victim_entry(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int i;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (p->gc_mode == GC_AT &&
			get_valid_blocks(sbi, segno, true) == 0)
			return;
	}

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	mtime = div_u64(mtime, sbi->segs_per_sec);

	/* handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (mtime < sit_i->dirty_min_mtime)
		sit_i->dirty_min_mtime = mtime;
	if (mtime > sit_i->dirty_max_mtime)
		sit_i->dirty_max_mtime = mtime;

	/* don't choose a young section as a candidate */
	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
		return;

	insert_victim_entry(sbi, mtime, segno);
}

static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node *parent = NULL;
	bool left_most;

	f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most);

	return parent;
}

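/*
 * ATGC cost walk: for each candidate, both the age and the fraction of
 * invalid blocks are scaled to the same accuracy class and weighted
 * (age_weight% for age, the remainder for free space), then inverted so
 * that the minimum-cost entry is the oldest, emptiest section. At most
 * dirty_threshold entries are examined per round.
 */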
static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node *node;
	struct rb_entry *re;
	struct victim_entry *ve;
	unsigned long long total_time;
	unsigned long long age, u, accu;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int sec_blocks = BLKS_PER_SEC(sbi);
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int age_weight = am->age_weight;
	unsigned int cost;
	unsigned int iter = 0;

	if (max_mtime < min_mtime)
		return;

	max_mtime += 1;
	total_time = max_mtime - min_mtime;

	accu = div64_u64(ULLONG_MAX, total_time);
	accu = min_t(unsigned long long, div_u64(accu, 100),
					DEFAULT_ACCURACY_CLASS);

	node = rb_first_cached(root);
next:
	re = rb_entry_safe(node, struct rb_entry, rb_node);
	if (!re)
		return;

	ve = (struct victim_entry *)re;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip;

	/* age = 10000 * x% * 60 (with the default accuracy and age_weight) */
	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
								age_weight;

	vblocks = get_valid_blocks(sbi, ve->segno, true);
	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);

	/* u = 10000 * x% * 40 (the free-space share of the weight) */
	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
							(100 - age_weight);

	f2fs_bug_on(sbi, age + u >= UINT_MAX);

	cost = UINT_MAX - (age + u);
	iter++;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip:
	if (iter < dirty_threshold) {
		node = rb_next(node);
		goto next;
	}
}

/*
 * select candidates around the source section, in the range
 * [target - dirty_threshold, target + dirty_threshold]
 */
static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_node *node;
	struct rb_entry *re;
	struct victim_entry *ve;
	unsigned long long age;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int seg_blocks = sbi->blocks_per_seg;
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int cost;
	unsigned int iter = 0;
	int stage = 0;

	if (max_mtime < min_mtime)
		return;
	max_mtime += 1;
next_stage:
	node = lookup_central_victim(sbi, p);
next_node:
	re = rb_entry_safe(node, struct rb_entry, rb_node);
	if (!re) {
		if (stage == 0)
			goto skip_stage;
		return;
	}

	ve = (struct victim_entry *)re;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip_node;

	age = max_mtime - ve->mtime;

	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
	f2fs_bug_on(sbi, !vblocks);

	/* rare case */
	if (vblocks == seg_blocks)
		goto skip_node;

	iter++;

	age = max_mtime - abs(p->age - age);
	cost = UINT_MAX - vblocks;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip_node:
	if (iter < dirty_threshold) {
		/* stage 0 walks older entries, stage 1 newer ones */
		if (stage == 0)
			node = rb_prev(node);
		else if (stage == 1)
			node = rb_next(node);
		goto next_node;
	}
skip_stage:
	if (stage < 1) {
		stage++;
		iter = 0;
		goto next_stage;
	}
}

static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
						&sbi->am.root, true));

	if (p->gc_mode == GC_AT)
		atgc_lookup_victim(sbi, p);
	else if (p->alloc_mode == AT_SSR)
		atssr_lookup_victim(sbi, p);
	else
		f2fs_bug_on(sbi, 1);
}

static void release_victim_entry(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve, *tmp;

	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
		list_del(&ve->list);
		kmem_cache_free(victim_entry_slab, ve);
		am->victim_count--;
	}

	am->root = RB_ROOT_CACHED;

	f2fs_bug_on(sbi, am->victim_count);
	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}

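/*
 * Pinned files must keep their on-disk block addresses, so GC must not
 * migrate their blocks. The helpers below track sections that hold
 * pinned data; foreground GC skips them while pinning is enabled.
 */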
static bool f2fs_pin_section(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	if (!dirty_i->enable_pin_section)
		return false;
	if (!test_and_set_bit(secno, dirty_i->pinned_secmap))
		dirty_i->pinned_secmap_cnt++;
	return true;
}

static bool f2fs_pinned_section_exists(struct dirty_seglist_info *dirty_i)
{
	return dirty_i->pinned_secmap_cnt;
}

static bool f2fs_section_is_pinned(struct dirty_seglist_info *dirty_i,
						unsigned int secno)
{
	return dirty_i->enable_pin_section &&
		f2fs_pinned_section_exists(dirty_i) &&
		test_bit(secno, dirty_i->pinned_secmap);
}

static void f2fs_unpin_all_sections(struct f2fs_sb_info *sbi, bool enable)
{
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	if (f2fs_pinned_section_exists(DIRTY_I(sbi))) {
		memset(DIRTY_I(sbi)->pinned_secmap, 0, bitmap_size);
		DIRTY_I(sbi)->pinned_secmap_cnt = 0;
	}
	DIRTY_I(sbi)->enable_pin_section = enable;
}

static int f2fs_gc_pinned_control(struct inode *inode, int gc_type,
							unsigned int segno)
{
	if (!f2fs_is_pinned_file(inode))
		return 0;
	if (gc_type != FG_GC)
		return -EBUSY;
	if (!f2fs_pin_section(F2FS_I_SB(inode), segno))
		f2fs_pin_file_control(inode, true);
	return -EAGAIN;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * with the minimum number of valid blocks and removes it from the
 * dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
			unsigned int *result, int gc_type, int type,
			char alloc_mode, unsigned long long age)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched;
	bool is_atgc;
	int ret = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;

	p.alloc_mode = alloc_mode;
	p.age = age;
	p.age_threshold = sbi->am.age_threshold;

retry:
	select_policy(sbi, gc_type, type, &p);
	p.min_segno = NULL_SEGNO;
	p.oldest_age = 0;
	p.min_cost = get_max_cost(sbi, &p);

	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
	nsearched = 0;

	if (is_atgc)
		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;

	if (*result != NULL_SEGNO) {
		if (!get_valid_blocks(sbi, *result, false)) {
			ret = -ENODATA;
			goto out;
		}

		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			ret = -EBUSY;
		else
			p.min_segno = *result;
		goto out;
	}

	ret = -ENODATA;
	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

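	/*
	 * Main search loop: walk the dirty bitmap in ofs_unit strides,
	 * resuming from last_victim and wrapping around once, and track
	 * the minimum-cost candidate until max_search units were checked.
	 */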
	while (1) {
		unsigned long cost, *dirty_bitmap;
		unsigned int unit_no, segno;

		dirty_bitmap = p.dirty_bitmap;
		unit_no = find_next_bit(dirty_bitmap,
				last_segment / p.ofs_unit,
				p.offset / p.ofs_unit);
		segno = unit_no * p.ofs_unit;
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		nsearched++;

#ifdef CONFIG_F2FS_CHECK_FS
		/*
		 * skip an invalid segno (one that failed the block validity
		 * check during GC) to avoid an endless GC loop in such cases.
		 */
		if (test_bit(segno, sm->invalid_segmap))
			goto next;
#endif

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;

		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
			if (p.alloc_mode == LFS) {
				/*
				 * LFS is set to find a source section during
				 * GC. The victim must have no checkpointed
				 * data.
				 */
				if (get_ckpt_valid_blocks(sbi, segno, true))
					goto next;
			} else {
				/*
				 * SSR | AT_SSR are set to find a target
				 * segment for writes, which can be full of
				 * checkpointed and newly written blocks.
				 */
				if (!f2fs_segment_has_free_slot(sbi, segno))
					goto next;
			}
		}

		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		if (gc_type == FG_GC && f2fs_section_is_pinned(dirty_i, secno))
			goto next;

		if (is_atgc) {
			add_victim_entry(sbi, &p, segno);
			goto next;
		}

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] =
					last_victim + p.ofs_unit;
			else
				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * sbi->segs_per_sec);
			break;
		}
	}

	/* get victim for GC_AT/AT_SSR */
	if (is_atgc) {
		lookup_victim_by_age(sbi, &p);
		release_victim_entry(sbi);
	}

	if (is_atgc && p.min_segno == NULL_SEGNO &&
			sm->elapsed_time < p.age_threshold) {
		p.age_threshold = 0;
		goto retry;
	}

	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		ret = 0;
	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return ret;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab,
					GFP_NOFS, true, NULL);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

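/*
 * gc_node_segment() walks the victim's summary entries in three phases:
 * phase 0 readaheads the NAT pages of every live nid, phase 1 readaheads
 * the node pages themselves, and phase 2 re-validates each block and
 * moves it. Splitting the passes batches the readahead before any writes
 * are issued.
 */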
/*
 * This function compares the node address found in the summary with the
 * one in the NAT. If the address is valid, the node is migrated with
 * cold status; otherwise (an invalid node) it is ignored.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;
		int err;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni, false)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		err = f2fs_move_node_page(node_page, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}

/*
 * Calculate the start block index for the given node offset.
 * Be careful: the caller must pass a node offset that refers only to a
 * direct node block. Passing an offset that points to any other node
 * block type, such as an indirect or double indirect node block, is a
 * caller bug.
 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);

		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}

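/*
 * To unpack the arithmetic above: node offsets count the inode (0), the
 * two direct nodes (1, 2), then the nodes hanging off each indirect and
 * double indirect node. bidx is the number of direct node blocks that
 * precede the given one, so each indirect node met along the way is
 * subtracted out ("dec"), and the final index is scaled by the block
 * addresses per direct node, plus those stored in the inode itself.
 */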
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node, max_addrs, base;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni, false)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
			  __func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	if (f2fs_check_nid_range(sbi, dni->ino)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (IS_INODE(node_page)) {
		base = offset_in_addr(F2FS_INODE(node_page));
		max_addrs = DEF_ADDRS_PER_INODE;
	} else {
		base = 0;
		max_addrs = DEF_ADDRS_PER_BLOCK;
	}

	if (base + ofs_in_node >= max_addrs) {
		f2fs_err(sbi, "Inconsistent blkaddr offset: base:%u, ofs_in_node:%u, max:%u, ino:%u, nid:%u",
			base, ofs_in_node, max_addrs, dni->ino, dni->nid);
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
		unsigned int segno = GET_SEGNO(sbi, blkaddr);
		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (unlikely(check_valid_map(sbi, segno, offset))) {
			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
					blkaddr, source_blkaddr, segno);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
#endif
		return false;
	}
	return true;
}

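/*
 * Readahead one data block through the meta inode. This is used for
 * inodes that need post-read processing (encryption, verity,
 * compression), whose raw blocks are staged in META_MAPPING during GC.
 */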
static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, 0, 0};
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page)
		return -ENOMEM;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ))) {
			err = -EFSCORRUPTED;
			goto put_page;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_page;
	f2fs_put_dnode(&dn);

	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
		err = -ENOENT;
		goto put_page;
	}
	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE))) {
		err = -EFSCORRUPTED;
		goto put_page;
	}
got_it:
	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);

	f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
	f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);

	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_page:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * Move a data block via META_MAPPING while keeping the data page locked.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page, *mpage;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
	int type = fio.sbi->am.atgc_enabled && (gc_type == BG_GC) &&
				(fio.sbi->gc_mode != GC_URGENT_HIGH) ?
				CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return -ENOMEM;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}

	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
	if (err)
		goto put_out;

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		f2fs_down_write(&fio.sbi->io_order_lock);

	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, false);
	if (!mpage) {
		err = -ENOMEM;
		goto up_out;
	}

	fio.encrypted_page = mpage;

	/* read source block in mpage */
	if (!PageUptodate(mpage)) {
		err = f2fs_submit_page_bio(&fio);
		if (err) {
			f2fs_put_page(mpage, 1);
			goto up_out;
		}

		f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
		f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);

		lock_page(mpage);
		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
						!PageUptodate(mpage))) {
			err = -EIO;
			f2fs_put_page(mpage, 1);
			goto up_out;
		}
	}

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* allocate block address */
	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, type, NULL);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		f2fs_put_page(mpage, 1);
		goto recover_block;
	}

	/* write target block */
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
	memcpy(page_address(fio.encrypted_page),
				page_address(mpage), PAGE_SIZE);
	f2fs_put_page(mpage, 1);
	invalidate_mapping_pages(META_MAPPING(fio.sbi),
				fio.old_blkaddr, fio.old_blkaddr);
	f2fs_invalidate_compress_page(fio.sbi, fio.old_blkaddr);

	set_page_dirty(fio.encrypted_page);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);
	ClearPageError(page);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);
	if (fio.retry) {
		err = -EAGAIN;
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
							true, true, true);
up_out:
	if (lfs_mode)
		f2fs_up_write(&fio.sbi->io_order_lock);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}

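/*
 * Move one plaintext data block through the regular page cache: for
 * BG_GC the page is only marked dirty and tagged "gcing" so writeback
 * migrates it later, while FG_GC writes it out synchronously here.
 */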
static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct page *page;
	int err = 0;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}
	err = f2fs_gc_pinned_control(inode, gc_type, segno);
	if (err)
		goto out;

	if (gc_type == BG_GC) {
		if (PageWriteback(page)) {
			err = -EAGAIN;
			goto out;
		}
		set_page_dirty(page);
		set_page_private_gcing(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);

retry:
		f2fs_wait_on_page_writeback(page, DATA, true, true);

		set_page_dirty(page);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_page_private_gcing(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_page_private_gcing(page);
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity. If the block is valid, it is moved with
 * cold status and the parent node is updated. If the parent node is not
 * valid or the data block address differs, the victim data block is
 * ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
		bool force_migrate)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

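	/*
	 * Five phases per segment: 0 readaheads NAT pages, 1 readaheads the
	 * dnode pages, 2 readaheads the inode pages, 3 grabs the inodes and
	 * readaheads their data pages, and 4 performs the actual move.
	 */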
next_step:
	entry = sum;

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/*
		 * Stop BG_GC if there are not enough free sections.
		 * Also stop GC if the segment became fully valid through a
		 * race with SSR block allocation.
		 */
		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
							BLKS_PER_SEC(sbi)))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			int err;

			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode) ||
					special_file(inode->i_mode))
				continue;

			err = f2fs_gc_pinned_control(inode, gc_type, segno);
			if (err == -EAGAIN) {
				iput(inode);
				return submitted;
			}

			if (!f2fs_down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_post_read_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_page = f2fs_get_read_data_page(inode,
						start_bidx, REQ_RAHEAD, true);
			f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;
			int err;

			if (S_ISREG(inode->i_mode)) {
				if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[READ])) {
					sbi->skipped_gc_rwsem++;
					continue;
				}
				if (!f2fs_down_write_trylock(
						&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					f2fs_up_write(&fi->i_gc_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				err = move_data_block(inode, start_bidx,
							gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (!err && (gc_type == FG_GC ||
					f2fs_post_read_required(inode)))
				submitted++;

			if (locked) {
				f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
				f2fs_up_write(&fi->i_gc_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;

	return submitted;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					NO_CHECK_TYPE, LFS, 0);
	up_write(&sit_i->sentry_lock);
	return ret;
}

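/*
 * Garbage-collect one section (start_segno and the segments following
 * it). All summary pages are referenced up front; on a read failure the
 * pages already taken are put twice, once for the find_get_page() lookup
 * and once for the earlier f2fs_get_sum_page() reference. For FG_GC the
 * return value is the number of segments left with zero valid blocks.
 */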
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type,
				bool force_migrate)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0, migrated = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;
	int submitted = 0;

	if (__is_large_section(sbi))
		end_segno = rounddown(end_segno, sbi->segs_per_sec);

	/*
	 * zone-capacity can be less than zone-size in zoned devices,
	 * resulting in fewer usable segments in the zone than expected, so
	 * calculate the end segno of the zone that can be garbage collected
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		end_segno -= sbi->segs_per_sec -
					f2fs_usable_segs_in_sec(sbi, segno);

	sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);

	/* readahead multiple SSA blocks that have contiguous addresses */
	if (__is_large_section(sbi))
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					end_segno - segno, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		if (IS_ERR(sum_page)) {
			int err = PTR_ERR(sum_page);

			end_segno = segno - 1;
			for (segno = start_segno; segno < end_segno; segno++) {
				sum_page = find_get_page(META_MAPPING(sbi),
						GET_SUM_BLOCK(sbi, segno));
				f2fs_put_page(sum_page, 0);
				f2fs_put_page(sum_page, 0);
			}
			return err;
		}
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0)
			goto freed;
		if (gc_type == BG_GC && __is_large_section(sbi) &&
				migrated >= sbi->migration_granularity)
			goto skip;
		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
			goto skip;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
				 segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_stop_checkpoint(sbi, false,
				STOP_CP_REASON_CORRUPTED_SUMMARY);
			goto skip;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			submitted += gc_node_segment(sbi, sum->entries, segno,
								gc_type);
		else
			submitted += gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type,
							force_migrate);

		stat_inc_seg_count(sbi, type, gc_type);
		sbi->gc_reclaimed_segs[sbi->gc_mode]++;
		migrated++;

freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;

		if (__is_large_section(sbi))
			sbi->next_victim_seg[gc_type] =
				(segno + 1 < end_segno) ? segno + 1 : NULL_SEGNO;
skip:
		f2fs_put_page(sum_page, 0);
	}

	if (submitted)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}

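/*
 * Top-level GC entry point, called with sbi->gc_lock held (it is released
 * before returning). A sync call runs foreground GC once; a background
 * call may escalate to foreground GC when free sections run low and
 * repeats while space is still tight.
 */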
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
			bool background, bool force, unsigned int segno)
{
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0, seg_freed = 0, total_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	unsigned int init_segno = segno;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
	unsigned long long first_skipped;
	unsigned int skipped_round = 0, round = 0;

	trace_f2fs_gc_begin(sbi->sb, sync, background,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
	sbi->skipped_gc_rwsem = 0;
	first_skipped = last_skipped;
gc_more:
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree segments below the
		 * given threshold, we can make them free by checkpoint.
		 * Then, we secure free sections which don't need foreground
		 * GC any more.
		 */
		if (prefree_segments(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in the critical path. */
	if (gc_type == BG_GC && !background) {
		ret = -EINVAL;
		goto stop;
	}
retry:
	ret = __get_victim(sbi, &segno, gc_type);
	if (ret) {
		/* allow searching victims in sections that have pinned data */
		if (ret == -ENODATA && gc_type == FG_GC &&
				f2fs_pinned_section_exists(DIRTY_I(sbi))) {
			f2fs_unpin_all_sections(sbi, false);
			goto retry;
		}
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, force);
	if (gc_type == FG_GC &&
			seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
		sec_freed++;
	total_freed += seg_freed;

	if (gc_type == FG_GC) {
		if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
						sbi->skipped_gc_rwsem)
			skipped_round++;
		last_skipped = sbi->skipped_atomic_files[FG_GC];
		round++;
	}

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (sync)
		goto stop;

	if (!has_not_enough_free_secs(sbi, sec_freed, 0))
		goto stop;

	if (skipped_round <= MAX_SKIP_GC_COUNT || skipped_round * 2 < round) {

		/* Write checkpoint to reclaim prefree segments */
		if (free_sections(sbi) < NR_CURSEG_PERSIST_TYPE &&
				prefree_segments(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		segno = NULL_SEGNO;
		goto gc_more;
	}
	if (first_skipped < last_skipped &&
			(last_skipped - first_skipped) >
					sbi->skipped_gc_rwsem) {
		f2fs_drop_inmem_pages_all(sbi, true);
		segno = NULL_SEGNO;
		goto gc_more;
	}
	if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
		ret = f2fs_write_checkpoint(sbi, &cpc);
stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;

	if (gc_type == FG_GC)
		f2fs_unpin_all_sections(sbi, true);

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	f2fs_up_write(&sbi->gc_lock);

	put_gc_inode(&gc_list);

	if (sync && !ret)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}
1911
f2fs_create_garbage_collection_cache(void)1912 int __init f2fs_create_garbage_collection_cache(void)
1913 {
1914 victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
1915 sizeof(struct victim_entry));
1916 if (!victim_entry_slab)
1917 return -ENOMEM;
1918 return 0;
1919 }
1920
f2fs_destroy_garbage_collection_cache(void)1921 void f2fs_destroy_garbage_collection_cache(void)
1922 {
1923 kmem_cache_destroy(victim_entry_slab);
1924 }
1925
static void init_atgc_management(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;

	if (test_opt(sbi, ATGC) &&
			SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
		am->atgc_enabled = true;

	am->root = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&am->victim_list);
	am->victim_count = 0;

	am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
	am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
	am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
	am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
}

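/*
 * Set up GC defaults at mount time: the default victim-selection ops, the
 * pinned-file GC failure threshold, and the ATGC state.
 */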
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;

	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give warm/cold data area from slower device */
	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;

	init_atgc_management(sbi);
}

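/*
 * Migrate all valid blocks out of the last @secs sections so they can be
 * chopped off by resize.  When @gc_only is true this is a best-effort GC
 * pass; otherwise any section still holding valid blocks afterwards fails
 * the shrink with -EAGAIN.
 */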
static int free_segment_range(struct f2fs_sb_info *sbi,
				unsigned int secs, bool gc_only)
{
	unsigned int segno, next_inuse, start, end;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	int gc_mode, gc_type;
	int err = 0;
	int type;

	/* Force block allocation for GC */
	MAIN_SECS(sbi) -= secs;
	start = MAIN_SECS(sbi) * sbi->segs_per_sec;
	end = MAIN_SEGS(sbi) - 1;

	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
			SIT_I(sbi)->last_victim[gc_mode] = 0;

	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
		if (sbi->next_victim_seg[gc_type] >= start)
			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);

	/* Move out cursegs from the target range */
	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++)
		f2fs_allocate_segment_for_resize(sbi, type, start, end);

	/* do GC to move out valid blocks in the range */
	for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
		struct gc_inode_list gc_list = {
			.ilist = LIST_HEAD_INIT(gc_list.ilist),
			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
		};

		do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
		put_gc_inode(&gc_list);

		if (!gc_only && get_valid_blocks(sbi, segno, true)) {
			err = -EAGAIN;
			goto out;
		}
		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
	if (gc_only)
		goto out;

	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out;

	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
	if (next_inuse <= end) {
		f2fs_err(sbi, "segno %u should be free but still inuse!",
			 next_inuse);
		f2fs_bug_on(sbi, 1);
	}
out:
	MAIN_SECS(sbi) += secs;
	return err;
}

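/*
 * Apply a section-count delta to the on-disk super block under sb_lock.
 * @secs is negative when shrinking: for example, with 2 segments per
 * section and 512 blocks per segment, secs == -1 removes 2 segments and
 * 1024 blocks, and shrinks the last device of a multi-device layout.
 */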
static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
{
	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
	int section_count;
	int segment_count;
	int segment_count_main;
	long long block_count;
	int segs = secs * sbi->segs_per_sec;

	f2fs_down_write(&sbi->sb_lock);

	section_count = le32_to_cpu(raw_sb->section_count);
	segment_count = le32_to_cpu(raw_sb->segment_count);
	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
	block_count = le64_to_cpu(raw_sb->block_count);

	raw_sb->section_count = cpu_to_le32(section_count + secs);
	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
	raw_sb->block_count = cpu_to_le64(block_count +
					(long long)segs * sbi->blocks_per_seg);
	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		int dev_segs =
			le32_to_cpu(raw_sb->devs[last_dev].total_segments);

		raw_sb->devs[last_dev].total_segments =
						cpu_to_le32(dev_segs + segs);
	}

	f2fs_up_write(&sbi->sb_lock);
}

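/*
 * Mirror the same section-count delta into the in-memory state: segment
 * manager totals, free section/segment counts, the checkpoint's user block
 * count, and the last device's geometry.
 */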
static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
{
	int segs = secs * sbi->segs_per_sec;
	long long blks = (long long)segs * sbi->blocks_per_seg;
	long long user_block_count =
				le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);

	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
	MAIN_SECS(sbi) += secs;
	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;

		FDEV(last_dev).total_segments =
				(int)FDEV(last_dev).total_segments + segs;
		FDEV(last_dev).end_blk =
				(long long)FDEV(last_dev).end_blk + blks;
#ifdef CONFIG_BLK_DEV_ZONED
		FDEV(last_dev).nr_blkz = (int)FDEV(last_dev).nr_blkz +
					(int)(blks >> sbi->log_blocks_per_blkz);
#endif
	}
}

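/*
 * Shrink the filesystem to @block_count blocks; growing is rejected with
 * -EINVAL.  The new size must be section-aligned and must not cut past the
 * start of the last device in a multi-device layout.  The shrink runs in
 * two phases: a best-effort GC pass under cp/gc locks, then the real
 * shrink with the super block frozen.
 */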
int f2fs_resize_fs(struct file *filp, __u64 block_count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 old_block_count, shrunk_blocks;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	unsigned int secs;
	int err = 0;
	__u32 rem;

	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
	if (block_count > old_block_count)
		return -EINVAL;

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		__u64 last_segs = FDEV(last_dev).total_segments;

		if (block_count + last_segs * sbi->blocks_per_seg <=
								old_block_count)
			return -EINVAL;
	}

	/* new fs size should align to section size */
	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
	if (rem)
		return -EINVAL;

	if (block_count == old_block_count)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_err(sbi, "Should run fsck to repair first.");
		return -EFSCORRUPTED;
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		f2fs_err(sbi, "Checkpoint should be enabled.");
		return -EINVAL;
	}

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	shrunk_blocks = old_block_count - block_count;
	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));

	/* stop other GC */
	if (!f2fs_down_write_trylock(&sbi->gc_lock)) {
		err = -EAGAIN;
		goto out_drop_write;
	}

	/* stop CP to protect MAIN_SEC in free_segment_range */
	f2fs_lock_op(sbi);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	spin_unlock(&sbi->stat_lock);

	if (err)
		goto out_unlock;

	err = free_segment_range(sbi, secs, true);

out_unlock:
	f2fs_unlock_op(sbi);
	f2fs_up_write(&sbi->gc_lock);
out_drop_write:
	mnt_drop_write_file(filp);
	if (err)
		return err;

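	/*
	 * Phase two: freeze the filesystem and redo free_segment_range()
	 * with gc_only == false, so no new writes can repopulate the
	 * sections being removed while the metadata is rewritten.
	 */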
	freeze_super(sbi->sb);

	if (f2fs_readonly(sbi->sb)) {
		thaw_super(sbi->sb);
		return -EROFS;
	}

	f2fs_down_write(&sbi->gc_lock);
	f2fs_down_write(&sbi->cp_global_sem);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	else
		sbi->user_block_count -= shrunk_blocks;
	spin_unlock(&sbi->stat_lock);
	if (err)
		goto out_err;

	set_sbi_flag(sbi, SBI_IS_RESIZEFS);
	err = free_segment_range(sbi, secs, false);
	if (err)
		goto recover_out;

	update_sb_metadata(sbi, -secs);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		update_sb_metadata(sbi, secs);
		goto recover_out;
	}

	update_fs_metadata(sbi, -secs);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

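	/*
	 * Persist the shrunken layout; if the checkpoint fails, roll back
	 * both the in-memory and the on-disk super block changes.
	 */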
	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err) {
		update_fs_metadata(sbi, secs);
		update_sb_metadata(sbi, secs);
		f2fs_commit_super(sbi, false);
	}
recover_out:
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");

		spin_lock(&sbi->stat_lock);
		sbi->user_block_count += shrunk_blocks;
		spin_unlock(&sbi->stat_lock);
	}
out_err:
	f2fs_up_write(&sbi->cp_global_sem);
	f2fs_up_write(&sbi->gc_lock);
	thaw_super(sbi->sb);
	return err;
}