// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *victim_entry_slab;

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len);

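/*
 * Background GC worker thread. It periodically wakes up (or is kicked via
 * gc_wait_queue_head/fggc_wq), adapts its sleep interval to filesystem
 * idleness, and invokes f2fs_gc() while holding sbi->gc_lock.
 */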
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	wait_queue_head_t *fggc_wq = &sbi->gc_thread->fggc_wq;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		bool sync_mode, foreground = false;

		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				waitqueue_active(fggc_wq) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		if (test_opt(sbi, GC_MERGE) && waitqueue_active(fggc_wq))
			foreground = true;

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze()) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}

		if (!sb_start_write_trylock(sbi->sb)) {
			stat_other_skip_bggc_count(sbi);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note: we have to avoid triggering GC too frequently, because
		 * some segments may be invalidated soon afterwards by user
		 * updates or deletions. So we wait a while to let dirty
		 * segments accumulate.
		 */
		if (sbi->gc_mode == GC_URGENT_HIGH) {
			wait_ms = gc_th->urgent_sleep_time;
			down_write(&sbi->gc_lock);
			goto do_gc;
		}

		if (foreground) {
			down_write(&sbi->gc_lock);
			goto do_gc;
		} else if (!down_write_trylock(&sbi->gc_lock)) {
			stat_other_skip_bggc_count(sbi);
			goto next;
		}

		if (!is_idle(sbi, GC_TIME)) {
			increase_sleep_time(gc_th, &wait_ms);
			up_write(&sbi->gc_lock);
			stat_io_skip_bggc_count(sbi);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		if (!foreground)
			stat_inc_bggc_count(sbi->stat_info);

		sync_mode = F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC;

		/* foreground GC was triggered via f2fs_balance_fs() */
		if (foreground)
			sync_mode = false;

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, sync_mode, !foreground, false, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		if (foreground)
			wake_up_all(&gc_th->fggc_wq);

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi, true);
next:
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	init_waitqueue_head(&sbi->gc_thread->fggc_wq);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	wake_up_all(&gc_th->fggc_wq);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode;

	if (gc_type == BG_GC) {
		if (sbi->am.atgc_enabled)
			gc_mode = GC_AT;
		else
			gc_mode = GC_CB;
	} else {
		gc_mode = GC_GREEDY;
	}

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT_HIGH:
		gc_mode = GC_GREEDY;
		break;
	case GC_IDLE_AT:
		gc_mode = GC_AT;
		break;
	}

	return gc_mode;
}

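/*
 * Set up the victim selection policy: the bitmap of candidate units, the
 * search budget, the allocation unit size and the starting offset, depending
 * on whether the caller is doing SSR/AT_SSR allocation or LFS-style GC.
 */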
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else if (p->alloc_mode == AT_SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_bitmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->ofs_unit = sbi->segs_per_sec;
		if (__is_large_section(sbi)) {
			p->dirty_bitmap = dirty_i->dirty_secmap;
			p->max_search = count_bits(p->dirty_bitmap,
						0, MAIN_SECS(sbi));
		} else {
			p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY];
			p->max_search = dirty_i->nr_dirty[DIRTY];
		}
	}

	/*
	 * Adjust the candidate range; all dirty segments should be
	 * selectable in the foreground GC and urgent GC cases.
	 */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT_HIGH) &&
			(p->gc_mode != GC_AT && p->alloc_mode != AT_SSR) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select the beginning hot/small space first in no_heap mode */
	if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

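/*
 * Return the worst acceptable cost for the current policy; a candidate is
 * only recorded when its cost is strictly lower than this initial bound.
 */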
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	else if (p->alloc_mode == AT_SSR)
		return UINT_MAX;

	/* LFS */
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else if (p->gc_mode == GC_AT)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections that were
	 * already selected by background GC; those sections are guaranteed
	 * to have only a few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

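/*
 * Cost-benefit cost of a section: combine utilization u (valid-block
 * percentage) with age (mtime normalized against the global min/max),
 * i.e. benefit ~ (1 - u) * age against cost ~ (1 + u), then invert the
 * ratio so that a smaller return value means a better victim.
 */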
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;
	unsigned int usable_segs_per_sec = f2fs_usable_segs_in_sec(sbi, segno);

	for (i = 0; i < usable_segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, usable_segs_per_sec);
	vblocks = div_u64(vblocks, usable_segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else if (p->gc_mode == GC_CB)
		return get_cb_cost(sbi, segno);

	f2fs_bug_on(sbi, 1);
	return 0;
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

static struct victim_entry *attach_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno,
				struct rb_node *parent, struct rb_node **p,
				bool left_most)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve;

	ve = f2fs_kmem_cache_alloc(victim_entry_slab, GFP_NOFS);

	ve->mtime = mtime;
	ve->segno = segno;

	rb_link_node(&ve->rb_node, parent, p);
	rb_insert_color_cached(&ve->rb_node, &am->root, left_most);

	list_add_tail(&ve->list, &am->victim_list);

	am->victim_count++;

	return ve;
}

static void insert_victim_entry(struct f2fs_sb_info *sbi,
				unsigned long long mtime, unsigned int segno)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	bool left_most = true;

	p = f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, mtime, &left_most);
	attach_victim_entry(sbi, mtime, segno, parent, p, left_most);
}

static void add_victim_entry(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int i;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (p->gc_mode == GC_AT &&
			get_valid_blocks(sbi, segno, true) == 0)
			return;
	}

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	mtime = div_u64(mtime, sbi->segs_per_sec);

	/* handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (mtime < sit_i->dirty_min_mtime)
		sit_i->dirty_min_mtime = mtime;
	if (mtime > sit_i->dirty_max_mtime)
		sit_i->dirty_max_mtime = mtime;

	/* don't choose a young section as a candidate */
	if (sit_i->dirty_max_mtime - mtime < p->age_threshold)
		return;

	insert_victim_entry(sbi, mtime, segno);
}

static struct rb_node *lookup_central_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct atgc_management *am = &sbi->am;
	struct rb_node *parent = NULL;
	bool left_most;

	f2fs_lookup_rb_tree_ext(sbi, &am->root, &parent, p->age, &left_most);

	return parent;
}

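/*
 * ATGC victim lookup: walk the mtime-ordered rb-tree and score each
 * candidate by a weighted sum of normalized age (age_weight percent) and
 * free-block ratio (the remaining percent), examining at most
 * dirty_threshold entries.
 */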
static void atgc_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_root_cached *root = &am->root;
	struct rb_node *node;
	struct rb_entry *re;
	struct victim_entry *ve;
	unsigned long long total_time;
	unsigned long long age, u, accu;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int sec_blocks = BLKS_PER_SEC(sbi);
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int age_weight = am->age_weight;
	unsigned int cost;
	unsigned int iter = 0;

	if (max_mtime < min_mtime)
		return;

	max_mtime += 1;
	total_time = max_mtime - min_mtime;

	accu = div64_u64(ULLONG_MAX, total_time);
	accu = min_t(unsigned long long, div_u64(accu, 100),
					DEFAULT_ACCURACY_CLASS);

	node = rb_first_cached(root);
next:
	re = rb_entry_safe(node, struct rb_entry, rb_node);
	if (!re)
		return;

	ve = (struct victim_entry *)re;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip;

	/* age = 10000 * x% * 60 */
	age = div64_u64(accu * (max_mtime - ve->mtime), total_time) *
								age_weight;

	vblocks = get_valid_blocks(sbi, ve->segno, true);
	f2fs_bug_on(sbi, !vblocks || vblocks == sec_blocks);

	/* u = 10000 * x% * 40 */
	u = div64_u64(accu * (sec_blocks - vblocks), sec_blocks) *
							(100 - age_weight);

	f2fs_bug_on(sbi, age + u >= UINT_MAX);

	cost = UINT_MAX - (age + u);
	iter++;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip:
	if (iter < dirty_threshold) {
		node = rb_next(node);
		goto next;
	}
}

/*
 * select candidates around source section in range of
 * [target - dirty_threshold, target + dirty_threshold]
 */
static void atssr_lookup_victim(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct atgc_management *am = &sbi->am;
	struct rb_node *node;
	struct rb_entry *re;
	struct victim_entry *ve;
	unsigned long long age;
	unsigned long long max_mtime = sit_i->dirty_max_mtime;
	unsigned long long min_mtime = sit_i->dirty_min_mtime;
	unsigned int seg_blocks = sbi->blocks_per_seg;
	unsigned int vblocks;
	unsigned int dirty_threshold = max(am->max_candidate_count,
					am->candidate_ratio *
					am->victim_count / 100);
	unsigned int cost;
	unsigned int iter = 0;
	int stage = 0;

	if (max_mtime < min_mtime)
		return;
	max_mtime += 1;
next_stage:
	node = lookup_central_victim(sbi, p);
next_node:
	re = rb_entry_safe(node, struct rb_entry, rb_node);
	if (!re) {
		if (stage == 0)
			goto skip_stage;
		return;
	}

	ve = (struct victim_entry *)re;

	if (ve->mtime >= max_mtime || ve->mtime < min_mtime)
		goto skip_node;

	age = max_mtime - ve->mtime;

	vblocks = get_seg_entry(sbi, ve->segno)->ckpt_valid_blocks;
	f2fs_bug_on(sbi, !vblocks);

	/* rare case */
	if (vblocks == seg_blocks)
		goto skip_node;

	iter++;

	age = max_mtime - abs(p->age - age);
	cost = UINT_MAX - vblocks;

	if (cost < p->min_cost ||
			(cost == p->min_cost && age > p->oldest_age)) {
		p->min_cost = cost;
		p->oldest_age = age;
		p->min_segno = ve->segno;
	}
skip_node:
	if (iter < dirty_threshold) {
		if (stage == 0)
			node = rb_prev(node);
		else if (stage == 1)
			node = rb_next(node);
		goto next_node;
	}
skip_stage:
	if (stage < 1) {
		stage++;
		iter = 0;
		goto next_stage;
	}
}

static void lookup_victim_by_age(struct f2fs_sb_info *sbi,
						struct victim_sel_policy *p)
{
	f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
						&sbi->am.root, true));

	if (p->gc_mode == GC_AT)
		atgc_lookup_victim(sbi, p);
	else if (p->alloc_mode == AT_SSR)
		atssr_lookup_victim(sbi, p);
	else
		f2fs_bug_on(sbi, 1);
}

static void release_victim_entry(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;
	struct victim_entry *ve, *tmp;

	list_for_each_entry_safe(ve, tmp, &am->victim_list, list) {
		list_del(&ve->list);
		kmem_cache_free(victim_entry_slab, ve);
		am->victim_count--;
	}

	am->root = RB_ROOT_CACHED;

	f2fs_bug_on(sbi, am->victim_count);
	f2fs_bug_on(sbi, !list_empty(&am->victim_list));
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from the dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
			unsigned int *result, int gc_type, int type,
			char alloc_mode, unsigned long long age)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment;
	unsigned int nsearched;
	bool is_atgc;
	int ret = 0;

	mutex_lock(&dirty_i->seglist_lock);
	last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;

	p.alloc_mode = alloc_mode;
	p.age = age;
	p.age_threshold = sbi->am.age_threshold;

retry:
	select_policy(sbi, gc_type, type, &p);
	p.min_segno = NULL_SEGNO;
	p.oldest_age = 0;
	p.min_cost = get_max_cost(sbi, &p);

	is_atgc = (p.gc_mode == GC_AT || p.alloc_mode == AT_SSR);
	nsearched = 0;

	if (is_atgc)
		SIT_I(sbi)->dirty_min_mtime = ULLONG_MAX;

	if (*result != NULL_SEGNO) {
		if (!get_valid_blocks(sbi, *result, false)) {
			ret = -ENODATA;
			goto out;
		}

		if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			ret = -EBUSY;
		else
			p.min_segno = *result;
		goto out;
	}

	ret = -ENODATA;
	if (p.max_search == 0)
		goto out;

	if (__is_large_section(sbi) && p.alloc_mode == LFS) {
		if (sbi->next_victim_seg[BG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[BG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
			goto got_result;
		}
		if (gc_type == FG_GC &&
				sbi->next_victim_seg[FG_GC] != NULL_SEGNO) {
			p.min_segno = sbi->next_victim_seg[FG_GC];
			*result = p.min_segno;
			sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
			goto got_result;
		}
	}

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost, *dirty_bitmap;
		unsigned int unit_no, segno;

		dirty_bitmap = p.dirty_bitmap;
		unit_no = find_next_bit(dirty_bitmap,
				last_segment / p.ofs_unit,
				p.offset / p.ofs_unit);
		segno = unit_no * p.ofs_unit;
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		nsearched++;

#ifdef CONFIG_F2FS_CHECK_FS
		/*
		 * skip a segno that failed the block validity check during
		 * GC before, to avoid an endless GC loop in such cases
		 */
		if (test_bit(segno, sm->invalid_segmap))
			goto next;
#endif

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;

		/* Don't touch checkpointed data */
		if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
			if (p.alloc_mode == LFS) {
				/*
				 * LFS is set to find a source section during
				 * GC. The victim should have no checkpointed
				 * data.
				 */
				if (get_ckpt_valid_blocks(sbi, segno, true))
					goto next;
			} else {
				/*
				 * SSR | AT_SSR are set to find a target
				 * segment for writes, which can be full of
				 * checkpointed and newly written blocks.
				 */
				if (!f2fs_segment_has_free_slot(sbi, segno))
					goto next;
			}
		}

		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		if (is_atgc) {
			add_victim_entry(sbi, &p, segno);
			goto next;
		}

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] =
					last_victim + p.ofs_unit;
			else
				sm->last_victim[p.gc_mode] = segno + p.ofs_unit;
			sm->last_victim[p.gc_mode] %=
				(MAIN_SECS(sbi) * sbi->segs_per_sec);
			break;
		}
	}

	/* get victim for GC_AT/AT_SSR */
	if (is_atgc) {
		lookup_victim_by_age(sbi, &p);
		release_victim_entry(sbi);
	}

	if (is_atgc && p.min_segno == NULL_SEGNO &&
			sm->elapsed_time < p.age_threshold) {
		p.age_threshold = 0;
		goto retry;
	}

	if (p.min_segno != NULL_SEGNO) {
got_it:
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;
got_result:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		ret = 0;
	}
out:
	if (p.min_segno != NULL_SEGNO)
		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	mutex_unlock(&dirty_i->seglist_lock);

	return ret;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address found in the summary with the one
 * in the NAT. If it is valid, the node is migrated with cold status;
 * otherwise (an invalid node) it is ignored.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;
		int err;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		err = f2fs_move_node_page(node_page, gc_type);
		if (!err && gc_type == FG_GC)
			submitted++;
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return submitted;
}

/*
 * Calculate the start block index that the given node offset maps to.
 * Be careful: callers must pass node offsets that indicate direct node
 * blocks only. Passing an offset that points to another node block type,
 * such as an indirect or double indirect node block, is a caller's bug.
 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK(inode) + ADDRS_PER_INODE(inode);
}

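/*
 * Check whether a summary entry still describes a live block: look up the
 * owning node page and verify that the block address recorded there matches
 * the one being garbage collected.
 */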
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_warn(sbi, "%s: valid data with mismatched node version.",
			  __func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	if (f2fs_check_nid_range(sbi, dni->ino))
		return false;

	*nofs = ofs_of_node(node_page);
	source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr) {
#ifdef CONFIG_F2FS_CHECK_FS
		unsigned int segno = GET_SEGNO(sbi, blkaddr);
		unsigned long offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

		if (unlikely(check_valid_map(sbi, segno, offset))) {
			if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
				f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u\n",
					 blkaddr, source_blkaddr, segno);
				set_sbi_flag(sbi, SBI_NEED_FSCK);
			}
		}
#endif
		return false;
	}
	return true;
}

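/*
 * Read ahead one data block in advance of migration. For post-read
 * (e.g. encrypted) inodes the raw block is pulled into the meta inode's
 * address space, since GC moves such blocks without decrypting them.
 */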
static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, 0, 0};
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page)
		return -ENOMEM;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE_READ))) {
			err = -EFSCORRUPTED;
			goto put_page;
		}
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_page;
	f2fs_put_dnode(&dn);

	if (!__is_valid_data_blkaddr(dn.data_blkaddr)) {
		err = -ENOENT;
		goto put_page;
	}
	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC_ENHANCE))) {
		err = -EFSCORRUPTED;
		goto put_page;
	}
got_it:
	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	/*
	 * don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);

	f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
	f2fs_update_iostat(sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);

	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_page:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * Move data block via META_MAPPING while keeping locked data page.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static int move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page, *mpage;
	block_t newaddr;
	int err = 0;
	bool lfs_mode = f2fs_lfs_mode(fio.sbi);
	int type = fio.sbi->am.atgc_enabled ?
				CURSEG_ALL_DATA_ATGC : CURSEG_COLD_DATA;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return -ENOMEM;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}

	if (f2fs_is_pinned_file(inode)) {
		f2fs_pin_file_control(inode, true);
		err = -EAGAIN;
		goto out;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		err = -ENOENT;
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until the previous
	 * dirty data has been written back, to avoid racing between GC and
	 * flush
	 */
	f2fs_wait_on_page_writeback(page, DATA, true, true);

	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto put_out;

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		down_write(&fio.sbi->io_order_lock);

	mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, false);
	if (!mpage) {
		err = -ENOMEM;
		goto up_out;
	}

	fio.encrypted_page = mpage;

	/* read source block in mpage */
	if (!PageUptodate(mpage)) {
		err = f2fs_submit_page_bio(&fio);
		if (err) {
			f2fs_put_page(mpage, 1);
			goto up_out;
		}

		f2fs_update_iostat(fio.sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
		f2fs_update_iostat(fio.sbi, FS_GDATA_READ_IO, F2FS_BLKSIZE);

		lock_page(mpage);
		if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
						!PageUptodate(mpage))) {
			err = -EIO;
			f2fs_put_page(mpage, 1);
			goto up_out;
		}
	}

	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, type, NULL, SEQ_NONE);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		f2fs_put_page(mpage, 1);
		goto recover_block;
	}

	/* write target block */
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
	memcpy(page_address(fio.encrypted_page),
				page_address(mpage), PAGE_SIZE);
	f2fs_put_page(mpage, 1);
	invalidate_mapping_pages(META_MAPPING(fio.sbi),
				fio.old_blkaddr, fio.old_blkaddr);

	set_page_dirty(fio.encrypted_page);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);
	ClearPageError(page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);
	if (fio.retry) {
		err = -EAGAIN;
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
							true, true, true);
up_out:
	if (lfs_mode)
		up_write(&fio.sbi->io_order_lock);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}

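/*
 * Migrate one data block through the regular page cache: in background GC
 * the page is only dirtied with cold status so writeback moves it later,
 * while foreground GC writes it out synchronously right here.
 */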
static int move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct page *page;
	int err = 0;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (!check_valid_map(F2FS_I_SB(inode), segno, off)) {
		err = -ENOENT;
		goto out;
	}

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		err = -EAGAIN;
		goto out;
	}
	if (f2fs_is_pinned_file(inode)) {
		if (gc_type == FG_GC)
			f2fs_pin_file_control(inode, true);
		err = -EAGAIN;
		goto out;
	}

	if (gc_type == BG_GC) {
		if (PageWriteback(page)) {
			err = -EAGAIN;
			goto out;
		}
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);

retry:
		f2fs_wait_on_page_writeback(page, DATA, true, true);

		set_page_dirty(page);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_cold_data(page);
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC,
						DEFAULT_IO_TIMEOUT);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity. If the block is valid, it is copied with
 * cold status and the parent node is updated.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type,
		bool force_migrate)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	int submitted = 0;
	unsigned int usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < usable_blks_in_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/*
		 * Stop BG_GC if there are not enough free sections.
		 * Also stop GC if the segment became fully valid due to a
		 * race with SSR block allocation.
		 */
		if ((gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) ||
			(!force_migrate && get_valid_blocks(sbi, segno, true) ==
							BLKS_PER_SEC(sbi)))
			return submitted;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode) ||
					special_file(inode->i_mode)) {
				set_sbi_flag(sbi, SBI_NEED_FSCK);
				continue;
			}

			if (!down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_post_read_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_page = f2fs_get_read_data_page(inode,
						start_bidx, REQ_RAHEAD, true);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;
			int err;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->i_gc_rwsem[READ])) {
					sbi->skipped_gc_rwsem++;
					continue;
				}
				if (!down_write_trylock(
						&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					up_write(&fi->i_gc_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				err = move_data_block(inode, start_bidx,
							gc_type, segno, off);
			else
				err = move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (!err && (gc_type == FG_GC ||
					f2fs_post_read_required(inode)))
				submitted++;

			if (locked) {
				up_write(&fi->i_gc_rwsem[WRITE]);
				up_write(&fi->i_gc_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;

	return submitted;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS, 0);
	up_write(&sit_i->sentry_lock);
	return ret;
}

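/*
 * Collect one section: pin all its summary pages first, then migrate each
 * victim segment's node or data blocks according to its summary type.
 * The return value counts segments that ended up with zero valid blocks
 * (nonzero only for FG_GC).
 */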
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type,
				bool force_migrate)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0, migrated = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;
	int submitted = 0;

	if (__is_large_section(sbi))
		end_segno = rounddown(end_segno, sbi->segs_per_sec);

	/*
	 * zone-capacity can be less than zone-size in zoned devices,
	 * resulting in fewer usable segments in the zone than expected,
	 * so calculate the end segno of the zone that can be
	 * garbage collected
	 */
	if (f2fs_sb_has_blkzoned(sbi))
		end_segno -= sbi->segs_per_sec -
					f2fs_usable_segs_in_sec(sbi, segno);

	sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);

	/* read ahead multiple SSA blocks that have contiguous addresses */
	if (__is_large_section(sbi))
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					end_segno - segno, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		if (IS_ERR(sum_page)) {
			int err = PTR_ERR(sum_page);

			end_segno = segno - 1;
			for (segno = start_segno; segno < end_segno; segno++) {
				sum_page = find_get_page(META_MAPPING(sbi),
						GET_SUM_BLOCK(sbi, segno));
				f2fs_put_page(sum_page, 0);
				f2fs_put_page(sum_page, 0);
			}
			return err;
		}
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0)
			goto freed;
		if (gc_type == BG_GC && __is_large_section(sbi) &&
				migrated >= sbi->migration_granularity)
			goto skip;
		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
			goto skip;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
				 segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_stop_checkpoint(sbi, false);
			goto skip;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			submitted += gc_node_segment(sbi, sum->entries, segno,
								gc_type);
		else
			submitted += gc_data_segment(sbi, sum->entries, gc_list,
							segno, gc_type,
							force_migrate);

		stat_inc_seg_count(sbi, type, gc_type);
		migrated++;

freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;

		if (__is_large_section(sbi) && segno + 1 < end_segno)
			sbi->next_victim_seg[gc_type] = segno + 1;
skip:
		f2fs_put_page(sum_page, 0);
	}

	if (submitted)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}

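/*
 * Main GC entry point. Callers acquire sbi->gc_lock before calling in, and
 * it is released here before returning; the loop may collect several
 * victims (and write a checkpoint) until enough free sections exist.
 */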
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
			bool background, bool force, unsigned int segno)
{
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0, seg_freed = 0, total_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	unsigned int init_segno = segno;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
	unsigned long long first_skipped;
	unsigned int skipped_round = 0, round = 0;

	trace_f2fs_gc_begin(sbi->sb, sync, background,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
	sbi->skipped_gc_rwsem = 0;
	first_skipped = last_skipped;
gc_more:
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree_segments below the
		 * given threshold, we can make them free by writing a
		 * checkpoint. Then we secure free segments which don't need
		 * foreground GC any more.
		 */
		if (prefree_segments(sbi) &&
				!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in the critical path */
	if (gc_type == BG_GC && !background) {
		ret = -EINVAL;
		goto stop;
	}
	ret = __get_victim(sbi, &segno, gc_type);
	if (ret)
		goto stop;

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type, force);
	if (gc_type == FG_GC &&
		seg_freed == f2fs_usable_segs_in_sec(sbi, segno))
		sec_freed++;
	total_freed += seg_freed;

	if (gc_type == FG_GC) {
		if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
						sbi->skipped_gc_rwsem)
			skipped_round++;
		last_skipped = sbi->skipped_atomic_files[FG_GC];
		round++;
	}

	if (gc_type == FG_GC && seg_freed)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (sync)
		goto stop;

	if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
		if (skipped_round <= MAX_SKIP_GC_COUNT ||
					skipped_round * 2 < round) {
			segno = NULL_SEGNO;
			goto gc_more;
		}

		if (first_skipped < last_skipped &&
				(last_skipped - first_skipped) >
					sbi->skipped_gc_rwsem) {
			f2fs_drop_inmem_pages_all(sbi, true);
			segno = NULL_SEGNO;
			goto gc_more;
		}
		if (gc_type == FG_GC && !is_sbi_flag_set(sbi, SBI_CP_DISABLED))
			ret = f2fs_write_checkpoint(sbi, &cpc);
	}
stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	up_write(&sbi->gc_lock);

	put_gc_inode(&gc_list);

	if (sync && !ret)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

int __init f2fs_create_garbage_collection_cache(void)
{
	victim_entry_slab = f2fs_kmem_cache_create("f2fs_victim_entry",
					sizeof(struct victim_entry));
	if (!victim_entry_slab)
		return -ENOMEM;
	return 0;
}

void f2fs_destroy_garbage_collection_cache(void)
{
	kmem_cache_destroy(victim_entry_slab);
}

static void init_atgc_management(struct f2fs_sb_info *sbi)
{
	struct atgc_management *am = &sbi->am;

	if (test_opt(sbi, ATGC) &&
		SIT_I(sbi)->elapsed_time >= DEF_GC_THREAD_AGE_THRESHOLD)
		am->atgc_enabled = true;

	am->root = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&am->victim_list);
	am->victim_count = 0;

	am->candidate_ratio = DEF_GC_THREAD_CANDIDATE_RATIO;
	am->max_candidate_count = DEF_GC_THREAD_MAX_CANDIDATE_COUNT;
	am->age_weight = DEF_GC_THREAD_AGE_WEIGHT;
	am->age_threshold = DEF_GC_THREAD_AGE_THRESHOLD;
}

void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;

	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give warm/cold data area from slower device */
	if (f2fs_is_multi_device(sbi) && !__is_large_section(sbi))
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;

	init_atgc_management(sbi);
}

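/*
 * Evacuate all blocks in the last "secs" sections so they can be trimmed
 * off by a subsequent shrink: temporarily exclude the range from
 * MAIN_SECS(), move cursegs out of it, then force-GC each section in it.
 */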
static int free_segment_range(struct f2fs_sb_info *sbi,
				unsigned int secs, bool gc_only)
{
	unsigned int segno, next_inuse, start, end;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	int gc_mode, gc_type;
	int err = 0;
	int type;

	/* Force block allocation for GC */
	MAIN_SECS(sbi) -= secs;
	start = MAIN_SECS(sbi) * sbi->segs_per_sec;
	end = MAIN_SEGS(sbi) - 1;

	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
	for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
		if (SIT_I(sbi)->last_victim[gc_mode] >= start)
			SIT_I(sbi)->last_victim[gc_mode] = 0;

	for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
		if (sbi->next_victim_seg[gc_type] >= start)
			sbi->next_victim_seg[gc_type] = NULL_SEGNO;
	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);

	/* Move out cursegs from the target range */
	for (type = CURSEG_HOT_DATA; type < NR_CURSEG_PERSIST_TYPE; type++)
		f2fs_allocate_segment_for_resize(sbi, type, start, end);

	/* do GC to move out valid blocks in the range */
	for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
		struct gc_inode_list gc_list = {
			.ilist = LIST_HEAD_INIT(gc_list.ilist),
			.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
		};

		do_garbage_collect(sbi, segno, &gc_list, FG_GC, true);
		put_gc_inode(&gc_list);

		if (!gc_only && get_valid_blocks(sbi, segno, true)) {
			err = -EAGAIN;
			goto out;
		}
		if (fatal_signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
	if (gc_only)
		goto out;

	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err)
		goto out;

	next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
	if (next_inuse <= end) {
		f2fs_err(sbi, "segno %u should be free but still inuse!",
			 next_inuse);
		f2fs_bug_on(sbi, 1);
	}
out:
	MAIN_SECS(sbi) += secs;
	return err;
}

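/*
 * Apply a resize delta of "secs" sections (negative to shrink, as in the
 * f2fs_resize_fs() callers below) to the on-disk super block counters,
 * under sb_lock.
 */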
static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
{
	struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
	int section_count;
	int segment_count;
	int segment_count_main;
	long long block_count;
	int segs = secs * sbi->segs_per_sec;

	down_write(&sbi->sb_lock);

	section_count = le32_to_cpu(raw_sb->section_count);
	segment_count = le32_to_cpu(raw_sb->segment_count);
	segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
	block_count = le64_to_cpu(raw_sb->block_count);

	raw_sb->section_count = cpu_to_le32(section_count + secs);
	raw_sb->segment_count = cpu_to_le32(segment_count + segs);
	raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
	raw_sb->block_count = cpu_to_le64(block_count +
					(long long)segs * sbi->blocks_per_seg);
	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		int dev_segs =
			le32_to_cpu(raw_sb->devs[last_dev].total_segments);

		raw_sb->devs[last_dev].total_segments =
						cpu_to_le32(dev_segs + segs);
	}

	up_write(&sbi->sb_lock);
}

static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
{
	int segs = secs * sbi->segs_per_sec;
	long long blks = (long long)segs * sbi->blocks_per_seg;
	long long user_block_count =
				le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);

	SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
	MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
	MAIN_SECS(sbi) += secs;
	FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
	FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
	F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count + blks);

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;

		FDEV(last_dev).total_segments =
				(int)FDEV(last_dev).total_segments + segs;
		FDEV(last_dev).end_blk =
				(long long)FDEV(last_dev).end_blk + blks;
#ifdef CONFIG_BLK_DEV_ZONED
		FDEV(last_dev).nr_blkz = (int)FDEV(last_dev).nr_blkz +
					(int)(blks >> sbi->log_blocks_per_blkz);
#endif
	}
}

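/*
 * Shrink the filesystem to "block_count" blocks: validate alignment and
 * free space, evacuate the tail sections with free_segment_range(), then
 * commit the new geometry via super block and checkpoint updates, rolling
 * back on failure.
 */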
int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
{
	__u64 old_block_count, shrunk_blocks;
	struct cp_control cpc = { CP_RESIZE, 0, 0, 0 };
	unsigned int secs;
	int err = 0;
	__u32 rem;

	old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
	if (block_count > old_block_count)
		return -EINVAL;

	if (f2fs_is_multi_device(sbi)) {
		int last_dev = sbi->s_ndevs - 1;
		__u64 last_segs = FDEV(last_dev).total_segments;

		if (block_count + last_segs * sbi->blocks_per_seg <=
								old_block_count)
			return -EINVAL;
	}

	/* new fs size should align to section size */
	div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
	if (rem)
		return -EINVAL;

	if (block_count == old_block_count)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_err(sbi, "Should run fsck to repair first.");
		return -EFSCORRUPTED;
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
		f2fs_err(sbi, "Checkpoint should be enabled.");
		return -EINVAL;
	}

	shrunk_blocks = old_block_count - block_count;
	secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));

	/* stop other GC */
	if (!down_write_trylock(&sbi->gc_lock))
		return -EAGAIN;

	/* stop CP to protect MAIN_SEC in free_segment_range */
	f2fs_lock_op(sbi);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	spin_unlock(&sbi->stat_lock);

	if (err)
		goto out_unlock;

	err = free_segment_range(sbi, secs, true);

out_unlock:
	f2fs_unlock_op(sbi);
	up_write(&sbi->gc_lock);
	if (err)
		return err;

	set_sbi_flag(sbi, SBI_IS_RESIZEFS);

	freeze_super(sbi->sb);
	down_write(&sbi->gc_lock);
	mutex_lock(&sbi->cp_mutex);

	spin_lock(&sbi->stat_lock);
	if (shrunk_blocks + valid_user_blocks(sbi) +
		sbi->current_reserved_blocks + sbi->unusable_block_count +
		F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
		err = -ENOSPC;
	else
		sbi->user_block_count -= shrunk_blocks;
	spin_unlock(&sbi->stat_lock);
	if (err)
		goto out_err;

	err = free_segment_range(sbi, secs, false);
	if (err)
		goto recover_out;

	update_sb_metadata(sbi, -secs);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		update_sb_metadata(sbi, secs);
		goto recover_out;
	}

	update_fs_metadata(sbi, -secs);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	err = f2fs_write_checkpoint(sbi, &cpc);
	if (err) {
		update_fs_metadata(sbi, secs);
		update_sb_metadata(sbi, secs);
		f2fs_commit_super(sbi, false);
	}
recover_out:
	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");

		spin_lock(&sbi->stat_lock);
		sbi->user_block_count += shrunk_blocks;
		spin_unlock(&sbi->stat_lock);
	}
out_err:
	mutex_unlock(&sbi->cp_mutex);
	up_write(&sbi->gc_lock);
	thaw_super(sbi->sb);
	clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
	return err;
}