/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

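/*
 * Background GC kthread. It sleeps for an adaptive interval, then, when
 * the filesystem is idle (or gc_mode is GC_URGENT), runs one round of
 * background garbage collection and rebalances in-memory metadata. The
 * sleep interval shrinks when there are enough invalid blocks to reclaim,
 * and grows when the device is busy or nothing was collectible.
 */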
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		/* give it a try once */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze())
			continue;
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}

		if (!sb_start_write_trylock(sbi->sb))
			continue;

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs frequently, because
		 * some segments may be invalidated soon afterwards by user
		 * updates or deletions. So we wait a while to accumulate
		 * dirty segments.
		 */
		if (sbi->gc_mode == GC_URGENT) {
			wait_ms = gc_th->urgent_sleep_time;
			mutex_lock(&sbi->gc_mutex);
			goto do_gc;
		}

		if (!mutex_trylock(&sbi->gc_mutex))
			goto next;

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balance f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);
next:
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

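/*
 * Allocate the GC control structure, initialize the default sleep times
 * and wait queue, and launch the per-device "f2fs_gc-major:minor" kthread.
 * On failure the structure is freed and an errno is returned.
 */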
int f2fs_start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

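/*
 * Map the requested GC type to a victim selection algorithm: background
 * GC defaults to cost-benefit (GC_CB), foreground GC to greedy
 * (GC_GREEDY), and a user-configured sbi->gc_mode (idle/urgent modes)
 * overrides both.
 */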
static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	switch (sbi->gc_mode) {
	case GC_IDLE_CB:
		gc_mode = GC_CB;
		break;
	case GC_IDLE_GREEDY:
	case GC_URGENT:
		gc_mode = GC_GREEDY;
		break;
	}
	return gc_mode;
}

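/*
 * Fill in the victim selection policy: SSR allocation scans the per-type
 * dirty segments greedily one segment at a time, while LFS allocation
 * scans all dirty segments in section-sized units using the gc_mode
 * chosen by select_gc_type(). The search width and start offset are
 * then bounded below.
 */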
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	/* we need to check every dirty segment in the FG_GC case */
	if (gc_type != FG_GC &&
			(sbi->gc_mode != GC_URGENT) &&
			p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select the beginning hot/small space first in no_heap mode */
	if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

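/*
 * Upper bound used to initialize p->min_cost before the scan. For greedy
 * mode this is twice the maximum possible number of valid blocks in a
 * section, so any real candidate compares strictly lower; for
 * cost-benefit mode any value below UINT_MAX wins.
 */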
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can reuse victim sections that were
	 * selected by background GC before. Those sections are guaranteed
	 * to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;
		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

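/*
 * Cost-benefit cost of a section, following the classic LFS
 * cost-benefit policy: with utilization u (in percent) and a normalized
 * age, the benefit is roughly age * (100 - u) / (100 + u). Since the
 * victim search minimizes cost, the benefit is subtracted from UINT_MAX
 * so that the most beneficial section yields the smallest cost.
 */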
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

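/*
 * Per-segment cost used by the victim scan: SSR prefers segments with
 * few blocks valid as of the last checkpoint; LFS uses either the raw
 * valid block count (greedy) or the cost-benefit metric above.
 */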
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else
		return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just selects a victim segment
 * and does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from the dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment = MAIN_SEGS(sbi);
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (*result != NULL_SEGNO) {
		if (get_valid_blocks(sbi, *result, false) &&
			!sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			p.min_segno = *result;
		goto out;
	}

	if (p.max_search == 0)
		goto out;

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment = sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] = last_victim + 1;
			else
				sm->last_victim[p.gc_mode] = segno + 1;
			sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi);
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
out:
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

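/*
 * gc_inode_list helpers: inodes touched during data GC are cached in a
 * list plus a radix tree keyed by ino, so each inode is looked up (and
 * its reference taken) only once per GC pass and released in bulk by
 * put_gc_inode().
 */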
static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(f2fs_inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(f2fs_inode_entry_slab, ie);
	}
}

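/*
 * Return whether the block at @offset within @segno is still marked
 * valid in the current SIT bitmap; blocks can be invalidated while GC
 * is in flight, so callers recheck this around page operations.
 */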
static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address recorded in the summary with
 * the one in the NAT. If they match, the node is migrated with cold
 * status; otherwise the (invalid) node is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;
	bool fggc = (gc_type == FG_GC);

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	if (fggc && phase == 2)
		atomic_inc(&sbi->wb_sync_req[NODE]);

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = f2fs_get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during f2fs_get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (f2fs_get_node_info(sbi, nid, &ni)) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		f2fs_move_node_page(node_page, gc_type);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;

	if (fggc)
		atomic_dec(&sbi->wb_sync_req[NODE]);
}

/*
 * Calculate the start block index corresponding to the given node offset.
 * Be careful: the caller must pass a node offset that refers to a direct
 * node block. Passing an offset that points to any other type of node
 * block, such as an indirect or double indirect node block, is a bug in
 * the caller.
 */
block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}

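/*
 * Check whether the data block at @blkaddr is still referenced by its
 * owning node: re-read the node page named in the summary, verify the
 * node version against the NAT, and compare the block address stored in
 * the node with @blkaddr. The node offset is reported through @nofs.
 */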
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	if (f2fs_get_node_info(sbi, nid, dni)) {
		f2fs_put_page(node_page, 1);
		return false;
	}

	if (sum->version != dni->version) {
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: valid data with mismatched node version.",
				__func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	if (f2fs_check_nid_range(sbi, dni->ino)) {
		/* put the node page here too, to avoid a reference leak */
		f2fs_put_page(node_page, 1);
		return false;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}

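/*
 * Read one data block of an inode that requires post-read processing
 * (e.g. encryption) into the META_MAPPING page cache ahead of time, so
 * that the later move_data_block() call finds it without blocking on I/O.
 */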
static int ra_data_block(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei = {0, 0, 0};
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	int err;

	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page)
		return -ENOMEM;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_page;
	f2fs_put_dnode(&dn);

	if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
						DATA_GENERIC))) {
		err = -EFSCORRUPTED;
		goto put_page;
	}
got_it:
	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
					dn.data_blkaddr,
					FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto put_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_encrypted_page;
	f2fs_put_page(fio.encrypted_page, 0);
	f2fs_put_page(page, 1);
	return 0;
put_encrypted_page:
	f2fs_put_page(fio.encrypted_page, 1);
put_page:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * Move data block via META_MAPPING while keeping locked data page.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static void move_data_block(struct inode *inode, block_t bidx,
				int gc_type, unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
		.retry = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page, *mpage;
	block_t newaddr;
	int err;
	bool lfs_mode = test_opt(fio.sbi, LFS);

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		goto out;
	}

	if (f2fs_is_pinned_file(inode)) {
		f2fs_pin_file_control(inode, true);
		goto out;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		goto put_out;
	}

	/*
	 * Don't cache encrypted data in the meta inode until the previous
	 * dirty data has been written back, to avoid a race between GC
	 * and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);

	err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
	if (err)
		goto put_out;

	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	if (lfs_mode)
		down_write(&fio.sbi->io_order_lock);

	f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, CURSEG_COLD_DATA, NULL, false);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto recover_block;
	}

	mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
					fio.old_blkaddr, FGP_LOCK, GFP_NOFS);
	if (mpage) {
		bool updated = false;

		if (PageUptodate(mpage)) {
			memcpy(page_address(fio.encrypted_page),
					page_address(mpage), PAGE_SIZE);
			updated = true;
		}
		f2fs_put_page(mpage, 1);
		invalidate_mapping_pages(META_MAPPING(fio.sbi),
					fio.old_blkaddr, fio.old_blkaddr);
		if (updated)
			goto write_page;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
		err = -EIO;
		goto put_page_out;
	}
	if (unlikely(!PageUptodate(fio.encrypted_page))) {
		err = -EIO;
		goto put_page_out;
	}

write_page:
	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);
	ClearPageError(page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	f2fs_submit_page_write(&fio);
	if (fio.retry) {
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (lfs_mode)
		up_write(&fio.sbi->io_order_lock);
	if (err)
		f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
}

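/*
 * Migrate one plaintext data block through the regular page cache:
 * background GC just marks the page dirty and cold so the writeback
 * path relocates it, while foreground GC writes it out synchronously,
 * retrying on transient -ENOMEM.
 */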
static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
							unsigned int segno, int off)
{
	struct page *page;

	page = f2fs_get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode)) {
		F2FS_I(inode)->i_gc_failures[GC_FAILURE_ATOMIC]++;
		F2FS_I_SB(inode)->skipped_atomic_files[gc_type]++;
		goto out;
	}
	if (f2fs_is_pinned_file(inode)) {
		if (gc_type == FG_GC)
			f2fs_pin_file_control(inode, true);
		goto out;
	}

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);
		int err;

retry:
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA, true);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			f2fs_remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = f2fs_do_write_data_page(&fio);
		if (err) {
			clear_cold_data(page);
			if (err == -ENOMEM) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto retry;
			}
			if (is_dirty)
				set_page_dirty(page);
		}
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity. If the block is valid, it is copied with
 * cold status and the parent node is updated.
 * If the parent node is not valid or the data block address does not
 * match, the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/* stop BG_GC if there are not enough free sections */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			f2fs_ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino and check its validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			f2fs_ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode) ||
					special_file(inode->i_mode))
				continue;

			if (!down_write_trylock(
				&F2FS_I(inode)->i_gc_rwsem[WRITE])) {
				iput(inode);
				sbi->skipped_gc_rwsem++;
				continue;
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
								ofs_in_node;

			if (f2fs_post_read_required(inode)) {
				int err = ra_data_block(inode, start_bidx);

				up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
				if (err) {
					iput(inode);
					continue;
				}
				add_gc_inode(gc_list, inode);
				continue;
			}

			data_page = f2fs_get_read_data_page(inode,
						start_bidx, REQ_RAHEAD, true);
			up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->i_gc_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->i_gc_rwsem[WRITE])) {
					sbi->skipped_gc_rwsem++;
					up_write(&fi->i_gc_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = f2fs_start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_post_read_required(inode))
				move_data_block(inode, start_bidx, gc_type,
								segno, off);
			else
				move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (locked) {
				up_write(&fi->i_gc_rwsem[WRITE]);
				up_write(&fi->i_gc_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;
}

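/*
 * Pick the next GC victim under sentry_lock, using the registered victim
 * selection ops (get_victim_by_default above) in LFS allocation mode.
 */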
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
					      NO_CHECK_TYPE, LFS);
	up_write(&sit_i->sentry_lock);
	return ret;
}

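/*
 * Collect one victim section: read ahead its summary blocks, then walk
 * each segment and migrate live node or data blocks according to the
 * summary type. Returns the number of segments that ended up with zero
 * valid blocks, which only foreground GC counts as freed.
 */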
static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;

	/* readahead multiple SSA blocks with contiguous addresses */
	if (sbi->segs_per_sec > 1)
		f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					sbi->segs_per_sec, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = f2fs_get_sum_page(sbi, segno++);
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0)
			goto freed;
		if (!PageUptodate(sum_page) || unlikely(f2fs_cp_error(sbi)))
			goto next;

		sum = page_address(sum_page);
		if (type != GET_SUM_TYPE((&sum->footer))) {
			f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
				"type [%d, %d] in SSA and SIT",
				segno, type, GET_SUM_TYPE((&sum->footer)));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			goto next;
		}

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			gc_node_segment(sbi, sum->entries, segno, gc_type);
		else
			gc_data_segment(sbi, sum->entries, gc_list, segno,
								gc_type);

		stat_inc_seg_count(sbi, type, gc_type);

freed:
		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;
next:
		f2fs_put_page(sum_page, 0);
	}

	if (gc_type == FG_GC)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}

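/*
 * Main GC entry point, called with sbi->gc_mutex held (it is released
 * before returning). @sync requests a single synchronous FG_GC round;
 * otherwise BG_GC may escalate to FG_GC and loop until enough free
 * sections exist, dropping in-memory atomic pages or writing a
 * checkpoint when rounds keep getting skipped by atomic writes or lock
 * contention.
 */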
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
			bool background, unsigned int segno)
{
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0, seg_freed = 0, total_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	unsigned int init_segno = segno;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
	};
	unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
	unsigned long long first_skipped;
	unsigned int skipped_round = 0, round = 0;

	trace_f2fs_gc_begin(sbi->sb, sync, background,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
	sbi->skipped_gc_rwsem = 0;
	first_skipped = last_skipped;
gc_more:
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree_segments below the
		 * given threshold, we can make them free by checkpoint.
		 * Then, we secure free segments which don't need FG_GC
		 * any more.
		 */
		if (prefree_segments(sbi)) {
			ret = f2fs_write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && !background) {
		ret = -EINVAL;
		goto stop;
	}
	if (!__get_victim(sbi, &segno, gc_type)) {
		ret = -ENODATA;
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
	if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
		sec_freed++;
	total_freed += seg_freed;

	if (gc_type == FG_GC) {
		if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
						sbi->skipped_gc_rwsem)
			skipped_round++;
		last_skipped = sbi->skipped_atomic_files[FG_GC];
		round++;
	}

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (sync)
		goto stop;

	if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
		if (skipped_round <= MAX_SKIP_GC_COUNT ||
					skipped_round * 2 < round) {
			segno = NULL_SEGNO;
			goto gc_more;
		}

		if (first_skipped < last_skipped &&
				(last_skipped - first_skipped) >
						sbi->skipped_gc_rwsem) {
			f2fs_drop_inmem_pages_all(sbi, true);
			segno = NULL_SEGNO;
			goto gc_more;
		}
		if (gc_type == FG_GC)
			ret = f2fs_write_checkpoint(sbi, &cpc);
	}
stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync && !ret)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

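/*
 * One-time GC setup at mount: install the default victim selection ops,
 * set the pinned-file failure threshold, and on multi-device layouts
 * start ALLOC_NEXT victim scanning past the first (presumably faster)
 * device so warm/cold data lands on the slower ones.
 */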
void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;

	sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;

	/* give warm/cold data area from slower device */
	if (f2fs_is_multi_device(sbi) && sbi->segs_per_sec == 1)
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
}