/*
 * fs/f2fs/segment.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/blkdev.h>

/* constant macro */
#define NULL_SEGNO			((unsigned int)(~0))
#define NULL_SECNO			((unsigned int)(~0))

#define DEF_RECLAIM_PREFREE_SEGMENTS	5	/* 5% over total segments */
#define DEF_MAX_RECLAIM_PREFREE_SEGMENTS	4096	/* 8GB in maximum */

#define F2FS_MIN_SEGMENTS	9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */

/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno)	((segno) - (free_i)->start_segno)
#define GET_R2L_SEGNO(free_i, segno)	((segno) + (free_i)->start_segno)

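/*
 * Worked example with a hypothetical layout (not taken from any real image):
 * if the main area starts at segment 512 (free_i->start_segno == 512),
 * logical segment 612 maps to relative segment GET_L2R_SEGNO() =
 * 612 - 512 = 100, and GET_R2L_SEGNO() converts it back.
 */
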
#define IS_DATASEG(t)	((t) <= CURSEG_COLD_DATA)
#define IS_NODESEG(t)	((t) >= CURSEG_HOT_NODE)

#define IS_HOT(t)	((t) == CURSEG_HOT_NODE || (t) == CURSEG_HOT_DATA)
#define IS_WARM(t)	((t) == CURSEG_WARM_NODE || (t) == CURSEG_WARM_DATA)
#define IS_COLD(t)	((t) == CURSEG_COLD_NODE || (t) == CURSEG_COLD_DATA)

#define IS_CURSEG(sbi, seg)						\
	(((seg) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) ||	\
	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) ||	\
	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) ||	\
	 ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) ||	\
	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) ||	\
	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))

#define IS_CURSEC(sbi, secno)						\
	(((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno /		\
	  (sbi)->segs_per_sec) ||	\
	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno /		\
	  (sbi)->segs_per_sec) ||	\
	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno /		\
	  (sbi)->segs_per_sec) ||	\
	 ((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno /		\
	  (sbi)->segs_per_sec) ||	\
	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno /		\
	  (sbi)->segs_per_sec) ||	\
	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno /		\
	  (sbi)->segs_per_sec))

#define MAIN_BLKADDR(sbi)	(SM_I(sbi)->main_blkaddr)
#define SEG0_BLKADDR(sbi)	(SM_I(sbi)->seg0_blkaddr)

#define MAIN_SEGS(sbi)	(SM_I(sbi)->main_segments)
#define MAIN_SECS(sbi)	((sbi)->total_sections)

#define TOTAL_SEGS(sbi)	(SM_I(sbi)->segment_count)
#define TOTAL_BLKS(sbi)	(TOTAL_SEGS(sbi) << (sbi)->log_blocks_per_seg)

#define MAX_BLKADDR(sbi)	(SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
#define SEGMENT_SIZE(sbi)	(1ULL << ((sbi)->log_blocksize +	\
					(sbi)->log_blocks_per_seg))

#define START_BLOCK(sbi, segno)	(SEG0_BLKADDR(sbi) +			\
	 (GET_R2L_SEGNO(FREE_I(sbi), segno) << (sbi)->log_blocks_per_seg))

#define NEXT_FREE_BLKADDR(sbi, curseg)					\
	(START_BLOCK(sbi, (curseg)->segno) + (curseg)->next_blkoff)

#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)	((blk_addr) - SEG0_BLKADDR(sbi))
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> (sbi)->log_blocks_per_seg)
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))

#define GET_SEGNO(sbi, blk_addr)					\
	((((blk_addr) == NULL_ADDR) || ((blk_addr) == NEW_ADDR)) ?	\
	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),			\
		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#define BLKS_PER_SEC(sbi)					\
	((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
#define GET_SEC_FROM_SEG(sbi, segno)				\
	((segno) / (sbi)->segs_per_sec)
#define GET_SEG_FROM_SEC(sbi, secno)				\
	((secno) * (sbi)->segs_per_sec)
#define GET_ZONE_FROM_SEC(sbi, secno)				\
	((secno) / (sbi)->secs_per_zone)
#define GET_ZONE_FROM_SEG(sbi, segno)				\
	GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))

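/*
 * Illustrative example of the geometry math above, using hypothetical
 * values (not taken from any particular superblock): assuming 512 blocks
 * per segment, 4 segments per section and 2 sections per zone, segment
 * #10 lies in section 10 / 4 = 2 (GET_SEC_FROM_SEG) and zone 2 / 2 = 1
 * (GET_ZONE_FROM_SEG), and one section spans BLKS_PER_SEC() =
 * 4 * 512 = 2048 blocks.
 */
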
#define GET_SUM_BLOCK(sbi, segno)				\
	((sbi)->sm_info->ssa_blkaddr + (segno))

#define GET_SUM_TYPE(footer) ((footer)->entry_type)
#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = (type))

#define SIT_ENTRY_OFFSET(sit_i, segno)					\
	((segno) % (sit_i)->sents_per_block)
#define SIT_BLOCK_OFFSET(segno)					\
	((segno) / SIT_ENTRY_PER_BLOCK)
#define	START_SEGNO(segno)		\
	(SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
#define SIT_BLK_CNT(sbi)			\
	((MAIN_SEGS(sbi) + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK)
#define f2fs_bitmap_size(nr)			\
	(BITS_TO_LONGS(nr) * sizeof(unsigned long))

#define SECTOR_FROM_BLOCK(blk_addr)					\
	(((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
#define SECTOR_TO_BLOCK(sectors)					\
	((sectors) >> F2FS_LOG_SECTORS_PER_BLOCK)

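/*
 * Worked example (assuming the common case of 4KB f2fs blocks and 512B
 * device sectors, i.e. F2FS_LOG_SECTORS_PER_BLOCK == 3): block address 10
 * starts at sector SECTOR_FROM_BLOCK(10) = 10 << 3 = 80, and
 * SECTOR_TO_BLOCK(80) = 80 >> 3 = 10 recovers the block address.
 */
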
/*
 * Indicates a block allocation direction: RIGHT and LEFT.
 * RIGHT means allocating new sections towards the end of the volume.
 * LEFT means the opposite direction.
 */
enum {
	ALLOC_RIGHT = 0,
	ALLOC_LEFT
};

/*
 * In the victim_sel_policy->alloc_mode, there are two block allocation modes.
 * LFS writes data sequentially with cleaning operations.
 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
 */
enum {
	LFS = 0,
	SSR
};

/*
 * In the victim_sel_policy->gc_mode, there are two gc, aka cleaning, modes.
 * GC_CB is based on cost-benefit algorithm.
 * GC_GREEDY is based on greedy algorithm.
 */
enum {
	GC_CB = 0,
	GC_GREEDY,
	ALLOC_NEXT,
	FLUSH_DEVICE,
	MAX_GC_POLICY,
};

/*
 * BG_GC means the background cleaning job.
 * FG_GC means the on-demand cleaning job.
 * FORCE_FG_GC means an on-demand cleaning job issued by the background GC thread.
 */
enum {
	BG_GC = 0,
	FG_GC,
	FORCE_FG_GC,
};

/* for a function parameter to select a victim segment */
struct victim_sel_policy {
	int alloc_mode;			/* LFS or SSR */
	int gc_mode;			/* GC_CB or GC_GREEDY */
	unsigned long *dirty_segmap;	/* dirty segment bitmap */
	unsigned int max_search;	/* maximum # of segments to search */
	unsigned int offset;		/* last scanned bitmap offset */
	unsigned int ofs_unit;		/* bitmap search unit */
	unsigned int min_cost;		/* minimum cost */
	unsigned int min_segno;		/* segment # having min. cost */
};

struct seg_entry {
	unsigned int type:6;		/* segment type like CURSEG_XXX_TYPE */
	unsigned int valid_blocks:10;	/* # of valid blocks */
	unsigned int ckpt_valid_blocks:10;	/* # of valid blocks last cp */
	unsigned int padding:6;		/* padding */
	unsigned char *cur_valid_map;	/* validity bitmap of blocks */
#ifdef CONFIG_F2FS_CHECK_FS
	unsigned char *cur_valid_map_mir;	/* mirror of current valid bitmap */
#endif
	/*
	 * # of valid blocks and the validity bitmap stored in the last
	 * checkpoint pack. This information is used by the SSR mode.
	 */
	unsigned char *ckpt_valid_map;	/* validity bitmap of blocks last cp */
	unsigned char *discard_map;
	unsigned long long mtime;	/* modification time of the segment */
};

struct sec_entry {
	unsigned int valid_blocks;	/* # of valid blocks in a section */
};

struct segment_allocation {
	void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
};

/*
 * This value is set in a page's private data to indicate that the page is
 * atomically written and is on the inmem_pages list.
 */
#define ATOMIC_WRITTEN_PAGE		((unsigned long)-1)
#define DUMMY_WRITTEN_PAGE		((unsigned long)-2)

#define IS_ATOMIC_WRITTEN_PAGE(page)			\
		(page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
#define IS_DUMMY_WRITTEN_PAGE(page)			\
		(page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE)

struct inmem_pages {
	struct list_head list;
	struct page *page;
	block_t old_addr;		/* for revoking when fail to commit */
};

struct sit_info {
	const struct segment_allocation *s_ops;

	block_t sit_base_addr;		/* start block address of SIT area */
	block_t sit_blocks;		/* # of blocks used by SIT area */
	block_t written_valid_blocks;	/* # of valid blocks in main area */
	char *sit_bitmap;		/* SIT bitmap pointer */
#ifdef CONFIG_F2FS_CHECK_FS
	char *sit_bitmap_mir;		/* SIT bitmap mirror */
#endif
	unsigned int bitmap_size;	/* SIT bitmap size */

	unsigned long *tmp_map;			/* bitmap for temporary use */
	unsigned long *dirty_sentries_bitmap;	/* bitmap for dirty sentries */
	unsigned int dirty_sentries;		/* # of dirty sentries */
	unsigned int sents_per_block;		/* # of SIT entries per block */
	struct rw_semaphore sentry_lock;	/* to protect SIT cache */
	struct seg_entry *sentries;		/* SIT segment-level cache */
	struct sec_entry *sec_entries;		/* SIT section-level cache */

	/* for cost-benefit algorithm in cleaning procedure */
	unsigned long long elapsed_time;	/* elapsed time after mount */
	unsigned long long mounted_time;	/* mount time */
	unsigned long long min_mtime;		/* min. modification time */
	unsigned long long max_mtime;		/* max. modification time */

	unsigned int last_victim[MAX_GC_POLICY]; /* last victim segment # */
};

struct free_segmap_info {
	unsigned int start_segno;	/* start segment number logically */
	unsigned int free_segments;	/* # of free segments */
	unsigned int free_sections;	/* # of free sections */
	spinlock_t segmap_lock;		/* free segmap lock */
	unsigned long *free_segmap;	/* free segment bitmap */
	unsigned long *free_secmap;	/* free section bitmap */
};

/* Notice: The order of dirty type is the same as CURSEG_XXX in f2fs.h */
enum dirty_type {
	DIRTY_HOT_DATA,		/* dirty segments assigned as hot data logs */
	DIRTY_WARM_DATA,	/* dirty segments assigned as warm data logs */
	DIRTY_COLD_DATA,	/* dirty segments assigned as cold data logs */
	DIRTY_HOT_NODE,		/* dirty segments assigned as hot node logs */
	DIRTY_WARM_NODE,	/* dirty segments assigned as warm node logs */
	DIRTY_COLD_NODE,	/* dirty segments assigned as cold node logs */
	DIRTY,			/* to count # of dirty segments */
	PRE,			/* to count # of entirely obsolete segments */
	NR_DIRTY_TYPE
};

struct dirty_seglist_info {
	const struct victim_selection *v_ops;	/* victim selection operation */
	unsigned long *dirty_segmap[NR_DIRTY_TYPE];
	struct mutex seglist_lock;		/* lock for segment bitmaps */
	int nr_dirty[NR_DIRTY_TYPE];		/* # of dirty segments */
	unsigned long *victim_secmap;		/* background GC victims */
};

/* victim selection function for cleaning and SSR */
struct victim_selection {
	int (*get_victim)(struct f2fs_sb_info *, unsigned int *,
							int, int, char);
};

/* for active log information */
struct curseg_info {
	struct mutex curseg_mutex;		/* lock for consistency */
	struct f2fs_summary_block *sum_blk;	/* cached summary block */
	struct rw_semaphore journal_rwsem;	/* protect journal area */
	struct f2fs_journal *journal;		/* cached journal info */
	unsigned char alloc_type;		/* current allocation type */
	unsigned int segno;			/* current segment number */
	unsigned short next_blkoff;		/* next block offset to write */
	unsigned int zone;			/* current zone number */
	unsigned int next_segno;		/* preallocated segment */
};

struct sit_entry_set {
	struct list_head set_list;	/* link with all sit sets */
	unsigned int start_segno;	/* start segno of sits in set */
	unsigned int entry_cnt;		/* the # of sit entries in set */
};

/*
 * inline functions
 */
static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
{
	return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
}

static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sentries[segno];
}

static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sec_entries[GET_SEC_FROM_SEG(sbi, segno)];
}

static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno, bool use_section)
{
	/*
	 * In order to get # of valid blocks in a section instantly from many
	 * segments, f2fs manages two counting structures separately.
	 */
	if (use_section && sbi->segs_per_sec > 1)
		return get_sec_entry(sbi, segno)->valid_blocks;
	else
		return get_seg_entry(sbi, segno)->valid_blocks;
}

static inline void seg_info_from_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	se->valid_blocks = GET_SIT_VBLOCKS(rs);
	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
	memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
#ifdef CONFIG_F2FS_CHECK_FS
	memcpy(se->cur_valid_map_mir, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
#endif
	se->type = GET_SIT_TYPE(rs);
	se->mtime = le64_to_cpu(rs->mtime);
}

static inline void seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
					se->valid_blocks;
	rs->vblocks = cpu_to_le16(raw_vblocks);
	memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->ckpt_valid_blocks = se->valid_blocks;
	rs->mtime = cpu_to_le64(se->mtime);
}

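/*
 * Worked example of the on-disk vblocks encoding above, assuming the usual
 * layout where the valid-block count occupies the low SIT_VBLOCKS_SHIFT (10)
 * bits and the segment type sits in the bits above it: a CURSEG_WARM_DATA
 * segment (type 1) with 300 valid blocks is stored as
 * raw_vblocks = (1 << 10) | 300 = 1324, and GET_SIT_VBLOCKS() /
 * GET_SIT_TYPE() split the two fields back apart when the entry is read.
 */
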
static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
		unsigned int max, unsigned int segno)
{
	unsigned int ret;
	spin_lock(&free_i->segmap_lock);
	ret = find_next_bit(free_i->free_segmap, max, segno);
	spin_unlock(&free_i->segmap_lock);
	return ret;
}

static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
	unsigned int next;

	spin_lock(&free_i->segmap_lock);
	clear_bit(segno, free_i->free_segmap);
	free_i->free_segments++;

	next = find_next_bit(free_i->free_segmap,
			start_segno + sbi->segs_per_sec, start_segno);
	if (next >= start_segno + sbi->segs_per_sec) {
		clear_bit(secno, free_i->free_secmap);
		free_i->free_sections++;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	set_bit(segno, free_i->free_segmap);
	free_i->free_segments--;
	if (!test_and_set_bit(secno, free_i->free_secmap))
		free_i->free_sections--;
}

static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
	unsigned int next;

	spin_lock(&free_i->segmap_lock);
	if (test_and_clear_bit(segno, free_i->free_segmap)) {
		free_i->free_segments++;

		next = find_next_bit(free_i->free_segmap,
				start_segno + sbi->segs_per_sec, start_segno);
		if (next >= start_segno + sbi->segs_per_sec) {
			if (test_and_clear_bit(secno, free_i->free_secmap))
				free_i->free_sections++;
		}
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	spin_lock(&free_i->segmap_lock);
	if (!test_and_set_bit(segno, free_i->free_segmap)) {
		free_i->free_segments--;
		if (!test_and_set_bit(secno, free_i->free_secmap))
			free_i->free_sections--;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
		void *dst_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);

#ifdef CONFIG_F2FS_CHECK_FS
	if (memcmp(sit_i->sit_bitmap, sit_i->sit_bitmap_mir,
						sit_i->bitmap_size))
		f2fs_bug_on(sbi, 1);
#endif
	memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
}

static inline block_t written_block_count(struct f2fs_sb_info *sbi)
{
	return SIT_I(sbi)->written_valid_blocks;
}

static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_segments;
}

static inline int reserved_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->reserved_segments;
}

static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_sections;
}

static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[PRE];
}

static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
}

static inline int overprovision_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->ovp_segments;
}

static inline int reserved_sections(struct f2fs_sb_info *sbi)
{
	return GET_SEC_FROM_SEG(sbi, (unsigned int)reserved_segments(sbi));
}

static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi)
{
	unsigned int node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
					get_pages(sbi, F2FS_DIRTY_DENTS);
	unsigned int dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
	unsigned int segno, left_blocks;
	int i;

	/* check current node segment */
	for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) {
		segno = CURSEG_I(sbi, i)->segno;
		left_blocks = sbi->blocks_per_seg -
			get_seg_entry(sbi, segno)->ckpt_valid_blocks;

		if (node_blocks > left_blocks)
			return false;
	}

	/* check current data segment */
	segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
	left_blocks = sbi->blocks_per_seg -
			get_seg_entry(sbi, segno)->ckpt_valid_blocks;
	if (dent_blocks > left_blocks)
		return false;
	return true;
}

static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
					int freed, int needed)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return false;

	if (free_sections(sbi) + freed == reserved_sections(sbi) + needed &&
			has_curseg_enough_space(sbi))
		return false;
	return (free_sections(sbi) + freed) <=
		(node_secs + 2 * dent_secs + imeta_secs +
		reserved_sections(sbi) + needed);
}

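/*
 * Illustrative budget check with hypothetical numbers (not from any real
 * filesystem): with 100 free sections, freed == 0, needed == 0,
 * 20 reserved sections and 3/1/1 dirty node/dent/imeta sections, the
 * right-hand side is 3 + 2 * 1 + 1 + 20 + 0 = 26, so 100 <= 26 is false
 * and the caller does not need to trigger foreground GC.
 */
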
static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
{
	return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
}

static inline int utilization(struct f2fs_sb_info *sbi)
{
	return div_u64((u64)valid_user_blocks(sbi) * 100,
					sbi->user_block_count);
}

/*
 * Sometimes it is better for f2fs to drop its out-of-place update policy and
 * update data in place (IPU). Users can control the policy through sysfs
 * entries. There are several policies with triggering conditions as follows.
 * F2FS_IPU_FORCE - all the time,
 * F2FS_IPU_SSR - if SSR mode is activated,
 * F2FS_IPU_UTIL - if FS utilization is over threshold,
 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
 *                     threshold,
 * F2FS_IPU_FSYNC - activated in fsync path only for high performance flash
 *                     storages. IPU will be triggered only if the # of dirty
 *                     pages exceeds min_fsync_blocks.
 * F2FS_IPU_DISABLE - disable IPU. (=default option)
 */
#define DEF_MIN_IPU_UTIL	70
#define DEF_MIN_FSYNC_BLOCKS	8
#define DEF_MIN_HOT_BLOCKS	16

enum {
	F2FS_IPU_FORCE,
	F2FS_IPU_SSR,
	F2FS_IPU_UTIL,
	F2FS_IPU_SSR_UTIL,
	F2FS_IPU_FSYNC,
	F2FS_IPU_ASYNC,
};

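/*
 * ipu_policy is treated as a bitmask of the enum values above, as the checks
 * in need_inplace_update_policy() below show. For example (a hypothetical
 * setting, not a default): ipu_policy = (1 << F2FS_IPU_SSR) |
 * (1 << F2FS_IPU_FSYNC) enables IPU both when SSR is needed and in the
 * fdatasync path.
 */
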
static inline bool need_inplace_update_policy(struct inode *inode,
				struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int policy = SM_I(sbi)->ipu_policy;

	if (test_opt(sbi, LFS))
		return false;

	/* if this is a cold file, we should overwrite to avoid fragmentation */
	if (file_is_cold(inode))
		return true;

	if (policy & (0x1 << F2FS_IPU_FORCE))
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
		return true;
	if (policy & (0x1 << F2FS_IPU_UTIL) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;
	if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && need_SSR(sbi) &&
			utilization(sbi) > SM_I(sbi)->min_ipu_util)
		return true;

	/*
	 * IPU for rewrite async pages
	 */
	if (policy & (0x1 << F2FS_IPU_ASYNC) &&
			fio && fio->op == REQ_OP_WRITE &&
			!(fio->op_flags & REQ_SYNC) &&
			!f2fs_encrypted_inode(inode))
		return true;

	/* this is only set during fdatasync */
	if (policy & (0x1 << F2FS_IPU_FSYNC) &&
			is_inode_flag_set(inode, FI_NEED_IPU))
		return true;

	return false;
}

static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->segno;
}

static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->alloc_type;
}

static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->next_blkoff;
}

static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
	f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
}

static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
{
	BUG_ON(blk_addr < SEG0_BLKADDR(sbi)
			|| blk_addr >= MAX_BLKADDR(sbi));
}

/*
 * Summary block is always treated as an invalid block
 */
static inline void check_block_count(struct f2fs_sb_info *sbi,
		int segno, struct f2fs_sit_entry *raw_sit)
{
#ifdef CONFIG_F2FS_CHECK_FS
	bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
	int valid_blocks = 0;
	int cur_pos = 0, next_pos;

	/* check bitmap with valid block count */
	do {
		if (is_valid) {
			next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
					sbi->blocks_per_seg,
					cur_pos);
			valid_blocks += next_pos - cur_pos;
		} else
			next_pos = find_next_bit_le(&raw_sit->valid_map,
					sbi->blocks_per_seg,
					cur_pos);
		cur_pos = next_pos;
		is_valid = !is_valid;
	} while (cur_pos < sbi->blocks_per_seg);
	BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
#endif
	/* check segment usage, and check boundary of a given segment number */
	f2fs_bug_on(sbi, GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
					|| segno > TOTAL_SEGS(sbi) - 1);
}

static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
						unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(start);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, start);

#ifdef CONFIG_F2FS_CHECK_FS
	if (f2fs_test_bit(offset, sit_i->sit_bitmap) !=
			f2fs_test_bit(offset, sit_i->sit_bitmap_mir))
		f2fs_bug_on(sbi, 1);
#endif

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return blk_addr;
}

static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	block_addr -= sit_i->sit_base_addr;
	if (block_addr < sit_i->sit_blocks)
		block_addr += sit_i->sit_blocks;
	else
		block_addr -= sit_i->sit_blocks;

	return block_addr + sit_i->sit_base_addr;
}

static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
{
	unsigned int block_off = SIT_BLOCK_OFFSET(start);

	f2fs_change_bit(block_off, sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_change_bit(block_off, sit_i->sit_bitmap_mir);
#endif
}

static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return sit_i->elapsed_time + CURRENT_TIME_SEC.tv_sec -
						sit_i->mounted_time;
}

static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
			unsigned int ofs_in_node, unsigned char version)
{
	sum->nid = cpu_to_le32(nid);
	sum->ofs_in_node = cpu_to_le16(ofs_in_node);
	sum->version = version;
}

static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
				- (base + 1) + type;
}

static inline bool no_fggc_candidate(struct f2fs_sb_info *sbi,
						unsigned int secno)
{
	if (get_valid_blocks(sbi, GET_SEG_FROM_SEC(sbi, secno), true) >
						sbi->fggc_threshold)
		return true;
	return false;
}

static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
{
	if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
		return true;
	return false;
}

/*
 * It is very important to gather dirty pages and write them at once, so that
 * we can submit a big bio without interfering with other data writes.
 * By default, 512 pages for directory data,
 * 512 pages (2MB) * 8 for nodes, and
 * 256 pages * 8 for meta are set.
 */
static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
{
	if (sbi->sb->s_bdi->dirty_exceeded)
		return 0;

	if (type == DATA)
		return sbi->blocks_per_seg;
	else if (type == NODE)
		return 8 * sbi->blocks_per_seg;
	else if (type == META)
		return 8 * BIO_MAX_PAGES;
	else
		return 0;
}

/*
 * When writing pages, it is better to align nr_to_write to the segment size.
 */
static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
					struct writeback_control *wbc)
{
	long nr_to_write, desired;

	if (wbc->sync_mode != WB_SYNC_NONE)
		return 0;

	nr_to_write = wbc->nr_to_write;
	desired = BIO_MAX_PAGES;
	if (type == NODE)
		desired <<= 1;

	wbc->nr_to_write = desired;
	return desired - nr_to_write;
}

static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	bool wakeup = false;
	int i;

	if (force)
		goto wake_up;

	mutex_lock(&dcc->cmd_lock);
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		if (i + 1 < dcc->discard_granularity)
			break;
		if (!list_empty(&dcc->pend_list[i])) {
			wakeup = true;
			break;
		}
	}
	mutex_unlock(&dcc->cmd_lock);
	if (!wakeup)
		return;
wake_up:
	dcc->discard_wake = 1;
	wake_up_interruptible_all(&dcc->discard_wait_queue);
}