/* SPDX-License-Identifier: GPL-2.0 */
/*
 * fs/f2fs/segment.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

/* constant macro */
#define NULL_SEGNO			((unsigned int)(~0))
#define NULL_SECNO			((unsigned int)(~0))

#define DEF_RECLAIM_PREFREE_SEGMENTS	5	/* 5% over total segments */
#define DEF_MAX_RECLAIM_PREFREE_SEGMENTS	4096	/* 8GB in maximum */

#define F2FS_MIN_SEGMENTS	9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
#define F2FS_MIN_META_SEGMENTS	8 /* SB + 2 (CP + SIT + NAT) + SSA */

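/*
 * Rough on-disk layout implied by the segment counts above (a sketch; see
 * the kernel's f2fs documentation for the authoritative description):
 *
 *	+----+----------+-----+-----+-----+----------------------+
 *	| SB | CP packs | SIT | NAT | SSA | MAIN (data/node logs)|
 *	+----+----------+-----+-----+-----+----------------------+
 *
 * CP, SIT and NAT each need at least two segments (two copies), which is
 * where the "2 (CP + SIT + NAT)" in F2FS_MIN_SEGMENTS comes from.
 */
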
/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno)	((segno) - (free_i)->start_segno)
#define GET_R2L_SEGNO(free_i, segno)	((segno) + (free_i)->start_segno)
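
/*
 * Illustrative example with hypothetical numbers: if the main area starts
 * at segment 512 (free_i->start_segno == 512), then logical segment 515
 * is relative segment GET_L2R_SEGNO(free_i, 515) == 3, and
 * GET_R2L_SEGNO(free_i, 3) == 515 maps it back.
 */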

#define IS_DATASEG(t)	((t) <= CURSEG_COLD_DATA)
#define IS_NODESEG(t)	((t) >= CURSEG_HOT_NODE)

#define IS_HOT(t)	((t) == CURSEG_HOT_NODE || (t) == CURSEG_HOT_DATA)
#define IS_WARM(t)	((t) == CURSEG_WARM_NODE || (t) == CURSEG_WARM_DATA)
#define IS_COLD(t)	((t) == CURSEG_COLD_NODE || (t) == CURSEG_COLD_DATA)

#define IS_CURSEG(sbi, seg)						\
	(((seg) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))

#define IS_CURSEC(sbi, secno)						\
	(((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno /		\
	  (sbi)->segs_per_sec) ||					\
	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno /		\
	  (sbi)->segs_per_sec) ||					\
	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno /		\
	  (sbi)->segs_per_sec) ||					\
	 ((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno /		\
	  (sbi)->segs_per_sec) ||					\
	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno /		\
	  (sbi)->segs_per_sec) ||					\
	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno /		\
	  (sbi)->segs_per_sec))

#define MAIN_BLKADDR(sbi)						\
	(SM_I(sbi) ? SM_I(sbi)->main_blkaddr :				\
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr))
#define SEG0_BLKADDR(sbi)						\
	(SM_I(sbi) ? SM_I(sbi)->seg0_blkaddr :				\
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr))

#define MAIN_SEGS(sbi)	(SM_I(sbi)->main_segments)
#define MAIN_SECS(sbi)	((sbi)->total_sections)

#define TOTAL_SEGS(sbi)							\
	(SM_I(sbi) ? SM_I(sbi)->segment_count :				\
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count))
#define TOTAL_BLKS(sbi)	(TOTAL_SEGS(sbi) << (sbi)->log_blocks_per_seg)

#define MAX_BLKADDR(sbi)	(SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
#define SEGMENT_SIZE(sbi)	(1ULL << ((sbi)->log_blocksize +	\
					(sbi)->log_blocks_per_seg))

#define START_BLOCK(sbi, segno)	(SEG0_BLKADDR(sbi) +			\
	 (GET_R2L_SEGNO(FREE_I(sbi), segno) << (sbi)->log_blocks_per_seg))

#define NEXT_FREE_BLKADDR(sbi, curseg)					\
	(START_BLOCK(sbi, (curseg)->segno) + (curseg)->next_blkoff)

#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)	((blk_addr) - SEG0_BLKADDR(sbi))
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> (sbi)->log_blocks_per_seg)
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))

#define GET_SEGNO(sbi, blk_addr)					\
	((!__is_valid_data_blkaddr(blk_addr)) ?				\
	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),				\
		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#define BLKS_PER_SEC(sbi)						\
	((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
#define GET_SEC_FROM_SEG(sbi, segno)					\
	(((segno) == -1) ? -1 : (segno) / (sbi)->segs_per_sec)
#define GET_SEG_FROM_SEC(sbi, secno)					\
	((secno) * (sbi)->segs_per_sec)
#define GET_ZONE_FROM_SEC(sbi, secno)					\
	(((secno) == -1) ? -1 : (secno) / (sbi)->secs_per_zone)
#define GET_ZONE_FROM_SEG(sbi, segno)					\
	GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))
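
/*
 * Geometry reminder (a sketch; real values come from the superblock): a
 * segment is blocks_per_seg blocks (512 x 4KB = 2MB by default), a section
 * is segs_per_sec segments, and a zone is secs_per_zone sections. The
 * helpers above are just the corresponding multiplications and divisions,
 * with -1 passed through to mean "no segment/section".
 */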

#define GET_SUM_BLOCK(sbi, segno)					\
	((sbi)->sm_info->ssa_blkaddr + (segno))

#define GET_SUM_TYPE(footer) ((footer)->entry_type)
#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = (type))

#define SIT_ENTRY_OFFSET(sit_i, segno)					\
	((segno) % (sit_i)->sents_per_block)
#define SIT_BLOCK_OFFSET(segno)						\
	((segno) / SIT_ENTRY_PER_BLOCK)
#define START_SEGNO(segno)						\
	(SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
#define SIT_BLK_CNT(sbi)						\
	DIV_ROUND_UP(MAIN_SEGS(sbi), SIT_ENTRY_PER_BLOCK)
#define f2fs_bitmap_size(nr)						\
	(BITS_TO_LONGS(nr) * sizeof(unsigned long))

#define SECTOR_FROM_BLOCK(blk_addr)					\
	(((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
#define SECTOR_TO_BLOCK(sectors)					\
	((sectors) >> F2FS_LOG_SECTORS_PER_BLOCK)

/*
 * indicate a block allocation direction: RIGHT and LEFT.
 * RIGHT means allocating new sections towards the end of the volume.
 * LEFT means the opposite direction.
 */
enum {
	ALLOC_RIGHT = 0,
	ALLOC_LEFT
};

/*
 * In the victim_sel_policy->alloc_mode, there are two block allocation modes.
 * LFS writes data sequentially with cleaning operations.
 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
 */
enum {
	LFS = 0,
	SSR
};

/*
 * In the victim_sel_policy->gc_mode, there are two GC, aka cleaning, modes.
 * GC_CB is based on the cost-benefit algorithm.
 * GC_GREEDY is based on the greedy algorithm.
 */
enum {
	GC_CB = 0,
	GC_GREEDY,
	ALLOC_NEXT,
	FLUSH_DEVICE,
	MAX_GC_POLICY,
};
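
/*
 * For reference, GC_CB follows the classic cost-benefit heuristic from
 * the original LFS work: pick the victim that maximizes roughly
 *
 *	benefit / cost = (1 - u) * age / (1 + u)
 *
 * where u is the fraction of still-valid blocks in the candidate and age
 * derives from the mtime tracked in struct sit_info below. This is only
 * the idea behind the policy; the exact weighting lives in the victim
 * selection code (gc.c), not in this header.
 */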

/*
 * BG_GC means the background cleaning job.
 * FG_GC means the on-demand cleaning job.
 * FORCE_FG_GC means an on-demand cleaning job issued from the background.
 */
enum {
	BG_GC = 0,
	FG_GC,
	FORCE_FG_GC,
};

/* for a function parameter to select a victim segment */
struct victim_sel_policy {
	int alloc_mode;			/* LFS or SSR */
	int gc_mode;			/* GC_CB or GC_GREEDY */
	unsigned long *dirty_segmap;	/* dirty segment bitmap */
	unsigned int max_search;	/* maximum # of segments to search */
	unsigned int offset;		/* last scanned bitmap offset */
	unsigned int ofs_unit;		/* bitmap search unit */
	unsigned int min_cost;		/* minimum cost */
	unsigned int min_segno;		/* segment # having min. cost */
};

struct seg_entry {
	unsigned int type:6;		/* segment type like CURSEG_XXX_TYPE */
	unsigned int valid_blocks:10;	/* # of valid blocks */
	unsigned int ckpt_valid_blocks:10;	/* # of valid blocks last cp */
	unsigned int padding:6;		/* padding */
	unsigned char *cur_valid_map;	/* validity bitmap of blocks */
#ifdef CONFIG_F2FS_CHECK_FS
	unsigned char *cur_valid_map_mir;	/* mirror of current valid bitmap */
#endif
	/*
	 * # of valid blocks and the validity bitmap stored in the last
	 * checkpoint pack. This information is used by the SSR mode.
	 */
	unsigned char *ckpt_valid_map;	/* validity bitmap of blocks last cp */
	unsigned char *discard_map;
	unsigned long long mtime;	/* modification time of the segment */
};

struct sec_entry {
	unsigned int valid_blocks;	/* # of valid blocks in a section */
};

struct segment_allocation {
	void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
};

#define MAX_SKIP_GC_COUNT			16

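/*
 * Sketch of how these are used (see the atomic-write paths in segment.c):
 * pages written under an atomic-write context (e.g.
 * F2FS_IOC_START_ATOMIC_WRITE) are staged on a list of inmem_pages until
 * commit, and old_addr remembers the previous block address so a failed
 * commit can be revoked. MAX_SKIP_GC_COUNT roughly bounds how many times
 * GC may be skipped on behalf of such atomic files before they are
 * dropped.
 */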
struct inmem_pages {
	struct list_head list;
	struct page *page;
	block_t old_addr;		/* for revoking when fail to commit */
};

struct sit_info {
	const struct segment_allocation *s_ops;

	block_t sit_base_addr;		/* start block address of SIT area */
	block_t sit_blocks;		/* # of blocks used by SIT area */
	block_t written_valid_blocks;	/* # of valid blocks in main area */
	char *bitmap;			/* all bitmaps pointer */
	char *sit_bitmap;		/* SIT bitmap pointer */
#ifdef CONFIG_F2FS_CHECK_FS
	char *sit_bitmap_mir;		/* SIT bitmap mirror */

	/* bitmap of segments to be ignored by GC in case of errors */
	unsigned long *invalid_segmap;
#endif
	unsigned int bitmap_size;	/* SIT bitmap size */

	unsigned long *tmp_map;			/* bitmap for temporary use */
	unsigned long *dirty_sentries_bitmap;	/* bitmap for dirty sentries */
	unsigned int dirty_sentries;		/* # of dirty sentries */
	unsigned int sents_per_block;		/* # of SIT entries per block */
	struct rw_semaphore sentry_lock;	/* to protect SIT cache */
	struct seg_entry *sentries;		/* SIT segment-level cache */
	struct sec_entry *sec_entries;		/* SIT section-level cache */

	/* for cost-benefit algorithm in cleaning procedure */
	unsigned long long elapsed_time;	/* elapsed time after mount */
	unsigned long long mounted_time;	/* mount time */
	unsigned long long min_mtime;		/* min. modification time */
	unsigned long long max_mtime;		/* max. modification time */

	unsigned int last_victim[MAX_GC_POLICY]; /* last victim segment # */
};

struct free_segmap_info {
	unsigned int start_segno;	/* start segment number logically */
	unsigned int free_segments;	/* # of free segments */
	unsigned int free_sections;	/* # of free sections */
	spinlock_t segmap_lock;		/* free segmap lock */
	unsigned long *free_segmap;	/* free segment bitmap */
	unsigned long *free_secmap;	/* free section bitmap */
};

/* Notice: The order of dirty types matches CURSEG_XXX in f2fs.h */
enum dirty_type {
	DIRTY_HOT_DATA,		/* dirty segments assigned as hot data logs */
	DIRTY_WARM_DATA,	/* dirty segments assigned as warm data logs */
	DIRTY_COLD_DATA,	/* dirty segments assigned as cold data logs */
	DIRTY_HOT_NODE,		/* dirty segments assigned as hot node logs */
	DIRTY_WARM_NODE,	/* dirty segments assigned as warm node logs */
	DIRTY_COLD_NODE,	/* dirty segments assigned as cold node logs */
	DIRTY,			/* to count # of dirty segments */
	PRE,			/* to count # of entirely obsolete segments */
	NR_DIRTY_TYPE
};

struct dirty_seglist_info {
	const struct victim_selection *v_ops;	/* victim selection operation */
	unsigned long *dirty_segmap[NR_DIRTY_TYPE];
	struct mutex seglist_lock;		/* lock for segment bitmaps */
	int nr_dirty[NR_DIRTY_TYPE];		/* # of dirty segments */
	unsigned long *victim_secmap;		/* background GC victims */
};

/* victim selection function for cleaning and SSR */
struct victim_selection {
	int (*get_victim)(struct f2fs_sb_info *, unsigned int *,
					int, int, char);
};

/* for active log information */
struct curseg_info {
	struct mutex curseg_mutex;		/* lock for consistency */
	struct f2fs_summary_block *sum_blk;	/* cached summary block */
	struct rw_semaphore journal_rwsem;	/* protect journal area */
	struct f2fs_journal *journal;		/* cached journal info */
	unsigned char alloc_type;		/* current allocation type */
	unsigned int segno;			/* current segment number */
	unsigned short next_blkoff;		/* next block offset to write */
	unsigned int zone;			/* current zone number */
	unsigned int next_segno;		/* preallocated segment */
};

struct sit_entry_set {
	struct list_head set_list;	/* link with all sit sets */
	unsigned int start_segno;	/* start segno of sits in set */
	unsigned int entry_cnt;		/* the # of sit entries in set */
};

/*
 * inline functions
 */
static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
{
	if (type == CURSEG_COLD_DATA_PINNED)
		type = CURSEG_COLD_DATA;
	return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
}

static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sentries[segno];
}

static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sec_entries[GET_SEC_FROM_SEG(sbi, segno)];
}

static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno, bool use_section)
{
	/*
	 * In order to get # of valid blocks in a section instantly from many
	 * segments, f2fs manages two counting structures separately.
	 */
	if (use_section && __is_large_section(sbi))
		return get_sec_entry(sbi, segno)->valid_blocks;
	else
		return get_seg_entry(sbi, segno)->valid_blocks;
}

static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno)
{
	return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
}

static inline void seg_info_from_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	se->valid_blocks = GET_SIT_VBLOCKS(rs);
	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
	memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
#ifdef CONFIG_F2FS_CHECK_FS
	memcpy(se->cur_valid_map_mir, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
#endif
	se->type = GET_SIT_TYPE(rs);
	se->mtime = le64_to_cpu(rs->mtime);
}

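/*
 * On disk, f2fs_sit_entry->vblocks packs the segment type and the valid
 * block count into a single 16-bit field: the type sits above
 * SIT_VBLOCKS_SHIFT and the count occupies the low bits (see
 * GET_SIT_VBLOCKS()/GET_SIT_TYPE()). __seg_info_to_raw_sit() below
 * rebuilds that packed field from the in-memory seg_entry.
 */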
static inline void __seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
					se->valid_blocks;
	rs->vblocks = cpu_to_le16(raw_vblocks);
	memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
	rs->mtime = cpu_to_le64(se->mtime);
}

static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi,
				struct page *page, unsigned int start)
{
	struct f2fs_sit_block *raw_sit;
	struct seg_entry *se;
	struct f2fs_sit_entry *rs;
	unsigned int end = min(start + SIT_ENTRY_PER_BLOCK,
					(unsigned long)MAIN_SEGS(sbi));
	int i;

	raw_sit = (struct f2fs_sit_block *)page_address(page);
	memset(raw_sit, 0, PAGE_SIZE);
	for (i = 0; i < end - start; i++) {
		rs = &raw_sit->entries[i];
		se = get_seg_entry(sbi, start + i);
		__seg_info_to_raw_sit(se, rs);
	}
}

static inline void seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	__seg_info_to_raw_sit(se, rs);

	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->ckpt_valid_blocks = se->valid_blocks;
}

static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
		unsigned int max, unsigned int segno)
{
	unsigned int ret;
	spin_lock(&free_i->segmap_lock);
	ret = find_next_bit(free_i->free_segmap, max, segno);
	spin_unlock(&free_i->segmap_lock);
	return ret;
}

static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
	unsigned int next;

	spin_lock(&free_i->segmap_lock);
	clear_bit(segno, free_i->free_segmap);
	free_i->free_segments++;

	next = find_next_bit(free_i->free_segmap,
			start_segno + sbi->segs_per_sec, start_segno);
	if (next >= start_segno + sbi->segs_per_sec) {
		clear_bit(secno, free_i->free_secmap);
		free_i->free_sections++;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	set_bit(segno, free_i->free_segmap);
	free_i->free_segments--;
	if (!test_and_set_bit(secno, free_i->free_secmap))
		free_i->free_sections--;
}

static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
	unsigned int next;

	spin_lock(&free_i->segmap_lock);
	if (test_and_clear_bit(segno, free_i->free_segmap)) {
		free_i->free_segments++;

		if (IS_CURSEC(sbi, secno))
			goto skip_free;
		next = find_next_bit(free_i->free_segmap,
				start_segno + sbi->segs_per_sec, start_segno);
		if (next >= start_segno + sbi->segs_per_sec) {
			if (test_and_clear_bit(secno, free_i->free_secmap))
				free_i->free_sections++;
		}
	}
skip_free:
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	spin_lock(&free_i->segmap_lock);
	if (!test_and_set_bit(segno, free_i->free_segmap)) {
		free_i->free_segments--;
		if (!test_and_set_bit(secno, free_i->free_secmap))
			free_i->free_sections--;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
		void *dst_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);

#ifdef CONFIG_F2FS_CHECK_FS
	if (memcmp(sit_i->sit_bitmap, sit_i->sit_bitmap_mir,
						sit_i->bitmap_size))
		f2fs_bug_on(sbi, 1);
#endif
	memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
}

static inline block_t written_block_count(struct f2fs_sb_info *sbi)
{
	return SIT_I(sbi)->written_valid_blocks;
}

static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_segments;
}

static inline int reserved_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->reserved_segments +
			SM_I(sbi)->additional_reserved_segments;
}

static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_sections;
}

static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[PRE];
}

static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
}

static inline int overprovision_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->ovp_segments;
}

static inline int reserved_sections(struct f2fs_sb_info *sbi)
{
	return GET_SEC_FROM_SEG(sbi, (unsigned int)reserved_segments(sbi));
}

static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi,
			unsigned int node_blocks, unsigned int dent_blocks)
{
	unsigned int segno, left_blocks;
	int i;

	/* check current node segment */
	for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) {
		segno = CURSEG_I(sbi, i)->segno;
		left_blocks = sbi->blocks_per_seg -
			get_seg_entry(sbi, segno)->ckpt_valid_blocks;

		if (node_blocks > left_blocks)
			return false;
	}

	/* check current data segment */
	segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
	left_blocks = sbi->blocks_per_seg -
			get_seg_entry(sbi, segno)->ckpt_valid_blocks;
	if (dent_blocks > left_blocks)
		return false;
	return true;
}

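/*
 * Free-space check used before checkpoint and allocation paths: dirty
 * node/dentry pages are converted into the number of sections they will
 * consume. need_lower assumes the leftover partial-section blocks fit in
 * the currently open segments; need_upper pessimistically charges one
 * extra section for each nonzero remainder. In between the two bounds,
 * has_curseg_enough_space() breaks the tie.
 */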
static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
					int freed, int needed)
{
	unsigned int total_node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
					get_pages(sbi, F2FS_DIRTY_DENTS) +
					get_pages(sbi, F2FS_DIRTY_IMETA);
	unsigned int total_dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
	unsigned int node_secs = total_node_blocks / BLKS_PER_SEC(sbi);
	unsigned int dent_secs = total_dent_blocks / BLKS_PER_SEC(sbi);
	unsigned int node_blocks = total_node_blocks % BLKS_PER_SEC(sbi);
	unsigned int dent_blocks = total_dent_blocks % BLKS_PER_SEC(sbi);
	unsigned int free, need_lower, need_upper;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return false;

	free = free_sections(sbi) + freed;
	need_lower = node_secs + dent_secs + reserved_sections(sbi) + needed;
	need_upper = need_lower + (node_blocks ? 1 : 0) + (dent_blocks ? 1 : 0);

	if (free > need_upper)
		return false;
	else if (free <= need_lower)
		return true;
	return !has_curseg_enough_space(sbi, node_blocks, dent_blocks);
}

static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi)
{
	if (likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return true;
	if (likely(!has_not_enough_free_secs(sbi, 0, 0)))
		return true;
	return false;
}

static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
{
	return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
}

static inline int utilization(struct f2fs_sb_info *sbi)
{
	return div_u64((u64)valid_user_blocks(sbi) * 100,
					sbi->user_block_count);
}

/*
 * Sometimes it is better for f2fs to drop its out-of-place update policy
 * and update data in place (IPU). Users can control the policy through
 * sysfs entries. The policies with their triggering conditions are:
 * F2FS_IPU_FORCE - all the time,
 * F2FS_IPU_SSR - if SSR mode is activated,
 * F2FS_IPU_UTIL - if FS utilization is over threshold,
 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
 *                     threshold,
 * F2FS_IPU_FSYNC - activated in fsync path only for high performance flash
 *                  storages. IPU will be triggered only if the # of dirty
 *                  pages exceeds min_fsync_blocks. (=default option)
 * F2FS_IPU_ASYNC - do IPU for asynchronous write requests.
 * F2FS_IPU_NOCACHE - disable IPU bio cache.
 * F2FS_IPU_DISABLE - disable IPU. (=default option in LFS mode)
 */
#define DEF_MIN_IPU_UTIL	70
#define DEF_MIN_FSYNC_BLOCKS	8
#define DEF_MIN_HOT_BLOCKS	16

#define SMALL_VOLUME_SEGMENTS	(16 * 512)	/* 16GB */

enum {
	F2FS_IPU_FORCE,
	F2FS_IPU_SSR,
	F2FS_IPU_UTIL,
	F2FS_IPU_SSR_UTIL,
	F2FS_IPU_FSYNC,
	F2FS_IPU_ASYNC,
	F2FS_IPU_NOCACHE,
};
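
/*
 * A minimal usage sketch (the device name is an example; paths follow the
 * standard f2fs sysfs layout):
 *
 *	echo 4 > /sys/fs/f2fs/sda1/ipu_policy	  (4 == 1 << F2FS_IPU_UTIL)
 *	echo 70 > /sys/fs/f2fs/sda1/min_ipu_util
 *
 * ipu_policy is interpreted as a bitmap of the modes above, and
 * DEF_MIN_IPU_UTIL / DEF_MIN_FSYNC_BLOCKS are the defaults for the
 * matching thresholds.
 */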

static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->segno;
}

static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->alloc_type;
}

static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->next_blkoff;
}

static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
	f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
}

static inline void verify_fio_blkaddr(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;

	if (__is_valid_data_blkaddr(fio->old_blkaddr))
		verify_blkaddr(sbi, fio->old_blkaddr, __is_meta_io(fio) ?
					META_GENERIC : DATA_GENERIC);
	verify_blkaddr(sbi, fio->new_blkaddr, __is_meta_io(fio) ?
					META_GENERIC : DATA_GENERIC_ENHANCE);
}

/*
 * Summary block is always treated as an invalid block
 */
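/*
 * The loop below cross-checks the on-disk validity bitmap against the
 * recorded valid-block count by walking the bitmap as alternating runs of
 * set and clear bits and summing only the set runs. A mismatch, an
 * oversized count, or an out-of-range segno marks the image for fsck and
 * is reported as corruption.
 */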
static inline int check_block_count(struct f2fs_sb_info *sbi,
		int segno, struct f2fs_sit_entry *raw_sit)
{
	bool is_valid  = test_bit_le(0, raw_sit->valid_map) ? true : false;
	int valid_blocks = 0;
	int cur_pos = 0, next_pos;

	/* check bitmap with valid block count */
	do {
		if (is_valid) {
			next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
					sbi->blocks_per_seg,
					cur_pos);
			valid_blocks += next_pos - cur_pos;
		} else
			next_pos = find_next_bit_le(&raw_sit->valid_map,
					sbi->blocks_per_seg,
					cur_pos);
		cur_pos = next_pos;
		is_valid = !is_valid;
	} while (cur_pos < sbi->blocks_per_seg);

	if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
		f2fs_err(sbi, "Mismatch valid blocks %d vs. %d",
			 GET_SIT_VBLOCKS(raw_sit), valid_blocks);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		return -EFSCORRUPTED;
	}

	/* check segment usage, and check boundary of a given segment number */
	if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
					|| segno > TOTAL_SEGS(sbi) - 1)) {
		f2fs_err(sbi, "Wrong valid blocks %d or segno %u",
			 GET_SIT_VBLOCKS(raw_sit), segno);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		return -EFSCORRUPTED;
	}
	return 0;
}

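/*
 * The SIT area keeps two copies of every SIT block, and sit_bitmap records
 * which copy is currently live for each block offset. current_sit_addr()
 * returns the live copy, next_sit_addr() the other one, and
 * set_to_next_sit() flips the bit when a block is rewritten, so updates
 * ping-pong between the two copies across checkpoints.
 */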
static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
						unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(start);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, start);

#ifdef CONFIG_F2FS_CHECK_FS
	if (f2fs_test_bit(offset, sit_i->sit_bitmap) !=
			f2fs_test_bit(offset, sit_i->sit_bitmap_mir))
		f2fs_bug_on(sbi, 1);
#endif

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return blk_addr;
}

static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	block_addr -= sit_i->sit_base_addr;
	if (block_addr < sit_i->sit_blocks)
		block_addr += sit_i->sit_blocks;
	else
		block_addr -= sit_i->sit_blocks;

	return block_addr + sit_i->sit_base_addr;
}

static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
{
	unsigned int block_off = SIT_BLOCK_OFFSET(start);

	f2fs_change_bit(block_off, sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_change_bit(block_off, sit_i->sit_bitmap_mir);
#endif
}

static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi,
						bool base_time)
{
	struct sit_info *sit_i = SIT_I(sbi);
	time64_t diff, now = ktime_get_boottime_seconds();

	if (now >= sit_i->mounted_time)
		return sit_i->elapsed_time + now - sit_i->mounted_time;

	/* system time is set to the past */
	if (!base_time) {
		diff = sit_i->mounted_time - now;
		if (sit_i->elapsed_time >= diff)
			return sit_i->elapsed_time - diff;
		return 0;
	}
	return sit_i->elapsed_time;
}

static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
			unsigned int ofs_in_node, unsigned char version)
{
	sum->nid = cpu_to_le32(nid);
	sum->ofs_in_node = cpu_to_le16(ofs_in_node);
	sum->version = version;
}

static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
				- (base + 1) + type;
}

static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
{
	if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
		return true;
	return false;
}

/*
 * It is very important to gather dirty pages and write them at once, so
 * that we can submit a big bio without interfering with other data writes.
 * By default, 512 pages for directory data,
 * 512 pages (2MB) * 8 for nodes, and
 * 256 pages * 8 for meta are set.
 */
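/*
 * With the default 4KB blocks and 2MB segments, blocks_per_seg is 512, so
 * the returns below match the numbers above: 512 pages for DATA, 8 * 512
 * for NODE, and 8 * BIO_MAX_PAGES (typically 256) for META.
 */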
static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
{
	if (sbi->sb->s_bdi->wb.dirty_exceeded)
		return 0;

	if (type == DATA)
		return sbi->blocks_per_seg;
	else if (type == NODE)
		return 8 * sbi->blocks_per_seg;
	else if (type == META)
		return 8 * BIO_MAX_PAGES;
	else
		return 0;
}

/*
 * When writing pages, it is better to align nr_to_write to the segment size.
 */
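/*
 * For background writeback (WB_SYNC_NONE) the request below is rounded up
 * to a bio-friendly batch: BIO_MAX_PAGES pages, twice that for nodes. The
 * return value is the number of extra pages granted beyond the caller's
 * original nr_to_write so the caller can settle the difference afterwards;
 * synchronous writeback is left untouched.
 */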
static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
					struct writeback_control *wbc)
{
	long nr_to_write, desired;

	if (wbc->sync_mode != WB_SYNC_NONE)
		return 0;

	nr_to_write = wbc->nr_to_write;
	desired = BIO_MAX_PAGES;
	if (type == NODE)
		desired <<= 1;

	wbc->nr_to_write = desired;
	return desired - nr_to_write;
}

static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	bool wakeup = false;
	int i;

	if (force)
		goto wake_up;

	mutex_lock(&dcc->cmd_lock);
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		if (i + 1 < dcc->discard_granularity)
			break;
		if (!list_empty(&dcc->pend_list[i])) {
			wakeup = true;
			break;
		}
	}
	mutex_unlock(&dcc->cmd_lock);
	if (!wakeup || !is_idle(sbi, DISCARD_TIME))
		return;
wake_up:
	dcc->discard_wake = 1;
	wake_up_interruptible_all(&dcc->discard_wait_queue);
}