// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/segment.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

/* constant macro */
#define NULL_SEGNO			((unsigned int)(~0))
#define NULL_SECNO			((unsigned int)(~0))

#define DEF_RECLAIM_PREFREE_SEGMENTS	5	/* 5% over total segments */
#define DEF_MAX_RECLAIM_PREFREE_SEGMENTS	4096	/* 8GB in maximum */

#define F2FS_MIN_SEGMENTS	9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
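/*
 * Illustration (added for clarity, values assume the on-disk defaults):
 * the minimum of 9 segments is 1 superblock segment + 2 segments each for
 * CP, SIT and NAT (2 * 3 = 6) + 1 SSA segment + at least 1 main segment.
 * Similarly, with the default 2MB segment (512 x 4KB blocks),
 * DEF_MAX_RECLAIM_PREFREE_SEGMENTS caps reclaim at 4096 * 2MB = 8GB.
 */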

/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno)	((segno) - (free_i)->start_segno)
#define GET_R2L_SEGNO(free_i, segno)	((segno) + (free_i)->start_segno)
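/*
 * Illustrative example (hypothetical numbers): if the main area starts at
 * segment 10 (free_i->start_segno == 10), logical segment 25 maps to
 * relative segment GET_L2R_SEGNO() == 15, and relative segment 15 maps back
 * to logical segment GET_R2L_SEGNO() == 25.
 */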

#define IS_DATASEG(t)	((t) <= CURSEG_COLD_DATA)
#define IS_NODESEG(t)	((t) >= CURSEG_HOT_NODE)

#define IS_HOT(t)	((t) == CURSEG_HOT_NODE || (t) == CURSEG_HOT_DATA)
#define IS_WARM(t)	((t) == CURSEG_WARM_NODE || (t) == CURSEG_WARM_DATA)
#define IS_COLD(t)	((t) == CURSEG_COLD_NODE || (t) == CURSEG_COLD_DATA)

#define IS_CURSEG(sbi, seg)						\
	(((seg) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) ||		\
	 ((seg) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))

#define IS_CURSEC(sbi, secno)						\
	(((secno) == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno /		\
	  (sbi)->segs_per_sec) ||					\
	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno /		\
	  (sbi)->segs_per_sec) ||					\
	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno /		\
	  (sbi)->segs_per_sec) ||					\
	 ((secno) == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno /		\
	  (sbi)->segs_per_sec) ||					\
	 ((secno) == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno /		\
	  (sbi)->segs_per_sec) ||					\
	 ((secno) == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno /		\
	  (sbi)->segs_per_sec))

#define MAIN_BLKADDR(sbi)						\
	(SM_I(sbi) ? SM_I(sbi)->main_blkaddr :				\
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr))
#define SEG0_BLKADDR(sbi)						\
	(SM_I(sbi) ? SM_I(sbi)->seg0_blkaddr :				\
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr))

#define MAIN_SEGS(sbi)	(SM_I(sbi)->main_segments)
#define MAIN_SECS(sbi)	((sbi)->total_sections)

#define TOTAL_SEGS(sbi)							\
	(SM_I(sbi) ? SM_I(sbi)->segment_count :				\
		le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count))
#define TOTAL_BLKS(sbi)	(TOTAL_SEGS(sbi) << (sbi)->log_blocks_per_seg)

#define MAX_BLKADDR(sbi)	(SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
#define SEGMENT_SIZE(sbi)	(1ULL << ((sbi)->log_blocksize +	\
					(sbi)->log_blocks_per_seg))
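/*
 * Illustrative arithmetic (assuming the common defaults, not taken from
 * this file): with 4KB blocks (log_blocksize == 12) and 512 blocks per
 * segment (log_blocks_per_seg == 9), SEGMENT_SIZE() is 1ULL << 21 == 2MB.
 */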

#define START_BLOCK(sbi, segno)	(SEG0_BLKADDR(sbi) +			\
	(GET_R2L_SEGNO(FREE_I(sbi), segno) << (sbi)->log_blocks_per_seg))

#define NEXT_FREE_BLKADDR(sbi, curseg)					\
	(START_BLOCK(sbi, (curseg)->segno) + (curseg)->next_blkoff)

#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)	((blk_addr) - SEG0_BLKADDR(sbi))
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> (sbi)->log_blocks_per_seg)
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))

#define GET_SEGNO(sbi, blk_addr)					\
	((!__is_valid_data_blkaddr(blk_addr)) ?				\
	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),				\
		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#define BLKS_PER_SEC(sbi)						\
	((sbi)->segs_per_sec * (sbi)->blocks_per_seg)
#define GET_SEC_FROM_SEG(sbi, segno)					\
	((segno) / (sbi)->segs_per_sec)
#define GET_SEG_FROM_SEC(sbi, secno)					\
	((secno) * (sbi)->segs_per_sec)
#define GET_ZONE_FROM_SEC(sbi, secno)					\
	((secno) / (sbi)->secs_per_zone)
#define GET_ZONE_FROM_SEG(sbi, segno)					\
	GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))
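/*
 * Illustrative geometry (hypothetical values): with segs_per_sec == 4 and
 * blocks_per_seg == 512, BLKS_PER_SEC() is 2048; segment 10 belongs to
 * section GET_SEC_FROM_SEG() == 2, and with secs_per_zone == 2 that section
 * belongs to zone GET_ZONE_FROM_SEC() == 1.
 */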

#define GET_SUM_BLOCK(sbi, segno)					\
	((sbi)->sm_info->ssa_blkaddr + (segno))

#define GET_SUM_TYPE(footer) ((footer)->entry_type)
#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = (type))

#define SIT_ENTRY_OFFSET(sit_i, segno)					\
	((segno) % (sit_i)->sents_per_block)
#define SIT_BLOCK_OFFSET(segno)						\
	((segno) / SIT_ENTRY_PER_BLOCK)
#define START_SEGNO(segno)						\
	(SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
#define SIT_BLK_CNT(sbi)						\
	DIV_ROUND_UP(MAIN_SEGS(sbi), SIT_ENTRY_PER_BLOCK)
#define f2fs_bitmap_size(nr)						\
	(BITS_TO_LONGS(nr) * sizeof(unsigned long))
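/*
 * Illustrative example (assuming 4KB pages, where SIT_ENTRY_PER_BLOCK is
 * expected to be 55): segment 1000 is described in SIT block
 * SIT_BLOCK_OFFSET(1000) == 18, whose set starts at START_SEGNO(1000) == 990,
 * and its entry sits at offset 1000 % 55 == 10 within that block.
 */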

#define SECTOR_FROM_BLOCK(blk_addr)					\
	(((sector_t)blk_addr) << F2FS_LOG_SECTORS_PER_BLOCK)
#define SECTOR_TO_BLOCK(sectors)					\
	((sectors) >> F2FS_LOG_SECTORS_PER_BLOCK)
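/*
 * Illustrative arithmetic (assuming 4KB blocks and 512-byte sectors, i.e.
 * F2FS_LOG_SECTORS_PER_BLOCK == 3): block 100 maps to sector 800, and
 * 800 sectors convert back to block 100.
 */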

/*
 * Indicates the block allocation direction: RIGHT or LEFT.
 * RIGHT means allocating new sections towards the end of the volume.
 * LEFT means the opposite direction.
 */
enum {
	ALLOC_RIGHT = 0,
	ALLOC_LEFT
};

/*
 * In victim_sel_policy->alloc_mode, there are two block allocation modes.
 * LFS writes data sequentially with cleaning operations.
 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
 */
enum {
	LFS = 0,
	SSR
};

/*
 * In victim_sel_policy->gc_mode, there are two gc, aka cleaning, modes.
 * GC_CB is based on the cost-benefit algorithm.
 * GC_GREEDY is based on the greedy algorithm.
 */
enum {
	GC_CB = 0,
	GC_GREEDY,
	ALLOC_NEXT,
	FLUSH_DEVICE,
	MAX_GC_POLICY,
};
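/*
 * Illustrative sketch of the two victim costs (the real implementations
 * live in fs/f2fs/gc.c; the constants here are only indicative):
 * greedy picks the candidate with the fewest valid blocks, i.e.
 *	cost = valid_blocks;
 * cost-benefit additionally weighs the candidate's age against the cost of
 * copying its valid blocks, roughly
 *	u = valid_blocks * 100 / blocks_per_section;
 *	benefit / cost ~ age * (100 - u) / (100 + u);
 * so old, mostly-invalid sections are preferred for background cleaning.
 */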

/*
 * BG_GC means the background cleaning job.
 * FG_GC means the on-demand cleaning job.
 * FORCE_FG_GC means the on-demand cleaning job triggered in the background.
 */
enum {
	BG_GC = 0,
	FG_GC,
	FORCE_FG_GC,
};

/* for a function parameter to select a victim segment */
struct victim_sel_policy {
	int alloc_mode;			/* LFS or SSR */
	int gc_mode;			/* GC_CB or GC_GREEDY */
	unsigned long *dirty_segmap;	/* dirty segment bitmap */
	unsigned int max_search;	/* maximum # of segments to search */
	unsigned int offset;		/* last scanned bitmap offset */
	unsigned int ofs_unit;		/* bitmap search unit */
	unsigned int min_cost;		/* minimum cost */
	unsigned int min_segno;		/* segment # having min. cost */
};

struct seg_entry {
	unsigned int type:6;		/* segment type like CURSEG_XXX_TYPE */
	unsigned int valid_blocks:10;	/* # of valid blocks */
	unsigned int ckpt_valid_blocks:10;	/* # of valid blocks last cp */
	unsigned int padding:6;		/* padding */
	unsigned char *cur_valid_map;	/* validity bitmap of blocks */
#ifdef CONFIG_F2FS_CHECK_FS
	unsigned char *cur_valid_map_mir;	/* mirror of current valid bitmap */
#endif
	/*
	 * # of valid blocks and the validity bitmap stored in the last
	 * checkpoint pack. This information is used by the SSR mode.
	 */
	unsigned char *ckpt_valid_map;	/* validity bitmap of blocks last cp */
	unsigned char *discard_map;
	unsigned long long mtime;	/* modification time of the segment */
};
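/*
 * Note (added for clarity): the four bit-fields above add up to
 * 6 + 10 + 10 + 6 == 32 bits, so they pack into a single 32-bit word;
 * with the default 512 blocks per segment, the 10-bit block counters
 * are sufficient.
 */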

struct sec_entry {
	unsigned int valid_blocks;	/* # of valid blocks in a section */
};

struct segment_allocation {
	void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
};

/*
 * This value is set in the page's private data to indicate that the page
 * has been written atomically and is on the inmem_pages list.
 */
#define ATOMIC_WRITTEN_PAGE		((unsigned long)-1)
#define DUMMY_WRITTEN_PAGE		((unsigned long)-2)

#define IS_ATOMIC_WRITTEN_PAGE(page)			\
		(page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
#define IS_DUMMY_WRITTEN_PAGE(page)			\
		(page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE)
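/*
 * Rough usage sketch (the actual call sites are in other f2fs files, e.g.
 * the atomic-write path in segment.c): a page tracked for an atomic write
 * gets ATOMIC_WRITTEN_PAGE stored in its page private field, and writeback
 * later uses IS_ATOMIC_WRITTEN_PAGE() to special-case such pages.
 */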

#define MAX_SKIP_GC_COUNT			16

struct inmem_pages {
	struct list_head list;
	struct page *page;
	block_t old_addr;		/* for revoking when commit fails */
};

struct sit_info {
	const struct segment_allocation *s_ops;

	block_t sit_base_addr;		/* start block address of SIT area */
	block_t sit_blocks;		/* # of blocks used by SIT area */
	block_t written_valid_blocks;	/* # of valid blocks in main area */
	char *bitmap;			/* all bitmaps pointer */
	char *sit_bitmap;		/* SIT bitmap pointer */
#ifdef CONFIG_F2FS_CHECK_FS
	char *sit_bitmap_mir;		/* SIT bitmap mirror */

	/* bitmap of segments to be ignored by GC in case of errors */
	unsigned long *invalid_segmap;
#endif
	unsigned int bitmap_size;	/* SIT bitmap size */

	unsigned long *tmp_map;			/* bitmap for temporary use */
	unsigned long *dirty_sentries_bitmap;	/* bitmap for dirty sentries */
	unsigned int dirty_sentries;		/* # of dirty sentries */
	unsigned int sents_per_block;		/* # of SIT entries per block */
	struct rw_semaphore sentry_lock;	/* to protect SIT cache */
	struct seg_entry *sentries;		/* SIT segment-level cache */
	struct sec_entry *sec_entries;		/* SIT section-level cache */

	/* for cost-benefit algorithm in cleaning procedure */
	unsigned long long elapsed_time;	/* elapsed time after mount */
	unsigned long long mounted_time;	/* mount time */
	unsigned long long min_mtime;		/* min. modification time */
	unsigned long long max_mtime;		/* max. modification time */

	unsigned int last_victim[MAX_GC_POLICY]; /* last victim segment # */
};

struct free_segmap_info {
	unsigned int start_segno;	/* start segment number logically */
	unsigned int free_segments;	/* # of free segments */
	unsigned int free_sections;	/* # of free sections */
	spinlock_t segmap_lock;		/* free segmap lock */
	unsigned long *free_segmap;	/* free segment bitmap */
	unsigned long *free_secmap;	/* free section bitmap */
};

/* Notice: The order of dirty type is the same as CURSEG_XXX in f2fs.h */
enum dirty_type {
	DIRTY_HOT_DATA,		/* dirty segments assigned as hot data logs */
	DIRTY_WARM_DATA,	/* dirty segments assigned as warm data logs */
	DIRTY_COLD_DATA,	/* dirty segments assigned as cold data logs */
	DIRTY_HOT_NODE,		/* dirty segments assigned as hot node logs */
	DIRTY_WARM_NODE,	/* dirty segments assigned as warm node logs */
	DIRTY_COLD_NODE,	/* dirty segments assigned as cold node logs */
	DIRTY,			/* to count # of dirty segments */
	PRE,			/* to count # of entirely obsolete segments */
	NR_DIRTY_TYPE
};

struct dirty_seglist_info {
	const struct victim_selection *v_ops;	/* victim selection operation */
	unsigned long *dirty_segmap[NR_DIRTY_TYPE];
	struct mutex seglist_lock;		/* lock for segment bitmaps */
	int nr_dirty[NR_DIRTY_TYPE];		/* # of dirty segments */
	unsigned long *victim_secmap;		/* background GC victims */
};

/* victim selection function for cleaning and SSR */
struct victim_selection {
	int (*get_victim)(struct f2fs_sb_info *, unsigned int *,
							int, int, char);
};

/* for active log information */
struct curseg_info {
	struct mutex curseg_mutex;		/* lock for consistency */
	struct f2fs_summary_block *sum_blk;	/* cached summary block */
	struct rw_semaphore journal_rwsem;	/* protect journal area */
	struct f2fs_journal *journal;		/* cached journal info */
	unsigned char alloc_type;		/* current allocation type */
	unsigned int segno;			/* current segment number */
	unsigned short next_blkoff;		/* next block offset to write */
	unsigned int zone;			/* current zone number */
	unsigned int next_segno;		/* preallocated segment */
};

struct sit_entry_set {
	struct list_head set_list;	/* link with all sit sets */
	unsigned int start_segno;	/* start segno of sits in set */
	unsigned int entry_cnt;		/* the # of sit entries in set */
};

/*
 * inline functions
 */
static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
{
	if (type == CURSEG_COLD_DATA_PINNED)
		type = CURSEG_COLD_DATA;
	return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
}

static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sentries[segno];
}

static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sec_entries[GET_SEC_FROM_SEG(sbi, segno)];
}

static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno, bool use_section)
{
	/*
	 * In order to get # of valid blocks in a section instantly from many
	 * segments, f2fs manages two counting structures separately.
	 */
	if (use_section && __is_large_section(sbi))
		return get_sec_entry(sbi, segno)->valid_blocks;
	else
		return get_seg_entry(sbi, segno)->valid_blocks;
}

static inline unsigned int get_ckpt_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno)
{
	return get_seg_entry(sbi, segno)->ckpt_valid_blocks;
}

static inline void seg_info_from_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	se->valid_blocks = GET_SIT_VBLOCKS(rs);
	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
	memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
#ifdef CONFIG_F2FS_CHECK_FS
	memcpy(se->cur_valid_map_mir, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
#endif
	se->type = GET_SIT_TYPE(rs);
	se->mtime = le64_to_cpu(rs->mtime);
}

static inline void __seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
					se->valid_blocks;
	rs->vblocks = cpu_to_le16(raw_vblocks);
	memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
	rs->mtime = cpu_to_le64(se->mtime);
}

static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi,
				struct page *page, unsigned int start)
{
	struct f2fs_sit_block *raw_sit;
	struct seg_entry *se;
	struct f2fs_sit_entry *rs;
	unsigned int end = min(start + SIT_ENTRY_PER_BLOCK,
					(unsigned long)MAIN_SEGS(sbi));
	int i;

	raw_sit = (struct f2fs_sit_block *)page_address(page);
	memset(raw_sit, 0, PAGE_SIZE);
	for (i = 0; i < end - start; i++) {
		rs = &raw_sit->entries[i];
		se = get_seg_entry(sbi, start + i);
		__seg_info_to_raw_sit(se, rs);
	}
}

static inline void seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	__seg_info_to_raw_sit(se, rs);

	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->ckpt_valid_blocks = se->valid_blocks;
}

static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
		unsigned int max, unsigned int segno)
{
	unsigned int ret;
	spin_lock(&free_i->segmap_lock);
	ret = find_next_bit(free_i->free_segmap, max, segno);
	spin_unlock(&free_i->segmap_lock);
	return ret;
}

static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
	unsigned int next;

	spin_lock(&free_i->segmap_lock);
	clear_bit(segno, free_i->free_segmap);
	free_i->free_segments++;

	next = find_next_bit(free_i->free_segmap,
			start_segno + sbi->segs_per_sec, start_segno);
	if (next >= start_segno + sbi->segs_per_sec) {
		clear_bit(secno, free_i->free_secmap);
		free_i->free_sections++;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	set_bit(segno, free_i->free_segmap);
	free_i->free_segments--;
	if (!test_and_set_bit(secno, free_i->free_secmap))
		free_i->free_sections--;
}

static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start_segno = GET_SEG_FROM_SEC(sbi, secno);
	unsigned int next;

	spin_lock(&free_i->segmap_lock);
	if (test_and_clear_bit(segno, free_i->free_segmap)) {
		free_i->free_segments++;

		if (IS_CURSEC(sbi, secno))
			goto skip_free;
		next = find_next_bit(free_i->free_segmap,
				start_segno + sbi->segs_per_sec, start_segno);
		if (next >= start_segno + sbi->segs_per_sec) {
			if (test_and_clear_bit(secno, free_i->free_secmap))
				free_i->free_sections++;
		}
	}
skip_free:
	spin_unlock(&free_i->segmap_lock);
}

static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

	spin_lock(&free_i->segmap_lock);
	if (!test_and_set_bit(segno, free_i->free_segmap)) {
		free_i->free_segments--;
		if (!test_and_set_bit(secno, free_i->free_secmap))
			free_i->free_sections--;
	}
	spin_unlock(&free_i->segmap_lock);
}

static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
		void *dst_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);

#ifdef CONFIG_F2FS_CHECK_FS
	if (memcmp(sit_i->sit_bitmap, sit_i->sit_bitmap_mir,
						sit_i->bitmap_size))
		f2fs_bug_on(sbi, 1);
#endif
	memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
}

static inline block_t written_block_count(struct f2fs_sb_info *sbi)
{
	return SIT_I(sbi)->written_valid_blocks;
}

static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_segments;
}

static inline int reserved_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->reserved_segments;
}

static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_sections;
}

static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[PRE];
}

static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
}

static inline int overprovision_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->ovp_segments;
}

static inline int reserved_sections(struct f2fs_sb_info *sbi)
{
	return GET_SEC_FROM_SEG(sbi, (unsigned int)reserved_segments(sbi));
}

static inline bool has_curseg_enough_space(struct f2fs_sb_info *sbi)
{
	unsigned int node_blocks = get_pages(sbi, F2FS_DIRTY_NODES) +
					get_pages(sbi, F2FS_DIRTY_DENTS);
	unsigned int dent_blocks = get_pages(sbi, F2FS_DIRTY_DENTS);
	unsigned int segno, left_blocks;
	int i;

	/* check current node segment */
	for (i = CURSEG_HOT_NODE; i <= CURSEG_COLD_NODE; i++) {
		segno = CURSEG_I(sbi, i)->segno;
		left_blocks = sbi->blocks_per_seg -
			get_seg_entry(sbi, segno)->ckpt_valid_blocks;

		if (node_blocks > left_blocks)
			return false;
	}

	/* check current data segment */
	segno = CURSEG_I(sbi, CURSEG_HOT_DATA)->segno;
	left_blocks = sbi->blocks_per_seg -
			get_seg_entry(sbi, segno)->ckpt_valid_blocks;
	if (dent_blocks > left_blocks)
		return false;
	return true;
}

static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi,
					int freed, int needed)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return false;

	if (free_sections(sbi) + freed == reserved_sections(sbi) + needed &&
			has_curseg_enough_space(sbi))
		return false;
	return (free_sections(sbi) + freed) <=
		(node_secs + 2 * dent_secs + imeta_secs +
		reserved_sections(sbi) + needed);
}

static inline bool f2fs_is_checkpoint_ready(struct f2fs_sb_info *sbi)
{
	if (likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return true;
	if (likely(!has_not_enough_free_secs(sbi, 0, 0)))
		return true;
	return false;
}

static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
{
	return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
}

static inline int utilization(struct f2fs_sb_info *sbi)
{
	return div_u64((u64)valid_user_blocks(sbi) * 100,
					sbi->user_block_count);
}

/*
 * Sometimes it is better for f2fs to drop the out-of-place update policy,
 * and users can control the policy through sysfs entries.
 * There are five policies with triggering conditions as follows.
 * F2FS_IPU_FORCE - all the time,
 * F2FS_IPU_SSR - if SSR mode is activated,
 * F2FS_IPU_UTIL - if FS utilization is over threshold,
 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
 *                     threshold,
 * F2FS_IPU_FSYNC - activated in fsync path only for high performance flash
 *                     storages. IPU will be triggered only if the # of dirty
 *                     pages exceeds min_fsync_blocks.
 * F2FS_IPU_DISABLE - disable IPU. (=default option)
 */
#define DEF_MIN_IPU_UTIL	70
#define DEF_MIN_FSYNC_BLOCKS	8
#define DEF_MIN_HOT_BLOCKS	16

#define SMALL_VOLUME_SEGMENTS	(16 * 512)	/* 16GB */

enum {
	F2FS_IPU_FORCE,
	F2FS_IPU_SSR,
	F2FS_IPU_UTIL,
	F2FS_IPU_SSR_UTIL,
	F2FS_IPU_FSYNC,
	F2FS_IPU_ASYNC,
};
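/*
 * Illustrative usage (see Documentation/ABI/testing/sysfs-fs-f2fs for the
 * authoritative description): the policy is controlled per device through
 * sysfs, e.g. something like
 *	echo <policy value> > /sys/fs/f2fs/<dev>/ipu_policy
 * where recent kernels interpret the value as a bitmask over the enum
 * above; the UTIL policies take their thresholds from min_ipu_util and
 * F2FS_IPU_FSYNC from min_fsync_blocks.
 */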

static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->segno;
}

static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->alloc_type;
}

static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->next_blkoff;
}

static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
	f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
}

static inline void verify_fio_blkaddr(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;

	if (__is_valid_data_blkaddr(fio->old_blkaddr))
		verify_blkaddr(sbi, fio->old_blkaddr, __is_meta_io(fio) ?
					META_GENERIC : DATA_GENERIC);
	verify_blkaddr(sbi, fio->new_blkaddr, __is_meta_io(fio) ?
					META_GENERIC : DATA_GENERIC_ENHANCE);
}

/*
 * Summary block is always treated as an invalid block
 */
static inline int check_block_count(struct f2fs_sb_info *sbi,
		int segno, struct f2fs_sit_entry *raw_sit)
{
	bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
	int valid_blocks = 0;
	int cur_pos = 0, next_pos;

	/* check bitmap with valid block count */
	do {
		if (is_valid) {
			next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
					sbi->blocks_per_seg,
					cur_pos);
			valid_blocks += next_pos - cur_pos;
		} else
			next_pos = find_next_bit_le(&raw_sit->valid_map,
					sbi->blocks_per_seg,
					cur_pos);
		cur_pos = next_pos;
		is_valid = !is_valid;
	} while (cur_pos < sbi->blocks_per_seg);

	if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
		f2fs_err(sbi, "Mismatch valid blocks %d vs. %d",
			 GET_SIT_VBLOCKS(raw_sit), valid_blocks);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		return -EFSCORRUPTED;
	}

	/* check segment usage, and check boundary of a given segment number */
	if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
					|| segno > TOTAL_SEGS(sbi) - 1)) {
		f2fs_err(sbi, "Wrong valid blocks %d or segno %u",
			 GET_SIT_VBLOCKS(raw_sit), segno);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		return -EFSCORRUPTED;
	}
	return 0;
}

static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
						unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(start);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, start);

#ifdef CONFIG_F2FS_CHECK_FS
	if (f2fs_test_bit(offset, sit_i->sit_bitmap) !=
			f2fs_test_bit(offset, sit_i->sit_bitmap_mir))
		f2fs_bug_on(sbi, 1);
#endif

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return blk_addr;
}

static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	block_addr -= sit_i->sit_base_addr;
	if (block_addr < sit_i->sit_blocks)
		block_addr += sit_i->sit_blocks;
	else
		block_addr -= sit_i->sit_blocks;

	return block_addr + sit_i->sit_base_addr;
}

static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
{
	unsigned int block_off = SIT_BLOCK_OFFSET(start);

	f2fs_change_bit(block_off, sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_change_bit(block_off, sit_i->sit_bitmap_mir);
#endif
}

static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi,
						bool base_time)
{
	struct sit_info *sit_i = SIT_I(sbi);
	time64_t diff, now = ktime_get_real_seconds();

	if (now >= sit_i->mounted_time)
		return sit_i->elapsed_time + now - sit_i->mounted_time;

	/* system time is set to the past */
	if (!base_time) {
		diff = sit_i->mounted_time - now;
		if (sit_i->elapsed_time >= diff)
			return sit_i->elapsed_time - diff;
		return 0;
	}
	return sit_i->elapsed_time;
}

static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
			unsigned int ofs_in_node, unsigned char version)
{
	sum->nid = cpu_to_le32(nid);
	sum->ofs_in_node = cpu_to_le16(ofs_in_node);
	sum->version = version;
}

static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
				- (base + 1) + type;
}

static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
{
	if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
		return true;
	return false;
}

/*
 * It is very important to gather dirty pages and write them at once, so that
 * we can submit a big bio without interfering with other data writes.
 * By default, 512 pages for directory data,
 * 512 pages (2MB) * 8 for nodes, and
 * 256 pages * 8 for meta are set.
 */
static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
{
	if (sbi->sb->s_bdi->wb.dirty_exceeded)
		return 0;

	if (type == DATA)
		return sbi->blocks_per_seg;
	else if (type == NODE)
		return 8 * sbi->blocks_per_seg;
	else if (type == META)
		return 8 * BIO_MAX_PAGES;
	else
		return 0;
}

/*
 * When writing pages, it is better to align nr_to_write to the segment size.
 */
static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
					struct writeback_control *wbc)
{
	long nr_to_write, desired;

	if (wbc->sync_mode != WB_SYNC_NONE)
		return 0;

	nr_to_write = wbc->nr_to_write;
	desired = BIO_MAX_PAGES;
	if (type == NODE)
		desired <<= 1;

	wbc->nr_to_write = desired;
	return desired - nr_to_write;
}

static inline void wake_up_discard_thread(struct f2fs_sb_info *sbi, bool force)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	bool wakeup = false;
	int i;

	if (force)
		goto wake_up;

	mutex_lock(&dcc->cmd_lock);
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		if (i + 1 < dcc->discard_granularity)
			break;
		if (!list_empty(&dcc->pend_list[i])) {
			wakeup = true;
			break;
		}
	}
	mutex_unlock(&dcc->cmd_lock);
	if (!wakeup || !is_idle(sbi, DISCARD_TIME))
		return;
wake_up:
	dcc->discard_wake = 1;
	wake_up_interruptible_all(&dcc->discard_wait_queue);
}