1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * fs/f2fs/f2fs.h
4  *
5  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6  *             http://www.samsung.com/
7  */
8 #ifndef _LINUX_F2FS_H
9 #define _LINUX_F2FS_H
10 
11 #include <linux/uio.h>
12 #include <linux/types.h>
13 #include <linux/page-flags.h>
14 #include <linux/buffer_head.h>
15 #include <linux/slab.h>
16 #include <linux/crc32.h>
17 #include <linux/magic.h>
18 #include <linux/kobject.h>
19 #include <linux/sched.h>
20 #include <linux/cred.h>
21 #include <linux/vmalloc.h>
22 #include <linux/bio.h>
23 #include <linux/blkdev.h>
24 #include <linux/quotaops.h>
25 #include <linux/part_stat.h>
26 #include <crypto/hash.h>
27 
28 #include <linux/fscrypt.h>
29 #include <linux/fsverity.h>
30 
31 #ifdef CONFIG_F2FS_CHECK_FS
32 #define f2fs_bug_on(sbi, condition)	BUG_ON(condition)
33 #else
34 #define f2fs_bug_on(sbi, condition)					\
35 	do {								\
36 		if (unlikely(condition)) {				\
37 			WARN_ON(1);					\
38 			set_sbi_flag(sbi, SBI_NEED_FSCK);		\
39 		}							\
40 	} while (0)
41 #endif
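
/*
 * Usage sketch (illustration only): with CONFIG_F2FS_CHECK_FS the check
 * halts via BUG_ON(); without it, the same call only warns and tags the
 * superblock for fsck.f2fs.  "sbi" and "blkaddr_ok" are hypothetical here:
 *
 *	f2fs_bug_on(sbi, !blkaddr_ok);
 *		debug build:   BUG_ON(!blkaddr_ok)
 *		release build: WARN_ON(1); set_sbi_flag(sbi, SBI_NEED_FSCK);
 */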
42 
43 enum {
44 	FAULT_KMALLOC,
45 	FAULT_KVMALLOC,
46 	FAULT_PAGE_ALLOC,
47 	FAULT_PAGE_GET,
48 	FAULT_ALLOC_BIO,
49 	FAULT_ALLOC_NID,
50 	FAULT_ORPHAN,
51 	FAULT_BLOCK,
52 	FAULT_DIR_DEPTH,
53 	FAULT_EVICT_INODE,
54 	FAULT_TRUNCATE,
55 	FAULT_READ_IO,
56 	FAULT_CHECKPOINT,
57 	FAULT_DISCARD,
58 	FAULT_WRITE_IO,
59 	FAULT_MAX,
60 };
61 
62 #ifdef CONFIG_F2FS_FAULT_INJECTION
63 #define F2FS_ALL_FAULT_TYPE		((1 << FAULT_MAX) - 1)
64 
65 struct f2fs_fault_info {
66 	atomic_t inject_ops;
67 	unsigned int inject_rate;
68 	unsigned int inject_type;
69 };
70 
71 extern const char *f2fs_fault_name[FAULT_MAX];
72 #define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
73 #endif
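
/*
 * Worked example (illustration only): each FAULT_* value is a bit index in
 * fi->inject_type, so inject_type == 0x3 enables FAULT_KMALLOC (bit 0) and
 * FAULT_KVMALLOC (bit 1), and IS_FAULT_SET(fi, FAULT_PAGE_ALLOC) tests
 * bit 2.  F2FS_ALL_FAULT_TYPE sets every bit below FAULT_MAX.
 */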
74 
75 /*
76  * For mount options
77  */
78 #define F2FS_MOUNT_DISABLE_ROLL_FORWARD	0x00000002
79 #define F2FS_MOUNT_DISCARD		0x00000004
80 #define F2FS_MOUNT_NOHEAP		0x00000008
81 #define F2FS_MOUNT_XATTR_USER		0x00000010
82 #define F2FS_MOUNT_POSIX_ACL		0x00000020
83 #define F2FS_MOUNT_DISABLE_EXT_IDENTIFY	0x00000040
84 #define F2FS_MOUNT_INLINE_XATTR		0x00000080
85 #define F2FS_MOUNT_INLINE_DATA		0x00000100
86 #define F2FS_MOUNT_INLINE_DENTRY	0x00000200
87 #define F2FS_MOUNT_FLUSH_MERGE		0x00000400
88 #define F2FS_MOUNT_NOBARRIER		0x00000800
89 #define F2FS_MOUNT_FASTBOOT		0x00001000
90 #define F2FS_MOUNT_EXTENT_CACHE		0x00002000
91 #define F2FS_MOUNT_DATA_FLUSH		0x00008000
92 #define F2FS_MOUNT_FAULT_INJECTION	0x00010000
93 #define F2FS_MOUNT_USRQUOTA		0x00080000
94 #define F2FS_MOUNT_GRPQUOTA		0x00100000
95 #define F2FS_MOUNT_PRJQUOTA		0x00200000
96 #define F2FS_MOUNT_QUOTA		0x00400000
97 #define F2FS_MOUNT_INLINE_XATTR_SIZE	0x00800000
98 #define F2FS_MOUNT_RESERVE_ROOT		0x01000000
99 #define F2FS_MOUNT_DISABLE_CHECKPOINT	0x02000000
100 #define F2FS_MOUNT_NORECOVERY		0x04000000
101 #define F2FS_MOUNT_ATGC			0x08000000
102 #define	F2FS_MOUNT_GC_MERGE		0x20000000
103 
104 #define F2FS_OPTION(sbi)	((sbi)->mount_opt)
105 #define clear_opt(sbi, option)	(F2FS_OPTION(sbi).opt &= ~F2FS_MOUNT_##option)
106 #define set_opt(sbi, option)	(F2FS_OPTION(sbi).opt |= F2FS_MOUNT_##option)
107 #define test_opt(sbi, option)	(F2FS_OPTION(sbi).opt & F2FS_MOUNT_##option)
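
/*
 * Usage sketch (illustration only): the option helpers expand against
 * F2FS_OPTION(sbi).opt, e.g.
 *
 *	set_opt(sbi, DISCARD);		-> opt |= F2FS_MOUNT_DISCARD
 *	test_opt(sbi, DISCARD)		-> opt & F2FS_MOUNT_DISCARD
 *	clear_opt(sbi, DISCARD);	-> opt &= ~F2FS_MOUNT_DISCARD
 */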
108 
109 #define ver_after(a, b)	(typecheck(unsigned long long, a) &&		\
110 		typecheck(unsigned long long, b) &&			\
111 		((long long)((a) - (b)) > 0))
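
/*
 * Worked example (illustration only): the unsigned subtraction cast to
 * signed makes the version comparison wrap-safe: ver_after(3, 1) is true,
 * ver_after(1, 3) is false, and ver_after(1, ULLONG_MAX) is still true
 * because (1 - ULLONG_MAX) wraps around to 2.
 */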
112 
113 typedef u32 block_t;	/*
114 			 * should not change u32, since it is the on-disk block
115 			 * address format, __le32.
116 			 */
117 typedef u32 nid_t;
118 
119 #define COMPRESS_EXT_NUM		16
120 
121 struct f2fs_mount_info {
122 	unsigned int opt;
123 	int write_io_size_bits;		/* Write IO size bits */
124 	block_t root_reserved_blocks;	/* root reserved blocks */
125 	kuid_t s_resuid;		/* reserved blocks for uid */
126 	kgid_t s_resgid;		/* reserved blocks for gid */
127 	int active_logs;		/* # of active logs */
128 	int inline_xattr_size;		/* inline xattr size */
129 #ifdef CONFIG_F2FS_FAULT_INJECTION
130 	struct f2fs_fault_info fault_info;	/* For fault injection */
131 #endif
132 #ifdef CONFIG_QUOTA
133 	/* Names of quota files with journalled quota */
134 	char *s_qf_names[MAXQUOTAS];
135 	int s_jquota_fmt;			/* Format of quota to use */
136 #endif
137 	/* controls which write hints are passed down to the block layer */
138 	int whint_mode;
139 	int alloc_mode;			/* segment allocation policy */
140 	int fsync_mode;			/* fsync policy */
141 	int fs_mode;			/* fs mode: LFS or ADAPTIVE */
142 	int bggc_mode;			/* bggc mode: off, on or sync */
143 	struct fscrypt_dummy_policy dummy_enc_policy; /* test dummy encryption */
144 	block_t unusable_cap_perc;	/* percentage for cap */
145 	block_t unusable_cap;		/* Amount of space allowed to be
146 					 * unusable when disabling checkpoint
147 					 */
148 
149 	/* For compression */
150 	unsigned char compress_algorithm;	/* algorithm type */
151 	unsigned compress_log_size;		/* cluster log size */
152 	unsigned char compress_ext_cnt;		/* extension count */
153 	unsigned char extensions[COMPRESS_EXT_NUM][F2FS_EXTENSION_LEN];	/* extensions */
154 };
155 
156 #define F2FS_FEATURE_ENCRYPT		0x0001
157 #define F2FS_FEATURE_BLKZONED		0x0002
158 #define F2FS_FEATURE_ATOMIC_WRITE	0x0004
159 #define F2FS_FEATURE_EXTRA_ATTR		0x0008
160 #define F2FS_FEATURE_PRJQUOTA		0x0010
161 #define F2FS_FEATURE_INODE_CHKSUM	0x0020
162 #define F2FS_FEATURE_FLEXIBLE_INLINE_XATTR	0x0040
163 #define F2FS_FEATURE_QUOTA_INO		0x0080
164 #define F2FS_FEATURE_INODE_CRTIME	0x0100
165 #define F2FS_FEATURE_LOST_FOUND		0x0200
166 #define F2FS_FEATURE_VERITY		0x0400
167 #define F2FS_FEATURE_SB_CHKSUM		0x0800
168 #define F2FS_FEATURE_CASEFOLD		0x1000
169 #define F2FS_FEATURE_COMPRESSION	0x2000
170 
171 #define __F2FS_HAS_FEATURE(raw_super, mask)				\
172 	((raw_super->feature & cpu_to_le32(mask)) != 0)
173 #define F2FS_HAS_FEATURE(sbi, mask)	__F2FS_HAS_FEATURE(sbi->raw_super, mask)
174 #define F2FS_SET_FEATURE(sbi, mask)					\
175 	(sbi->raw_super->feature |= cpu_to_le32(mask))
176 #define F2FS_CLEAR_FEATURE(sbi, mask)					\
177 	(sbi->raw_super->feature &= ~cpu_to_le32(mask))
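
/*
 * Usage sketch (illustration only): feature bits live in the raw superblock
 * in little-endian form, so callers test them through these helpers, e.g.
 *
 *	if (F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_COMPRESSION))
 *		...;	the superblock advertises per-file compression
 */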
178 
179 /*
180  * Default values for user and/or group using reserved blocks
181  */
182 #define	F2FS_DEF_RESUID		0
183 #define	F2FS_DEF_RESGID		0
184 
185 /*
186  * For checkpoint manager
187  */
188 enum {
189 	NAT_BITMAP,
190 	SIT_BITMAP
191 };
192 
193 #define	CP_UMOUNT	0x00000001
194 #define	CP_FASTBOOT	0x00000002
195 #define	CP_SYNC		0x00000004
196 #define	CP_RECOVERY	0x00000008
197 #define	CP_DISCARD	0x00000010
198 #define CP_TRIMMED	0x00000020
199 #define CP_PAUSE	0x00000040
200 #define CP_RESIZE 	0x00000080
201 
202 #define MAX_DISCARD_BLOCKS(sbi)		BLKS_PER_SEC(sbi)
203 #define DEF_MAX_DISCARD_REQUEST		8	/* issue 8 discards per round */
204 #define DEF_MIN_DISCARD_ISSUE_TIME	50	/* 50 ms, if candidates exist */
205 #define DEF_MID_DISCARD_ISSUE_TIME	500	/* 500 ms, if device busy */
206 #define DEF_MAX_DISCARD_ISSUE_TIME	60000	/* 60 s, if no candidates */
207 #define DEF_DISCARD_URGENT_UTIL		80	/* do more discard over 80% */
208 #define DEF_CP_INTERVAL			60	/* 60 secs */
209 #define DEF_IDLE_INTERVAL		5	/* 5 secs */
210 #define DEF_DISABLE_INTERVAL		5	/* 5 secs */
211 #define DEF_DISABLE_QUICK_INTERVAL	1	/* 1 secs */
212 #define DEF_UMOUNT_DISCARD_TIMEOUT	5	/* 5 secs */
213 
214 struct cp_control {
215 	int reason;
216 	__u64 trim_start;
217 	__u64 trim_end;
218 	__u64 trim_minlen;
219 };
220 
221 /*
222  * indicate meta/data type
223  */
224 enum {
225 	META_CP,
226 	META_NAT,
227 	META_SIT,
228 	META_SSA,
229 	META_MAX,
230 	META_POR,
231 	DATA_GENERIC,		/* check range only */
232 	DATA_GENERIC_ENHANCE,	/* strong check on range and segment bitmap */
233 	DATA_GENERIC_ENHANCE_READ,	/*
234 					 * strong check on range and segment
235 					 * bitmap but no warning due to race
236 					 * condition of read on truncated area
237 					 * by extent_cache
238 					 */
239 	META_GENERIC,
240 };
241 
242 /* for the list of ino */
243 enum {
244 	ORPHAN_INO,		/* for orphan ino list */
245 	APPEND_INO,		/* for append ino list */
246 	UPDATE_INO,		/* for update ino list */
247 	TRANS_DIR_INO,		/* for transactions dir ino list */
248 	FLUSH_INO,		/* for multiple device flushing */
249 	MAX_INO_ENTRY,		/* max. list */
250 };
251 
252 struct ino_entry {
253 	struct list_head list;		/* list head */
254 	nid_t ino;			/* inode number */
255 	unsigned int dirty_device;	/* dirty device bitmap */
256 };
257 
258 /* for the list of inodes to be GCed */
259 struct inode_entry {
260 	struct list_head list;	/* list head */
261 	struct inode *inode;	/* vfs inode pointer */
262 };
263 
264 struct fsync_node_entry {
265 	struct list_head list;	/* list head */
266 	struct page *page;	/* warm node page pointer */
267 	unsigned int seq_id;	/* sequence id */
268 };
269 
270 /* for the bitmap indicate blocks to be discarded */
271 struct discard_entry {
272 	struct list_head list;	/* list head */
273 	block_t start_blkaddr;	/* start blockaddr of current segment */
274 	unsigned char discard_map[SIT_VBLOCK_MAP_SIZE];	/* segment discard bitmap */
275 };
276 
277 /* default discard granularity of inner discard thread, unit: block count */
278 #define DEFAULT_DISCARD_GRANULARITY		16
279 #define DISCARD_GRAN_BL		16
280 #define DISCARD_GRAN_BG		512
281 #define DISCARD_GRAN_FORCE	1
282 
283 /* max discard pend list number */
284 #define MAX_PLIST_NUM		512
285 #define plist_idx(blk_num)	((blk_num) >= MAX_PLIST_NUM ?		\
286 					(MAX_PLIST_NUM - 1) : ((blk_num) - 1))
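
/*
 * Worked example (illustration only): plist_idx() maps a discard length in
 * blocks to a pending-list slot, and any length of MAX_PLIST_NUM blocks or
 * more shares the last slot: plist_idx(1) == 0, plist_idx(16) == 15,
 * plist_idx(512) == 511, plist_idx(4096) == 511.
 */
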
287 #define FS_FREE_SPACE_PERCENT		20
288 #define DEVICE_FREE_SPACE_PERCENT	10
289 #define HUNDRED_PERCENT			100
290 
291 enum {
292 	D_PREP,			/* initial */
293 	D_PARTIAL,		/* partially submitted */
294 	D_SUBMIT,		/* all submitted */
295 	D_DONE,			/* finished */
296 };
297 
298 struct discard_info {
299 	block_t lstart;			/* logical start address */
300 	block_t len;			/* length */
301 	block_t start;			/* actual start address in dev */
302 };
303 
304 struct discard_cmd {
305 	struct rb_node rb_node;		/* rb node located in rb-tree */
306 	union {
307 		struct {
308 			block_t lstart;	/* logical start address */
309 			block_t len;	/* length */
310 			block_t start;	/* actual start address in dev */
311 		};
312 		struct discard_info di;	/* discard info */
313 
314 	};
315 	struct list_head list;		/* command list */
316 	struct completion wait;		/* completion */
317 	struct block_device *bdev;	/* bdev */
318 	unsigned short ref;		/* reference count */
319 	unsigned char state;		/* state */
320 	unsigned char queued;		/* queued discard */
321 	int error;			/* bio error */
322 	spinlock_t lock;		/* for state/bio_ref updating */
323 	unsigned short bio_ref;		/* bio reference count */
324 };
325 
326 enum {
327 	DPOLICY_BG,
328 	DPOLICY_BALANCE,
329 	DPOLICY_FORCE,
330 	DPOLICY_FSTRIM,
331 	DPOLICY_UMOUNT,
332 	MAX_DPOLICY,
333 };
334 
335 enum {
336 	SUB_POLICY_BIG,
337 	SUB_POLICY_MID,
338 	SUB_POLICY_SMALL,
339 	NR_SUB_POLICY,
340 };
341 
342 struct discard_sub_policy {
343 	unsigned int max_requests;
344 	int interval;
345 };
346 
347 struct discard_policy {
348 	int type;			/* type of discard */
349 	unsigned int min_interval;	/* used when candidates exist */
350 	unsigned int mid_interval;	/* used when the device is busy */
351 	unsigned int max_interval;	/* used when no candidates exist */
352 	unsigned int io_aware_gran;	/* minimum granularity at which discard is not I/O aware */
353 	bool io_aware;			/* issue discard in idle time */
354 	bool sync;			/* submit discard with REQ_SYNC flag */
355 	bool ordered;			/* issue discard by lba order */
356 	bool timeout;			/* discard timeout for put_super */
357 	unsigned int granularity;	/* discard granularity */
358 	struct discard_sub_policy sub_policy[NR_SUB_POLICY];
359 };
360 
361 struct discard_cmd_control {
362 	struct task_struct *f2fs_issue_discard;	/* discard thread */
363 	struct list_head entry_list;		/* 4KB discard entry list */
364 	struct list_head pend_list[MAX_PLIST_NUM];/* store pending entries */
365 	struct list_head wait_list;		/* store on-flushing entries */
366 	struct list_head fstrim_list;		/* in-flight discard from fstrim */
367 	wait_queue_head_t discard_wait_queue;	/* waiting queue for wake-up */
368 	unsigned int discard_wake;		/* to wake up discard thread */
369 	struct mutex cmd_lock;
370 	unsigned int nr_discards;		/* # of discards in the list */
371 	unsigned int max_discards;		/* max. discards to be issued */
372 	unsigned int discard_granularity;	/* discard granularity */
373 	unsigned int undiscard_blks;		/* # of undiscard blocks */
374 	unsigned int next_pos;			/* next discard position */
375 	atomic_t issued_discard;		/* # of issued discard */
376 	atomic_t queued_discard;		/* # of queued discard */
377 	atomic_t discard_cmd_cnt;		/* # of cached cmd count */
378 	struct rb_root_cached root;		/* root of discard rb-tree */
379 	bool rbtree_check;			/* config for consistency check */
380 	int discard_type;                       /* discard type */
381 };
382 
383 /* for the list of fsync inodes, used only during recovery */
384 struct fsync_inode_entry {
385 	struct list_head list;	/* list head */
386 	struct inode *inode;	/* vfs inode pointer */
387 	block_t blkaddr;	/* block address locating the last fsync */
388 	block_t last_dentry;	/* block address locating the last dentry */
389 };
390 
391 #define nats_in_cursum(jnl)		(le16_to_cpu((jnl)->n_nats))
392 #define sits_in_cursum(jnl)		(le16_to_cpu((jnl)->n_sits))
393 
394 #define nat_in_journal(jnl, i)		((jnl)->nat_j.entries[i].ne)
395 #define nid_in_journal(jnl, i)		((jnl)->nat_j.entries[i].nid)
396 #define sit_in_journal(jnl, i)		((jnl)->sit_j.entries[i].se)
397 #define segno_in_journal(jnl, i)	((jnl)->sit_j.entries[i].segno)
398 
399 #define MAX_NAT_JENTRIES(jnl)	(NAT_JOURNAL_ENTRIES - nats_in_cursum(jnl))
400 #define MAX_SIT_JENTRIES(jnl)	(SIT_JOURNAL_ENTRIES - sits_in_cursum(jnl))
401 
402 static inline int update_nats_in_cursum(struct f2fs_journal *journal, int i)
403 {
404 	int before = nats_in_cursum(journal);
405 
406 	journal->n_nats = cpu_to_le16(before + i);
407 	return before;
408 }
409 
410 static inline int update_sits_in_cursum(struct f2fs_journal *journal, int i)
411 {
412 	int before = sits_in_cursum(journal);
413 
414 	journal->n_sits = cpu_to_le16(before + i);
415 	return before;
416 }
417 
418 static inline bool __has_cursum_space(struct f2fs_journal *journal,
419 							int size, int type)
420 {
421 	if (type == NAT_JOURNAL)
422 		return size <= MAX_NAT_JENTRIES(journal);
423 	return size <= MAX_SIT_JENTRIES(journal);
424 }
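
/*
 * Usage sketch (illustration only): before packing "size" more NAT entries
 * into the current summary journal, a caller would check
 *
 *	if (__has_cursum_space(journal, size, NAT_JOURNAL))
 *		...;	still room in the journal, no flush needed
 *
 * NAT_JOURNAL/SIT_JOURNAL belong to the on-disk format (f2fs_fs.h).
 */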
425 
426 /* for inline stuff */
427 #define DEF_INLINE_RESERVED_SIZE	1
428 static inline int get_extra_isize(struct inode *inode);
429 static inline int get_inline_xattr_addrs(struct inode *inode);
430 #define MAX_INLINE_DATA(inode)	(sizeof(__le32) *			\
431 				(CUR_ADDRS_PER_INODE(inode) -		\
432 				get_inline_xattr_addrs(inode) -	\
433 				DEF_INLINE_RESERVED_SIZE))
434 
435 /* for inline dir */
436 #define NR_INLINE_DENTRY(inode)	(MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
437 				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
438 				BITS_PER_BYTE + 1))
439 #define INLINE_DENTRY_BITMAP_SIZE(inode) \
440 	DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
441 #define INLINE_RESERVED_SIZE(inode)	(MAX_INLINE_DATA(inode) - \
442 				((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
443 				NR_INLINE_DENTRY(inode) + \
444 				INLINE_DENTRY_BITMAP_SIZE(inode)))
445 
446 /*
447  * For INODE and NODE manager
448  */
449 /* for directory operations */
450 
451 struct f2fs_filename {
452 	/*
453 	 * The filename the user specified.  This is NULL for some
454 	 * filesystem-internal operations, e.g. converting an inline directory
455 	 * to a non-inline one, or roll-forward recovering an encrypted dentry.
456 	 */
457 	const struct qstr *usr_fname;
458 
459 	/*
460 	 * The on-disk filename.  For encrypted directories, this is encrypted.
461 	 * This may be NULL for lookups in an encrypted dir without the key.
462 	 */
463 	struct fscrypt_str disk_name;
464 
465 	/* The dirhash of this filename */
466 	f2fs_hash_t hash;
467 
468 #ifdef CONFIG_FS_ENCRYPTION
469 	/*
470 	 * For lookups in encrypted directories: either the buffer backing
471 	 * disk_name, or a buffer that holds the decoded no-key name.
472 	 */
473 	struct fscrypt_str crypto_buf;
474 #endif
475 #ifdef CONFIG_UNICODE
476 	/*
477 	 * For casefolded directories: the casefolded name, but it's left NULL
478 	 * if the original name is not valid Unicode or if the filesystem is
479 	 * doing an internal operation where usr_fname is also NULL.  In these
480 	 * cases we fall back to treating the name as an opaque byte sequence.
481 	 */
482 	struct fscrypt_str cf_name;
483 #endif
484 };
485 
486 struct f2fs_dentry_ptr {
487 	struct inode *inode;
488 	void *bitmap;
489 	struct f2fs_dir_entry *dentry;
490 	__u8 (*filename)[F2FS_SLOT_LEN];
491 	int max;
492 	int nr_bitmap;
493 };
494 
495 static inline void make_dentry_ptr_block(struct inode *inode,
496 		struct f2fs_dentry_ptr *d, struct f2fs_dentry_block *t)
497 {
498 	d->inode = inode;
499 	d->max = NR_DENTRY_IN_BLOCK;
500 	d->nr_bitmap = SIZE_OF_DENTRY_BITMAP;
501 	d->bitmap = t->dentry_bitmap;
502 	d->dentry = t->dentry;
503 	d->filename = t->filename;
504 }
505 
506 static inline void make_dentry_ptr_inline(struct inode *inode,
507 					struct f2fs_dentry_ptr *d, void *t)
508 {
509 	int entry_cnt = NR_INLINE_DENTRY(inode);
510 	int bitmap_size = INLINE_DENTRY_BITMAP_SIZE(inode);
511 	int reserved_size = INLINE_RESERVED_SIZE(inode);
512 
513 	d->inode = inode;
514 	d->max = entry_cnt;
515 	d->nr_bitmap = bitmap_size;
516 	d->bitmap = t;
517 	d->dentry = t + bitmap_size + reserved_size;
518 	d->filename = t + bitmap_size + reserved_size +
519 					SIZE_OF_DIR_ENTRY * entry_cnt;
520 }
521 
522 /*
523  * Xattrs are stored in one node block per file, which keeps -1 as its
524  * node offset to distinguish it from index node blocks.  Since some bits
525  * of the offset are used to mark the node block, they are masked off here.
526  */
527 #define XATTR_NODE_OFFSET	((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
528 				>> OFFSET_BIT_SHIFT)
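
/*
 * Worked example (illustration only): starting from (unsigned int)-1, the
 * left-then-right shift by OFFSET_BIT_SHIFT clears the top marker bits, so
 * XATTR_NODE_OFFSET is the largest value the node-offset field itself can
 * hold; a node block whose offset (ofs_of_node()) equals this constant is
 * treated as the file's xattr block.
 */
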
529 enum {
530 	ALLOC_NODE,			/* allocate a new node page if needed */
531 	LOOKUP_NODE,			/* look up a node without readahead */
532 	LOOKUP_NODE_RA,			/*
533 					 * look up a node with readahead called
534 					 * by get_data_block.
535 					 */
536 };
537 
538 #define DEFAULT_RETRY_IO_COUNT	8	/* maximum retry read IO count */
539 
540 /* congestion wait timeout value, default: 20ms */
541 #define	DEFAULT_IO_TIMEOUT	(msecs_to_jiffies(20))
542 
543 /* maximum retry quota flush count */
544 #define DEFAULT_RETRY_QUOTA_FLUSH_COUNT		8
545 
546 #define F2FS_LINK_MAX	0xffffffff	/* maximum link count per file */
547 
548 #define MAX_DIR_RA_PAGES	4	/* maximum ra pages of dir */
549 
550 /* for in-memory extent cache entry */
551 #define F2FS_MIN_EXTENT_LEN	64	/* minimum extent length */
552 
553 /* number of extent info in extent cache we try to shrink */
554 #define EXTENT_CACHE_SHRINK_NUMBER	128
555 
556 struct rb_entry {
557 	struct rb_node rb_node;		/* rb node located in rb-tree */
558 	union {
559 		struct {
560 			unsigned int ofs;	/* start offset of the entry */
561 			unsigned int len;	/* length of the entry */
562 		};
563 		unsigned long long key;		/* 64-bits key */
564 	} __packed;
565 };
566 
567 struct extent_info {
568 	unsigned int fofs;		/* start offset in a file */
569 	unsigned int len;		/* length of the extent */
570 	u32 blk;			/* start block address of the extent */
571 };
572 
573 struct extent_node {
574 	struct rb_node rb_node;		/* rb node located in rb-tree */
575 	struct extent_info ei;		/* extent info */
576 	struct list_head list;		/* node in global extent list of sbi */
577 	struct extent_tree *et;		/* extent tree pointer */
578 };
579 
580 struct extent_tree {
581 	nid_t ino;			/* inode number */
582 	struct rb_root_cached root;	/* root of extent info rb-tree */
583 	struct extent_node *cached_en;	/* recently accessed extent node */
584 	struct extent_info largest;	/* largest extent info */
585 	struct list_head list;		/* to be used by sbi->zombie_list */
586 	rwlock_t lock;			/* protect extent info rb-tree */
587 	atomic_t node_cnt;		/* # of extent node in rb-tree*/
588 	bool largest_updated;		/* largest extent updated */
589 };
590 
591 /*
592  * This structure is taken from ext4_map_blocks.
593  *
594  * Note that, however, f2fs uses NEW and MAPPED flags for f2fs_map_blocks().
595  */
596 #define F2FS_MAP_NEW		(1 << BH_New)
597 #define F2FS_MAP_MAPPED		(1 << BH_Mapped)
598 #define F2FS_MAP_UNWRITTEN	(1 << BH_Unwritten)
599 #define F2FS_MAP_FLAGS		(F2FS_MAP_NEW | F2FS_MAP_MAPPED |\
600 				F2FS_MAP_UNWRITTEN)
601 
602 struct f2fs_map_blocks {
603 	block_t m_pblk;
604 	block_t m_lblk;
605 	unsigned int m_len;
606 	unsigned int m_flags;
607 	pgoff_t *m_next_pgofs;		/* point next possible non-hole pgofs */
608 	pgoff_t *m_next_extent;		/* point to next possible extent */
609 	int m_seg_type;
610 	bool m_may_create;		/* indicate it is from write path */
611 };
612 
613 /* for flag in get_data_block */
614 enum {
615 	F2FS_GET_BLOCK_DEFAULT,
616 	F2FS_GET_BLOCK_FIEMAP,
617 	F2FS_GET_BLOCK_BMAP,
618 	F2FS_GET_BLOCK_DIO,
619 	F2FS_GET_BLOCK_PRE_DIO,
620 	F2FS_GET_BLOCK_PRE_AIO,
621 	F2FS_GET_BLOCK_PRECACHE,
622 };
623 
624 /*
625  * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
626  */
627 #define FADVISE_COLD_BIT	0x01
628 #define FADVISE_LOST_PINO_BIT	0x02
629 #define FADVISE_ENCRYPT_BIT	0x04
630 #define FADVISE_ENC_NAME_BIT	0x08
631 #define FADVISE_KEEP_SIZE_BIT	0x10
632 #define FADVISE_HOT_BIT		0x20
633 #define FADVISE_VERITY_BIT	0x40
634 
635 #define FADVISE_MODIFIABLE_BITS	(FADVISE_COLD_BIT | FADVISE_HOT_BIT)
636 
637 #define file_is_cold(inode)	is_file(inode, FADVISE_COLD_BIT)
638 #define file_wrong_pino(inode)	is_file(inode, FADVISE_LOST_PINO_BIT)
639 #define file_set_cold(inode)	set_file(inode, FADVISE_COLD_BIT)
640 #define file_lost_pino(inode)	set_file(inode, FADVISE_LOST_PINO_BIT)
641 #define file_clear_cold(inode)	clear_file(inode, FADVISE_COLD_BIT)
642 #define file_got_pino(inode)	clear_file(inode, FADVISE_LOST_PINO_BIT)
643 #define file_is_encrypt(inode)	is_file(inode, FADVISE_ENCRYPT_BIT)
644 #define file_set_encrypt(inode)	set_file(inode, FADVISE_ENCRYPT_BIT)
645 #define file_clear_encrypt(inode) clear_file(inode, FADVISE_ENCRYPT_BIT)
646 #define file_enc_name(inode)	is_file(inode, FADVISE_ENC_NAME_BIT)
647 #define file_set_enc_name(inode) set_file(inode, FADVISE_ENC_NAME_BIT)
648 #define file_keep_isize(inode)	is_file(inode, FADVISE_KEEP_SIZE_BIT)
649 #define file_set_keep_isize(inode) set_file(inode, FADVISE_KEEP_SIZE_BIT)
650 #define file_is_hot(inode)	is_file(inode, FADVISE_HOT_BIT)
651 #define file_set_hot(inode)	set_file(inode, FADVISE_HOT_BIT)
652 #define file_clear_hot(inode)	clear_file(inode, FADVISE_HOT_BIT)
653 #define file_is_verity(inode)	is_file(inode, FADVISE_VERITY_BIT)
654 #define file_set_verity(inode)	set_file(inode, FADVISE_VERITY_BIT)
655 
656 #define DEF_DIR_LEVEL		0
657 
658 enum {
659 	GC_FAILURE_PIN,
660 	GC_FAILURE_ATOMIC,
661 	MAX_GC_FAILURE
662 };
663 
664 /* used for f2fs_inode_info->flags */
665 enum {
666 	FI_NEW_INODE,		/* indicate newly allocated inode */
667 	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
668 	FI_AUTO_RECOVER,	/* indicate inode is recoverable */
669 	FI_DIRTY_DIR,		/* indicate directory has dirty pages */
670 	FI_INC_LINK,		/* need to increment i_nlink */
671 	FI_ACL_MODE,		/* indicate acl mode */
672 	FI_NO_ALLOC,		/* should not allocate any blocks */
673 	FI_FREE_NID,		/* free allocated nid */
674 	FI_NO_EXTENT,		/* not to use the extent cache */
675 	FI_INLINE_XATTR,	/* used for inline xattr */
676 	FI_INLINE_DATA,		/* used for inline data*/
677 	FI_INLINE_DENTRY,	/* used for inline dentry */
678 	FI_APPEND_WRITE,	/* inode has appended data */
679 	FI_UPDATE_WRITE,	/* inode has in-place-update data */
680 	FI_NEED_IPU,		/* used for ipu per file */
681 	FI_ATOMIC_FILE,		/* indicate atomic file */
682 	FI_ATOMIC_COMMIT,	/* indicate the state of atomic committing */
683 	FI_VOLATILE_FILE,	/* indicate volatile file */
684 	FI_FIRST_BLOCK_WRITTEN,	/* indicate #0 data block was written */
685 	FI_DROP_CACHE,		/* drop dirty page cache */
686 	FI_DATA_EXIST,		/* indicate data exists */
687 	FI_INLINE_DOTS,		/* indicate inline dot dentries */
688 	FI_DO_DEFRAG,		/* indicate defragment is running */
689 	FI_DIRTY_FILE,		/* indicate regular/symlink has dirty pages */
690 	FI_NO_PREALLOC,		/* indicate skipped preallocated blocks */
691 	FI_HOT_DATA,		/* indicate file is hot */
692 	FI_EXTRA_ATTR,		/* indicate file has extra attribute */
693 	FI_PROJ_INHERIT,	/* indicate file inherits projectid */
694 	FI_PIN_FILE,		/* indicate file should not be gced */
695 	FI_ATOMIC_REVOKE_REQUEST, /* request to drop atomic data */
696 	FI_VERITY_IN_PROGRESS,	/* building fs-verity Merkle tree */
697 	FI_COMPRESSED_FILE,	/* indicate file's data can be compressed */
698 	FI_MMAP_FILE,		/* indicate file was mmapped */
699 	FI_MAX,			/* max flag, never be used */
700 };
701 
702 struct f2fs_inode_info {
703 	struct inode vfs_inode;		/* serve a vfs inode */
704 	unsigned long i_flags;		/* keep an inode flags for ioctl */
705 	unsigned char i_advise;		/* use to give file attribute hints */
706 	unsigned char i_dir_level;	/* use for dentry level for large dir */
707 	unsigned int i_current_depth;	/* only for directory depth */
708 	/* for gc failure statistic */
709 	unsigned int i_gc_failures[MAX_GC_FAILURE];
710 	unsigned int i_pino;		/* parent inode number */
711 	umode_t i_acl_mode;		/* keep file acl mode temporarily */
712 
713 	/* Use below internally in f2fs*/
714 	unsigned long flags[BITS_TO_LONGS(FI_MAX)];	/* use to pass per-file flags */
715 	struct rw_semaphore i_sem;	/* protect fi info */
716 	atomic_t dirty_pages;		/* # of dirty pages */
717 	f2fs_hash_t chash;		/* hash value of given file name */
718 	unsigned int clevel;		/* maximum level of given file name */
719 	struct task_struct *task;	/* lookup and create consistency */
720 	struct task_struct *cp_task;	/* separate cp/wb IO stats*/
721 	nid_t i_xattr_nid;		/* node id that contains xattrs */
722 	loff_t	last_disk_size;		/* lastly written file size */
723 	spinlock_t i_size_lock;		/* protect last_disk_size */
724 
725 #ifdef CONFIG_QUOTA
726 	struct dquot *i_dquot[MAXQUOTAS];
727 
728 	/* quota space reservation, managed internally by quota code */
729 	qsize_t i_reserved_quota;
730 #endif
731 	struct list_head dirty_list;	/* dirty list for dirs and files */
732 	struct list_head gdirty_list;	/* linked in global dirty list */
733 	struct list_head inmem_ilist;	/* list for inmem inodes */
734 	struct list_head inmem_pages;	/* inmemory pages managed by f2fs */
735 	struct task_struct *inmem_task;	/* store inmemory task */
736 	struct mutex inmem_lock;	/* lock for inmemory pages */
737 	pgoff_t ra_offset;		/* ongoing readahead offset */
738 	struct extent_tree *extent_tree;	/* cached extent_tree entry */
739 
740 	/* avoid racing between foreground op and gc */
741 	struct rw_semaphore i_gc_rwsem[2];
742 	struct rw_semaphore i_mmap_sem;
743 	struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */
744 
745 	int i_extra_isize;		/* size of extra space located in i_addr */
746 	kprojid_t i_projid;		/* id for project quota */
747 	int i_inline_xattr_size;	/* inline xattr size */
748 	struct timespec64 i_crtime;	/* inode creation time */
749 	struct timespec64 i_disk_time[4];/* inode disk times */
750 
751 	/* for file compress */
752 	atomic_t i_compr_blocks;		/* # of compressed blocks */
753 	unsigned char i_compress_algorithm;	/* algorithm type */
754 	unsigned char i_log_cluster_size;	/* log of cluster size */
755 	unsigned int i_cluster_size;		/* cluster size */
756 };
757 
758 static inline void get_extent_info(struct extent_info *ext,
759 					struct f2fs_extent *i_ext)
760 {
761 	ext->fofs = le32_to_cpu(i_ext->fofs);
762 	ext->blk = le32_to_cpu(i_ext->blk);
763 	ext->len = le32_to_cpu(i_ext->len);
764 }
765 
766 static inline void set_raw_extent(struct extent_info *ext,
767 					struct f2fs_extent *i_ext)
768 {
769 	i_ext->fofs = cpu_to_le32(ext->fofs);
770 	i_ext->blk = cpu_to_le32(ext->blk);
771 	i_ext->len = cpu_to_le32(ext->len);
772 }
773 
774 static inline void set_extent_info(struct extent_info *ei, unsigned int fofs,
775 						u32 blk, unsigned int len)
776 {
777 	ei->fofs = fofs;
778 	ei->blk = blk;
779 	ei->len = len;
780 }
781 
782 static inline bool __is_discard_mergeable(struct discard_info *back,
783 			struct discard_info *front, unsigned int max_len)
784 {
785 	return (back->lstart + back->len == front->lstart) &&
786 		(back->len + front->len <= max_len);
787 }
788 
789 static inline bool __is_discard_back_mergeable(struct discard_info *cur,
790 			struct discard_info *back, unsigned int max_len)
791 {
792 	return __is_discard_mergeable(back, cur, max_len);
793 }
794 
795 static inline bool __is_discard_front_mergeable(struct discard_info *cur,
796 			struct discard_info *front, unsigned int max_len)
797 {
798 	return __is_discard_mergeable(cur, front, max_len);
799 }
800 
801 static inline bool __is_extent_mergeable(struct extent_info *back,
802 						struct extent_info *front)
803 {
804 	return (back->fofs + back->len == front->fofs &&
805 			back->blk + back->len == front->blk);
806 }
807 
808 static inline bool __is_back_mergeable(struct extent_info *cur,
809 						struct extent_info *back)
810 {
811 	return __is_extent_mergeable(back, cur);
812 }
813 
814 static inline bool __is_front_mergeable(struct extent_info *cur,
815 						struct extent_info *front)
816 {
817 	return __is_extent_mergeable(cur, front);
818 }
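
/*
 * Worked example (illustration only): two extents merge only when they are
 * contiguous in both file offset and block address, e.g.
 * back = {fofs 0, blk 100, len 8} and front = {fofs 8, blk 108, len 4}
 * satisfy __is_extent_mergeable(), while a front starting at blk 120 with
 * the same fofs would not.
 */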
819 
820 extern void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync);
821 static inline void __try_update_largest_extent(struct extent_tree *et,
822 						struct extent_node *en)
823 {
824 	if (en->ei.len > et->largest.len) {
825 		et->largest = en->ei;
826 		et->largest_updated = true;
827 	}
828 }
829 
830 /*
831  * For free nid management
832  */
833 enum nid_state {
834 	FREE_NID,		/* newly added to free nid list */
835 	PREALLOC_NID,		/* it is preallocated */
836 	MAX_NID_STATE,
837 };
838 
839 enum nat_state {
840 	TOTAL_NAT,
841 	DIRTY_NAT,
842 	RECLAIMABLE_NAT,
843 	MAX_NAT_STATE,
844 };
845 
846 struct f2fs_nm_info {
847 	block_t nat_blkaddr;		/* base disk address of NAT */
848 	nid_t max_nid;			/* maximum possible node ids */
849 	nid_t available_nids;		/* # of available node ids */
850 	nid_t next_scan_nid;		/* the next nid to be scanned */
851 	unsigned int ram_thresh;	/* control the memory footprint */
852 	unsigned int ra_nid_pages;	/* # of nid pages to be read ahead */
853 	unsigned int dirty_nats_ratio;	/* control dirty nats ratio threshold */
854 
855 	/* NAT cache management */
856 	struct radix_tree_root nat_root;/* root of the nat entry cache */
857 	struct radix_tree_root nat_set_root;/* root of the nat set cache */
858 	struct rw_semaphore nat_tree_lock;	/* protect the nat entry caches */
859 	struct list_head nat_entries;	/* cached nat entry list (clean) */
860 	spinlock_t nat_list_lock;	/* protect clean nat entry list */
861 	unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */
862 	unsigned int nat_blocks;	/* # of nat blocks */
863 
864 	/* free node ids management */
865 	struct radix_tree_root free_nid_root;/* root of the free_nid cache */
866 	struct list_head free_nid_list;		/* list for free nids excluding preallocated nids */
867 	unsigned int nid_cnt[MAX_NID_STATE];	/* the number of free node id */
868 	spinlock_t nid_list_lock;	/* protect nid lists ops */
869 	struct mutex build_lock;	/* lock for build free nids */
870 	unsigned char **free_nid_bitmap;
871 	unsigned char *nat_block_bitmap;
872 	unsigned short *free_nid_count;	/* free nid count of NAT block */
873 
874 	/* for checkpoint */
875 	char *nat_bitmap;		/* NAT bitmap pointer */
876 
877 	unsigned int nat_bits_blocks;	/* # of nat bits blocks */
878 	unsigned char *nat_bits;	/* NAT bits blocks */
879 	unsigned char *full_nat_bits;	/* full NAT pages */
880 	unsigned char *empty_nat_bits;	/* empty NAT pages */
881 #ifdef CONFIG_F2FS_CHECK_FS
882 	char *nat_bitmap_mir;		/* NAT bitmap mirror */
883 #endif
884 	int bitmap_size;		/* bitmap size */
885 };
886 
887 /*
888  * This structure is used as a function parameter.
889  * All the information is dedicated to a given direct node block, determined
890  * by the data offset within a file.
891  */
892 struct dnode_of_data {
893 	struct inode *inode;		/* vfs inode pointer */
894 	struct page *inode_page;	/* its inode page, NULL is possible */
895 	struct page *node_page;		/* cached direct node page */
896 	nid_t nid;			/* node id of the direct node block */
897 	unsigned int ofs_in_node;	/* data offset in the node page */
898 	bool inode_page_locked;		/* inode page is locked or not */
899 	bool node_changed;		/* is node block changed */
900 	char cur_level;			/* level of hole node page */
901 	char max_level;			/* level of current page located */
902 	block_t	data_blkaddr;		/* block address of the node block */
903 };
904 
905 static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
906 		struct page *ipage, struct page *npage, nid_t nid)
907 {
908 	memset(dn, 0, sizeof(*dn));
909 	dn->inode = inode;
910 	dn->inode_page = ipage;
911 	dn->node_page = npage;
912 	dn->nid = nid;
913 }
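
/*
 * Usage sketch (illustration only): callers typically start from a zeroed
 * dnode and let the node manager fill in the pages and nid, e.g.
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	then resolve it, e.g. f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE)
 */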
914 
915 /*
916  * For SIT manager
917  *
918  * By default, there are 6 active log areas across the whole main area.
919  * When considering hot and cold data separation to reduce cleaning overhead,
920  * the logs are split into 3 for data and 3 for nodes, each with hot, warm,
921  * and cold types respectively.
922  * In the current design, you should not change these numbers intentionally.
923  * Instead, the active_logs=x mount option lets you select 2, 4, or 6 logs
924  * according to the underlying devices (default: 6).
925  * Just in case, the on-disk layout covers a maximum of 16 logs: 8 for
926  * data and 8 for node logs.
927  */
928 #define	NR_CURSEG_DATA_TYPE	(3)
929 #define NR_CURSEG_NODE_TYPE	(3)
930 #define NR_CURSEG_INMEM_TYPE	(2)
931 #define NR_CURSEG_PERSIST_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
932 #define NR_CURSEG_TYPE		(NR_CURSEG_INMEM_TYPE + NR_CURSEG_PERSIST_TYPE)
933 
934 enum {
935 	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
936 	CURSEG_WARM_DATA,	/* data blocks */
937 	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
938 	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
939 	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
940 	CURSEG_COLD_NODE,	/* indirect node blocks */
941 	NR_PERSISTENT_LOG,	/* number of persistent log */
942 	CURSEG_COLD_DATA_PINNED = NR_PERSISTENT_LOG,
943 				/* pinned file that needs consecutive block address */
944 	CURSEG_ALL_DATA_ATGC,	/* SSR allocator in hot/warm/cold data area */
945 	NO_CHECK_TYPE,		/* number of persistent & inmem log */
946 };
947 
948 struct flush_cmd {
949 	struct completion wait;
950 	struct llist_node llnode;
951 	nid_t ino;
952 	int ret;
953 };
954 
955 struct flush_cmd_control {
956 	struct task_struct *f2fs_issue_flush;	/* flush thread */
957 	wait_queue_head_t flush_wait_queue;	/* waiting queue for wake-up */
958 	atomic_t issued_flush;			/* # of issued flushes */
959 	atomic_t queued_flush;			/* # of queued flushes */
960 	struct llist_head issue_list;		/* list for command issue */
961 	struct llist_node *dispatch_list;	/* list for command dispatch */
962 };
963 
964 struct f2fs_sm_info {
965 	struct sit_info *sit_info;		/* whole segment information */
966 	struct free_segmap_info *free_info;	/* free segment information */
967 	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
968 	struct curseg_info *curseg_array;	/* active segment information */
969 
970 	struct rw_semaphore curseg_lock;	/* for preventing curseg change */
971 
972 	block_t seg0_blkaddr;		/* block address of 0'th segment */
973 	block_t main_blkaddr;		/* start block address of main area */
974 	block_t ssa_blkaddr;		/* start block address of SSA area */
975 
976 	unsigned int segment_count;	/* total # of segments */
977 	unsigned int main_segments;	/* # of segments in main area */
978 	unsigned int reserved_segments;	/* # of reserved segments */
979 	unsigned int ovp_segments;	/* # of overprovision segments */
980 
981 	/* a threshold to reclaim prefree segments */
982 	unsigned int rec_prefree_segments;
983 
984 	/* for batched trimming */
985 	unsigned int trim_sections;		/* # of sections to trim */
986 
987 	struct list_head sit_entry_set;	/* sit entry set list */
988 
989 	unsigned int ipu_policy;	/* in-place-update policy */
990 	unsigned int min_ipu_util;	/* in-place-update threshold */
991 	unsigned int min_fsync_blocks;	/* threshold for fsync */
992 	unsigned int min_seq_blocks;	/* threshold for sequential blocks */
993 	unsigned int min_hot_blocks;	/* threshold for hot block allocation */
994 	unsigned int min_ssr_sections;	/* threshold to trigger SSR allocation */
995 
996 	/* for flush command control */
997 	struct flush_cmd_control *fcc_info;
998 
999 	/* for discard command control */
1000 	struct discard_cmd_control *dcc_info;
1001 };
1002 
1003 /*
1004  * For superblock
1005  */
1006 /*
1007  * COUNT_TYPE for monitoring
1008  *
1009  * f2fs monitors the number of several block types, such as blocks under
1010  * writeback, dirty dentry blocks, dirty node blocks, and dirty meta blocks.
1011  */
1012 #define WB_DATA_TYPE(p)	(__is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)
1013 enum count_type {
1014 	F2FS_DIRTY_DENTS,
1015 	F2FS_DIRTY_DATA,
1016 	F2FS_DIRTY_QDATA,
1017 	F2FS_DIRTY_NODES,
1018 	F2FS_DIRTY_META,
1019 	F2FS_INMEM_PAGES,
1020 	F2FS_DIRTY_IMETA,
1021 	F2FS_WB_CP_DATA,
1022 	F2FS_WB_DATA,
1023 	F2FS_RD_DATA,
1024 	F2FS_RD_NODE,
1025 	F2FS_RD_META,
1026 	F2FS_DIO_WRITE,
1027 	F2FS_DIO_READ,
1028 	NR_COUNT_TYPE,
1029 };
1030 
1031 /*
1032  * The below are the page types of bios used in submit_bio().
1033  * The available types are:
1034  * DATA			User data pages. Written in async mode.
1035  * NODE			Node pages. Written in async mode.
1036  * META			FS metadata pages such as SIT, NAT, CP.
1037  * NR_PAGE_TYPE		The number of page types.
1038  * META_FLUSH		Make sure the previous pages are written,
1039  *			waiting for the bio's completion.
1040  * ...			Can only be used with META.
1041  */
1042 #define PAGE_TYPE_OF_BIO(type)	((type) > META ? META : (type))
1043 enum page_type {
1044 	DATA,
1045 	NODE,
1046 	META,
1047 	NR_PAGE_TYPE,
1048 	META_FLUSH,
1049 	INMEM,		/* the below types are used by tracepoints only. */
1050 	INMEM_DROP,
1051 	INMEM_INVALIDATE,
1052 	INMEM_REVOKE,
1053 	IPU,
1054 	OPU,
1055 };
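
/*
 * Worked example (illustration only): only the first NR_PAGE_TYPE values
 * index real write_io queues; the trailing values are folded back by
 * PAGE_TYPE_OF_BIO(), so PAGE_TYPE_OF_BIO(DATA) == DATA while
 * PAGE_TYPE_OF_BIO(META_FLUSH) == META.
 */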
1056 
1057 enum temp_type {
1058 	HOT = 0,	/* must be zero for meta bio */
1059 	WARM,
1060 	COLD,
1061 	NR_TEMP_TYPE,
1062 };
1063 
1064 enum need_lock_type {
1065 	LOCK_REQ = 0,
1066 	LOCK_DONE,
1067 	LOCK_RETRY,
1068 };
1069 
1070 enum cp_reason_type {
1071 	CP_NO_NEEDED,
1072 	CP_NON_REGULAR,
1073 	CP_COMPRESSED,
1074 	CP_HARDLINK,
1075 	CP_SB_NEED_CP,
1076 	CP_WRONG_PINO,
1077 	CP_NO_SPC_ROLL,
1078 	CP_NODE_NEED_CP,
1079 	CP_FASTBOOT_MODE,
1080 	CP_SPEC_LOG_NUM,
1081 	CP_RECOVER_DIR,
1082 };
1083 
1084 enum iostat_type {
1085 	/* WRITE IO */
1086 	APP_DIRECT_IO,			/* app direct write IOs */
1087 	APP_BUFFERED_IO,		/* app buffered write IOs */
1088 	APP_WRITE_IO,			/* app write IOs */
1089 	APP_MAPPED_IO,			/* app mapped IOs */
1090 	FS_DATA_IO,			/* data IOs from kworker/fsync/reclaimer */
1091 	FS_NODE_IO,			/* node IOs from kworker/fsync/reclaimer */
1092 	FS_META_IO,			/* meta IOs from kworker/reclaimer */
1093 	FS_GC_DATA_IO,			/* data IOs from foreground gc */
1094 	FS_GC_NODE_IO,			/* node IOs from foreground gc */
1095 	FS_CP_DATA_IO,			/* data IOs from checkpoint */
1096 	FS_CP_NODE_IO,			/* node IOs from checkpoint */
1097 	FS_CP_META_IO,			/* meta IOs from checkpoint */
1098 
1099 	/* READ IO */
1100 	APP_DIRECT_READ_IO,		/* app direct read IOs */
1101 	APP_BUFFERED_READ_IO,		/* app buffered read IOs */
1102 	APP_READ_IO,			/* app read IOs */
1103 	APP_MAPPED_READ_IO,		/* app mapped read IOs */
1104 	FS_DATA_READ_IO,		/* data read IOs */
1105 	FS_GDATA_READ_IO,		/* data read IOs from background gc */
1106 	FS_CDATA_READ_IO,		/* compressed data read IOs */
1107 	FS_NODE_READ_IO,		/* node read IOs */
1108 	FS_META_READ_IO,		/* meta read IOs */
1109 
1110 	/* other */
1111 	FS_DISCARD,			/* discard */
1112 	NR_IO_TYPE,
1113 };
1114 
1115 struct f2fs_io_info {
1116 	struct f2fs_sb_info *sbi;	/* f2fs_sb_info pointer */
1117 	nid_t ino;		/* inode number */
1118 	enum page_type type;	/* contains DATA/NODE/META/META_FLUSH */
1119 	enum temp_type temp;	/* contains HOT/WARM/COLD */
1120 	int op;			/* contains REQ_OP_ */
1121 	int op_flags;		/* req_flag_bits */
1122 	block_t new_blkaddr;	/* new block address to be written */
1123 	block_t old_blkaddr;	/* old block address before COW */
1124 	struct page *page;	/* page to be written */
1125 	struct page *encrypted_page;	/* encrypted page */
1126 	struct page *compressed_page;	/* compressed page */
1127 	struct list_head list;		/* serialize IOs */
1128 	bool submitted;		/* indicate IO submission */
1129 	int need_lock;		/* indicate we need to lock cp_rwsem */
1130 	bool in_list;		/* indicate fio is in io_list */
1131 	bool is_por;		/* indicate IO is from recovery or not */
1132 	bool retry;		/* need to reallocate block address */
1133 	int compr_blocks;	/* # of compressed block addresses */
1134 	bool encrypted;		/* indicate file is encrypted */
1135 	enum iostat_type io_type;	/* io type */
1136 	struct writeback_control *io_wbc; /* writeback control */
1137 	struct bio **bio;		/* bio for ipu */
1138 	sector_t *last_block;		/* last block number in bio */
1139 	unsigned char version;		/* version of the node */
1140 };
1141 
1142 struct bio_entry {
1143 	struct bio *bio;
1144 	struct list_head list;
1145 };
1146 
1147 #define is_read_io(rw) ((rw) == READ)
1148 struct f2fs_bio_info {
1149 	struct f2fs_sb_info *sbi;	/* f2fs superblock */
1150 	struct bio *bio;		/* bios to merge */
1151 	sector_t last_block_in_bio;	/* last block number */
1152 	struct f2fs_io_info fio;	/* store buffered io info. */
1153 	struct rw_semaphore io_rwsem;	/* blocking op for bio */
1154 	spinlock_t io_lock;		/* serialize DATA/NODE IOs */
1155 	struct list_head io_list;	/* track fios */
1156 	struct list_head bio_list;	/* bio entry list head */
1157 	struct rw_semaphore bio_list_lock;	/* lock to protect bio entry list */
1158 };
1159 
1160 #define FDEV(i)				(sbi->devs[i])
1161 #define RDEV(i)				(raw_super->devs[i])
1162 struct f2fs_dev_info {
1163 	struct block_device *bdev;
1164 	char path[MAX_PATH_LEN];
1165 	unsigned int total_segments;
1166 	block_t start_blk;
1167 	block_t end_blk;
1168 #ifdef CONFIG_BLK_DEV_ZONED
1169 	unsigned int nr_blkz;		/* Total number of zones */
1170 	unsigned long *blkz_seq;	/* Bitmap indicating sequential zones */
1171 	block_t *zone_capacity_blocks;  /* Array of zone capacity in blks */
1172 #endif
1173 };
1174 
1175 enum inode_type {
1176 	DIR_INODE,			/* for dirty dir inode */
1177 	FILE_INODE,			/* for dirty regular/symlink inode */
1178 	DIRTY_META,			/* for all dirtied inode metadata */
1179 	ATOMIC_FILE,			/* for all atomic files */
1180 	NR_INODE_TYPE,
1181 };
1182 
1183 /* for inner inode cache management */
1184 struct inode_management {
1185 	struct radix_tree_root ino_root;	/* ino entry array */
1186 	spinlock_t ino_lock;			/* for ino entry lock */
1187 	struct list_head ino_list;		/* inode list head */
1188 	unsigned long ino_num;			/* number of entries */
1189 };
1190 
1191 /* for GC_AT */
1192 struct atgc_management {
1193 	bool atgc_enabled;			/* ATGC is enabled or not */
1194 	struct rb_root_cached root;		/* root of victim rb-tree */
1195 	struct list_head victim_list;		/* linked with all victim entries */
1196 	unsigned int victim_count;		/* victim count in rb-tree */
1197 	unsigned int candidate_ratio;		/* candidate ratio */
1198 	unsigned int max_candidate_count;	/* max candidate count */
1199 	unsigned int age_weight;		/* age weight, vblock_weight = 100 - age_weight */
1200 	unsigned long long age_threshold;	/* age threshold */
1201 };
1202 
1203 /* For s_flag in struct f2fs_sb_info */
1204 enum {
1205 	SBI_IS_DIRTY,				/* dirty flag for checkpoint */
1206 	SBI_IS_CLOSE,				/* specify unmounting */
1207 	SBI_NEED_FSCK,				/* need fsck.f2fs to fix */
1208 	SBI_POR_DOING,				/* recovery is doing or not */
1209 	SBI_NEED_SB_WRITE,			/* need to recover superblock */
1210 	SBI_NEED_CP,				/* need to checkpoint */
1211 	SBI_IS_SHUTDOWN,			/* shutdown by ioctl */
1212 	SBI_IS_RECOVERED,			/* recovered orphan/data */
1213 	SBI_CP_DISABLED,			/* CP was disabled last mount */
1214 	SBI_CP_DISABLED_QUICK,			/* CP was disabled quickly */
1215 	SBI_QUOTA_NEED_FLUSH,			/* need to flush quota info in CP */
1216 	SBI_QUOTA_SKIP_FLUSH,			/* skip flushing quota in current CP */
1217 	SBI_QUOTA_NEED_REPAIR,			/* quota file may be corrupted */
1218 	SBI_IS_RESIZEFS,			/* resizefs is in process */
1219 };
1220 
1221 enum {
1222 	CP_TIME,
1223 	REQ_TIME,
1224 	DISCARD_TIME,
1225 	GC_TIME,
1226 	DISABLE_TIME,
1227 	UMOUNT_DISCARD_TIMEOUT,
1228 	MAX_TIME,
1229 };
1230 
1231 enum {
1232 	GC_NORMAL,
1233 	GC_IDLE_CB,
1234 	GC_IDLE_GREEDY,
1235 	GC_IDLE_AT,
1236 	GC_URGENT_HIGH,
1237 	GC_URGENT_LOW,
1238 };
1239 
1240 enum {
1241 	BGGC_MODE_ON,		/* background gc is on */
1242 	BGGC_MODE_OFF,		/* background gc is off */
1243 	BGGC_MODE_SYNC,		/*
1244 				 * background gc is on, migrating blocks
1245 				 * like foreground gc
1246 				 */
1247 };
1248 
1249 enum {
1250 	FS_MODE_ADAPTIVE,	/* use both lfs/ssr allocation */
1251 	FS_MODE_LFS,		/* use lfs allocation only */
1252 };
1253 
1254 enum {
1255 	WHINT_MODE_OFF,		/* not pass down write hints */
1256 	WHINT_MODE_USER,	/* try to pass down hints given by users */
1257 	WHINT_MODE_FS,		/* pass down hints with F2FS policy */
1258 };
1259 
1260 enum {
1261 	ALLOC_MODE_DEFAULT,	/* stay default */
1262 	ALLOC_MODE_REUSE,	/* reuse segments as much as possible */
1263 };
1264 
1265 enum fsync_mode {
1266 	FSYNC_MODE_POSIX,	/* fsync follows posix semantics */
1267 	FSYNC_MODE_STRICT,	/* fsync behaves in line with ext4 */
1268 	FSYNC_MODE_NOBARRIER,	/* fsync behaves nobarrier based on posix */
1269 };
1270 
1271 /*
1272  * This value is set in a page's private data to indicate that the page
1273  * was atomically written and sits on the inmem_pages list.
1274  */
1275 #define ATOMIC_WRITTEN_PAGE		((unsigned long)-1)
1276 #define DUMMY_WRITTEN_PAGE		((unsigned long)-2)
1277 
1278 #define IS_ATOMIC_WRITTEN_PAGE(page)			\
1279 		(page_private(page) == ATOMIC_WRITTEN_PAGE)
1280 #define IS_DUMMY_WRITTEN_PAGE(page)			\
1281 		(page_private(page) == DUMMY_WRITTEN_PAGE)
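
/*
 * Usage sketch (illustration only): the marker is kept in page_private(),
 * so a writeback path can tell an in-memory atomic page apart from a
 * normal one with
 *
 *	if (IS_ATOMIC_WRITTEN_PAGE(page))
 *		...;	page belongs to an inode's inmem_pages list
 */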
1282 
1283 #ifdef CONFIG_F2FS_IO_TRACE
1284 #define IS_IO_TRACED_PAGE(page)			\
1285 		(page_private(page) > 0 &&		\
1286 		 page_private(page) < (unsigned long)PID_MAX_LIMIT)
1287 #else
1288 #define IS_IO_TRACED_PAGE(page) (0)
1289 #endif
1290 
1291 /* For compression */
1292 enum compress_algorithm_type {
1293 	COMPRESS_LZO,
1294 	COMPRESS_LZ4,
1295 	COMPRESS_ZSTD,
1296 	COMPRESS_LZORLE,
1297 	COMPRESS_MAX,
1298 };
1299 
1300 #define COMPRESS_DATA_RESERVED_SIZE		5
1301 struct compress_data {
1302 	__le32 clen;			/* compressed data size */
1303 	__le32 reserved[COMPRESS_DATA_RESERVED_SIZE];	/* reserved */
1304 	u8 cdata[];			/* compressed data */
1305 };
1306 
1307 #define COMPRESS_HEADER_SIZE	(sizeof(struct compress_data))
1308 
1309 #define F2FS_COMPRESSED_PAGE_MAGIC	0xF5F2C000
1310 
1311 /* compress context */
1312 struct compress_ctx {
1313 	struct inode *inode;		/* inode the context belongs to */
1314 	pgoff_t cluster_idx;		/* cluster index number */
1315 	unsigned int cluster_size;	/* page count in cluster */
1316 	unsigned int log_cluster_size;	/* log of cluster size */
1317 	struct page **rpages;		/* pages store raw data in cluster */
1318 	unsigned int nr_rpages;		/* total page number in rpages */
1319 	struct page **cpages;		/* pages store compressed data in cluster */
1320 	unsigned int nr_cpages;		/* total page number in cpages */
1321 	void *rbuf;			/* virtual mapped address on rpages */
1322 	struct compress_data *cbuf;	/* virtual mapped address on cpages */
1323 	size_t rlen;			/* valid data length in rbuf */
1324 	size_t clen;			/* valid data length in cbuf */
1325 	void *private;			/* payload buffer for specified compression algorithm */
1326 	void *private2;			/* extra payload buffer */
1327 };
1328 
1329 /* compress context for write IO path */
1330 struct compress_io_ctx {
1331 	u32 magic;			/* magic number to indicate page is compressed */
1332 	struct inode *inode;		/* inode the context belongs to */
1333 	struct page **rpages;		/* pages store raw data in cluster */
1334 	unsigned int nr_rpages;		/* total page number in rpages */
1335 	atomic_t pending_pages;		/* in-flight compressed page count */
1336 };
1337 
1338 /* decompress io context for read IO path */
1339 struct decompress_io_ctx {
1340 	u32 magic;			/* magic number to indicate page is compressed */
1341 	struct inode *inode;		/* inode the context belongs to */
1342 	pgoff_t cluster_idx;		/* cluster index number */
1343 	unsigned int cluster_size;	/* page count in cluster */
1344 	unsigned int log_cluster_size;	/* log of cluster size */
1345 	struct page **rpages;		/* pages store raw data in cluster */
1346 	unsigned int nr_rpages;		/* total page number in rpages */
1347 	struct page **cpages;		/* pages store compressed data in cluster */
1348 	unsigned int nr_cpages;		/* total page number in cpages */
1349 	struct page **tpages;		/* temp pages to pad holes in cluster */
1350 	void *rbuf;			/* virtual mapped address on rpages */
1351 	struct compress_data *cbuf;	/* virtual mapped address on cpages */
1352 	size_t rlen;			/* valid data length in rbuf */
1353 	size_t clen;			/* valid data length in cbuf */
1354 	atomic_t pending_pages;		/* in-flight compressed page count */
1355 	atomic_t verity_pages;		/* in-flight page count for verity */
1356 	bool failed;			/* indicate IO error during decompression */
1357 	void *private;			/* payload buffer for specified decompression algorithm */
1358 	void *private2;			/* extra payload buffer */
1359 };
1360 
1361 #define NULL_CLUSTER			((unsigned int)(~0))
1362 #define MIN_COMPRESS_LOG_SIZE		2
1363 #define MAX_COMPRESS_LOG_SIZE		8
1364 #define MAX_COMPRESS_WINDOW_SIZE(log_size)	((PAGE_SIZE) << (log_size))
1365 
1366 #ifdef CONFIG_F2FS_GRADING_SSR
1367 struct f2fs_hot_cold_params {
1368 	unsigned int enable;
1369 	unsigned int hot_data_lower_limit;
1370 	unsigned int hot_data_waterline;
1371 	unsigned int warm_data_lower_limit;
1372 	unsigned int warm_data_waterline;
1373 	unsigned int hot_node_lower_limit;
1374 	unsigned int hot_node_waterline;
1375 	unsigned int warm_node_lower_limit;
1376 	unsigned int warm_node_waterline;
1377 };
1378 #endif
1379 
1380 struct f2fs_sb_info {
1381 	struct super_block *sb;			/* pointer to VFS super block */
1382 	struct proc_dir_entry *s_proc;		/* proc entry */
1383 	struct f2fs_super_block *raw_super;	/* raw super block pointer */
1384 	struct rw_semaphore sb_lock;		/* lock for raw super block */
1385 	int valid_super_block;			/* valid super block no */
1386 	unsigned long s_flag;				/* flags for sbi */
1387 	struct mutex writepages;		/* mutex for writepages() */
1388 
1389 #ifdef CONFIG_BLK_DEV_ZONED
1390 	unsigned int blocks_per_blkz;		/* F2FS blocks per zone */
1391 	unsigned int log_blocks_per_blkz;	/* log2 F2FS blocks per zone */
1392 #endif
1393 
1394 	/* for node-related operations */
1395 	struct f2fs_nm_info *nm_info;		/* node manager */
1396 	struct inode *node_inode;		/* cache node blocks */
1397 
1398 	/* for segment-related operations */
1399 	struct f2fs_sm_info *sm_info;		/* segment manager */
1400 
1401 	/* for bio operations */
1402 	struct f2fs_bio_info *write_io[NR_PAGE_TYPE];	/* for write bios */
1403 	/* keep migration IO order for LFS mode */
1404 	struct rw_semaphore io_order_lock;
1405 	mempool_t *write_io_dummy;		/* Dummy pages */
1406 
1407 	/* for checkpoint */
1408 	struct f2fs_checkpoint *ckpt;		/* raw checkpoint pointer */
1409 	int cur_cp_pack;			/* remain current cp pack */
1410 	spinlock_t cp_lock;			/* for flag in ckpt */
1411 	struct inode *meta_inode;		/* cache meta blocks */
1412 	struct mutex cp_mutex;			/* checkpoint procedure lock */
1413 	struct rw_semaphore cp_rwsem;		/* blocking FS operations */
1414 	struct rw_semaphore node_write;		/* locking node writes */
1415 	struct rw_semaphore node_change;	/* locking node change */
1416 	wait_queue_head_t cp_wait;
1417 	unsigned long last_time[MAX_TIME];	/* to store time in jiffies */
1418 	long interval_time[MAX_TIME];		/* to store thresholds */
1419 
1420 	struct inode_management im[MAX_INO_ENTRY];	/* manage inode cache */
1421 
1422 	spinlock_t fsync_node_lock;		/* for node entry lock */
1423 	struct list_head fsync_node_list;	/* node list head */
1424 	unsigned int fsync_seg_id;		/* sequence id */
1425 	unsigned int fsync_node_num;		/* number of node entries */
1426 
1427 	/* for orphan inode, use 0'th array */
1428 	unsigned int max_orphans;		/* max orphan inodes */
1429 
1430 	/* for inode management */
1431 	struct list_head inode_list[NR_INODE_TYPE];	/* dirty inode list */
1432 	spinlock_t inode_lock[NR_INODE_TYPE];	/* for dirty inode list lock */
1433 	struct mutex flush_lock;		/* for flush exclusion */
1434 
1435 	/* for extent tree cache */
1436 	struct radix_tree_root extent_tree_root;/* cache extent cache entries */
1437 	struct mutex extent_tree_lock;	/* locking extent radix tree */
1438 	struct list_head extent_list;		/* lru list for shrinker */
1439 	spinlock_t extent_lock;			/* locking extent lru list */
1440 	atomic_t total_ext_tree;		/* extent tree count */
1441 	struct list_head zombie_list;		/* extent zombie tree list */
1442 	atomic_t total_zombie_tree;		/* extent zombie tree count */
1443 	atomic_t total_ext_node;		/* extent info count */
1444 
1445 	/* basic filesystem units */
1446 	unsigned int log_sectors_per_block;	/* log2 sectors per block */
1447 	unsigned int log_blocksize;		/* log2 block size */
1448 	unsigned int blocksize;			/* block size */
1449 	unsigned int root_ino_num;		/* root inode number*/
1450 	unsigned int node_ino_num;		/* node inode number*/
1451 	unsigned int meta_ino_num;		/* meta inode number*/
1452 	unsigned int log_blocks_per_seg;	/* log2 blocks per segment */
1453 	unsigned int blocks_per_seg;		/* blocks per segment */
1454 	unsigned int segs_per_sec;		/* segments per section */
1455 	unsigned int secs_per_zone;		/* sections per zone */
1456 	unsigned int total_sections;		/* total section count */
1457 	unsigned int total_node_count;		/* total node block count */
1458 	unsigned int total_valid_node_count;	/* valid node block count */
1459 	loff_t max_file_blocks;			/* max block index of file */
1460 	int dir_level;				/* directory level */
1461 	int readdir_ra;				/* readahead inode in readdir */
1462 
1463 	block_t user_block_count;		/* # of user blocks */
1464 	block_t total_valid_block_count;	/* # of valid blocks */
1465 	block_t discard_blks;			/* discard command candidates */
1466 	block_t last_valid_block_count;		/* for recovery */
1467 	block_t reserved_blocks;		/* configurable reserved blocks */
1468 	block_t current_reserved_blocks;	/* current reserved blocks */
1469 
1470 	/* Additional tracking for no checkpoint mode */
1471 	block_t unusable_block_count;		/* # of blocks saved by last cp */
1472 
1473 	unsigned int nquota_files;		/* # of quota sysfiles */
1474 	struct rw_semaphore quota_sem;		/* blocking cp for flags */
1475 
1476 	/* # of pages, see count_type */
1477 	atomic_t nr_pages[NR_COUNT_TYPE];
1478 	/* # of allocated blocks */
1479 	struct percpu_counter alloc_valid_block_count;
1480 
1481 	/* writeback control */
1482 	atomic_t wb_sync_req[META];	/* count # of WB_SYNC threads */
1483 
1484 	/* valid inode count */
1485 	struct percpu_counter total_valid_inode_count;
1486 
1487 	struct f2fs_mount_info mount_opt;	/* mount options */
1488 
1489 	/* for cleaning operations */
1490 	struct rw_semaphore gc_lock;		/*
1491 						 * semaphore for GC; avoids races
1492 						 * between GC threads and between GC and CP
1493 						 */
1494 	struct f2fs_gc_kthread	*gc_thread;	/* GC thread */
1495 	struct atgc_management am;		/* atgc management */
1496 	unsigned int cur_victim_sec;		/* current victim section num */
1497 	unsigned int gc_mode;			/* current GC state */
1498 	unsigned int next_victim_seg[2];	/* next segment in victim section */
1499 
1500 	/* for skip statistics */
1501 	unsigned int atomic_files;		/* # of opened atomic file */
1502 	unsigned long long skipped_atomic_files[2];	/* FG_GC and BG_GC */
1503 	unsigned long long skipped_gc_rwsem;		/* FG_GC only */
1504 
1505 	/* threshold for gc trials on pinned files */
1506 	u64 gc_pin_file_threshold;
1507 	struct rw_semaphore pin_sem;
1508 
1509 	/* maximum # of trials to find a victim segment for SSR and GC */
1510 	unsigned int max_victim_search;
1511 	/* migration granularity of garbage collection, unit: segment */
1512 	unsigned int migration_granularity;
1513 
1514 	/*
1515 	 * for stat information.
1516 	 * one is for the LFS mode, and the other is for the SSR mode.
1517 	 */
1518 #ifdef CONFIG_F2FS_STAT_FS
1519 	struct f2fs_stat_info *stat_info;	/* FS status information */
1520 	atomic_t meta_count[META_MAX];		/* # of meta blocks */
1521 	unsigned int segment_count[2];		/* # of allocated segments */
1522 	unsigned int block_count[2];		/* # of allocated blocks */
1523 	atomic_t inplace_count;		/* # of inplace update */
1524 	atomic64_t total_hit_ext;		/* # of lookup extent cache */
1525 	atomic64_t read_hit_rbtree;		/* # of hit rbtree extent node */
1526 	atomic64_t read_hit_largest;		/* # of hit largest extent node */
1527 	atomic64_t read_hit_cached;		/* # of hit cached extent node */
1528 	atomic_t inline_xattr;			/* # of inline_xattr inodes */
1529 	atomic_t inline_inode;			/* # of inline_data inodes */
1530 	atomic_t inline_dir;			/* # of inline_dentry inodes */
1531 	atomic_t compr_inode;			/* # of compressed inodes */
1532 	atomic64_t compr_blocks;		/* # of compressed blocks */
1533 	atomic_t vw_cnt;			/* # of volatile writes */
1534 	atomic_t max_aw_cnt;			/* max # of atomic writes */
1535 	atomic_t max_vw_cnt;			/* max # of volatile writes */
1536 	unsigned int io_skip_bggc;		/* skip background gc for in-flight IO */
1537 	unsigned int other_skip_bggc;		/* skip background gc for other reasons */
1538 	unsigned int ndirty_inode[NR_INODE_TYPE];	/* # of dirty inodes */
1539 #endif
1540 	spinlock_t stat_lock;			/* lock for stat operations */
1541 
1542 	/* For app/fs IO statistics */
1543 	spinlock_t iostat_lock;
1544 	unsigned long long rw_iostat[NR_IO_TYPE];
1545 	unsigned long long prev_rw_iostat[NR_IO_TYPE];
1546 	bool iostat_enable;
1547 	unsigned long iostat_next_period;
1548 	unsigned int iostat_period_ms;
1549 
1550 	/* to attach REQ_META|REQ_FUA flags */
1551 	unsigned int data_io_flag;
1552 	unsigned int node_io_flag;
1553 
1554 	/* For sysfs support */
1555 	struct kobject s_kobj;
1556 	struct completion s_kobj_unregister;
1557 
1558 	/* For shrinker support */
1559 	struct list_head s_list;
1560 	int s_ndevs;				/* number of devices */
1561 	struct f2fs_dev_info *devs;		/* for device list */
1562 	unsigned int dirty_device;		/* for checkpoint data flush */
1563 	spinlock_t dev_lock;			/* protect dirty_device */
1564 	struct mutex umount_mutex;		/* serialize umount against the shrinker */
1565 	unsigned int shrinker_run_no;
1566 
1567 	/* For write statistics */
1568 	u64 sectors_written_start;
1569 	u64 kbytes_written;
1570 
1571 	/* Reference to checksum algorithm driver via cryptoapi */
1572 	struct crypto_shash *s_chksum_driver;
1573 
1574 	/* Precomputed FS UUID checksum for seeding other checksums */
1575 	__u32 s_chksum_seed;
1576 
1577 	struct workqueue_struct *post_read_wq;	/* post read workqueue */
1578 
1579 	struct kmem_cache *inline_xattr_slab;	/* inline xattr entry */
1580 	unsigned int inline_xattr_slab_size;	/* default inline xattr slab size */
1581 
1582 #ifdef CONFIG_F2FS_FS_COMPRESSION
1583 	struct kmem_cache *page_array_slab;	/* page array entry */
1584 	unsigned int page_array_slab_size;	/* default page array slab size */
1585 #endif
1586 
1587 #ifdef CONFIG_F2FS_GRADING_SSR
1588 	struct f2fs_hot_cold_params hot_cold_params;
1589 #endif
1590 };
1591 
1592 struct f2fs_private_dio {
1593 	struct inode *inode;
1594 	void *orig_private;
1595 	bio_end_io_t *orig_end_io;
1596 	bool write;
1597 };
1598 
1599 #ifdef CONFIG_F2FS_FAULT_INJECTION
1600 #define f2fs_show_injection_info(sbi, type)					\
1601 	printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n",	\
1602 		KERN_INFO, sbi->sb->s_id,				\
1603 		f2fs_fault_name[type],					\
1604 		__func__, __builtin_return_address(0))
1605 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
1606 {
1607 	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
1608 
1609 	if (!ffi->inject_rate)
1610 		return false;
1611 
1612 	if (!IS_FAULT_SET(ffi, type))
1613 		return false;
1614 
1615 	atomic_inc(&ffi->inject_ops);
1616 	if (atomic_read(&ffi->inject_ops) >= ffi->inject_rate) {
1617 		atomic_set(&ffi->inject_ops, 0);
1618 		return true;
1619 	}
1620 	return false;
1621 }
1622 #else
1623 #define f2fs_show_injection_info(sbi, type) do { } while (0)
1624 static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
1625 {
1626 	return false;
1627 }
1628 #endif
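/*
 * Typical fault-injection call pattern (illustrative sketch): callers
 * pair time_to_inject() with f2fs_show_injection_info() and then fail
 * the operation, e.g.
 *
 *	if (time_to_inject(sbi, FAULT_KMALLOC)) {
 *		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
 *		return NULL;
 *	}
 */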
1629 
1630 /*
1631  * Test if the mounted volume is a multi-device volume.
1632  *   - For a single regular disk volume, sbi->s_ndevs is 0.
1633  *   - For a single zoned disk volume, sbi->s_ndevs is 1.
1634  *   - For a multi-device volume, sbi->s_ndevs is always 2 or more.
1635  */
1636 static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
1637 {
1638 	return sbi->s_ndevs > 1;
1639 }
1640 
1641 /* For write statistics. Suppose sector size is 512 bytes,
1642  * and the return value is in kbytes. s is a pointer to struct f2fs_sb_info.
1643  */
1644 #define BD_PART_WRITTEN(s)						 \
1645 (((u64)part_stat_read((s)->sb->s_bdev->bd_part, sectors[STAT_WRITE]) -   \
1646 		(s)->sectors_written_start) >> 1)
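/*
 * Example (illustrative): with 512-byte sectors the ">> 1" converts the
 * written-sector delta to kilobytes, so 2048 sectors written since mount
 * makes BD_PART_WRITTEN(sbi) evaluate to 1024.
 */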
1647 
1648 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
1649 {
1650 	unsigned long now = jiffies;
1651 
1652 	sbi->last_time[type] = now;
1653 
1654 	/* DISCARD_TIME and GC_TIME are based on REQ_TIME */
1655 	if (type == REQ_TIME) {
1656 		sbi->last_time[DISCARD_TIME] = now;
1657 		sbi->last_time[GC_TIME] = now;
1658 	}
1659 }
1660 
1661 static inline bool f2fs_time_over(struct f2fs_sb_info *sbi, int type)
1662 {
1663 	unsigned long interval = sbi->interval_time[type] * HZ;
1664 
1665 	return time_after(jiffies, sbi->last_time[type] + interval);
1666 }
1667 
1668 static inline unsigned int f2fs_time_to_wait(struct f2fs_sb_info *sbi,
1669 						int type)
1670 {
1671 	unsigned long interval = sbi->interval_time[type] * HZ;
1672 	unsigned int wait_ms = 0;
1673 	long delta;
1674 
1675 	delta = (sbi->last_time[type] + interval) - jiffies;
1676 	if (delta > 0)
1677 		wait_ms = jiffies_to_msecs(delta);
1678 
1679 	return wait_ms;
1680 }
1681 
1682 /*
1683  * Inline functions
1684  */
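/*
 * __f2fs_crc32() drives the crc32 shash loaded at mount time
 * (sbi->s_chksum_driver): the current CRC seeds the on-stack descriptor
 * context, crypto_shash_update() folds in the buffer, and the updated
 * 32-bit state is read back from the context.
 */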
1685 static inline u32 __f2fs_crc32(struct f2fs_sb_info *sbi, u32 crc,
1686 			      const void *address, unsigned int length)
1687 {
1688 	struct {
1689 		struct shash_desc shash;
1690 		char ctx[4];
1691 	} desc;
1692 	int err;
1693 
1694 	BUG_ON(crypto_shash_descsize(sbi->s_chksum_driver) != sizeof(desc.ctx));
1695 
1696 	desc.shash.tfm = sbi->s_chksum_driver;
1697 	*(u32 *)desc.ctx = crc;
1698 
1699 	err = crypto_shash_update(&desc.shash, address, length);
1700 	BUG_ON(err);
1701 
1702 	return *(u32 *)desc.ctx;
1703 }
1704 
1705 static inline u32 f2fs_crc32(struct f2fs_sb_info *sbi, const void *address,
1706 			   unsigned int length)
1707 {
1708 	return __f2fs_crc32(sbi, F2FS_SUPER_MAGIC, address, length);
1709 }
1710 
1711 static inline bool f2fs_crc_valid(struct f2fs_sb_info *sbi, __u32 blk_crc,
1712 				  void *buf, size_t buf_size)
1713 {
1714 	return f2fs_crc32(sbi, buf, buf_size) == blk_crc;
1715 }
1716 
1717 static inline u32 f2fs_chksum(struct f2fs_sb_info *sbi, u32 crc,
1718 			      const void *address, unsigned int length)
1719 {
1720 	return __f2fs_crc32(sbi, crc, address, length);
1721 }
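/*
 * Example (illustrative): f2fs_crc32() seeds with F2FS_SUPER_MAGIC for
 * whole-buffer checksums, while f2fs_chksum() lets callers chain partial
 * checksums, e.g.
 *
 *	crc = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino, sizeof(ino));
 *	crc = f2fs_chksum(sbi, crc, data, len);
 */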
1722 
1723 static inline struct f2fs_inode_info *F2FS_I(struct inode *inode)
1724 {
1725 	return container_of(inode, struct f2fs_inode_info, vfs_inode);
1726 }
1727 
1728 static inline struct f2fs_sb_info *F2FS_SB(struct super_block *sb)
1729 {
1730 	return sb->s_fs_info;
1731 }
1732 
1733 static inline struct f2fs_sb_info *F2FS_I_SB(struct inode *inode)
1734 {
1735 	return F2FS_SB(inode->i_sb);
1736 }
1737 
1738 static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
1739 {
1740 	return F2FS_I_SB(mapping->host);
1741 }
1742 
1743 static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
1744 {
1745 	return F2FS_M_SB(page_file_mapping(page));
1746 }
1747 
1748 static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
1749 {
1750 	return (struct f2fs_super_block *)(sbi->raw_super);
1751 }
1752 
1753 static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
1754 {
1755 	return (struct f2fs_checkpoint *)(sbi->ckpt);
1756 }
1757 
1758 static inline struct f2fs_node *F2FS_NODE(struct page *page)
1759 {
1760 	return (struct f2fs_node *)page_address(page);
1761 }
1762 
1763 static inline struct f2fs_inode *F2FS_INODE(struct page *page)
1764 {
1765 	return &((struct f2fs_node *)page_address(page))->i;
1766 }
1767 
1768 static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
1769 {
1770 	return (struct f2fs_nm_info *)(sbi->nm_info);
1771 }
1772 
1773 static inline struct f2fs_sm_info *SM_I(struct f2fs_sb_info *sbi)
1774 {
1775 	return (struct f2fs_sm_info *)(sbi->sm_info);
1776 }
1777 
1778 static inline struct sit_info *SIT_I(struct f2fs_sb_info *sbi)
1779 {
1780 	return (struct sit_info *)(SM_I(sbi)->sit_info);
1781 }
1782 
1783 static inline struct free_segmap_info *FREE_I(struct f2fs_sb_info *sbi)
1784 {
1785 	return (struct free_segmap_info *)(SM_I(sbi)->free_info);
1786 }
1787 
1788 static inline struct dirty_seglist_info *DIRTY_I(struct f2fs_sb_info *sbi)
1789 {
1790 	return (struct dirty_seglist_info *)(SM_I(sbi)->dirty_info);
1791 }
1792 
1793 static inline struct address_space *META_MAPPING(struct f2fs_sb_info *sbi)
1794 {
1795 	return sbi->meta_inode->i_mapping;
1796 }
1797 
1798 static inline struct address_space *NODE_MAPPING(struct f2fs_sb_info *sbi)
1799 {
1800 	return sbi->node_inode->i_mapping;
1801 }
1802 
1803 static inline bool is_sbi_flag_set(struct f2fs_sb_info *sbi, unsigned int type)
1804 {
1805 	return test_bit(type, &sbi->s_flag);
1806 }
1807 
1808 static inline void set_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
1809 {
1810 	set_bit(type, &sbi->s_flag);
1811 }
1812 
1813 static inline void clear_sbi_flag(struct f2fs_sb_info *sbi, unsigned int type)
1814 {
1815 	clear_bit(type, &sbi->s_flag);
1816 }
1817 
1818 static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
1819 {
1820 	return le64_to_cpu(cp->checkpoint_ver);
1821 }
1822 
1823 static inline unsigned long f2fs_qf_ino(struct super_block *sb, int type)
1824 {
1825 	if (type < F2FS_MAX_QUOTAS)
1826 		return le32_to_cpu(F2FS_SB(sb)->raw_super->qf_ino[type]);
1827 	return 0;
1828 }
1829 
1830 static inline __u64 cur_cp_crc(struct f2fs_checkpoint *cp)
1831 {
1832 	size_t crc_offset = le32_to_cpu(cp->checksum_offset);
1833 	return le32_to_cpu(*((__le32 *)((unsigned char *)cp + crc_offset)));
1834 }
1835 
1836 static inline bool __is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
1837 {
1838 	unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
1839 
1840 	return ckpt_flags & f;
1841 }
1842 
1843 static inline bool is_set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
1844 {
1845 	return __is_set_ckpt_flags(F2FS_CKPT(sbi), f);
1846 }
1847 
1848 static inline void __set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
1849 {
1850 	unsigned int ckpt_flags;
1851 
1852 	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
1853 	ckpt_flags |= f;
1854 	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
1855 }
1856 
1857 static inline void set_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
1858 {
1859 	unsigned long flags;
1860 
1861 	spin_lock_irqsave(&sbi->cp_lock, flags);
1862 	__set_ckpt_flags(F2FS_CKPT(sbi), f);
1863 	spin_unlock_irqrestore(&sbi->cp_lock, flags);
1864 }
1865 
1866 static inline void __clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
1867 {
1868 	unsigned int ckpt_flags;
1869 
1870 	ckpt_flags = le32_to_cpu(cp->ckpt_flags);
1871 	ckpt_flags &= (~f);
1872 	cp->ckpt_flags = cpu_to_le32(ckpt_flags);
1873 }
1874 
1875 static inline void clear_ckpt_flags(struct f2fs_sb_info *sbi, unsigned int f)
1876 {
1877 	unsigned long flags;
1878 
1879 	spin_lock_irqsave(&sbi->cp_lock, flags);
1880 	__clear_ckpt_flags(F2FS_CKPT(sbi), f);
1881 	spin_unlock_irqrestore(&sbi->cp_lock, flags);
1882 }
1883 
1884 static inline void disable_nat_bits(struct f2fs_sb_info *sbi, bool lock)
1885 {
1886 	unsigned long flags;
1887 	unsigned char *nat_bits;
1888 
1889 	/*
1890 	 * In order to re-enable nat_bits we need to call fsck.f2fs by
1891 	 * set_sbi_flag(sbi, SBI_NEED_FSCK), but that would be costly,
1892 	 * so let's rely on regular fsck or unclean shutdown.
1893 	 */
1894 
1895 	if (lock)
1896 		spin_lock_irqsave(&sbi->cp_lock, flags);
1897 	__clear_ckpt_flags(F2FS_CKPT(sbi), CP_NAT_BITS_FLAG);
1898 	nat_bits = NM_I(sbi)->nat_bits;
1899 	NM_I(sbi)->nat_bits = NULL;
1900 	if (lock)
1901 		spin_unlock_irqrestore(&sbi->cp_lock, flags);
1902 
1903 	kvfree(nat_bits);
1904 }
1905 
1906 static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi,
1907 					struct cp_control *cpc)
1908 {
1909 	bool set = is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
1910 
1911 	return (cpc) ? (cpc->reason & CP_UMOUNT) && set : set;
1912 }
1913 
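/*
 * cp_rwsem convention: regular FS operations hold it shared via
 * f2fs_lock_op()/f2fs_unlock_op(), while checkpoint takes it exclusively
 * via f2fs_lock_all()/f2fs_unlock_all() to block them.
 */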
1914 static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
1915 {
1916 	down_read(&sbi->cp_rwsem);
1917 }
1918 
1919 static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
1920 {
1921 	return down_read_trylock(&sbi->cp_rwsem);
1922 }
1923 
1924 static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
1925 {
1926 	up_read(&sbi->cp_rwsem);
1927 }
1928 
1929 static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
1930 {
1931 	down_write(&sbi->cp_rwsem);
1932 }
1933 
1934 static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
1935 {
1936 	up_write(&sbi->cp_rwsem);
1937 }
1938 
1939 static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
1940 {
1941 	int reason = CP_SYNC;
1942 
1943 	if (test_opt(sbi, FASTBOOT))
1944 		reason = CP_FASTBOOT;
1945 	if (is_sbi_flag_set(sbi, SBI_IS_CLOSE))
1946 		reason = CP_UMOUNT;
1947 	return reason;
1948 }
1949 
1950 static inline bool __remain_node_summaries(int reason)
1951 {
1952 	return (reason & (CP_UMOUNT | CP_FASTBOOT));
1953 }
1954 
1955 static inline bool __exist_node_summaries(struct f2fs_sb_info *sbi)
1956 {
1957 	return (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG) ||
1958 			is_set_ckpt_flags(sbi, CP_FASTBOOT_FLAG));
1959 }
1960 
1961 /*
1962  * Check whether the inode has blocks or not
1963  */
1964 static inline int F2FS_HAS_BLOCKS(struct inode *inode)
1965 {
1966 	block_t xattr_block = F2FS_I(inode)->i_xattr_nid ? 1 : 0;
1967 
1968 	return (inode->i_blocks >> F2FS_LOG_SECTORS_PER_BLOCK) > xattr_block;
1969 }
1970 
1971 static inline bool f2fs_has_xattr_block(unsigned int ofs)
1972 {
1973 	return ofs == XATTR_NODE_OFFSET;
1974 }
1975 
1976 static inline bool __allow_reserved_blocks(struct f2fs_sb_info *sbi,
1977 					struct inode *inode, bool cap)
1978 {
1979 	if (!inode)
1980 		return true;
1981 	if (!test_opt(sbi, RESERVE_ROOT))
1982 		return false;
1983 	if (IS_NOQUOTA(inode))
1984 		return true;
1985 	if (uid_eq(F2FS_OPTION(sbi).s_resuid, current_fsuid()))
1986 		return true;
1987 	if (!gid_eq(F2FS_OPTION(sbi).s_resgid, GLOBAL_ROOT_GID) &&
1988 					in_group_p(F2FS_OPTION(sbi).s_resgid))
1989 		return true;
1990 	if (cap && capable(CAP_SYS_RESOURCE))
1991 		return true;
1992 	return false;
1993 }
1994 
1995 static inline void f2fs_i_blocks_write(struct inode *, block_t, bool, bool);
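/*
 * inc_valid_block_count() reserves quota for *count blocks and charges
 * the in-memory counters; when free space above the reserved/unusable
 * pools is short it trims *count to what fits, and returns -ENOSPC only
 * when nothing can be granted.
 */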
1996 static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
1997 				 struct inode *inode, blkcnt_t *count)
1998 {
1999 	blkcnt_t diff = 0, release = 0;
2000 	block_t avail_user_block_count;
2001 	int ret;
2002 
2003 	ret = dquot_reserve_block(inode, *count);
2004 	if (ret)
2005 		return ret;
2006 
2007 	if (time_to_inject(sbi, FAULT_BLOCK)) {
2008 		f2fs_show_injection_info(sbi, FAULT_BLOCK);
2009 		release = *count;
2010 		goto release_quota;
2011 	}
2012 
2013 	/*
2014 	 * let's increase this before the actual block count change so that
2015 	 * f2fs_sync_file can avoid data races when deciding checkpoint.
2016 	 */
2017 	percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
2018 
2019 	spin_lock(&sbi->stat_lock);
2020 	sbi->total_valid_block_count += (block_t)(*count);
2021 	avail_user_block_count = sbi->user_block_count -
2022 					sbi->current_reserved_blocks;
2023 
2024 	if (!__allow_reserved_blocks(sbi, inode, true))
2025 		avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
2026 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2027 		if (avail_user_block_count > sbi->unusable_block_count)
2028 			avail_user_block_count -= sbi->unusable_block_count;
2029 		else
2030 			avail_user_block_count = 0;
2031 	}
2032 	if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
2033 		diff = sbi->total_valid_block_count - avail_user_block_count;
2034 		if (diff > *count)
2035 			diff = *count;
2036 		*count -= diff;
2037 		release = diff;
2038 		sbi->total_valid_block_count -= diff;
2039 		if (!*count) {
2040 			spin_unlock(&sbi->stat_lock);
2041 			goto enospc;
2042 		}
2043 	}
2044 	spin_unlock(&sbi->stat_lock);
2045 
2046 	if (unlikely(release)) {
2047 		percpu_counter_sub(&sbi->alloc_valid_block_count, release);
2048 		dquot_release_reservation_block(inode, release);
2049 	}
2050 	f2fs_i_blocks_write(inode, *count, true, true);
2051 	return 0;
2052 
2053 enospc:
2054 	percpu_counter_sub(&sbi->alloc_valid_block_count, release);
2055 release_quota:
2056 	dquot_release_reservation_block(inode, release);
2057 	return -ENOSPC;
2058 }
2059 
2060 __printf(2, 3)
2061 void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...);
2062 
2063 #define f2fs_err(sbi, fmt, ...)						\
2064 	f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
2065 #define f2fs_warn(sbi, fmt, ...)					\
2066 	f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__)
2067 #define f2fs_notice(sbi, fmt, ...)					\
2068 	f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__)
2069 #define f2fs_info(sbi, fmt, ...)					\
2070 	f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__)
2071 #define f2fs_debug(sbi, fmt, ...)					\
2072 	f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__)
2073 
2074 static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
2075 						struct inode *inode,
2076 						block_t count)
2077 {
2078 	blkcnt_t sectors = count << F2FS_LOG_SECTORS_PER_BLOCK;
2079 
2080 	spin_lock(&sbi->stat_lock);
2081 	f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
2082 	sbi->total_valid_block_count -= (block_t)count;
2083 	if (sbi->reserved_blocks &&
2084 		sbi->current_reserved_blocks < sbi->reserved_blocks)
2085 		sbi->current_reserved_blocks = min(sbi->reserved_blocks,
2086 					sbi->current_reserved_blocks + count);
2087 	spin_unlock(&sbi->stat_lock);
2088 	if (unlikely(inode->i_blocks < sectors)) {
2089 		f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
2090 			  inode->i_ino,
2091 			  (unsigned long long)inode->i_blocks,
2092 			  (unsigned long long)sectors);
2093 		set_sbi_flag(sbi, SBI_NEED_FSCK);
2094 		return;
2095 	}
2096 	f2fs_i_blocks_write(inode, count, false, true);
2097 }
2098 
2099 static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
2100 {
2101 	atomic_inc(&sbi->nr_pages[count_type]);
2102 
2103 	if (count_type == F2FS_DIRTY_DENTS ||
2104 			count_type == F2FS_DIRTY_NODES ||
2105 			count_type == F2FS_DIRTY_META ||
2106 			count_type == F2FS_DIRTY_QDATA ||
2107 			count_type == F2FS_DIRTY_IMETA)
2108 		set_sbi_flag(sbi, SBI_IS_DIRTY);
2109 }
2110 
2111 static inline void inode_inc_dirty_pages(struct inode *inode)
2112 {
2113 	atomic_inc(&F2FS_I(inode)->dirty_pages);
2114 	inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2115 				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2116 	if (IS_NOQUOTA(inode))
2117 		inc_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2118 }
2119 
2120 static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type)
2121 {
2122 	atomic_dec(&sbi->nr_pages[count_type]);
2123 }
2124 
2125 static inline void inode_dec_dirty_pages(struct inode *inode)
2126 {
2127 	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
2128 			!S_ISLNK(inode->i_mode))
2129 		return;
2130 
2131 	atomic_dec(&F2FS_I(inode)->dirty_pages);
2132 	dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ?
2133 				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA);
2134 	if (IS_NOQUOTA(inode))
2135 		dec_page_count(F2FS_I_SB(inode), F2FS_DIRTY_QDATA);
2136 }
2137 
2138 static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type)
2139 {
2140 	return atomic_read(&sbi->nr_pages[count_type]);
2141 }
2142 
2143 static inline int get_dirty_pages(struct inode *inode)
2144 {
2145 	return atomic_read(&F2FS_I(inode)->dirty_pages);
2146 }
2147 
2148 static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type)
2149 {
2150 	unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg;
2151 	unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >>
2152 						sbi->log_blocks_per_seg;
2153 
2154 	return segs / sbi->segs_per_sec;
2155 }
2156 
2157 static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi)
2158 {
2159 	return sbi->total_valid_block_count;
2160 }
2161 
2162 static inline block_t discard_blocks(struct f2fs_sb_info *sbi)
2163 {
2164 	return sbi->discard_blks;
2165 }
2166 
2167 static inline unsigned long __bitmap_size(struct f2fs_sb_info *sbi, int flag)
2168 {
2169 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2170 
2171 	/* return NAT or SIT bitmap */
2172 	if (flag == NAT_BITMAP)
2173 		return le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
2174 	else if (flag == SIT_BITMAP)
2175 		return le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
2176 
2177 	return 0;
2178 }
2179 
2180 static inline block_t __cp_payload(struct f2fs_sb_info *sbi)
2181 {
2182 	return le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_payload);
2183 }
2184 
2185 static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
2186 {
2187 	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2188 	int offset;
2189 
2190 	if (is_set_ckpt_flags(sbi, CP_LARGE_NAT_BITMAP_FLAG)) {
2191 		offset = (flag == SIT_BITMAP) ?
2192 			le32_to_cpu(ckpt->nat_ver_bitmap_bytesize) : 0;
2193 		/*
2194 		 * if large_nat_bitmap feature is enabled, leave checksum
2195 		 * protection for all nat/sit bitmaps.
2196 		 */
2197 		return &ckpt->sit_nat_version_bitmap + offset + sizeof(__le32);
2198 	}
2199 
2200 	if (__cp_payload(sbi) > 0) {
2201 		if (flag == NAT_BITMAP)
2202 			return &ckpt->sit_nat_version_bitmap;
2203 		else
2204 			return (unsigned char *)ckpt + F2FS_BLKSIZE;
2205 	} else {
2206 		offset = (flag == NAT_BITMAP) ?
2207 			le32_to_cpu(ckpt->sit_ver_bitmap_bytesize) : 0;
2208 		return &ckpt->sit_nat_version_bitmap + offset;
2209 	}
2210 }
2211 
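/*
 * Checkpoints alternate between two packs: pack #1 starts at cp_blkaddr
 * and pack #2 one segment later; cur_cp_pack records which pack holds
 * the current checkpoint.
 */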
2212 static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
2213 {
2214 	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2215 
2216 	if (sbi->cur_cp_pack == 2)
2217 		start_addr += sbi->blocks_per_seg;
2218 	return start_addr;
2219 }
2220 
2221 static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
2222 {
2223 	block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
2224 
2225 	if (sbi->cur_cp_pack == 1)
2226 		start_addr += sbi->blocks_per_seg;
2227 	return start_addr;
2228 }
2229 
2230 static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
2231 {
2232 	sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
2233 }
2234 
2235 static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
2236 {
2237 	return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
2238 }
2239 
2240 static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
2241 					struct inode *inode, bool is_inode)
2242 {
2243 	block_t	valid_block_count;
2244 	unsigned int valid_node_count, user_block_count;
2245 	int err;
2246 
2247 	if (is_inode) {
2248 		if (inode) {
2249 			err = dquot_alloc_inode(inode);
2250 			if (err)
2251 				return err;
2252 		}
2253 	} else {
2254 		err = dquot_reserve_block(inode, 1);
2255 		if (err)
2256 			return err;
2257 	}
2258 
2259 	if (time_to_inject(sbi, FAULT_BLOCK)) {
2260 		f2fs_show_injection_info(sbi, FAULT_BLOCK);
2261 		goto enospc;
2262 	}
2263 
2264 	spin_lock(&sbi->stat_lock);
2265 
2266 	valid_block_count = sbi->total_valid_block_count +
2267 					sbi->current_reserved_blocks + 1;
2268 
2269 	if (!__allow_reserved_blocks(sbi, inode, false))
2270 		valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
2271 	user_block_count = sbi->user_block_count;
2272 	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2273 		user_block_count -= sbi->unusable_block_count;
2274 
2275 	if (unlikely(valid_block_count > user_block_count)) {
2276 		spin_unlock(&sbi->stat_lock);
2277 		goto enospc;
2278 	}
2279 
2280 	valid_node_count = sbi->total_valid_node_count + 1;
2281 	if (unlikely(valid_node_count > sbi->total_node_count)) {
2282 		spin_unlock(&sbi->stat_lock);
2283 		goto enospc;
2284 	}
2285 
2286 	sbi->total_valid_node_count++;
2287 	sbi->total_valid_block_count++;
2288 	spin_unlock(&sbi->stat_lock);
2289 
2290 	if (inode) {
2291 		if (is_inode)
2292 			f2fs_mark_inode_dirty_sync(inode, true);
2293 		else
2294 			f2fs_i_blocks_write(inode, 1, true, true);
2295 	}
2296 
2297 	percpu_counter_inc(&sbi->alloc_valid_block_count);
2298 	return 0;
2299 
2300 enospc:
2301 	if (is_inode) {
2302 		if (inode)
2303 			dquot_free_inode(inode);
2304 	} else {
2305 		dquot_release_reservation_block(inode, 1);
2306 	}
2307 	return -ENOSPC;
2308 }
2309 
2310 static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
2311 					struct inode *inode, bool is_inode)
2312 {
2313 	spin_lock(&sbi->stat_lock);
2314 
2315 	f2fs_bug_on(sbi, !sbi->total_valid_block_count);
2316 	f2fs_bug_on(sbi, !sbi->total_valid_node_count);
2317 
2318 	sbi->total_valid_node_count--;
2319 	sbi->total_valid_block_count--;
2320 	if (sbi->reserved_blocks &&
2321 		sbi->current_reserved_blocks < sbi->reserved_blocks)
2322 		sbi->current_reserved_blocks++;
2323 
2324 	spin_unlock(&sbi->stat_lock);
2325 
2326 	if (is_inode) {
2327 		dquot_free_inode(inode);
2328 	} else {
2329 		if (unlikely(inode->i_blocks == 0)) {
2330 			f2fs_warn(sbi, "dec_valid_node_count: inconsistent i_blocks, ino:%lu, iblocks:%llu",
2331 				  inode->i_ino,
2332 				  (unsigned long long)inode->i_blocks);
2333 			set_sbi_flag(sbi, SBI_NEED_FSCK);
2334 			return;
2335 		}
2336 		f2fs_i_blocks_write(inode, 1, false, true);
2337 	}
2338 }
2339 
2340 static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi)
2341 {
2342 	return sbi->total_valid_node_count;
2343 }
2344 
2345 static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi)
2346 {
2347 	percpu_counter_inc(&sbi->total_valid_inode_count);
2348 }
2349 
2350 static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi)
2351 {
2352 	percpu_counter_dec(&sbi->total_valid_inode_count);
2353 }
2354 
2355 static inline s64 valid_inode_count(struct f2fs_sb_info *sbi)
2356 {
2357 	return percpu_counter_sum_positive(&sbi->total_valid_inode_count);
2358 }
2359 
2360 static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
2361 						pgoff_t index, bool for_write)
2362 {
2363 	struct page *page;
2364 
2365 	if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
2366 		if (!for_write)
2367 			page = find_get_page_flags(mapping, index,
2368 							FGP_LOCK | FGP_ACCESSED);
2369 		else
2370 			page = find_lock_page(mapping, index);
2371 		if (page)
2372 			return page;
2373 
2374 		if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
2375 			f2fs_show_injection_info(F2FS_M_SB(mapping),
2376 							FAULT_PAGE_ALLOC);
2377 			return NULL;
2378 		}
2379 	}
2380 
2381 	if (!for_write)
2382 		return grab_cache_page(mapping, index);
2383 	return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
2384 }
2385 
2386 static inline struct page *f2fs_pagecache_get_page(
2387 				struct address_space *mapping, pgoff_t index,
2388 				int fgp_flags, gfp_t gfp_mask)
2389 {
2390 	if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
2391 		f2fs_show_injection_info(F2FS_M_SB(mapping), FAULT_PAGE_GET);
2392 		return NULL;
2393 	}
2394 
2395 	return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
2396 }
2397 
2398 static inline void f2fs_copy_page(struct page *src, struct page *dst)
2399 {
2400 	char *src_kaddr = kmap(src);
2401 	char *dst_kaddr = kmap(dst);
2402 
2403 	memcpy(dst_kaddr, src_kaddr, PAGE_SIZE);
2404 	kunmap(dst);
2405 	kunmap(src);
2406 }
2407 
2408 static inline void f2fs_put_page(struct page *page, int unlock)
2409 {
2410 	if (!page)
2411 		return;
2412 
2413 	if (unlock) {
2414 		f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page));
2415 		unlock_page(page);
2416 	}
2417 	put_page(page);
2418 }
2419 
2420 static inline void f2fs_put_dnode(struct dnode_of_data *dn)
2421 {
2422 	if (dn->node_page)
2423 		f2fs_put_page(dn->node_page, 1);
2424 	if (dn->inode_page && dn->node_page != dn->inode_page)
2425 		f2fs_put_page(dn->inode_page, 0);
2426 	dn->node_page = NULL;
2427 	dn->inode_page = NULL;
2428 }
2429 
2430 static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
2431 					size_t size)
2432 {
2433 	return kmem_cache_create(name, size, 0, SLAB_RECLAIM_ACCOUNT, NULL);
2434 }
2435 
2436 static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
2437 						gfp_t flags)
2438 {
2439 	void *entry;
2440 
2441 	entry = kmem_cache_alloc(cachep, flags);
2442 	if (!entry)
2443 		entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
2444 	return entry;
2445 }
2446 
2447 static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
2448 {
2449 	if (sbi->gc_mode == GC_URGENT_HIGH)
2450 		return true;
2451 
2452 	if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
2453 		get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
2454 		get_pages(sbi, F2FS_WB_CP_DATA) ||
2455 		get_pages(sbi, F2FS_DIO_READ) ||
2456 		get_pages(sbi, F2FS_DIO_WRITE))
2457 		return false;
2458 
2459 	if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info &&
2460 			atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
2461 		return false;
2462 
2463 	if (SM_I(sbi) && SM_I(sbi)->fcc_info &&
2464 			atomic_read(&SM_I(sbi)->fcc_info->queued_flush))
2465 		return false;
2466 
2467 	if (sbi->gc_mode == GC_URGENT_LOW &&
2468 			(type == DISCARD_TIME || type == GC_TIME))
2469 		return true;
2470 
2471 	return f2fs_time_over(sbi, type);
2472 }
2473 
2474 static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
2475 				unsigned long index, void *item)
2476 {
2477 	while (radix_tree_insert(root, index, item))
2478 		cond_resched();
2479 }
2480 
2481 #define RAW_IS_INODE(p)	((p)->footer.nid == (p)->footer.ino)
2482 
2483 static inline bool IS_INODE(struct page *page)
2484 {
2485 	struct f2fs_node *p = F2FS_NODE(page);
2486 
2487 	return RAW_IS_INODE(p);
2488 }
2489 
2490 static inline int offset_in_addr(struct f2fs_inode *i)
2491 {
2492 	return (i->i_inline & F2FS_EXTRA_ATTR) ?
2493 			(le16_to_cpu(i->i_extra_isize) / sizeof(__le32)) : 0;
2494 }
2495 
2496 static inline __le32 *blkaddr_in_node(struct f2fs_node *node)
2497 {
2498 	return RAW_IS_INODE(node) ? node->i.i_addr : node->dn.addr;
2499 }
2500 
2501 static inline int f2fs_has_extra_attr(struct inode *inode);
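/*
 * data_blkaddr() returns the block address at @offset in a node page,
 * skipping the inode's extra attribute area first (taken from the raw
 * on-disk inode when called without an in-memory inode, i.e. from GC).
 */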
2502 static inline block_t data_blkaddr(struct inode *inode,
2503 			struct page *node_page, unsigned int offset)
2504 {
2505 	struct f2fs_node *raw_node;
2506 	__le32 *addr_array;
2507 	int base = 0;
2508 	bool is_inode = IS_INODE(node_page);
2509 
2510 	raw_node = F2FS_NODE(node_page);
2511 
2512 	if (is_inode) {
2513 		if (!inode)
2514 			/* from GC path only */
2515 			base = offset_in_addr(&raw_node->i);
2516 		else if (f2fs_has_extra_attr(inode))
2517 			base = get_extra_isize(inode);
2518 	}
2519 
2520 	addr_array = blkaddr_in_node(raw_node);
2521 	return le32_to_cpu(addr_array[base + offset]);
2522 }
2523 
2524 static inline block_t f2fs_data_blkaddr(struct dnode_of_data *dn)
2525 {
2526 	return data_blkaddr(dn->inode, dn->node_page, dn->ofs_in_node);
2527 }
2528 
2529 static inline int f2fs_test_bit(unsigned int nr, char *addr)
2530 {
2531 	int mask;
2532 
2533 	addr += (nr >> 3);
2534 	mask = 1 << (7 - (nr & 0x07));
2535 	return mask & *addr;
2536 }
2537 
2538 static inline void f2fs_set_bit(unsigned int nr, char *addr)
2539 {
2540 	int mask;
2541 
2542 	addr += (nr >> 3);
2543 	mask = 1 << (7 - (nr & 0x07));
2544 	*addr |= mask;
2545 }
2546 
2547 static inline void f2fs_clear_bit(unsigned int nr, char *addr)
2548 {
2549 	int mask;
2550 
2551 	addr += (nr >> 3);
2552 	mask = 1 << (7 - (nr & 0x07));
2553 	*addr &= ~mask;
2554 }
2555 
2556 static inline int f2fs_test_and_set_bit(unsigned int nr, char *addr)
2557 {
2558 	int mask;
2559 	int ret;
2560 
2561 	addr += (nr >> 3);
2562 	mask = 1 << (7 - (nr & 0x07));
2563 	ret = mask & *addr;
2564 	*addr |= mask;
2565 	return ret;
2566 }
2567 
2568 static inline int f2fs_test_and_clear_bit(unsigned int nr, char *addr)
2569 {
2570 	int mask;
2571 	int ret;
2572 
2573 	addr += (nr >> 3);
2574 	mask = 1 << (7 - (nr & 0x07));
2575 	ret = mask & *addr;
2576 	*addr &= ~mask;
2577 	return ret;
2578 }
2579 
2580 static inline void f2fs_change_bit(unsigned int nr, char *addr)
2581 {
2582 	int mask;
2583 
2584 	addr += (nr >> 3);
2585 	mask = 1 << (7 - (nr & 0x07));
2586 	*addr ^= mask;
2587 }
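/*
 * Note: the f2fs_*_bit() helpers above address bits MSB-first within each
 * byte (bit 0 maps to mask 0x80 of addr[0]) to match the on-disk bitmap
 * layout, so they are not interchangeable with the generic bitops.
 */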
2588 
2589 /*
2590  * On-disk inode flags (f2fs_inode::i_flags)
2591  */
2592 #define F2FS_COMPR_FL			0x00000004 /* Compress file */
2593 #define F2FS_SYNC_FL			0x00000008 /* Synchronous updates */
2594 #define F2FS_IMMUTABLE_FL		0x00000010 /* Immutable file */
2595 #define F2FS_APPEND_FL			0x00000020 /* writes to file may only append */
2596 #define F2FS_NODUMP_FL			0x00000040 /* do not dump file */
2597 #define F2FS_NOATIME_FL			0x00000080 /* do not update atime */
2598 #define F2FS_NOCOMP_FL			0x00000400 /* Don't compress */
2599 #define F2FS_INDEX_FL			0x00001000 /* hash-indexed directory */
2600 #define F2FS_DIRSYNC_FL			0x00010000 /* dirsync behaviour (directories only) */
2601 #define F2FS_PROJINHERIT_FL		0x20000000 /* Create with parents projid */
2602 #define F2FS_CASEFOLD_FL		0x40000000 /* Casefolded file */
2603 
2604 /* Flags that should be inherited by new inodes from their parent. */
2605 #define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
2606 			   F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
2607 			   F2FS_CASEFOLD_FL | F2FS_COMPR_FL | F2FS_NOCOMP_FL)
2608 
2609 /* Flags that are appropriate for regular files (all but dir-specific ones). */
2610 #define F2FS_REG_FLMASK		(~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL | \
2611 				F2FS_CASEFOLD_FL))
2612 
2613 /* Flags that are appropriate for files that are neither directories nor regular files. */
2614 #define F2FS_OTHER_FLMASK	(F2FS_NODUMP_FL | F2FS_NOATIME_FL)
2615 
2616 static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
2617 {
2618 	if (S_ISDIR(mode))
2619 		return flags;
2620 	else if (S_ISREG(mode))
2621 		return flags & F2FS_REG_FLMASK;
2622 	else
2623 		return flags & F2FS_OTHER_FLMASK;
2624 }
2625 
2626 static inline void __mark_inode_dirty_flag(struct inode *inode,
2627 						int flag, bool set)
2628 {
2629 	switch (flag) {
2630 	case FI_INLINE_XATTR:
2631 	case FI_INLINE_DATA:
2632 	case FI_INLINE_DENTRY:
2633 	case FI_NEW_INODE:
2634 		if (set)
2635 			return;
2636 		fallthrough;
2637 	case FI_DATA_EXIST:
2638 	case FI_INLINE_DOTS:
2639 	case FI_PIN_FILE:
2640 		f2fs_mark_inode_dirty_sync(inode, true);
2641 	}
2642 }
2643 
2644 static inline void set_inode_flag(struct inode *inode, int flag)
2645 {
2646 	set_bit(flag, F2FS_I(inode)->flags);
2647 	__mark_inode_dirty_flag(inode, flag, true);
2648 }
2649 
2650 static inline int is_inode_flag_set(struct inode *inode, int flag)
2651 {
2652 	return test_bit(flag, F2FS_I(inode)->flags);
2653 }
2654 
2655 static inline void clear_inode_flag(struct inode *inode, int flag)
2656 {
2657 	clear_bit(flag, F2FS_I(inode)->flags);
2658 	__mark_inode_dirty_flag(inode, flag, false);
2659 }
2660 
2661 static inline bool f2fs_verity_in_progress(struct inode *inode)
2662 {
2663 	return IS_ENABLED(CONFIG_FS_VERITY) &&
2664 	       is_inode_flag_set(inode, FI_VERITY_IN_PROGRESS);
2665 }
2666 
2667 static inline void set_acl_inode(struct inode *inode, umode_t mode)
2668 {
2669 	F2FS_I(inode)->i_acl_mode = mode;
2670 	set_inode_flag(inode, FI_ACL_MODE);
2671 	f2fs_mark_inode_dirty_sync(inode, false);
2672 }
2673 
2674 static inline void f2fs_i_links_write(struct inode *inode, bool inc)
2675 {
2676 	if (inc)
2677 		inc_nlink(inode);
2678 	else
2679 		drop_nlink(inode);
2680 	f2fs_mark_inode_dirty_sync(inode, true);
2681 }
2682 
2683 static inline void f2fs_i_blocks_write(struct inode *inode,
2684 					block_t diff, bool add, bool claim)
2685 {
2686 	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
2687 	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
2688 
2689 	/* add && claim must be paired with a prior dquot_reserve_block() */
2690 	if (add) {
2691 		if (claim)
2692 			dquot_claim_block(inode, diff);
2693 		else
2694 			dquot_alloc_block_nofail(inode, diff);
2695 	} else {
2696 		dquot_free_block(inode, diff);
2697 	}
2698 
2699 	f2fs_mark_inode_dirty_sync(inode, true);
2700 	if (clean || recover)
2701 		set_inode_flag(inode, FI_AUTO_RECOVER);
2702 }
2703 
2704 static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
2705 {
2706 	bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
2707 	bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
2708 
2709 	if (i_size_read(inode) == i_size)
2710 		return;
2711 
2712 	i_size_write(inode, i_size);
2713 	f2fs_mark_inode_dirty_sync(inode, true);
2714 	if (clean || recover)
2715 		set_inode_flag(inode, FI_AUTO_RECOVER);
2716 }
2717 
2718 static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
2719 {
2720 	F2FS_I(inode)->i_current_depth = depth;
2721 	f2fs_mark_inode_dirty_sync(inode, true);
2722 }
2723 
2724 static inline void f2fs_i_gc_failures_write(struct inode *inode,
2725 					unsigned int count)
2726 {
2727 	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = count;
2728 	f2fs_mark_inode_dirty_sync(inode, true);
2729 }
2730 
2731 static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
2732 {
2733 	F2FS_I(inode)->i_xattr_nid = xnid;
2734 	f2fs_mark_inode_dirty_sync(inode, true);
2735 }
2736 
2737 static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
2738 {
2739 	F2FS_I(inode)->i_pino = pino;
2740 	f2fs_mark_inode_dirty_sync(inode, true);
2741 }
2742 
2743 static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
2744 {
2745 	struct f2fs_inode_info *fi = F2FS_I(inode);
2746 
2747 	if (ri->i_inline & F2FS_INLINE_XATTR)
2748 		set_bit(FI_INLINE_XATTR, fi->flags);
2749 	if (ri->i_inline & F2FS_INLINE_DATA)
2750 		set_bit(FI_INLINE_DATA, fi->flags);
2751 	if (ri->i_inline & F2FS_INLINE_DENTRY)
2752 		set_bit(FI_INLINE_DENTRY, fi->flags);
2753 	if (ri->i_inline & F2FS_DATA_EXIST)
2754 		set_bit(FI_DATA_EXIST, fi->flags);
2755 	if (ri->i_inline & F2FS_INLINE_DOTS)
2756 		set_bit(FI_INLINE_DOTS, fi->flags);
2757 	if (ri->i_inline & F2FS_EXTRA_ATTR)
2758 		set_bit(FI_EXTRA_ATTR, fi->flags);
2759 	if (ri->i_inline & F2FS_PIN_FILE)
2760 		set_bit(FI_PIN_FILE, fi->flags);
2761 }
2762 
2763 static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
2764 {
2765 	ri->i_inline = 0;
2766 
2767 	if (is_inode_flag_set(inode, FI_INLINE_XATTR))
2768 		ri->i_inline |= F2FS_INLINE_XATTR;
2769 	if (is_inode_flag_set(inode, FI_INLINE_DATA))
2770 		ri->i_inline |= F2FS_INLINE_DATA;
2771 	if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
2772 		ri->i_inline |= F2FS_INLINE_DENTRY;
2773 	if (is_inode_flag_set(inode, FI_DATA_EXIST))
2774 		ri->i_inline |= F2FS_DATA_EXIST;
2775 	if (is_inode_flag_set(inode, FI_INLINE_DOTS))
2776 		ri->i_inline |= F2FS_INLINE_DOTS;
2777 	if (is_inode_flag_set(inode, FI_EXTRA_ATTR))
2778 		ri->i_inline |= F2FS_EXTRA_ATTR;
2779 	if (is_inode_flag_set(inode, FI_PIN_FILE))
2780 		ri->i_inline |= F2FS_PIN_FILE;
2781 }
2782 
2783 static inline int f2fs_has_extra_attr(struct inode *inode)
2784 {
2785 	return is_inode_flag_set(inode, FI_EXTRA_ATTR);
2786 }
2787 
2788 static inline int f2fs_has_inline_xattr(struct inode *inode)
2789 {
2790 	return is_inode_flag_set(inode, FI_INLINE_XATTR);
2791 }
2792 
2793 static inline int f2fs_compressed_file(struct inode *inode)
2794 {
2795 	return S_ISREG(inode->i_mode) &&
2796 		is_inode_flag_set(inode, FI_COMPRESSED_FILE);
2797 }
2798 
2799 static inline unsigned int addrs_per_inode(struct inode *inode)
2800 {
2801 	unsigned int addrs = CUR_ADDRS_PER_INODE(inode) -
2802 				get_inline_xattr_addrs(inode);
2803 
2804 	if (!f2fs_compressed_file(inode))
2805 		return addrs;
2806 	return ALIGN_DOWN(addrs, F2FS_I(inode)->i_cluster_size);
2807 }
2808 
2809 static inline unsigned int addrs_per_block(struct inode *inode)
2810 {
2811 	if (!f2fs_compressed_file(inode))
2812 		return DEF_ADDRS_PER_BLOCK;
2813 	return ALIGN_DOWN(DEF_ADDRS_PER_BLOCK, F2FS_I(inode)->i_cluster_size);
2814 }
2815 
2816 static inline void *inline_xattr_addr(struct inode *inode, struct page *page)
2817 {
2818 	struct f2fs_inode *ri = F2FS_INODE(page);
2819 
2820 	return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
2821 					get_inline_xattr_addrs(inode)]);
2822 }
2823 
2824 static inline int inline_xattr_size(struct inode *inode)
2825 {
2826 	if (f2fs_has_inline_xattr(inode))
2827 		return get_inline_xattr_addrs(inode) * sizeof(__le32);
2828 	return 0;
2829 }
2830 
2831 static inline int f2fs_has_inline_data(struct inode *inode)
2832 {
2833 	return is_inode_flag_set(inode, FI_INLINE_DATA);
2834 }
2835 
2836 static inline int f2fs_exist_data(struct inode *inode)
2837 {
2838 	return is_inode_flag_set(inode, FI_DATA_EXIST);
2839 }
2840 
2841 static inline int f2fs_has_inline_dots(struct inode *inode)
2842 {
2843 	return is_inode_flag_set(inode, FI_INLINE_DOTS);
2844 }
2845 
2846 static inline int f2fs_is_mmap_file(struct inode *inode)
2847 {
2848 	return is_inode_flag_set(inode, FI_MMAP_FILE);
2849 }
2850 
2851 static inline bool f2fs_is_pinned_file(struct inode *inode)
2852 {
2853 	return is_inode_flag_set(inode, FI_PIN_FILE);
2854 }
2855 
2856 static inline bool f2fs_is_atomic_file(struct inode *inode)
2857 {
2858 	return is_inode_flag_set(inode, FI_ATOMIC_FILE);
2859 }
2860 
2861 static inline bool f2fs_is_commit_atomic_write(struct inode *inode)
2862 {
2863 	return is_inode_flag_set(inode, FI_ATOMIC_COMMIT);
2864 }
2865 
2866 static inline bool f2fs_is_volatile_file(struct inode *inode)
2867 {
2868 	return is_inode_flag_set(inode, FI_VOLATILE_FILE);
2869 }
2870 
2871 static inline bool f2fs_is_first_block_written(struct inode *inode)
2872 {
2873 	return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
2874 }
2875 
2876 static inline bool f2fs_is_drop_cache(struct inode *inode)
2877 {
2878 	return is_inode_flag_set(inode, FI_DROP_CACHE);
2879 }
2880 
2881 static inline void *inline_data_addr(struct inode *inode, struct page *page)
2882 {
2883 	struct f2fs_inode *ri = F2FS_INODE(page);
2884 	int extra_size = get_extra_isize(inode);
2885 
2886 	return (void *)&(ri->i_addr[extra_size + DEF_INLINE_RESERVED_SIZE]);
2887 }
2888 
2889 static inline int f2fs_has_inline_dentry(struct inode *inode)
2890 {
2891 	return is_inode_flag_set(inode, FI_INLINE_DENTRY);
2892 }
2893 
2894 static inline int is_file(struct inode *inode, int type)
2895 {
2896 	return F2FS_I(inode)->i_advise & type;
2897 }
2898 
2899 static inline void set_file(struct inode *inode, int type)
2900 {
2901 	F2FS_I(inode)->i_advise |= type;
2902 	f2fs_mark_inode_dirty_sync(inode, true);
2903 }
2904 
2905 static inline void clear_file(struct inode *inode, int type)
2906 {
2907 	F2FS_I(inode)->i_advise &= ~type;
2908 	f2fs_mark_inode_dirty_sync(inode, true);
2909 }
2910 
2911 static inline bool f2fs_is_time_consistent(struct inode *inode)
2912 {
2913 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time, &inode->i_atime))
2914 		return false;
2915 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 1, &inode->i_ctime))
2916 		return false;
2917 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 2, &inode->i_mtime))
2918 		return false;
2919 	if (!timespec64_equal(F2FS_I(inode)->i_disk_time + 3,
2920 						&F2FS_I(inode)->i_crtime))
2921 		return false;
2922 	return true;
2923 }
2924 
2925 static inline bool f2fs_skip_inode_update(struct inode *inode, int dsync)
2926 {
2927 	bool ret;
2928 
2929 	if (dsync) {
2930 		struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2931 
2932 		spin_lock(&sbi->inode_lock[DIRTY_META]);
2933 		ret = list_empty(&F2FS_I(inode)->gdirty_list);
2934 		spin_unlock(&sbi->inode_lock[DIRTY_META]);
2935 		return ret;
2936 	}
2937 	if (!is_inode_flag_set(inode, FI_AUTO_RECOVER) ||
2938 			file_keep_isize(inode) ||
2939 			i_size_read(inode) & ~PAGE_MASK)
2940 		return false;
2941 
2942 	if (!f2fs_is_time_consistent(inode))
2943 		return false;
2944 
2945 	spin_lock(&F2FS_I(inode)->i_size_lock);
2946 	ret = F2FS_I(inode)->last_disk_size == i_size_read(inode);
2947 	spin_unlock(&F2FS_I(inode)->i_size_lock);
2948 
2949 	return ret;
2950 }
2951 
2952 static inline bool f2fs_readonly(struct super_block *sb)
2953 {
2954 	return sb_rdonly(sb);
2955 }
2956 
2957 static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
2958 {
2959 	return is_set_ckpt_flags(sbi, CP_ERROR_FLAG);
2960 }
2961 
2962 static inline bool is_dot_dotdot(const u8 *name, size_t len)
2963 {
2964 	if (len == 1 && name[0] == '.')
2965 		return true;
2966 
2967 	if (len == 2 && name[0] == '.' && name[1] == '.')
2968 		return true;
2969 
2970 	return false;
2971 }
2972 
2973 static inline bool f2fs_may_extent_tree(struct inode *inode)
2974 {
2975 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2976 
2977 	if (!test_opt(sbi, EXTENT_CACHE) ||
2978 			is_inode_flag_set(inode, FI_NO_EXTENT) ||
2979 			is_inode_flag_set(inode, FI_COMPRESSED_FILE))
2980 		return false;
2981 
2982 	/*
2983 	 * do not create extents for files recovered during mount
2984 	 * if the shrinker is not registered yet.
2985 	 */
2986 	if (list_empty(&sbi->s_list))
2987 		return false;
2988 
2989 	return S_ISREG(inode->i_mode);
2990 }
2991 
2992 static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
2993 					size_t size, gfp_t flags)
2994 {
2995 	if (time_to_inject(sbi, FAULT_KMALLOC)) {
2996 		f2fs_show_injection_info(sbi, FAULT_KMALLOC);
2997 		return NULL;
2998 	}
2999 
3000 	return kmalloc(size, flags);
3001 }
3002 
3003 static inline void *f2fs_kzalloc(struct f2fs_sb_info *sbi,
3004 					size_t size, gfp_t flags)
3005 {
3006 	return f2fs_kmalloc(sbi, size, flags | __GFP_ZERO);
3007 }
3008 
3009 static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
3010 					size_t size, gfp_t flags)
3011 {
3012 	if (time_to_inject(sbi, FAULT_KVMALLOC)) {
3013 		f2fs_show_injection_info(sbi, FAULT_KVMALLOC);
3014 		return NULL;
3015 	}
3016 
3017 	return kvmalloc(size, flags);
3018 }
3019 
3020 static inline void *f2fs_kvzalloc(struct f2fs_sb_info *sbi,
3021 					size_t size, gfp_t flags)
3022 {
3023 	return f2fs_kvmalloc(sbi, size, flags | __GFP_ZERO);
3024 }
3025 
3026 static inline int get_extra_isize(struct inode *inode)
3027 {
3028 	return F2FS_I(inode)->i_extra_isize / sizeof(__le32);
3029 }
3030 
3031 static inline int get_inline_xattr_addrs(struct inode *inode)
3032 {
3033 	return F2FS_I(inode)->i_inline_xattr_size;
3034 }
3035 
3036 #define f2fs_get_inode_mode(i) \
3037 	((is_inode_flag_set(i, FI_ACL_MODE)) ? \
3038 	 (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
3039 
3040 #define F2FS_TOTAL_EXTRA_ATTR_SIZE			\
3041 	(offsetof(struct f2fs_inode, i_extra_end) -	\
3042 	offsetof(struct f2fs_inode, i_extra_isize))	\
3043 
3044 #define F2FS_OLD_ATTRIBUTE_SIZE	(offsetof(struct f2fs_inode, i_addr))
3045 #define F2FS_FITS_IN_INODE(f2fs_inode, extra_isize, field)		\
3046 		((offsetof(typeof(*(f2fs_inode)), field) +	\
3047 		sizeof((f2fs_inode)->field))			\
3048 		<= (F2FS_OLD_ATTRIBUTE_SIZE + (extra_isize)))	\
3049 
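/*
 * Example (illustrative): F2FS_FITS_IN_INODE(ri, fi->i_extra_isize,
 * i_projid) checks that the i_projid field lies inside the extra
 * attribute space actually recorded for this on-disk inode before
 * reading it.
 */
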
3050 #define DEFAULT_IOSTAT_PERIOD_MS	3000
3051 #define MIN_IOSTAT_PERIOD_MS		100
3052 /* maximum period of iostat tracing is 1 day */
3053 #define MAX_IOSTAT_PERIOD_MS		8640000
3054 
3055 static inline void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
3056 {
3057 	int i;
3058 
3059 	spin_lock(&sbi->iostat_lock);
3060 	for (i = 0; i < NR_IO_TYPE; i++) {
3061 		sbi->rw_iostat[i] = 0;
3062 		sbi->prev_rw_iostat[i] = 0;
3063 	}
3064 	spin_unlock(&sbi->iostat_lock);
3065 }
3066 
3067 extern void f2fs_record_iostat(struct f2fs_sb_info *sbi);
3068 
3069 static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi,
3070 			enum iostat_type type, unsigned long long io_bytes)
3071 {
3072 	if (!sbi->iostat_enable)
3073 		return;
3074 	spin_lock(&sbi->iostat_lock);
3075 	sbi->rw_iostat[type] += io_bytes;
3076 
3077 	if (type == APP_WRITE_IO || type == APP_DIRECT_IO)
3078 		sbi->rw_iostat[APP_BUFFERED_IO] =
3079 			sbi->rw_iostat[APP_WRITE_IO] -
3080 			sbi->rw_iostat[APP_DIRECT_IO];
3081 
3082 	if (type == APP_READ_IO || type == APP_DIRECT_READ_IO)
3083 		sbi->rw_iostat[APP_BUFFERED_READ_IO] =
3084 			sbi->rw_iostat[APP_READ_IO] -
3085 			sbi->rw_iostat[APP_DIRECT_READ_IO];
3086 	spin_unlock(&sbi->iostat_lock);
3087 
3088 	f2fs_record_iostat(sbi);
3089 }
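
/*
 * Worked example (numbers are illustrative only): if an application has
 * written 100MB in total (APP_WRITE_IO), of which 30MB went through
 * direct I/O (APP_DIRECT_IO), the derived counter APP_BUFFERED_IO is
 * kept at 100MB - 30MB = 70MB; the read-side counters are derived the
 * same way.
 */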
3090 
3091 static inline block_t fs_free_space_threshold(struct f2fs_sb_info *sbi)
3092 {
3093 	return (block_t)(SM_I(sbi)->main_segments * sbi->blocks_per_seg *
3094 		FS_FREE_SPACE_PERCENT) / HUNDRED_PERCENT;
3095 }
3096 
3097 static inline block_t device_free_space_threshold(struct f2fs_sb_info *sbi)
3098 {
3099 	return (block_t)(SM_I(sbi)->main_segments * sbi->blocks_per_seg *
3100 		DEVICE_FREE_SPACE_PERCENT) / HUNDRED_PERCENT;
3101 }
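
/*
 * Worked example (the *_FREE_SPACE_PERCENT constants are defined
 * elsewhere; the value here is assumed purely for illustration): with
 * main_segments = 1000, blocks_per_seg = 512 and a threshold percentage
 * of 10, either helper above would return 1000 * 512 * 10 / 100 = 51200
 * blocks.
 */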
3102 
3103 #define __is_large_section(sbi)		((sbi)->segs_per_sec > 1)
3104 
3105 #define __is_meta_io(fio) (PAGE_TYPE_OF_BIO((fio)->type) == META)
3106 
3107 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3108 					block_t blkaddr, int type);
3109 static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
3110 					block_t blkaddr, int type)
3111 {
3112 	if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
3113 		f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
3114 			 blkaddr, type);
3115 		f2fs_bug_on(sbi, 1);
3116 	}
3117 }
3118 
3119 static inline bool __is_valid_data_blkaddr(block_t blkaddr)
3120 {
3121 	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
3122 			blkaddr == COMPRESS_ADDR)
3123 		return false;
3124 	return true;
3125 }
3126 
3127 static inline void f2fs_set_page_private(struct page *page,
3128 						unsigned long data)
3129 {
3130 	if (PagePrivate(page))
3131 		return;
3132 
3133 	attach_page_private(page, (void *)data);
3134 }
3135 
3136 static inline void f2fs_clear_page_private(struct page *page)
3137 {
3138 	detach_page_private(page);
3139 }
3140 
3141 /*
3142  * file.c
3143  */
3144 int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
3145 void f2fs_truncate_data_blocks(struct dnode_of_data *dn);
3146 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock);
3147 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock);
3148 int f2fs_truncate(struct inode *inode);
3149 int f2fs_getattr(const struct path *path, struct kstat *stat,
3150 			u32 request_mask, unsigned int flags);
3151 int f2fs_setattr(struct dentry *dentry, struct iattr *attr);
3152 int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end);
3153 void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count);
3154 int f2fs_precache_extents(struct inode *inode);
3155 long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
3156 long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
3157 int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid);
3158 int f2fs_pin_file_control(struct inode *inode, bool inc);
3159 
3160 /*
3161  * inode.c
3162  */
3163 void f2fs_set_inode_flags(struct inode *inode);
3164 bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page);
3165 void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page);
3166 struct inode *f2fs_iget(struct super_block *sb, unsigned long ino);
3167 struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino);
3168 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink);
3169 void f2fs_update_inode(struct inode *inode, struct page *node_page);
3170 void f2fs_update_inode_page(struct inode *inode);
3171 int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc);
3172 void f2fs_evict_inode(struct inode *inode);
3173 void f2fs_handle_failed_inode(struct inode *inode);
3174 
3175 /*
3176  * namei.c
3177  */
3178 int f2fs_update_extension_list(struct f2fs_sb_info *sbi, const char *name,
3179 							bool hot, bool set);
3180 struct dentry *f2fs_get_parent(struct dentry *child);
3181 
3182 /*
3183  * dir.c
3184  */
3185 unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de);
3186 int f2fs_init_casefolded_name(const struct inode *dir,
3187 			      struct f2fs_filename *fname);
3188 int f2fs_setup_filename(struct inode *dir, const struct qstr *iname,
3189 			int lookup, struct f2fs_filename *fname);
3190 int f2fs_prepare_lookup(struct inode *dir, struct dentry *dentry,
3191 			struct f2fs_filename *fname);
3192 void f2fs_free_filename(struct f2fs_filename *fname);
3193 struct f2fs_dir_entry *f2fs_find_target_dentry(const struct f2fs_dentry_ptr *d,
3194 			const struct f2fs_filename *fname, int *max_slots);
3195 int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
3196 			unsigned int start_pos, struct fscrypt_str *fstr);
3197 void f2fs_do_make_empty_dir(struct inode *inode, struct inode *parent,
3198 			struct f2fs_dentry_ptr *d);
3199 struct page *f2fs_init_inode_metadata(struct inode *inode, struct inode *dir,
3200 			const struct f2fs_filename *fname, struct page *dpage);
3201 void f2fs_update_parent_metadata(struct inode *dir, struct inode *inode,
3202 			unsigned int current_depth);
3203 int f2fs_room_for_filename(const void *bitmap, int slots, int max_slots);
3204 void f2fs_drop_nlink(struct inode *dir, struct inode *inode);
3205 struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
3206 					 const struct f2fs_filename *fname,
3207 					 struct page **res_page);
3208 struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
3209 			const struct qstr *child, struct page **res_page);
3210 struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p);
3211 ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
3212 			struct page **page);
3213 void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
3214 			struct page *page, struct inode *inode);
3215 bool f2fs_has_enough_room(struct inode *dir, struct page *ipage,
3216 			  const struct f2fs_filename *fname);
3217 void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
3218 			const struct fscrypt_str *name, f2fs_hash_t name_hash,
3219 			unsigned int bit_pos);
3220 int f2fs_add_regular_entry(struct inode *dir, const struct f2fs_filename *fname,
3221 			struct inode *inode, nid_t ino, umode_t mode);
3222 int f2fs_add_dentry(struct inode *dir, const struct f2fs_filename *fname,
3223 			struct inode *inode, nid_t ino, umode_t mode);
3224 int f2fs_do_add_link(struct inode *dir, const struct qstr *name,
3225 			struct inode *inode, nid_t ino, umode_t mode);
3226 void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
3227 			struct inode *dir, struct inode *inode);
3228 int f2fs_do_tmpfile(struct inode *inode, struct inode *dir);
3229 bool f2fs_empty_dir(struct inode *dir);
3230 
3231 static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
3232 {
3233 	if (fscrypt_is_nokey_name(dentry))
3234 		return -ENOKEY;
3235 	return f2fs_do_add_link(d_inode(dentry->d_parent), &dentry->d_name,
3236 				inode, inode->i_ino, inode->i_mode);
3237 }
3238 
3239 /*
3240  * super.c
3241  */
3242 int f2fs_inode_dirtied(struct inode *inode, bool sync);
3243 void f2fs_inode_synced(struct inode *inode);
3244 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
3245 int f2fs_quota_sync(struct super_block *sb, int type);
3246 void f2fs_quota_off_umount(struct super_block *sb);
3247 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
3248 int f2fs_sync_fs(struct super_block *sb, int sync);
3249 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);
3250 
3251 /*
3252  * hash.c
3253  */
3254 void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname);
3255 
3256 /*
3257  * node.c
3258  */
3259 struct dnode_of_data;
3260 struct node_info;
3261 
3262 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
3263 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
3264 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
3265 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
3266 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
3267 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
3268 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
3269 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
3270 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
3271 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
3272 						struct node_info *ni);
3273 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
3274 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
3275 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
3276 int f2fs_truncate_xattr_node(struct inode *inode);
3277 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
3278 					unsigned int seq_id);
3279 int f2fs_remove_inode_page(struct inode *inode);
3280 struct page *f2fs_new_inode_page(struct inode *inode);
3281 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
3282 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid);
3283 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid);
3284 struct page *f2fs_get_node_page_ra(struct page *parent, int start);
3285 int f2fs_move_node_page(struct page *node_page, int gc_type);
3286 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi);
3287 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
3288 			struct writeback_control *wbc, bool atomic,
3289 			unsigned int *seq_id);
3290 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
3291 			struct writeback_control *wbc,
3292 			bool do_balance, enum iostat_type io_type);
3293 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
3294 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
3295 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
3296 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
3297 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
3298 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
3299 int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
3300 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
3301 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
3302 			unsigned int segno, struct f2fs_summary_block *sum);
3303 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3304 int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
3305 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi);
3306 int __init f2fs_create_node_manager_caches(void);
3307 void f2fs_destroy_node_manager_caches(void);
3308 
3309 /*
3310  * segment.c
3311  */
3312 unsigned long find_rev_next_bit(const unsigned long *addr,
3313 		unsigned long size, unsigned long offset);
3314 unsigned long find_rev_next_zero_bit(const unsigned long *addr,
3315 		unsigned long size, unsigned long offset);
3316 bool f2fs_need_SSR(struct f2fs_sb_info *sbi);
3317 void f2fs_register_inmem_page(struct inode *inode, struct page *page);
3318 void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure);
3319 void f2fs_drop_inmem_pages(struct inode *inode);
3320 void f2fs_drop_inmem_page(struct inode *inode, struct page *page);
3321 int f2fs_commit_inmem_pages(struct inode *inode);
3322 void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need);
3323 void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg);
3324 int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino);
3325 int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi);
3326 int f2fs_flush_device_cache(struct f2fs_sb_info *sbi);
3327 void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free);
3328 void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr);
3329 bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr);
3330 void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi);
3331 void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi);
3332 bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
3333 void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
3334 					struct cp_control *cpc);
3335 void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
3336 block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
3337 int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
3338 void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
3339 int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
3340 bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno);
3341 void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi);
3342 void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi);
3343 void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi);
3344 void f2fs_get_new_segment(struct f2fs_sb_info *sbi,
3345 			unsigned int *newseg, bool new_sec, int dir);
3346 void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
3347 					unsigned int start, unsigned int end);
3348 void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type);
3349 void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
3350 int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
3351 bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
3352 					struct cp_control *cpc);
3353 struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno);
3354 void f2fs_update_meta_page(struct f2fs_sb_info *sbi, void *src,
3355 					block_t blk_addr);
3356 void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
3357 						enum iostat_type io_type);
3358 void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio);
3359 void f2fs_outplace_write_data(struct dnode_of_data *dn,
3360 			struct f2fs_io_info *fio);
3361 int f2fs_inplace_write_data(struct f2fs_io_info *fio);
3362 void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
3363 			block_t old_blkaddr, block_t new_blkaddr,
3364 			bool recover_curseg, bool recover_newaddr,
3365 			bool from_gc);
3366 void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3367 			block_t old_addr, block_t new_addr,
3368 			unsigned char version, bool recover_curseg,
3369 			bool recover_newaddr);
3370 void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
3371 			block_t old_blkaddr, block_t *new_blkaddr,
3372 			struct f2fs_summary *sum, int type,
3373 			struct f2fs_io_info *fio, int contig_level);
3374 void f2fs_wait_on_page_writeback(struct page *page,
3375 			enum page_type type, bool ordered, bool locked);
3376 void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
3377 void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
3378 								block_t len);
3379 void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
3380 void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk);
3381 int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
3382 			unsigned int val, int alloc);
3383 void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3384 int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi);
3385 int f2fs_check_write_pointer(struct f2fs_sb_info *sbi);
3386 int f2fs_build_segment_manager(struct f2fs_sb_info *sbi);
3387 void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi);
3388 int __init f2fs_create_segment_manager_caches(void);
3389 void f2fs_destroy_segment_manager_caches(void);
3390 int f2fs_rw_hint_to_seg_type(enum rw_hint hint);
3391 enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
3392 			enum page_type type, enum temp_type temp);
3393 unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
3394 			unsigned int segno);
3395 unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
3396 			unsigned int segno);
3397 
3398 /*
3399  * checkpoint.c
3400  */
3401 void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
3402 struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
3403 struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
3404 struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);
3405 struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
3406 bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
3407 					block_t blkaddr, int type);
3408 int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
3409 			int type, bool sync);
3410 void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
3411 long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
3412 			long nr_to_write, enum iostat_type io_type);
3413 void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
3414 void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type);
3415 void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all);
3416 bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode);
3417 void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
3418 					unsigned int devidx, int type);
3419 bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
3420 					unsigned int devidx, int type);
3421 int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi);
3422 int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi);
3423 void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi);
3424 void f2fs_add_orphan_inode(struct inode *inode);
3425 void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino);
3426 int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi);
3427 int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi);
3428 void f2fs_update_dirty_page(struct inode *inode, struct page *page);
3429 void f2fs_remove_dirty_inode(struct inode *inode);
3430 int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
3431 void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type);
3432 int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
3433 void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
3434 int __init f2fs_create_checkpoint_caches(void);
3435 void f2fs_destroy_checkpoint_caches(void);
3436 
3437 /*
3438  * data.c
3439  */
3440 int __init f2fs_init_bioset(void);
3441 void f2fs_destroy_bioset(void);
3442 struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi, int npages, bool noio);
3443 int f2fs_init_bio_entry_cache(void);
3444 void f2fs_destroy_bio_entry_cache(void);
3445 void f2fs_submit_bio(struct f2fs_sb_info *sbi,
3446 				struct bio *bio, enum page_type type);
3447 void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type);
3448 void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
3449 				struct inode *inode, struct page *page,
3450 				nid_t ino, enum page_type type);
3451 void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
3452 					struct bio **bio, struct page *page);
3453 void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
3454 int f2fs_submit_page_bio(struct f2fs_io_info *fio);
3455 int f2fs_merge_page_bio(struct f2fs_io_info *fio);
3456 void f2fs_submit_page_write(struct f2fs_io_info *fio);
3457 struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
3458 			block_t blk_addr, struct bio *bio);
3459 int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr);
3460 void f2fs_set_data_blkaddr(struct dnode_of_data *dn);
3461 void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
3462 int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
3463 int f2fs_reserve_new_block(struct dnode_of_data *dn);
3464 int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
3465 int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
3466 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
3467 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
3468 			int op_flags, bool for_write);
3469 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
3470 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
3471 			bool for_write);
3472 struct page *f2fs_get_new_data_page(struct inode *inode,
3473 			struct page *ipage, pgoff_t index, bool new_i_size);
3474 int f2fs_do_write_data_page(struct f2fs_io_info *fio);
3475 void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock);
3476 int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
3477 			int create, int flag);
3478 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3479 			u64 start, u64 len);
3480 int f2fs_encrypt_one_page(struct f2fs_io_info *fio);
3481 bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio);
3482 bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio);
3483 int f2fs_write_single_data_page(struct page *page, int *submitted,
3484 				struct bio **bio, sector_t *last_block,
3485 				struct writeback_control *wbc,
3486 				enum iostat_type io_type,
3487 				int compr_blocks, bool allow_balance);
3488 void f2fs_invalidate_page(struct page *page, unsigned int offset,
3489 			unsigned int length);
3490 int f2fs_release_page(struct page *page, gfp_t wait);
3491 #ifdef CONFIG_MIGRATION
3492 int f2fs_migrate_page(struct address_space *mapping, struct page *newpage,
3493 			struct page *page, enum migrate_mode mode);
3494 #endif
3495 bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len);
3496 void f2fs_clear_page_cache_dirty_tag(struct page *page);
3497 int f2fs_init_post_read_processing(void);
3498 void f2fs_destroy_post_read_processing(void);
3499 int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
3500 void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
3501 
3502 /*
3503  * gc.c
3504  */
3505 int f2fs_start_gc_thread(struct f2fs_sb_info *sbi);
3506 void f2fs_stop_gc_thread(struct f2fs_sb_info *sbi);
3507 block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
3508 int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, bool force,
3509 			unsigned int segno);
3510 void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
3511 int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
3512 int __init f2fs_create_garbage_collection_cache(void);
3513 void f2fs_destroy_garbage_collection_cache(void);
3514 
3515 /*
3516  * recovery.c
3517  */
3518 int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only);
3519 bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi);
3520 int __init f2fs_create_recovery_cache(void);
3521 void f2fs_destroy_recovery_cache(void);
3522 
3523 /*
3524  * debug.c
3525  */
3526 #ifdef CONFIG_F2FS_STAT_FS
3527 struct f2fs_stat_info {
3528 	struct list_head stat_list;
3529 	struct f2fs_sb_info *sbi;
3530 	int all_area_segs, sit_area_segs, nat_area_segs, ssa_area_segs;
3531 	int main_area_segs, main_area_sections, main_area_zones;
3532 	unsigned long long hit_largest, hit_cached, hit_rbtree;
3533 	unsigned long long hit_total, total_ext;
3534 	int ext_tree, zombie_tree, ext_node;
3535 	int ndirty_node, ndirty_dent, ndirty_meta, ndirty_imeta;
3536 	int ndirty_data, ndirty_qdata;
3537 	int inmem_pages;
3538 	unsigned int ndirty_dirs, ndirty_files, nquota_files, ndirty_all;
3539 	int nats, dirty_nats, sits, dirty_sits;
3540 	int free_nids, avail_nids, alloc_nids;
3541 	int total_count, utilization;
3542 	int bg_gc, nr_wb_cp_data, nr_wb_data;
3543 	int nr_rd_data, nr_rd_node, nr_rd_meta;
3544 	int nr_dio_read, nr_dio_write;
3545 	unsigned int io_skip_bggc, other_skip_bggc;
3546 	int nr_flushing, nr_flushed, flush_list_empty;
3547 	int nr_discarding, nr_discarded;
3548 	int nr_discard_cmd;
3549 	unsigned int undiscard_blks;
3550 	int inline_xattr, inline_inode, inline_dir, append, update, orphans;
3551 	int compr_inode;
3552 	unsigned long long compr_blocks;
3553 	int aw_cnt, max_aw_cnt, vw_cnt, max_vw_cnt;
3554 	unsigned int valid_count, valid_node_count, valid_inode_count, discard_blks;
3555 	unsigned int bimodal, avg_vblocks;
3556 	int util_free, util_valid, util_invalid;
3557 	int rsvd_segs, overp_segs;
3558 	int dirty_count, node_pages, meta_pages;
3559 	int prefree_count, call_count, cp_count, bg_cp_count;
3560 	int tot_segs, node_segs, data_segs, free_segs, free_secs;
3561 	int bg_node_segs, bg_data_segs;
3562 	int tot_blks, data_blks, node_blks;
3563 	int bg_data_blks, bg_node_blks;
3564 	unsigned long long skipped_atomic_files[2];
3565 	int curseg[NR_CURSEG_TYPE];
3566 	int cursec[NR_CURSEG_TYPE];
3567 	int curzone[NR_CURSEG_TYPE];
3568 	unsigned int dirty_seg[NR_CURSEG_TYPE];
3569 	unsigned int full_seg[NR_CURSEG_TYPE];
3570 	unsigned int valid_blks[NR_CURSEG_TYPE];
3571 
3572 	unsigned int meta_count[META_MAX];
3573 	unsigned int segment_count[2];
3574 	unsigned int block_count[2];
3575 	unsigned int inplace_count;
3576 	unsigned long long base_mem, cache_mem, page_mem;
3577 };
3578 
3579 static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
3580 {
3581 	return (struct f2fs_stat_info *)sbi->stat_info;
3582 }
3583 
3584 #define stat_inc_cp_count(si)		((si)->cp_count++)
3585 #define stat_inc_bg_cp_count(si)	((si)->bg_cp_count++)
3586 #define stat_inc_call_count(si)		((si)->call_count++)
3587 #define stat_inc_bggc_count(si)		((si)->bg_gc++)
3588 #define stat_io_skip_bggc_count(sbi)	((sbi)->io_skip_bggc++)
3589 #define stat_other_skip_bggc_count(sbi)	((sbi)->other_skip_bggc++)
3590 #define stat_inc_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]++)
3591 #define stat_dec_dirty_inode(sbi, type)	((sbi)->ndirty_inode[type]--)
3592 #define stat_inc_total_hit(sbi)		(atomic64_inc(&(sbi)->total_hit_ext))
3593 #define stat_inc_rbtree_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_rbtree))
3594 #define stat_inc_largest_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_largest))
3595 #define stat_inc_cached_node_hit(sbi)	(atomic64_inc(&(sbi)->read_hit_cached))
3596 #define stat_inc_inline_xattr(inode)					\
3597 	do {								\
3598 		if (f2fs_has_inline_xattr(inode))			\
3599 			(atomic_inc(&F2FS_I_SB(inode)->inline_xattr));	\
3600 	} while (0)
3601 #define stat_dec_inline_xattr(inode)					\
3602 	do {								\
3603 		if (f2fs_has_inline_xattr(inode))			\
3604 			(atomic_dec(&F2FS_I_SB(inode)->inline_xattr));	\
3605 	} while (0)
3606 #define stat_inc_inline_inode(inode)					\
3607 	do {								\
3608 		if (f2fs_has_inline_data(inode))			\
3609 			(atomic_inc(&F2FS_I_SB(inode)->inline_inode));	\
3610 	} while (0)
3611 #define stat_dec_inline_inode(inode)					\
3612 	do {								\
3613 		if (f2fs_has_inline_data(inode))			\
3614 			(atomic_dec(&F2FS_I_SB(inode)->inline_inode));	\
3615 	} while (0)
3616 #define stat_inc_inline_dir(inode)					\
3617 	do {								\
3618 		if (f2fs_has_inline_dentry(inode))			\
3619 			(atomic_inc(&F2FS_I_SB(inode)->inline_dir));	\
3620 	} while (0)
3621 #define stat_dec_inline_dir(inode)					\
3622 	do {								\
3623 		if (f2fs_has_inline_dentry(inode))			\
3624 			(atomic_dec(&F2FS_I_SB(inode)->inline_dir));	\
3625 	} while (0)
3626 #define stat_inc_compr_inode(inode)					\
3627 	do {								\
3628 		if (f2fs_compressed_file(inode))			\
3629 			(atomic_inc(&F2FS_I_SB(inode)->compr_inode));	\
3630 	} while (0)
3631 #define stat_dec_compr_inode(inode)					\
3632 	do {								\
3633 		if (f2fs_compressed_file(inode))			\
3634 			(atomic_dec(&F2FS_I_SB(inode)->compr_inode));	\
3635 	} while (0)
3636 #define stat_add_compr_blocks(inode, blocks)				\
3637 		(atomic64_add(blocks, &F2FS_I_SB(inode)->compr_blocks))
3638 #define stat_sub_compr_blocks(inode, blocks)				\
3639 		(atomic64_sub(blocks, &F2FS_I_SB(inode)->compr_blocks))
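
/*
 * Note on stat_inc_meta_count() below: the chained comparisons rely on
 * the on-disk ordering of the metadata areas (CP < SIT < NAT < SSA <
 * main area), so a block address below sit_base_addr must belong to the
 * checkpoint area, one below nat_blkaddr to the SIT area, and so on.
 */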
3640 #define stat_inc_meta_count(sbi, blkaddr)				\
3641 	do {								\
3642 		if (blkaddr < SIT_I(sbi)->sit_base_addr)		\
3643 			atomic_inc(&(sbi)->meta_count[META_CP]);	\
3644 		else if (blkaddr < NM_I(sbi)->nat_blkaddr)		\
3645 			atomic_inc(&(sbi)->meta_count[META_SIT]);	\
3646 		else if (blkaddr < SM_I(sbi)->ssa_blkaddr)		\
3647 			atomic_inc(&(sbi)->meta_count[META_NAT]);	\
3648 		else if (blkaddr < SM_I(sbi)->main_blkaddr)		\
3649 			atomic_inc(&(sbi)->meta_count[META_SSA]);	\
3650 	} while (0)
3651 #define stat_inc_seg_type(sbi, curseg)					\
3652 		((sbi)->segment_count[(curseg)->alloc_type]++)
3653 #define stat_inc_block_count(sbi, curseg)				\
3654 		((sbi)->block_count[(curseg)->alloc_type]++)
3655 #define stat_inc_inplace_blocks(sbi)					\
3656 		(atomic_inc(&(sbi)->inplace_count))
3657 #define stat_update_max_atomic_write(inode)				\
3658 	do {								\
3659 		int cur = F2FS_I_SB(inode)->atomic_files;	\
3660 		int max = atomic_read(&F2FS_I_SB(inode)->max_aw_cnt);	\
3661 		if (cur > max)						\
3662 			atomic_set(&F2FS_I_SB(inode)->max_aw_cnt, cur);	\
3663 	} while (0)
3664 #define stat_inc_volatile_write(inode)					\
3665 		(atomic_inc(&F2FS_I_SB(inode)->vw_cnt))
3666 #define stat_dec_volatile_write(inode)					\
3667 		(atomic_dec(&F2FS_I_SB(inode)->vw_cnt))
3668 #define stat_update_max_volatile_write(inode)				\
3669 	do {								\
3670 		int cur = atomic_read(&F2FS_I_SB(inode)->vw_cnt);	\
3671 		int max = atomic_read(&F2FS_I_SB(inode)->max_vw_cnt);	\
3672 		if (cur > max)						\
3673 			atomic_set(&F2FS_I_SB(inode)->max_vw_cnt, cur);	\
3674 	} while (0)
3675 #define stat_inc_seg_count(sbi, type, gc_type)				\
3676 	do {								\
3677 		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
3678 		si->tot_segs++;						\
3679 		if ((type) == SUM_TYPE_DATA) {				\
3680 			si->data_segs++;				\
3681 			si->bg_data_segs += (gc_type == BG_GC) ? 1 : 0;	\
3682 		} else {						\
3683 			si->node_segs++;				\
3684 			si->bg_node_segs += (gc_type == BG_GC) ? 1 : 0;	\
3685 		}							\
3686 	} while (0)
3687 
3688 #define stat_inc_tot_blk_count(si, blks)				\
3689 	((si)->tot_blks += (blks))
3690 
3691 #define stat_inc_data_blk_count(sbi, blks, gc_type)			\
3692 	do {								\
3693 		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
3694 		stat_inc_tot_blk_count(si, blks);			\
3695 		si->data_blks += (blks);				\
3696 		si->bg_data_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
3697 	} while (0)
3698 
3699 #define stat_inc_node_blk_count(sbi, blks, gc_type)			\
3700 	do {								\
3701 		struct f2fs_stat_info *si = F2FS_STAT(sbi);		\
3702 		stat_inc_tot_blk_count(si, blks);			\
3703 		si->node_blks += (blks);				\
3704 		si->bg_node_blks += ((gc_type) == BG_GC) ? (blks) : 0;	\
3705 	} while (0)
3706 
3707 int f2fs_build_stats(struct f2fs_sb_info *sbi);
3708 void f2fs_destroy_stats(struct f2fs_sb_info *sbi);
3709 void __init f2fs_create_root_stats(void);
3710 void f2fs_destroy_root_stats(void);
3711 void f2fs_update_sit_info(struct f2fs_sb_info *sbi);
3712 #else
3713 #define stat_inc_cp_count(si)				do { } while (0)
3714 #define stat_inc_bg_cp_count(si)			do { } while (0)
3715 #define stat_inc_call_count(si)				do { } while (0)
3716 #define stat_inc_bggc_count(si)				do { } while (0)
3717 #define stat_io_skip_bggc_count(sbi)			do { } while (0)
3718 #define stat_other_skip_bggc_count(sbi)			do { } while (0)
3719 #define stat_inc_dirty_inode(sbi, type)			do { } while (0)
3720 #define stat_dec_dirty_inode(sbi, type)			do { } while (0)
3721 #define stat_inc_total_hit(sbi)				do { } while (0)
3722 #define stat_inc_rbtree_node_hit(sbi)			do { } while (0)
3723 #define stat_inc_largest_node_hit(sbi)			do { } while (0)
3724 #define stat_inc_cached_node_hit(sbi)			do { } while (0)
3725 #define stat_inc_inline_xattr(inode)			do { } while (0)
3726 #define stat_dec_inline_xattr(inode)			do { } while (0)
3727 #define stat_inc_inline_inode(inode)			do { } while (0)
3728 #define stat_dec_inline_inode(inode)			do { } while (0)
3729 #define stat_inc_inline_dir(inode)			do { } while (0)
3730 #define stat_dec_inline_dir(inode)			do { } while (0)
3731 #define stat_inc_compr_inode(inode)			do { } while (0)
3732 #define stat_dec_compr_inode(inode)			do { } while (0)
3733 #define stat_add_compr_blocks(inode, blocks)		do { } while (0)
3734 #define stat_sub_compr_blocks(inode, blocks)		do { } while (0)
3735 #define stat_inc_atomic_write(inode)			do { } while (0)
3736 #define stat_dec_atomic_write(inode)			do { } while (0)
3737 #define stat_update_max_atomic_write(inode)		do { } while (0)
3738 #define stat_inc_volatile_write(inode)			do { } while (0)
3739 #define stat_dec_volatile_write(inode)			do { } while (0)
3740 #define stat_update_max_volatile_write(inode)		do { } while (0)
3741 #define stat_inc_meta_count(sbi, blkaddr)		do { } while (0)
3742 #define stat_inc_seg_type(sbi, curseg)			do { } while (0)
3743 #define stat_inc_block_count(sbi, curseg)		do { } while (0)
3744 #define stat_inc_inplace_blocks(sbi)			do { } while (0)
3745 #define stat_inc_seg_count(sbi, type, gc_type)		do { } while (0)
3746 #define stat_inc_tot_blk_count(si, blks)		do { } while (0)
3747 #define stat_inc_data_blk_count(sbi, blks, gc_type)	do { } while (0)
3748 #define stat_inc_node_blk_count(sbi, blks, gc_type)	do { } while (0)
3749 
3750 static inline int f2fs_build_stats(struct f2fs_sb_info *sbi) { return 0; }
3751 static inline void f2fs_destroy_stats(struct f2fs_sb_info *sbi) { }
3752 static inline void __init f2fs_create_root_stats(void) { }
3753 static inline void f2fs_destroy_root_stats(void) { }
3754 static inline void f2fs_update_sit_info(struct f2fs_sb_info *sbi) {}
3755 #endif
3756 
3757 extern const struct file_operations f2fs_dir_operations;
3758 #ifdef CONFIG_UNICODE
3759 extern const struct dentry_operations f2fs_dentry_ops;
3760 #endif
3761 extern const struct file_operations f2fs_file_operations;
3762 extern const struct inode_operations f2fs_file_inode_operations;
3763 extern const struct address_space_operations f2fs_dblock_aops;
3764 extern const struct address_space_operations f2fs_node_aops;
3765 extern const struct address_space_operations f2fs_meta_aops;
3766 extern const struct inode_operations f2fs_dir_inode_operations;
3767 extern const struct inode_operations f2fs_symlink_inode_operations;
3768 extern const struct inode_operations f2fs_encrypted_symlink_inode_operations;
3769 extern const struct inode_operations f2fs_special_inode_operations;
3770 extern struct kmem_cache *f2fs_inode_entry_slab;
3771 
3772 /*
3773  * inline.c
3774  */
3775 bool f2fs_may_inline_data(struct inode *inode);
3776 bool f2fs_may_inline_dentry(struct inode *inode);
3777 void f2fs_do_read_inline_data(struct page *page, struct page *ipage);
3778 void f2fs_truncate_inline_inode(struct inode *inode,
3779 						struct page *ipage, u64 from);
3780 int f2fs_read_inline_data(struct inode *inode, struct page *page);
3781 int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
3782 int f2fs_convert_inline_inode(struct inode *inode);
3783 int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry);
3784 int f2fs_write_inline_data(struct inode *inode, struct page *page);
3785 int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
3786 struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
3787 					const struct f2fs_filename *fname,
3788 					struct page **res_page);
3789 int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
3790 			struct page *ipage);
3791 int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname,
3792 			struct inode *inode, nid_t ino, umode_t mode);
3793 void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry,
3794 				struct page *page, struct inode *dir,
3795 				struct inode *inode);
3796 bool f2fs_empty_inline_dir(struct inode *dir);
3797 int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
3798 			struct fscrypt_str *fstr);
3799 int f2fs_inline_data_fiemap(struct inode *inode,
3800 			struct fiemap_extent_info *fieinfo,
3801 			__u64 start, __u64 len);
3802 
3803 /*
3804  * shrinker.c
3805  */
3806 unsigned long f2fs_shrink_count(struct shrinker *shrink,
3807 			struct shrink_control *sc);
3808 unsigned long f2fs_shrink_scan(struct shrinker *shrink,
3809 			struct shrink_control *sc);
3810 void f2fs_join_shrinker(struct f2fs_sb_info *sbi);
3811 void f2fs_leave_shrinker(struct f2fs_sb_info *sbi);
3812 
3813 /*
3814  * extent_cache.c
3815  */
3816 struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root,
3817 				struct rb_entry *cached_re, unsigned int ofs);
3818 struct rb_node **f2fs_lookup_rb_tree_ext(struct f2fs_sb_info *sbi,
3819 				struct rb_root_cached *root,
3820 				struct rb_node **parent,
3821 				unsigned long long key, bool *left_most);
3822 struct rb_node **f2fs_lookup_rb_tree_for_insert(struct f2fs_sb_info *sbi,
3823 				struct rb_root_cached *root,
3824 				struct rb_node **parent,
3825 				unsigned int ofs, bool *leftmost);
3826 struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root,
3827 		struct rb_entry *cached_re, unsigned int ofs,
3828 		struct rb_entry **prev_entry, struct rb_entry **next_entry,
3829 		struct rb_node ***insert_p, struct rb_node **insert_parent,
3830 		bool force, bool *leftmost);
3831 bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
3832 				struct rb_root_cached *root, bool check_key);
3833 unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink);
3834 void f2fs_init_extent_tree(struct inode *inode, struct page *ipage);
3835 void f2fs_drop_extent_tree(struct inode *inode);
3836 unsigned int f2fs_destroy_extent_node(struct inode *inode);
3837 void f2fs_destroy_extent_tree(struct inode *inode);
3838 bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
3839 			struct extent_info *ei);
3840 void f2fs_update_extent_cache(struct dnode_of_data *dn);
3841 void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
3842 			pgoff_t fofs, block_t blkaddr, unsigned int len);
3843 void f2fs_init_extent_cache_info(struct f2fs_sb_info *sbi);
3844 int __init f2fs_create_extent_cache(void);
3845 void f2fs_destroy_extent_cache(void);
3846 
3847 /*
3848  * sysfs.c
3849  */
3850 int __init f2fs_init_sysfs(void);
3851 void f2fs_exit_sysfs(void);
3852 int f2fs_register_sysfs(struct f2fs_sb_info *sbi);
3853 void f2fs_unregister_sysfs(struct f2fs_sb_info *sbi);
3854 
3855 /* verity.c */
3856 extern const struct fsverity_operations f2fs_verityops;
3857 
3858 /*
3859  * crypto support
3860  */
3861 static inline bool f2fs_encrypted_file(struct inode *inode)
3862 {
3863 	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
3864 }
3865 
3866 static inline void f2fs_set_encrypted_inode(struct inode *inode)
3867 {
3868 #ifdef CONFIG_FS_ENCRYPTION
3869 	file_set_encrypt(inode);
3870 	f2fs_set_inode_flags(inode);
3871 #endif
3872 }
3873 
3874 /*
3875  * Returns true if the reads of the inode's data need to undergo some
3876  * postprocessing step, like decryption or authenticity verification.
3877  */
3878 static inline bool f2fs_post_read_required(struct inode *inode)
3879 {
3880 	return f2fs_encrypted_file(inode) || fsverity_active(inode) ||
3881 		f2fs_compressed_file(inode);
3882 }
3883 
3884 /*
3885  * compress.c
3886  */
3887 #ifdef CONFIG_F2FS_FS_COMPRESSION
3888 bool f2fs_is_compressed_page(struct page *page);
3889 struct page *f2fs_compress_control_page(struct page *page);
3890 int f2fs_prepare_compress_overwrite(struct inode *inode,
3891 			struct page **pagep, pgoff_t index, void **fsdata);
3892 bool f2fs_compress_write_end(struct inode *inode, void *fsdata,
3893 					pgoff_t index, unsigned copied);
3894 int f2fs_truncate_partial_cluster(struct inode *inode, u64 from, bool lock);
3895 void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
3896 bool f2fs_is_compress_backend_ready(struct inode *inode);
3897 int f2fs_init_compress_mempool(void);
3898 void f2fs_destroy_compress_mempool(void);
3899 void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity);
3900 bool f2fs_cluster_is_empty(struct compress_ctx *cc);
3901 bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
3902 void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
3903 int f2fs_write_multi_pages(struct compress_ctx *cc,
3904 						int *submitted,
3905 						struct writeback_control *wbc,
3906 						enum iostat_type io_type);
3907 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index);
3908 int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
3909 				unsigned nr_pages, sector_t *last_block_in_bio,
3910 				bool is_readahead, bool for_write);
3911 struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
3912 void f2fs_free_dic(struct decompress_io_ctx *dic);
3913 void f2fs_decompress_end_io(struct page **rpages,
3914 			unsigned int cluster_size, bool err, bool verity);
3915 int f2fs_init_compress_ctx(struct compress_ctx *cc);
3916 void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse);
3917 void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
3918 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi);
3919 void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi);
3920 int __init f2fs_init_compress_cache(void);
3921 void f2fs_destroy_compress_cache(void);
3922 #else
3923 static inline bool f2fs_is_compressed_page(struct page *page) { return false; }
3924 static inline bool f2fs_is_compress_backend_ready(struct inode *inode)
3925 {
3926 	if (!f2fs_compressed_file(inode))
3927 		return true;
3928 	/* compression is not supported */
3929 	return false;
3930 }
3931 static inline struct page *f2fs_compress_control_page(struct page *page)
3932 {
3933 	WARN_ON_ONCE(1);
3934 	return ERR_PTR(-EINVAL);
3935 }
3936 static inline int f2fs_init_compress_mempool(void) { return 0; }
3937 static inline void f2fs_destroy_compress_mempool(void) { }
3938 static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
3939 static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
3940 static inline int __init f2fs_init_compress_cache(void) { return 0; }
3941 static inline void f2fs_destroy_compress_cache(void) { }
3942 #endif
3943 
3944 static inline void set_compress_context(struct inode *inode)
3945 {
3946 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3947 
3948 	F2FS_I(inode)->i_compress_algorithm =
3949 			F2FS_OPTION(sbi).compress_algorithm;
3950 	F2FS_I(inode)->i_log_cluster_size =
3951 			F2FS_OPTION(sbi).compress_log_size;
3952 	F2FS_I(inode)->i_cluster_size =
3953 			1 << F2FS_I(inode)->i_log_cluster_size;
3954 	F2FS_I(inode)->i_flags |= F2FS_COMPR_FL;
3955 	set_inode_flag(inode, FI_COMPRESSED_FILE);
3956 	stat_inc_compr_inode(inode);
3957 	f2fs_mark_inode_dirty_sync(inode, true);
3958 }
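
/*
 * Worked example (values are illustrative): with compress_log_size = 2,
 * i_cluster_size becomes 1 << 2 = 4, i.e. compression operates on
 * 4-page clusters for this inode.
 */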
3959 
3960 static inline bool f2fs_disable_compressed_file(struct inode *inode)
3961 {
3962 	struct f2fs_inode_info *fi = F2FS_I(inode);
3963 
3964 	if (!f2fs_compressed_file(inode))
3965 		return true;
3966 	if (S_ISREG(inode->i_mode) &&
3967 		(get_dirty_pages(inode) || atomic_read(&fi->i_compr_blocks)))
3968 		return false;
3969 
3970 	fi->i_flags &= ~F2FS_COMPR_FL;
3971 	stat_dec_compr_inode(inode);
3972 	clear_inode_flag(inode, FI_COMPRESSED_FILE);
3973 	f2fs_mark_inode_dirty_sync(inode, true);
3974 	return true;
3975 }
3976 
3977 #define F2FS_FEATURE_FUNCS(name, flagname) \
3978 static inline int f2fs_sb_has_##name(struct f2fs_sb_info *sbi) \
3979 { \
3980 	return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_##flagname); \
3981 }
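
/*
 * For example, F2FS_FEATURE_FUNCS(encrypt, ENCRYPT) below expands to:
 *
 *	static inline int f2fs_sb_has_encrypt(struct f2fs_sb_info *sbi)
 *	{
 *		return F2FS_HAS_FEATURE(sbi, F2FS_FEATURE_ENCRYPT);
 *	}
 */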
3982 
3983 F2FS_FEATURE_FUNCS(encrypt, ENCRYPT);
3984 F2FS_FEATURE_FUNCS(blkzoned, BLKZONED);
3985 F2FS_FEATURE_FUNCS(extra_attr, EXTRA_ATTR);
3986 F2FS_FEATURE_FUNCS(project_quota, PRJQUOTA);
3987 F2FS_FEATURE_FUNCS(inode_chksum, INODE_CHKSUM);
3988 F2FS_FEATURE_FUNCS(flexible_inline_xattr, FLEXIBLE_INLINE_XATTR);
3989 F2FS_FEATURE_FUNCS(quota_ino, QUOTA_INO);
3990 F2FS_FEATURE_FUNCS(inode_crtime, INODE_CRTIME);
3991 F2FS_FEATURE_FUNCS(lost_found, LOST_FOUND);
3992 F2FS_FEATURE_FUNCS(verity, VERITY);
3993 F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
3994 F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
3995 F2FS_FEATURE_FUNCS(compression, COMPRESSION);
3996 
3997 #ifdef CONFIG_BLK_DEV_ZONED
3998 static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
3999 				    block_t blkaddr)
4000 {
4001 	unsigned int zno = blkaddr >> sbi->log_blocks_per_blkz;
4002 
4003 	return test_bit(zno, FDEV(devi).blkz_seq);
4004 }
4005 #endif
4006 
4007 static inline bool f2fs_hw_should_discard(struct f2fs_sb_info *sbi)
4008 {
4009 	return f2fs_sb_has_blkzoned(sbi);
4010 }
4011 
4012 static inline bool f2fs_bdev_support_discard(struct block_device *bdev)
4013 {
4014 	return blk_queue_discard(bdev_get_queue(bdev)) ||
4015 	       bdev_is_zoned(bdev);
4016 }
4017 
4018 static inline bool f2fs_hw_support_discard(struct f2fs_sb_info *sbi)
4019 {
4020 	int i;
4021 
4022 	if (!f2fs_is_multi_device(sbi))
4023 		return f2fs_bdev_support_discard(sbi->sb->s_bdev);
4024 
4025 	for (i = 0; i < sbi->s_ndevs; i++)
4026 		if (f2fs_bdev_support_discard(FDEV(i).bdev))
4027 			return true;
4028 	return false;
4029 }
4030 
4031 static inline bool f2fs_realtime_discard_enable(struct f2fs_sb_info *sbi)
4032 {
4033 	return (test_opt(sbi, DISCARD) && f2fs_hw_support_discard(sbi)) ||
4034 					f2fs_hw_should_discard(sbi);
4035 }
4036 
4037 static inline bool f2fs_hw_is_readonly(struct f2fs_sb_info *sbi)
4038 {
4039 	int i;
4040 
4041 	if (!f2fs_is_multi_device(sbi))
4042 		return bdev_read_only(sbi->sb->s_bdev);
4043 
4044 	for (i = 0; i < sbi->s_ndevs; i++)
4045 		if (bdev_read_only(FDEV(i).bdev))
4046 			return true;
4047 	return false;
4048 }
4049 
4050 static inline bool f2fs_lfs_mode(struct f2fs_sb_info *sbi)
4051 {
4052 	return F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS;
4053 }
4054 
4055 static inline bool f2fs_may_compress(struct inode *inode)
4056 {
4057 	if (IS_SWAPFILE(inode) || f2fs_is_pinned_file(inode) ||
4058 				f2fs_is_atomic_file(inode) ||
4059 				f2fs_is_volatile_file(inode))
4060 		return false;
4061 	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
4062 }
4063 
4064 static inline void f2fs_i_compr_blocks_update(struct inode *inode,
4065 						u64 blocks, bool add)
4066 {
4067 	int diff = F2FS_I(inode)->i_cluster_size - blocks;
4068 	struct f2fs_inode_info *fi = F2FS_I(inode);
4069 
4070 	/* don't update i_compr_blocks if saved blocks were released */
4071 	if (!add && !atomic_read(&fi->i_compr_blocks))
4072 		return;
4073 
4074 	if (add) {
4075 		atomic_add(diff, &fi->i_compr_blocks);
4076 		stat_add_compr_blocks(inode, diff);
4077 	} else {
4078 		atomic_sub(diff, &fi->i_compr_blocks);
4079 		stat_sub_compr_blocks(inode, diff);
4080 	}
4081 	f2fs_mark_inode_dirty_sync(inode, true);
4082 }
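
/*
 * Worked example (illustrative values): with i_cluster_size = 16 and
 * blocks = 4, diff = 16 - 4 = 12, i.e. twelve saved blocks are added to
 * (add == true) or subtracted from (add == false) i_compr_blocks.
 */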
4083 
4084 static inline int block_unaligned_IO(struct inode *inode,
4085 				struct kiocb *iocb, struct iov_iter *iter)
4086 {
4087 	unsigned int i_blkbits = READ_ONCE(inode->i_blkbits);
4088 	unsigned int blocksize_mask = (1 << i_blkbits) - 1;
4089 	loff_t offset = iocb->ki_pos;
4090 	unsigned long align = offset | iov_iter_alignment(iter);
4091 
4092 	return align & blocksize_mask;
4093 }
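
/*
 * Worked example (illustrative): with a 4KB block size the mask is
 * 0xfff, so a direct I/O at offset 8192 whose iovec addresses and
 * lengths are all 4KB aligned returns 0 (aligned), while one starting
 * at offset 6144 returns a non-zero value and is treated as unaligned.
 */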
4094 
4095 static inline int allow_outplace_dio(struct inode *inode,
4096 				struct kiocb *iocb, struct iov_iter *iter)
4097 {
4098 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4099 	int rw = iov_iter_rw(iter);
4100 
4101 	return (f2fs_lfs_mode(sbi) && (rw == WRITE) &&
4102 				!block_unaligned_IO(inode, iocb, iter));
4103 }
4104 
4105 static inline bool f2fs_force_buffered_io(struct inode *inode,
4106 				struct kiocb *iocb, struct iov_iter *iter)
4107 {
4108 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4109 	int rw = iov_iter_rw(iter);
4110 
4111 	if (f2fs_post_read_required(inode))
4112 		return true;
4113 	if (f2fs_is_multi_device(sbi))
4114 		return true;
4115 	/*
4116 	 * for a zoned block device, fall back from direct IO to buffered
4117 	 * IO so that all IOs can be serialized by log-structured writes.
4118 	 */
4119 	if (f2fs_sb_has_blkzoned(sbi))
4120 		return true;
4121 	if (f2fs_lfs_mode(sbi) && (rw == WRITE)) {
4122 		if (block_unaligned_IO(inode, iocb, iter))
4123 			return true;
4124 		if (F2FS_IO_ALIGNED(sbi))
4125 			return true;
4126 	}
4127 	if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED) &&
4128 					!IS_SWAPFILE(inode))
4129 		return true;
4130 
4131 	return false;
4132 }
4133 
4134 #ifdef CONFIG_F2FS_FAULT_INJECTION
4135 extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
4136 							unsigned int type);
4137 #else
4138 #define f2fs_build_fault_attr(sbi, rate, type)		do { } while (0)
4139 #endif
4140 
4141 static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
4142 {
4143 #ifdef CONFIG_QUOTA
4144 	if (f2fs_sb_has_quota_ino(sbi))
4145 		return true;
4146 	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
4147 		F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
4148 		F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
4149 		return true;
4150 #endif
4151 	return false;
4152 }
4153 
4154 #define EFSBADCRC	EBADMSG		/* Bad CRC detected */
4155 #define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */
4156 
4157 #endif /* _LINUX_F2FS_H */
4158