// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>
#include <linux/unicode.h>
#include <linux/part_stat.h>
#include <linux/zstd.h>
#include <linux/lz4.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "iostat.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct kmem_cache *f2fs_inode_cachep;

#ifdef CONFIG_F2FS_FAULT_INJECTION

const char *f2fs_fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_KVMALLOC]	= "kvmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_PAGE_GET]	= "page get",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
	[FAULT_TRUNCATE]	= "truncate fail",
	[FAULT_READ_IO]		= "read IO error",
	[FAULT_CHECKPOINT]	= "checkpoint error",
	[FAULT_DISCARD]		= "discard error",
	[FAULT_WRITE_IO]	= "write IO error",
	[FAULT_SLAB_ALLOC]	= "slab alloc",
	[FAULT_DQUOT_INIT]	= "dquot initialize",
	[FAULT_LOCK_OP]		= "lock_op",
	[FAULT_BLKADDR]		= "invalid blkaddr",
};

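/*
 * Configure fault injection for this mount: a non-zero @rate resets the
 * operation counter and sets how frequently time_to_inject() fires, a
 * non-zero @type installs the bitmask of FAULT_* types to inject, and
 * zero for both clears the whole fault_info, disabling injection.
 */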
void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
							unsigned int type)
{
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
	}

	if (type)
		ffi->inject_type = type;

	if (!rate && !type)
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
}
#endif

/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};

enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_nodiscard,
	Opt_noheap,
	Opt_heap,
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_noinline_xattr,
	Opt_inline_xattr_size,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_noinline_dentry,
	Opt_flush_merge,
	Opt_noflush_merge,
	Opt_barrier,
	Opt_nobarrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_noextent_cache,
	Opt_noinline_data,
	Opt_data_flush,
	Opt_reserve_root,
	Opt_resgid,
	Opt_resuid,
	Opt_mode,
	Opt_io_size_bits,
	Opt_fault_injection,
	Opt_fault_type,
	Opt_lazytime,
	Opt_nolazytime,
	Opt_quota,
	Opt_noquota,
	Opt_usrquota,
	Opt_grpquota,
	Opt_prjquota,
	Opt_usrjquota,
	Opt_grpjquota,
	Opt_prjjquota,
	Opt_offusrjquota,
	Opt_offgrpjquota,
	Opt_offprjjquota,
	Opt_jqfmt_vfsold,
	Opt_jqfmt_vfsv0,
	Opt_jqfmt_vfsv1,
	Opt_alloc,
	Opt_fsync,
	Opt_test_dummy_encryption,
	Opt_inlinecrypt,
	Opt_checkpoint_disable,
	Opt_checkpoint_disable_cap,
	Opt_checkpoint_disable_cap_perc,
	Opt_checkpoint_enable,
	Opt_checkpoint_merge,
	Opt_nocheckpoint_merge,
	Opt_compress_algorithm,
	Opt_compress_log_size,
	Opt_compress_extension,
	Opt_nocompress_extension,
	Opt_compress_chksum,
	Opt_compress_mode,
	Opt_compress_cache,
	Opt_atgc,
	Opt_gc_merge,
	Opt_nogc_merge,
	Opt_discard_unit,
	Opt_memory_mode,
	Opt_age_extent_cache,
	Opt_err,
};

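/*
 * Mount option tokens for match_token(); the %s/%u patterns capture the
 * option argument into a substring_t consumed by parse_options() below.
 */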
static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_heap, "heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_xattr_size, "inline_xattr_size=%u"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_reserve_root, "reserve_root=%u"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_fault_type, "fault_type=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_prjjquota, "prjjquota=%s"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_offprjjquota, "prjjquota="},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_alloc, "alloc_mode=%s"},
	{Opt_fsync, "fsync_mode=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_inlinecrypt, "inlinecrypt"},
	{Opt_checkpoint_disable, "checkpoint=disable"},
	{Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
	{Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
	{Opt_checkpoint_enable, "checkpoint=enable"},
	{Opt_checkpoint_merge, "checkpoint_merge"},
	{Opt_nocheckpoint_merge, "nocheckpoint_merge"},
	{Opt_compress_algorithm, "compress_algorithm=%s"},
	{Opt_compress_log_size, "compress_log_size=%u"},
	{Opt_compress_extension, "compress_extension=%s"},
	{Opt_nocompress_extension, "nocompress_extension=%s"},
	{Opt_compress_chksum, "compress_chksum"},
	{Opt_compress_mode, "compress_mode=%s"},
	{Opt_compress_cache, "compress_cache"},
	{Opt_atgc, "atgc"},
	{Opt_gc_merge, "gc_merge"},
	{Opt_nogc_merge, "nogc_merge"},
	{Opt_discard_unit, "discard_unit=%s"},
	{Opt_memory_mode, "memory=%s"},
	{Opt_age_extent_cache, "age_extent_cache"},
	{Opt_err, NULL},
};

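/*
 * Core logging helper: it preserves the caller's KERN_* level and
 * prefixes every message with "F2FS-fs (<device>):", which is what the
 * f2fs_err()/f2fs_warn()/f2fs_info() wrappers rely on throughout.
 */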
void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int level;

	va_start(args, fmt);

	level = printk_get_level(fmt);
	vaf.fmt = printk_skip_level(fmt);
	vaf.va = &args;
	printk("%c%cF2FS-fs (%s): %pV\n",
	       KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);

	va_end(args);
}

#ifdef CONFIG_UNICODE
static const struct f2fs_sb_encodings {
	__u16 magic;
	char *name;
	char *version;
} f2fs_sb_encoding_map[] = {
	{F2FS_ENC_UTF8_12_1, "utf8", "12.1.0"},
};

static int f2fs_sb_read_encoding(const struct f2fs_super_block *sb,
				 const struct f2fs_sb_encodings **encoding,
				 __u16 *flags)
{
	__u16 magic = le16_to_cpu(sb->s_encoding);
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++)
		if (magic == f2fs_sb_encoding_map[i].magic)
			break;

	if (i >= ARRAY_SIZE(f2fs_sb_encoding_map))
		return -EINVAL;

	*encoding = &f2fs_sb_encoding_map[i];
	*flags = le16_to_cpu(sb->s_encoding_flags);

	return 0;
}

struct kmem_cache *f2fs_cf_name_slab;
static int __init f2fs_create_casefold_cache(void)
{
	f2fs_cf_name_slab = f2fs_kmem_cache_create("f2fs_casefolded_name",
							F2FS_NAME_LEN);
	return f2fs_cf_name_slab ? 0 : -ENOMEM;
}

static void f2fs_destroy_casefold_cache(void)
{
	kmem_cache_destroy(f2fs_cf_name_slab);
}
#else
static int __init f2fs_create_casefold_cache(void) { return 0; }
static void f2fs_destroy_casefold_cache(void) { }
#endif

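/*
 * Clamp reserve_root to at most 12.5% of user blocks (and never more
 * than what is left after reserved_blocks), and warn when resuid/resgid
 * were given without reserve_root, since they would have no effect.
 */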
static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
	block_t limit = min((sbi->user_block_count >> 3),
			sbi->user_block_count - sbi->reserved_blocks);

	/* limit is 12.5% */
	if (test_opt(sbi, RESERVE_ROOT) &&
			F2FS_OPTION(sbi).root_reserved_blocks > limit) {
		F2FS_OPTION(sbi).root_reserved_blocks = limit;
		f2fs_info(sbi, "Reduce reserved blocks for root = %u",
			  F2FS_OPTION(sbi).root_reserved_blocks);
	}
	if (!test_opt(sbi, RESERVE_ROOT) &&
		(!uid_eq(F2FS_OPTION(sbi).s_resuid,
				make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
		!gid_eq(F2FS_OPTION(sbi).s_resgid,
				make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
		f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
			  from_kuid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resuid),
			  from_kgid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resgid));
}

static inline int adjust_reserved_segment(struct f2fs_sb_info *sbi)
{
	unsigned int sec_blks = sbi->blocks_per_seg * sbi->segs_per_sec;
	unsigned int avg_vblocks;
	unsigned int wanted_reserved_segments;
	block_t avail_user_block_count;

	if (!F2FS_IO_ALIGNED(sbi))
		return 0;

	/* average valid block count in section in worst case */
	avg_vblocks = sec_blks / F2FS_IO_SIZE(sbi);

	/*
	 * we need enough free space when migrating one section in worst case
	 */
	wanted_reserved_segments = (F2FS_IO_SIZE(sbi) / avg_vblocks) *
						reserved_segments(sbi);
	wanted_reserved_segments -= reserved_segments(sbi);

	avail_user_block_count = sbi->user_block_count -
				sbi->current_reserved_blocks -
				F2FS_OPTION(sbi).root_reserved_blocks;

	if (wanted_reserved_segments * sbi->blocks_per_seg >
					avail_user_block_count) {
		f2fs_err(sbi, "IO align feature can't grab additional reserved segment: %u, available segments: %u",
			wanted_reserved_segments,
			avail_user_block_count >> sbi->log_blocks_per_seg);
		return -ENOSPC;
	}

	SM_I(sbi)->additional_reserved_segments = wanted_reserved_segments;

	f2fs_info(sbi, "IO align feature needs additional reserved segment: %u",
			 wanted_reserved_segments);

	return 0;
}

static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
{
	if (!F2FS_OPTION(sbi).unusable_cap_perc)
		return;

	if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
		F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count;
	else
		F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) *
					F2FS_OPTION(sbi).unusable_cap_perc;

	f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%",
			F2FS_OPTION(sbi).unusable_cap,
			F2FS_OPTION(sbi).unusable_cap_perc);
}

static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
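/*
 * Remember the journaled quota file name for @qtype, as given by e.g.
 * "usrjquota=aquota.user,jqfmt=vfsv1" (an illustrative file name; the
 * file only has to live in the filesystem root). Rejected when quotas
 * are already loaded with different options, and ignored when the
 * quota_ino feature makes external quota files unnecessary.
 */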
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
							substring_t *args)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *qname;
	int ret = -EINVAL;

	if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
		return -EINVAL;
	}
	if (f2fs_sb_has_quota_ino(sbi)) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
		return 0;
	}

	qname = match_strdup(args);
	if (!qname) {
		f2fs_err(sbi, "Not enough memory for storing quotafile name");
		return -ENOMEM;
	}
	if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
		if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
			ret = 0;
		else
			f2fs_err(sbi, "%s quota file already specified",
				 QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		f2fs_err(sbi, "quotafile must be on filesystem root");
		goto errout;
	}
	F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
	set_opt(sbi, QUOTA);
	return 0;
errout:
	kfree(qname);
	return ret;
}

static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
		return -EINVAL;
	}
	kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
	F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
	return 0;
}

static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
		f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
		return -1;
	}
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
		if (test_opt(sbi, USRQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
			clear_opt(sbi, USRQUOTA);

		if (test_opt(sbi, GRPQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
			clear_opt(sbi, GRPQUOTA);

		if (test_opt(sbi, PRJQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
			clear_opt(sbi, PRJQUOTA);

		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
				test_opt(sbi, PRJQUOTA)) {
			f2fs_err(sbi, "old and new quota format mixing");
			return -1;
		}

		if (!F2FS_OPTION(sbi).s_jquota_fmt) {
			f2fs_err(sbi, "journaled quota format not specified");
			return -1;
		}
	}

	if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
		F2FS_OPTION(sbi).s_jquota_fmt = 0;
	}
	return 0;
}
#endif

static int f2fs_set_test_dummy_encryption(struct super_block *sb,
					  const char *opt,
					  const substring_t *arg,
					  bool is_remount)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct fs_parameter param = {
		.type = fs_value_is_string,
		.string = arg->from ? arg->from : "",
	};
	struct fscrypt_dummy_policy *policy =
		&F2FS_OPTION(sbi).dummy_enc_policy;
	int err;

	if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) {
		f2fs_warn(sbi, "test_dummy_encryption option not supported");
		return -EINVAL;
	}

	if (!f2fs_sb_has_encrypt(sbi)) {
		f2fs_err(sbi, "Encrypt feature is off");
		return -EINVAL;
	}

	/*
	 * This mount option is just for testing, and it's not worthwhile to
	 * implement the extra complexity (e.g. RCU protection) that would be
	 * needed to allow it to be set or changed during remount.  We do allow
	 * it to be specified during remount, but only if there is no change.
	 */
	if (is_remount && !fscrypt_is_dummy_policy_set(policy)) {
		f2fs_warn(sbi, "Can't set test_dummy_encryption on remount");
		return -EINVAL;
	}

	err = fscrypt_parse_test_dummy_encryption(&param, policy);
	if (err) {
		if (err == -EEXIST)
			f2fs_warn(sbi,
				  "Can't change test_dummy_encryption on remount");
		else if (err == -EINVAL)
			f2fs_warn(sbi, "Value of option \"%s\" is unrecognized",
				  opt);
		else
			f2fs_warn(sbi, "Error processing option \"%s\" [%d]",
				  opt, err);
		return -EINVAL;
	}
	f2fs_warn(sbi, "Test dummy encryption mode enabled");
	return 0;
}

#ifdef CONFIG_F2FS_FS_COMPRESSION
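/*
 * Check whether @new_ext already exists, case-insensitively, in the
 * compress (@is_ext == true) or nocompress extension list.
 */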
static bool is_compress_extension_exist(struct f2fs_sb_info *sbi,
					const char *new_ext, bool is_ext)
{
	unsigned char (*ext)[F2FS_EXTENSION_LEN];
	int ext_cnt;
	int i;

	if (is_ext) {
		ext = F2FS_OPTION(sbi).extensions;
		ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
	} else {
		ext = F2FS_OPTION(sbi).noextensions;
		ext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
	}

	for (i = 0; i < ext_cnt; i++) {
		if (!strcasecmp(new_ext, ext[i]))
			return true;
	}

	return false;
}

/*
 * 1. The same extension name cannot appear in both the compress and
 * non-compress extension lists at the same time.
 * 2. If the compress extension covers all files, the types named by the
 * non-compress extension are treated as special cases and are not
 * compressed.
 * 3. The non-compress extension must not cover all files.
 */
static int f2fs_test_compress_extension(struct f2fs_sb_info *sbi)
{
	unsigned char (*ext)[F2FS_EXTENSION_LEN];
	unsigned char (*noext)[F2FS_EXTENSION_LEN];
	int ext_cnt, noext_cnt, index = 0, no_index = 0;

	ext = F2FS_OPTION(sbi).extensions;
	ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
	noext = F2FS_OPTION(sbi).noextensions;
	noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;

	if (!noext_cnt)
		return 0;

	for (no_index = 0; no_index < noext_cnt; no_index++) {
		if (!strcasecmp("*", noext[no_index])) {
			f2fs_info(sbi, "Don't allow the nocompress extension specifies all files");
			return -EINVAL;
		}
		for (index = 0; index < ext_cnt; index++) {
			if (!strcasecmp(ext[index], noext[no_index])) {
				f2fs_info(sbi, "Don't allow the same extension %s appear in both compress and nocompress extension",
						ext[index]);
				return -EINVAL;
			}
		}
	}
	return 0;
}

#ifdef CONFIG_F2FS_FS_LZ4
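/*
 * Parse an optional LZ4HC level from "compress_algorithm=lz4:<level>",
 * e.g. "lz4:6" (an illustrative level inside the LZ4HC range); a bare
 * "lz4" selects plain LZ4 with level 0.
 */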
static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
{
#ifdef CONFIG_F2FS_FS_LZ4HC
	unsigned int level;
#endif

	if (strlen(str) == 3) {
		F2FS_OPTION(sbi).compress_level = 0;
		return 0;
	}

#ifdef CONFIG_F2FS_FS_LZ4HC
	str += 3;

	if (str[0] != ':') {
		f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
		return -EINVAL;
	}
	if (kstrtouint(str + 1, 10, &level))
		return -EINVAL;

	if (level < LZ4HC_MIN_CLEVEL || level > LZ4HC_MAX_CLEVEL) {
		f2fs_info(sbi, "invalid lz4hc compress level: %d", level);
		return -EINVAL;
	}

	F2FS_OPTION(sbi).compress_level = level;
	return 0;
#else
	f2fs_info(sbi, "kernel doesn't support lz4hc compression");
	return -EINVAL;
#endif
}
#endif

#ifdef CONFIG_F2FS_FS_ZSTD
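/*
 * Parse an optional zstd level from "compress_algorithm=zstd:<level>",
 * e.g. "zstd:9" (an illustrative level; 1 up to ZSTD_maxCLevel() is
 * accepted); a bare "zstd" keeps the default level 0.
 */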
static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
{
	unsigned int level;
	int len = 4;

	if (strlen(str) == len) {
		F2FS_OPTION(sbi).compress_level = 0;
		return 0;
	}

	str += len;

	if (str[0] != ':') {
		f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
		return -EINVAL;
	}
	if (kstrtouint(str + 1, 10, &level))
		return -EINVAL;

	if (!level || level > ZSTD_maxCLevel()) {
		f2fs_info(sbi, "invalid zstd compress level: %d", level);
		return -EINVAL;
	}

	F2FS_OPTION(sbi).compress_level = level;
	return 0;
}
#endif
#endif

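/*
 * Parse the comma-separated mount option string into F2FS_OPTION(sbi)
 * and then cross-check the combined result (quota, zoned, compression,
 * inline xattr and lfs constraints) under the default_check label.
 * Used for both mount and remount; @is_remount relaxes or tightens a
 * few of the checks.
 */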
static int parse_options(struct super_block *sb, char *options, bool is_remount)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	substring_t args[MAX_OPT_ARGS];
#ifdef CONFIG_F2FS_FS_COMPRESSION
	unsigned char (*ext)[F2FS_EXTENSION_LEN];
	unsigned char (*noext)[F2FS_EXTENSION_LEN];
	int ext_cnt, noext_cnt;
#endif
	char *p, *name;
	int arg = 0;
	kuid_t uid;
	kgid_t gid;
	int ret;

	if (!options)
		goto default_check;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "on")) {
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
			} else if (!strcmp(name, "off")) {
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
			} else if (!strcmp(name, "sync")) {
				F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, NORECOVERY);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			if (!f2fs_hw_support_discard(sbi)) {
				f2fs_warn(sbi, "device does not support discard");
				break;
			}
			set_opt(sbi, DISCARD);
			break;
		case Opt_nodiscard:
			if (f2fs_hw_should_discard(sbi)) {
				f2fs_warn(sbi, "discard is required for zoned block devices");
				return -EINVAL;
			}
			clear_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
		case Opt_heap:
			clear_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
		case Opt_noinline_xattr:
			clear_opt(sbi, INLINE_XATTR);
			break;
		case Opt_inline_xattr_size:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			set_opt(sbi, INLINE_XATTR_SIZE);
			F2FS_OPTION(sbi).inline_xattr_size = arg;
			break;
#else
		case Opt_user_xattr:
			f2fs_info(sbi, "user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_info(sbi, "nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_info(sbi, "inline_xattr options not supported");
			break;
		case Opt_noinline_xattr:
			f2fs_info(sbi, "noinline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_info(sbi, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_info(sbi, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 &&
				arg != NR_CURSEG_PERSIST_TYPE)
				return -EINVAL;
			F2FS_OPTION(sbi).active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_barrier:
			clear_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, READ_EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, READ_EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		case Opt_reserve_root:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (test_opt(sbi, RESERVE_ROOT)) {
				f2fs_info(sbi, "Preserve previous reserve_root=%u",
					  F2FS_OPTION(sbi).root_reserved_blocks);
			} else {
				F2FS_OPTION(sbi).root_reserved_blocks = arg;
				set_opt(sbi, RESERVE_ROOT);
			}
			break;
		case Opt_resuid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			uid = make_kuid(current_user_ns(), arg);
			if (!uid_valid(uid)) {
				f2fs_err(sbi, "Invalid uid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resuid = uid;
			break;
		case Opt_resgid:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			gid = make_kgid(current_user_ns(), arg);
			if (!gid_valid(gid)) {
				f2fs_err(sbi, "Invalid gid value %d", arg);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).s_resgid = gid;
			break;
		case Opt_mode:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "adaptive")) {
				if (f2fs_sb_has_blkzoned(sbi)) {
					f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
					kfree(name);
					return -EINVAL;
				}
				F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
			} else if (!strcmp(name, "lfs")) {
				F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
			} else if (!strcmp(name, "fragment:segment")) {
				F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_SEG;
			} else if (!strcmp(name, "fragment:block")) {
				F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_BLK;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_io_size_bits:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_VECS)) {
				f2fs_warn(sbi, "Not support %ld, larger than %d",
					BIT(arg), BIO_MAX_VECS);
				return -EINVAL;
			}
			F2FS_OPTION(sbi).write_io_size_bits = arg;
			break;
#ifdef CONFIG_F2FS_FAULT_INJECTION
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
			set_opt(sbi, FAULT_INJECTION);
			break;

		case Opt_fault_type:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			f2fs_build_fault_attr(sbi, 0, arg);
			set_opt(sbi, FAULT_INJECTION);
			break;
#else
		case Opt_fault_injection:
			f2fs_info(sbi, "fault_injection options not supported");
			break;

		case Opt_fault_type:
			f2fs_info(sbi, "fault_type options not supported");
			break;
#endif
		case Opt_lazytime:
			sb->s_flags |= SB_LAZYTIME;
			break;
		case Opt_nolazytime:
			sb->s_flags &= ~SB_LAZYTIME;
			break;
#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			set_opt(sbi, USRQUOTA);
			break;
		case Opt_grpquota:
			set_opt(sbi, GRPQUOTA);
			break;
		case Opt_prjquota:
			set_opt(sbi, PRJQUOTA);
			break;
		case Opt_usrjquota:
			ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_grpjquota:
			ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_prjjquota:
			ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
			if (ret)
				return ret;
			break;
		case Opt_offusrjquota:
			ret = f2fs_clear_qf_name(sb, USRQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offgrpjquota:
			ret = f2fs_clear_qf_name(sb, GRPQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_offprjjquota:
			ret = f2fs_clear_qf_name(sb, PRJQUOTA);
			if (ret)
				return ret;
			break;
		case Opt_jqfmt_vfsold:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
			break;
		case Opt_jqfmt_vfsv0:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
			break;
		case Opt_jqfmt_vfsv1:
			F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
			break;
		case Opt_noquota:
			clear_opt(sbi, QUOTA);
			clear_opt(sbi, USRQUOTA);
			clear_opt(sbi, GRPQUOTA);
			clear_opt(sbi, PRJQUOTA);
			break;
#else
		case Opt_quota:
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_prjquota:
		case Opt_usrjquota:
		case Opt_grpjquota:
		case Opt_prjjquota:
		case Opt_offusrjquota:
		case Opt_offgrpjquota:
		case Opt_offprjjquota:
		case Opt_jqfmt_vfsold:
		case Opt_jqfmt_vfsv0:
		case Opt_jqfmt_vfsv1:
		case Opt_noquota:
			f2fs_info(sbi, "quota operations not supported");
			break;
#endif
		case Opt_alloc:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;

			if (!strcmp(name, "default")) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
			} else if (!strcmp(name, "reuse")) {
				F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_fsync:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "posix")) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
			} else if (!strcmp(name, "strict")) {
				F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
			} else if (!strcmp(name, "nobarrier")) {
				F2FS_OPTION(sbi).fsync_mode =
							FSYNC_MODE_NOBARRIER;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_test_dummy_encryption:
			ret = f2fs_set_test_dummy_encryption(sb, p, &args[0],
							     is_remount);
			if (ret)
				return ret;
			break;
		case Opt_inlinecrypt:
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
			sb->s_flags |= SB_INLINECRYPT;
#else
			f2fs_info(sbi, "inline encryption not supported");
#endif
			break;
		case Opt_checkpoint_disable_cap_perc:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg < 0 || arg > 100)
				return -EINVAL;
			F2FS_OPTION(sbi).unusable_cap_perc = arg;
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_disable_cap:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			F2FS_OPTION(sbi).unusable_cap = arg;
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_disable:
			set_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_enable:
			clear_opt(sbi, DISABLE_CHECKPOINT);
			break;
		case Opt_checkpoint_merge:
			set_opt(sbi, MERGE_CHECKPOINT);
			break;
		case Opt_nocheckpoint_merge:
			clear_opt(sbi, MERGE_CHECKPOINT);
			break;
#ifdef CONFIG_F2FS_FS_COMPRESSION
		case Opt_compress_algorithm:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "lzo")) {
#ifdef CONFIG_F2FS_FS_LZO
				F2FS_OPTION(sbi).compress_level = 0;
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_LZO;
#else
				f2fs_info(sbi, "kernel doesn't support lzo compression");
#endif
			} else if (!strncmp(name, "lz4", 3)) {
#ifdef CONFIG_F2FS_FS_LZ4
				ret = f2fs_set_lz4hc_level(sbi, name);
				if (ret) {
					kfree(name);
					return -EINVAL;
				}
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_LZ4;
#else
				f2fs_info(sbi, "kernel doesn't support lz4 compression");
#endif
			} else if (!strncmp(name, "zstd", 4)) {
#ifdef CONFIG_F2FS_FS_ZSTD
				ret = f2fs_set_zstd_level(sbi, name);
				if (ret) {
					kfree(name);
					return -EINVAL;
				}
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_ZSTD;
#else
				f2fs_info(sbi, "kernel doesn't support zstd compression");
#endif
			} else if (!strcmp(name, "lzo-rle")) {
#ifdef CONFIG_F2FS_FS_LZORLE
				F2FS_OPTION(sbi).compress_level = 0;
				F2FS_OPTION(sbi).compress_algorithm =
								COMPRESS_LZORLE;
#else
				f2fs_info(sbi, "kernel doesn't support lzorle compression");
#endif
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_compress_log_size:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg < MIN_COMPRESS_LOG_SIZE ||
				arg > MAX_COMPRESS_LOG_SIZE) {
				f2fs_err(sbi,
					"Compress cluster log size is out of range");
				return -EINVAL;
			}
			F2FS_OPTION(sbi).compress_log_size = arg;
			break;
		case Opt_compress_extension:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;

			ext = F2FS_OPTION(sbi).extensions;
			ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;

			if (strlen(name) >= F2FS_EXTENSION_LEN ||
				ext_cnt >= COMPRESS_EXT_NUM) {
				f2fs_err(sbi,
					"invalid extension length/number");
				kfree(name);
				return -EINVAL;
			}

			if (is_compress_extension_exist(sbi, name, true)) {
				kfree(name);
				break;
			}

			strcpy(ext[ext_cnt], name);
			F2FS_OPTION(sbi).compress_ext_cnt++;
			kfree(name);
			break;
		case Opt_nocompress_extension:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;

			noext = F2FS_OPTION(sbi).noextensions;
			noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;

			if (strlen(name) >= F2FS_EXTENSION_LEN ||
				noext_cnt >= COMPRESS_EXT_NUM) {
				f2fs_err(sbi,
					"invalid extension length/number");
				kfree(name);
				return -EINVAL;
			}

			if (is_compress_extension_exist(sbi, name, false)) {
				kfree(name);
				break;
			}

			strcpy(noext[noext_cnt], name);
			F2FS_OPTION(sbi).nocompress_ext_cnt++;
			kfree(name);
			break;
		case Opt_compress_chksum:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			F2FS_OPTION(sbi).compress_chksum = true;
			break;
		case Opt_compress_mode:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "fs")) {
				F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
			} else if (!strcmp(name, "user")) {
				F2FS_OPTION(sbi).compress_mode = COMPR_MODE_USER;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_compress_cache:
			if (!f2fs_sb_has_compression(sbi)) {
				f2fs_info(sbi, "Image doesn't support compression");
				break;
			}
			set_opt(sbi, COMPRESS_CACHE);
			break;
#else
		case Opt_compress_algorithm:
		case Opt_compress_log_size:
		case Opt_compress_extension:
		case Opt_nocompress_extension:
		case Opt_compress_chksum:
		case Opt_compress_mode:
		case Opt_compress_cache:
			f2fs_info(sbi, "compression options not supported");
			break;
#endif
		case Opt_atgc:
			set_opt(sbi, ATGC);
			break;
		case Opt_gc_merge:
			set_opt(sbi, GC_MERGE);
			break;
		case Opt_nogc_merge:
			clear_opt(sbi, GC_MERGE);
			break;
		case Opt_discard_unit:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "block")) {
				F2FS_OPTION(sbi).discard_unit =
						DISCARD_UNIT_BLOCK;
			} else if (!strcmp(name, "segment")) {
				F2FS_OPTION(sbi).discard_unit =
						DISCARD_UNIT_SEGMENT;
			} else if (!strcmp(name, "section")) {
				F2FS_OPTION(sbi).discard_unit =
						DISCARD_UNIT_SECTION;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_memory_mode:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (!strcmp(name, "normal")) {
				F2FS_OPTION(sbi).memory_mode =
						MEMORY_MODE_NORMAL;
			} else if (!strcmp(name, "low")) {
				F2FS_OPTION(sbi).memory_mode =
						MEMORY_MODE_LOW;
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_age_extent_cache:
			set_opt(sbi, AGE_EXTENT_CACHE);
			break;
		default:
			f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
				 p);
			return -EINVAL;
		}
	}
default_check:
#ifdef CONFIG_QUOTA
	if (f2fs_check_quota_options(sbi))
		return -EINVAL;
#else
	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
		return -EINVAL;
	}
	if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
		return -EINVAL;
	}
#endif
#ifndef CONFIG_UNICODE
	if (f2fs_sb_has_casefold(sbi)) {
		f2fs_err(sbi,
			"Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
		return -EINVAL;
	}
#endif
	/*
	 * The BLKZONED feature indicates that the drive was formatted with
	 * zone alignment optimization. This is optional for host-aware
	 * devices, but mandatory for host-managed zoned block devices.
	 */
	if (f2fs_sb_has_blkzoned(sbi)) {
#ifdef CONFIG_BLK_DEV_ZONED
		if (F2FS_OPTION(sbi).discard_unit !=
						DISCARD_UNIT_SECTION) {
			f2fs_info(sbi, "Zoned block device doesn't need small discard, set discard_unit=section by default");
			F2FS_OPTION(sbi).discard_unit =
					DISCARD_UNIT_SECTION;
		}
#else
		f2fs_err(sbi, "Zoned block device support is not enabled");
		return -EINVAL;
#endif
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_test_compress_extension(sbi)) {
		f2fs_err(sbi, "invalid compress or nocompress extension");
		return -EINVAL;
	}
#endif

	if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) {
		f2fs_err(sbi, "Should set mode=lfs with %luKB-sized IO",
			 F2FS_IO_SIZE_KB(sbi));
		return -EINVAL;
	}

	if (test_opt(sbi, INLINE_XATTR_SIZE)) {
		int min_size, max_size;

		if (!f2fs_sb_has_extra_attr(sbi) ||
			!f2fs_sb_has_flexible_inline_xattr(sbi)) {
			f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
			return -EINVAL;
		}
		if (!test_opt(sbi, INLINE_XATTR)) {
			f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
			return -EINVAL;
		}

		min_size = sizeof(struct f2fs_xattr_header) / sizeof(__le32);
		max_size = MAX_INLINE_XATTR_SIZE;

		if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
				F2FS_OPTION(sbi).inline_xattr_size > max_size) {
			f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d",
				 min_size, max_size);
			return -EINVAL;
		}
	}

	if (test_opt(sbi, DISABLE_CHECKPOINT) && f2fs_lfs_mode(sbi)) {
		f2fs_err(sbi, "LFS is not compatible with checkpoint=disable");
		return -EINVAL;
	}

	if (test_opt(sbi, ATGC) && f2fs_lfs_mode(sbi)) {
		f2fs_err(sbi, "LFS is not compatible with ATGC");
		return -EINVAL;
	}

	if (f2fs_is_readonly(sbi) && test_opt(sbi, FLUSH_MERGE)) {
		f2fs_err(sbi, "FLUSH_MERGE not compatible with readonly mode");
		return -EINVAL;
	}

	if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
		f2fs_err(sbi, "Allow to mount readonly mode only");
		return -EROFS;
	}
	return 0;
}

static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	if (time_to_inject(F2FS_SB(sb), FAULT_SLAB_ALLOC))
		return NULL;

	fi = f2fs_kmem_cache_alloc(f2fs_inode_cachep,
				GFP_F2FS_ZERO, false, F2FS_SB(sb));
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	atomic_set(&fi->dirty_pages, 0);
	atomic_set(&fi->i_compr_blocks, 0);
	init_f2fs_rwsem(&fi->i_sem);
	spin_lock_init(&fi->i_size_lock);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	init_f2fs_rwsem(&fi->i_gc_rwsem[READ]);
	init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]);
	init_f2fs_rwsem(&fi->i_xattr_sem);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	return &fi->vfs_inode;
}

static int f2fs_drop_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	/*
	 * during filesystem shutdown, if checkpoint is disabled,
	 * drop useless meta/node dirty pages.
	 */
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi)) {
			trace_f2fs_drop_inode(inode, 1);
			return 1;
		}
	}

	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid a simultaneous evict_inode call */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* fi->extent_tree should remain for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
					inode, NULL, 0, DATA);
			truncate_inode_pages_final(inode->i_mapping);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		trace_f2fs_drop_inode(inode, 0);
		return 0;
	}
	ret = generic_drop_inode(inode);
	if (!ret)
		ret = fscrypt_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
	return ret;
}

int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		ret = 1;
	} else {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	}
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
	return ret;
}

void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}

/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);
}

static void f2fs_free_inode(struct inode *inode)
{
	fscrypt_free_inode(inode);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->total_valid_inode_count);
	percpu_counter_destroy(&sbi->rf_node_block_count);
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
}

static void destroy_device_list(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
		kvfree(FDEV(i).blkz_seq);
#endif
	}
	kvfree(sbi->devs);
}

static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int i;
	bool done;

	/* unregister procfs/sysfs entries in advance to avoid a race */
	f2fs_unregister_sysfs(sbi);

	f2fs_quota_off_umount(sb);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * Flush all issued checkpoints and stop the checkpoint issue thread.
	 * After that, all checkpoints should be done by each process context.
	 */
	f2fs_stop_ckpt_thread(sbi);

	/*
	 * We don't need to do a checkpoint when the superblock is clean.
	 * But if the previous checkpoint was not done by umount, we need to
	 * do a clean checkpoint again.
	 */
	if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		f2fs_write_checkpoint(sbi, &cpc);
	}

	/* be sure to wait for any on-going discard commands */
	done = f2fs_issue_discard_timeout(sbi);
	if (f2fs_realtime_discard_enable(sbi) && !sbi->discard_blks && done) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		};
		f2fs_write_checkpoint(sbi, &cpc);
	}

	/*
	 * Normally the superblock is clean, so we need to release this.
	 * In addition, EIO will skip the checkpoint, so we need this as well.
	 */
	f2fs_release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* in our cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_writes(sbi);

	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);

	f2fs_bug_on(sbi, sbi->fsync_node_num);

	f2fs_destroy_compress_inode(sbi);

	iput(sbi->node_inode);
	sbi->node_inode = NULL;

	iput(sbi->meta_inode);
	sbi->meta_inode = NULL;

	/*
	 * iput() can update stat information if f2fs_write_checkpoint()
	 * above failed with an error.
	 */
	f2fs_destroy_stats(sbi);

	/* destroy f2fs internal modules */
	f2fs_destroy_node_manager(sbi);
	f2fs_destroy_segment_manager(sbi);

	f2fs_destroy_post_read_wq(sbi);

	kvfree(sbi->ckpt);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);

	destroy_device_list(sbi);
	f2fs_destroy_page_array_cache(sbi);
	f2fs_destroy_xattr_caches(sbi);
	mempool_destroy(sbi->write_io_dummy);
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
	fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
	destroy_percpu_info(sbi);
	f2fs_destroy_iostat(sbi);
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kvfree(sbi->write_io[i]);
#ifdef CONFIG_UNICODE
	utf8_unload(sb->s_encoding);
#endif
	kfree(sbi);
}

int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err = 0;

	if (unlikely(f2fs_cp_error(sbi)))
		return 0;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_fs(sb, sync);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return -EAGAIN;

	if (sync)
		err = f2fs_issue_checkpoint(sbi);

	return err;
}

static int f2fs_freeze(struct super_block *sb)
{
	if (f2fs_readonly(sb))
		return 0;

	/* IO error happened before */
	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
		return -EIO;

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
		return -EINVAL;

	/* Let's flush checkpoints and stop the thread. */
	f2fs_flush_ckpt_thread(F2FS_SB(sb));

	/* to avoid deadlock on f2fs_evict_inode->SB_FREEZE_FS */
	set_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
	return 0;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	clear_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
	return 0;
}

#ifdef CONFIG_QUOTA
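/*
 * Clamp the statfs numbers to the project quota limits of @projid, so
 * that a tree under project quota reports the quota limits, not the
 * whole filesystem, as its capacity.
 */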
f2fs_statfs_project(struct super_block * sb,kprojid_t projid,struct kstatfs * buf)1744  static int f2fs_statfs_project(struct super_block *sb,
1745  				kprojid_t projid, struct kstatfs *buf)
1746  {
1747  	struct kqid qid;
1748  	struct dquot *dquot;
1749  	u64 limit;
1750  	u64 curblock;
1751  
1752  	qid = make_kqid_projid(projid);
1753  	dquot = dqget(sb, qid);
1754  	if (IS_ERR(dquot))
1755  		return PTR_ERR(dquot);
1756  	spin_lock(&dquot->dq_dqb_lock);
1757  
1758  	limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
1759  					dquot->dq_dqb.dqb_bhardlimit);
1760  	if (limit)
1761  		limit >>= sb->s_blocksize_bits;
1762  
1763  	if (limit && buf->f_blocks > limit) {
1764  		curblock = (dquot->dq_dqb.dqb_curspace +
1765  			    dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
1766  		buf->f_blocks = limit;
1767  		buf->f_bfree = buf->f_bavail =
1768  			(buf->f_blocks > curblock) ?
1769  			 (buf->f_blocks - curblock) : 0;
1770  	}
1771  
1772  	limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
1773  					dquot->dq_dqb.dqb_ihardlimit);
1774  
1775  	if (limit && buf->f_files > limit) {
1776  		buf->f_files = limit;
1777  		buf->f_ffree =
1778  			(buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
1779  			 (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
1780  	}
1781  
1782  	spin_unlock(&dquot->dq_dqb_lock);
1783  	dqput(dquot);
1784  	return 0;
1785  }
1786  #endif
1787  
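/*
 * Fill in statfs() results from the raw superblock and live counters:
 * free space is what remains of user_block_count after valid, reserved
 * and unusable blocks, and f_files is capped by whichever of the node
 * count or the available block count runs out first.
 */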
1788  static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
1789  {
1790  	struct super_block *sb = dentry->d_sb;
1791  	struct f2fs_sb_info *sbi = F2FS_SB(sb);
1792  	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
1793  	block_t total_count, user_block_count, start_count;
1794  	u64 avail_node_count;
1795  	unsigned int total_valid_node_count;
1796  
1797  	total_count = le64_to_cpu(sbi->raw_super->block_count);
1798  	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
1799  	buf->f_type = F2FS_SUPER_MAGIC;
1800  	buf->f_bsize = sbi->blocksize;
1801  
1802  	buf->f_blocks = total_count - start_count;
1803  
1804  	spin_lock(&sbi->stat_lock);
1805  
1806  	user_block_count = sbi->user_block_count;
1807  	total_valid_node_count = valid_node_count(sbi);
1808  	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
1809  	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
1810  						sbi->current_reserved_blocks;
1811  
1812  	if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
1813  		buf->f_bfree = 0;
1814  	else
1815  		buf->f_bfree -= sbi->unusable_block_count;
1816  	spin_unlock(&sbi->stat_lock);
1817  
1818  	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
1819  		buf->f_bavail = buf->f_bfree -
1820  				F2FS_OPTION(sbi).root_reserved_blocks;
1821  	else
1822  		buf->f_bavail = 0;
1823  
1824  	if (avail_node_count > user_block_count) {
1825  		buf->f_files = user_block_count;
1826  		buf->f_ffree = buf->f_bavail;
1827  	} else {
1828  		buf->f_files = avail_node_count;
1829  		buf->f_ffree = min(avail_node_count - total_valid_node_count,
1830  					buf->f_bavail);
1831  	}
1832  
1833  	buf->f_namelen = F2FS_NAME_LEN;
1834  	buf->f_fsid    = u64_to_fsid(id);
1835  
1836  #ifdef CONFIG_QUOTA
1837  	if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
1838  			sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
1839  		f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
1840  	}
1841  #endif
1842  	return 0;
1843  }
1844  
1845  static inline void f2fs_show_quota_options(struct seq_file *seq,
1846  					   struct super_block *sb)
1847  {
1848  #ifdef CONFIG_QUOTA
1849  	struct f2fs_sb_info *sbi = F2FS_SB(sb);
1850  
1851  	if (F2FS_OPTION(sbi).s_jquota_fmt) {
1852  		char *fmtname = "";
1853  
1854  		switch (F2FS_OPTION(sbi).s_jquota_fmt) {
1855  		case QFMT_VFS_OLD:
1856  			fmtname = "vfsold";
1857  			break;
1858  		case QFMT_VFS_V0:
1859  			fmtname = "vfsv0";
1860  			break;
1861  		case QFMT_VFS_V1:
1862  			fmtname = "vfsv1";
1863  			break;
1864  		}
1865  		seq_printf(seq, ",jqfmt=%s", fmtname);
1866  	}
1867  
1868  	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
1869  		seq_show_option(seq, "usrjquota",
1870  			F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);
1871  
1872  	if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
1873  		seq_show_option(seq, "grpjquota",
1874  			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);
1875  
1876  	if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
1877  		seq_show_option(seq, "prjjquota",
1878  			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
1879  #endif
1880  }
1881  
1882  #ifdef CONFIG_F2FS_FS_COMPRESSION
1883  static inline void f2fs_show_compress_options(struct seq_file *seq,
1884  							struct super_block *sb)
1885  {
1886  	struct f2fs_sb_info *sbi = F2FS_SB(sb);
1887  	char *algtype = "";
1888  	int i;
1889  
1890  	if (!f2fs_sb_has_compression(sbi))
1891  		return;
1892  
1893  	switch (F2FS_OPTION(sbi).compress_algorithm) {
1894  	case COMPRESS_LZO:
1895  		algtype = "lzo";
1896  		break;
1897  	case COMPRESS_LZ4:
1898  		algtype = "lz4";
1899  		break;
1900  	case COMPRESS_ZSTD:
1901  		algtype = "zstd";
1902  		break;
1903  	case COMPRESS_LZORLE:
1904  		algtype = "lzo-rle";
1905  		break;
1906  	}
1907  	seq_printf(seq, ",compress_algorithm=%s", algtype);
1908  
1909  	if (F2FS_OPTION(sbi).compress_level)
1910  		seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level);
1911  
1912  	seq_printf(seq, ",compress_log_size=%u",
1913  			F2FS_OPTION(sbi).compress_log_size);
1914  
1915  	for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) {
1916  		seq_printf(seq, ",compress_extension=%s",
1917  			F2FS_OPTION(sbi).extensions[i]);
1918  	}
1919  
1920  	for (i = 0; i < F2FS_OPTION(sbi).nocompress_ext_cnt; i++) {
1921  		seq_printf(seq, ",nocompress_extension=%s",
1922  			F2FS_OPTION(sbi).noextensions[i]);
1923  	}
1924  
1925  	if (F2FS_OPTION(sbi).compress_chksum)
1926  		seq_puts(seq, ",compress_chksum");
1927  
1928  	if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_FS)
1929  		seq_printf(seq, ",compress_mode=%s", "fs");
1930  	else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER)
1931  		seq_printf(seq, ",compress_mode=%s", "user");
1932  
1933  	if (test_opt(sbi, COMPRESS_CACHE))
1934  		seq_puts(seq, ",compress_cache");
1935  }
1936  #endif
1937  
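/*
 * Emit the effective mount options for /proc/mounts. A typical line is
 * illustrative only, since the exact set depends on the configuration:
 *   ...,background_gc=on,gc_merge,discard,user_xattr,inline_data,...
 */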
1938  static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
1939  {
1940  	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);
1941  
1942  	if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC)
1943  		seq_printf(seq, ",background_gc=%s", "sync");
1944  	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON)
1945  		seq_printf(seq, ",background_gc=%s", "on");
1946  	else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF)
1947  		seq_printf(seq, ",background_gc=%s", "off");
1948  
1949  	if (test_opt(sbi, GC_MERGE))
1950  		seq_puts(seq, ",gc_merge");
1951  	else
1952  		seq_puts(seq, ",nogc_merge");
1953  
1954  	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
1955  		seq_puts(seq, ",disable_roll_forward");
1956  	if (test_opt(sbi, NORECOVERY))
1957  		seq_puts(seq, ",norecovery");
1958  	if (test_opt(sbi, DISCARD)) {
1959  		seq_puts(seq, ",discard");
1960  		if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK)
1961  			seq_printf(seq, ",discard_unit=%s", "block");
1962  		else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
1963  			seq_printf(seq, ",discard_unit=%s", "segment");
1964  		else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
1965  			seq_printf(seq, ",discard_unit=%s", "section");
1966  	} else {
1967  		seq_puts(seq, ",nodiscard");
1968  	}
1969  	if (test_opt(sbi, NOHEAP))
1970  		seq_puts(seq, ",no_heap");
1971  	else
1972  		seq_puts(seq, ",heap");
1973  #ifdef CONFIG_F2FS_FS_XATTR
1974  	if (test_opt(sbi, XATTR_USER))
1975  		seq_puts(seq, ",user_xattr");
1976  	else
1977  		seq_puts(seq, ",nouser_xattr");
1978  	if (test_opt(sbi, INLINE_XATTR))
1979  		seq_puts(seq, ",inline_xattr");
1980  	else
1981  		seq_puts(seq, ",noinline_xattr");
1982  	if (test_opt(sbi, INLINE_XATTR_SIZE))
1983  		seq_printf(seq, ",inline_xattr_size=%u",
1984  					F2FS_OPTION(sbi).inline_xattr_size);
1985  #endif
1986  #ifdef CONFIG_F2FS_FS_POSIX_ACL
1987  	if (test_opt(sbi, POSIX_ACL))
1988  		seq_puts(seq, ",acl");
1989  	else
1990  		seq_puts(seq, ",noacl");
1991  #endif
1992  	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
1993  		seq_puts(seq, ",disable_ext_identify");
1994  	if (test_opt(sbi, INLINE_DATA))
1995  		seq_puts(seq, ",inline_data");
1996  	else
1997  		seq_puts(seq, ",noinline_data");
1998  	if (test_opt(sbi, INLINE_DENTRY))
1999  		seq_puts(seq, ",inline_dentry");
2000  	else
2001  		seq_puts(seq, ",noinline_dentry");
2002  	if (test_opt(sbi, FLUSH_MERGE))
2003  		seq_puts(seq, ",flush_merge");
2004  	else
2005  		seq_puts(seq, ",noflush_merge");
2006  	if (test_opt(sbi, NOBARRIER))
2007  		seq_puts(seq, ",nobarrier");
2008  	else
2009  		seq_puts(seq, ",barrier");
2010  	if (test_opt(sbi, FASTBOOT))
2011  		seq_puts(seq, ",fastboot");
2012  	if (test_opt(sbi, READ_EXTENT_CACHE))
2013  		seq_puts(seq, ",extent_cache");
2014  	else
2015  		seq_puts(seq, ",noextent_cache");
2016  	if (test_opt(sbi, AGE_EXTENT_CACHE))
2017  		seq_puts(seq, ",age_extent_cache");
2018  	if (test_opt(sbi, DATA_FLUSH))
2019  		seq_puts(seq, ",data_flush");
2020  
2021  	seq_puts(seq, ",mode=");
2022  	if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE)
2023  		seq_puts(seq, "adaptive");
2024  	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS)
2025  		seq_puts(seq, "lfs");
2026  	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG)
2027  		seq_puts(seq, "fragment:segment");
2028  	else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
2029  		seq_puts(seq, "fragment:block");
2030  	seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
2031  	if (test_opt(sbi, RESERVE_ROOT))
2032  		seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
2033  				F2FS_OPTION(sbi).root_reserved_blocks,
2034  				from_kuid_munged(&init_user_ns,
2035  					F2FS_OPTION(sbi).s_resuid),
2036  				from_kgid_munged(&init_user_ns,
2037  					F2FS_OPTION(sbi).s_resgid));
2038  	if (F2FS_IO_SIZE_BITS(sbi))
2039  		seq_printf(seq, ",io_bits=%u",
2040  				F2FS_OPTION(sbi).write_io_size_bits);
2041  #ifdef CONFIG_F2FS_FAULT_INJECTION
2042  	if (test_opt(sbi, FAULT_INJECTION)) {
2043  		seq_printf(seq, ",fault_injection=%u",
2044  				F2FS_OPTION(sbi).fault_info.inject_rate);
2045  		seq_printf(seq, ",fault_type=%u",
2046  				F2FS_OPTION(sbi).fault_info.inject_type);
2047  	}
2048  #endif
2049  #ifdef CONFIG_QUOTA
2050  	if (test_opt(sbi, QUOTA))
2051  		seq_puts(seq, ",quota");
2052  	if (test_opt(sbi, USRQUOTA))
2053  		seq_puts(seq, ",usrquota");
2054  	if (test_opt(sbi, GRPQUOTA))
2055  		seq_puts(seq, ",grpquota");
2056  	if (test_opt(sbi, PRJQUOTA))
2057  		seq_puts(seq, ",prjquota");
2058  #endif
2059  	f2fs_show_quota_options(seq, sbi->sb);
2060  
2061  	fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb);
2062  
2063  	if (sbi->sb->s_flags & SB_INLINECRYPT)
2064  		seq_puts(seq, ",inlinecrypt");
2065  
2066  	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
2067  		seq_printf(seq, ",alloc_mode=%s", "default");
2068  	else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
2069  		seq_printf(seq, ",alloc_mode=%s", "reuse");
2070  
2071  	if (test_opt(sbi, DISABLE_CHECKPOINT))
2072  		seq_printf(seq, ",checkpoint=disable:%u",
2073  				F2FS_OPTION(sbi).unusable_cap);
2074  	if (test_opt(sbi, MERGE_CHECKPOINT))
2075  		seq_puts(seq, ",checkpoint_merge");
2076  	else
2077  		seq_puts(seq, ",nocheckpoint_merge");
2078  	if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
2079  		seq_printf(seq, ",fsync_mode=%s", "posix");
2080  	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
2081  		seq_printf(seq, ",fsync_mode=%s", "strict");
2082  	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
2083  		seq_printf(seq, ",fsync_mode=%s", "nobarrier");
2084  
2085  #ifdef CONFIG_F2FS_FS_COMPRESSION
2086  	f2fs_show_compress_options(seq, sbi->sb);
2087  #endif
2088  
2089  	if (test_opt(sbi, ATGC))
2090  		seq_puts(seq, ",atgc");
2091  
2092  	if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_NORMAL)
2093  		seq_printf(seq, ",memory=%s", "normal");
2094  	else if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW)
2095  		seq_printf(seq, ",memory=%s", "low");
2096  
2097  	return 0;
2098  }
2099  
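/*
 * Reset all mount options to their built-in defaults; callers then apply
 * the user's options on top via parse_options().
 */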
2100  static void default_options(struct f2fs_sb_info *sbi)
2101  {
2102  	/* init some FS parameters */
2103  	if (f2fs_sb_has_readonly(sbi))
2104  		F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE;
2105  	else
2106  		F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE;
2107  
2108  	F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
2109  	if (le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count_main) <=
2110  							SMALL_VOLUME_SEGMENTS)
2111  		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
2112  	else
2113  		F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
2114  	F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
2115  	F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
2116  	F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
2117  	if (f2fs_sb_has_compression(sbi)) {
2118  		F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4;
2119  		F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE;
2120  		F2FS_OPTION(sbi).compress_ext_cnt = 0;
2121  		F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
2122  	}
2123  	F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
2124  	F2FS_OPTION(sbi).memory_mode = MEMORY_MODE_NORMAL;
2125  
2126  	sbi->sb->s_flags &= ~SB_INLINECRYPT;
2127  
2128  	set_opt(sbi, INLINE_XATTR);
2129  	set_opt(sbi, INLINE_DATA);
2130  	set_opt(sbi, INLINE_DENTRY);
2131  	set_opt(sbi, READ_EXTENT_CACHE);
2132  	set_opt(sbi, NOHEAP);
2133  	clear_opt(sbi, DISABLE_CHECKPOINT);
2134  	set_opt(sbi, MERGE_CHECKPOINT);
2135  	F2FS_OPTION(sbi).unusable_cap = 0;
2136  	sbi->sb->s_flags |= SB_LAZYTIME;
2137  	if (!f2fs_is_readonly(sbi))
2138  		set_opt(sbi, FLUSH_MERGE);
2139  	if (f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi))
2140  		set_opt(sbi, DISCARD);
2141  	if (f2fs_sb_has_blkzoned(sbi)) {
2142  		F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
2143  		F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_SECTION;
2144  	} else {
2145  		F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
2146  		F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_BLOCK;
2147  	}
2148  
2149  #ifdef CONFIG_F2FS_FS_XATTR
2150  	set_opt(sbi, XATTR_USER);
2151  #endif
2152  #ifdef CONFIG_F2FS_FS_POSIX_ACL
2153  	set_opt(sbi, POSIX_ACL);
2154  #endif
2155  
2156  	f2fs_build_fault_attr(sbi, 0, 0);
2157  }
2158  
2159  #ifdef CONFIG_QUOTA
2160  static int f2fs_enable_quotas(struct super_block *sb);
2161  #endif
2162  
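/*
 * checkpoint=disable: run urgent foreground GC (bounded by DISABLE_TIME)
 * until enough blocks are reclaimable, sync the filesystem, then write a
 * final CP_PAUSE checkpoint and mark checkpointing as disabled.
 */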
2163  static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
2164  {
2165  	unsigned int s_flags = sbi->sb->s_flags;
2166  	struct cp_control cpc;
2167  	unsigned int gc_mode = sbi->gc_mode;
2168  	int err = 0;
2169  	int ret;
2170  	block_t unusable;
2171  
2172  	if (s_flags & SB_RDONLY) {
2173  		f2fs_err(sbi, "checkpoint=disable on readonly fs");
2174  		return -EINVAL;
2175  	}
2176  	sbi->sb->s_flags |= SB_ACTIVE;
2177  
2178  	/* check if we need more GC first */
2179  	unusable = f2fs_get_unusable_blocks(sbi);
2180  	if (!f2fs_disable_cp_again(sbi, unusable))
2181  		goto skip_gc;
2182  
2183  	f2fs_update_time(sbi, DISABLE_TIME);
2184  
2185  	sbi->gc_mode = GC_URGENT_HIGH;
2186  
2187  	while (!f2fs_time_over(sbi, DISABLE_TIME)) {
2188  		struct f2fs_gc_control gc_control = {
2189  			.victim_segno = NULL_SEGNO,
2190  			.init_gc_type = FG_GC,
2191  			.should_migrate_blocks = false,
2192  			.err_gc_skipped = true,
2193  			.nr_free_secs = 1 };
2194  
2195  		f2fs_down_write(&sbi->gc_lock);
2196  		err = f2fs_gc(sbi, &gc_control);
2197  		if (err == -ENODATA) {
2198  			err = 0;
2199  			break;
2200  		}
2201  		if (err && err != -EAGAIN)
2202  			break;
2203  	}
2204  
2205  	ret = sync_filesystem(sbi->sb);
2206  	if (ret || err) {
2207  		err = ret ? ret : err;
2208  		goto restore_flag;
2209  	}
2210  
2211  	unusable = f2fs_get_unusable_blocks(sbi);
2212  	if (f2fs_disable_cp_again(sbi, unusable)) {
2213  		err = -EAGAIN;
2214  		goto restore_flag;
2215  	}
2216  
2217  skip_gc:
2218  	f2fs_down_write(&sbi->gc_lock);
2219  	cpc.reason = CP_PAUSE;
2220  	set_sbi_flag(sbi, SBI_CP_DISABLED);
2221  	err = f2fs_write_checkpoint(sbi, &cpc);
2222  	if (err)
2223  		goto out_unlock;
2224  
2225  	spin_lock(&sbi->stat_lock);
2226  	sbi->unusable_block_count = unusable;
2227  	spin_unlock(&sbi->stat_lock);
2228  
2229  out_unlock:
2230  	f2fs_up_write(&sbi->gc_lock);
2231  restore_flag:
2232  	sbi->gc_mode = gc_mode;
2233  	sbi->sb->s_flags = s_flags;	/* Restore SB_RDONLY status */
2234  	return err;
2235  }
2236  
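/*
 * checkpoint=enable: flush dirty data, move dirty segments to prefree,
 * clear SBI_CP_DISABLED and issue a checkpoint to return to a consistent
 * on-disk state.
 */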
2237  static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
2238  {
2239  	int retry = DEFAULT_RETRY_IO_COUNT;
2240  
2241  	/* we should flush all the data to keep data consistency */
2242  	do {
2243  		sync_inodes_sb(sbi->sb);
2244  		f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
2245  	} while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--);
2246  
2247  	if (unlikely(retry < 0))
2248  		f2fs_warn(sbi, "checkpoint=enable has some unwritten data.");
2249  
2250  	f2fs_down_write(&sbi->gc_lock);
2251  	f2fs_dirty_to_prefree(sbi);
2252  
2253  	clear_sbi_flag(sbi, SBI_CP_DISABLED);
2254  	set_sbi_flag(sbi, SBI_IS_DIRTY);
2255  	f2fs_up_write(&sbi->gc_lock);
2256  
2257  	f2fs_sync_fs(sbi->sb, 1);
2258  
2259  	/* Let's ensure there's no pending checkpoint anymore */
2260  	f2fs_flush_ckpt_thread(sbi);
2261  }
2262  
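/*
 * Remount: options that cannot be toggled dynamically (atgc,
 * extent_cache, age_extent_cache, io_bits, compress_cache, discard_unit)
 * are rejected; everything else is applied by starting/stopping the GC,
 * checkpoint, flush and discard threads, with an unwind ladder
 * (restore_*) that rolls back on failure.
 */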
2263  static int f2fs_remount(struct super_block *sb, int *flags, char *data)
2264  {
2265  	struct f2fs_sb_info *sbi = F2FS_SB(sb);
2266  	struct f2fs_mount_info org_mount_opt;
2267  	unsigned long old_sb_flags;
2268  	int err;
2269  	bool need_restart_gc = false, need_stop_gc = false;
2270  	bool need_restart_ckpt = false, need_stop_ckpt = false;
2271  	bool need_restart_flush = false, need_stop_flush = false;
2272  	bool need_restart_discard = false, need_stop_discard = false;
2273  	bool no_read_extent_cache = !test_opt(sbi, READ_EXTENT_CACHE);
2274  	bool no_age_extent_cache = !test_opt(sbi, AGE_EXTENT_CACHE);
2275  	bool enable_checkpoint = !test_opt(sbi, DISABLE_CHECKPOINT);
2276  	bool no_io_align = !F2FS_IO_ALIGNED(sbi);
2277  	bool no_atgc = !test_opt(sbi, ATGC);
2278  	bool no_discard = !test_opt(sbi, DISCARD);
2279  	bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE);
2280  	bool block_unit_discard = f2fs_block_unit_discard(sbi);
2281  #ifdef CONFIG_QUOTA
2282  	int i, j;
2283  #endif
2284  
2285  	/*
2286  	 * Save the old mount options in case we
2287  	 * need to restore them.
2288  	 */
2289  	org_mount_opt = sbi->mount_opt;
2290  	old_sb_flags = sb->s_flags;
2291  
2292  #ifdef CONFIG_QUOTA
2293  	org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
2294  	for (i = 0; i < MAXQUOTAS; i++) {
2295  		if (F2FS_OPTION(sbi).s_qf_names[i]) {
2296  			org_mount_opt.s_qf_names[i] =
2297  				kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
2298  				GFP_KERNEL);
2299  			if (!org_mount_opt.s_qf_names[i]) {
2300  				for (j = 0; j < i; j++)
2301  					kfree(org_mount_opt.s_qf_names[j]);
2302  				return -ENOMEM;
2303  			}
2304  		} else {
2305  			org_mount_opt.s_qf_names[i] = NULL;
2306  		}
2307  	}
2308  #endif
2309  
2310  	/* recover superblocks we couldn't write due to previous RO mount */
2311  	if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
2312  		err = f2fs_commit_super(sbi, false);
2313  		f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
2314  			  err);
2315  		if (!err)
2316  			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
2317  	}
2318  
2319  	default_options(sbi);
2320  
2321  	/* parse mount options */
2322  	err = parse_options(sb, data, true);
2323  	if (err)
2324  		goto restore_opts;
2325  
2326  	/*
2327  	 * Both the previous and new states of the filesystem are RO,
2328  	 * so skip checking GC and FLUSH_MERGE conditions.
2329  	 */
2330  	if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
2331  		goto skip;
2332  
2333  	if (f2fs_dev_is_readonly(sbi) && !(*flags & SB_RDONLY)) {
2334  		err = -EROFS;
2335  		goto restore_opts;
2336  	}
2337  
2338  #ifdef CONFIG_QUOTA
2339  	if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
2340  		err = dquot_suspend(sb, -1);
2341  		if (err < 0)
2342  			goto restore_opts;
2343  	} else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
2344  		/* dquot_resume needs RW */
2345  		sb->s_flags &= ~SB_RDONLY;
2346  		if (sb_any_quota_suspended(sb)) {
2347  			dquot_resume(sb, -1);
2348  		} else if (f2fs_sb_has_quota_ino(sbi)) {
2349  			err = f2fs_enable_quotas(sb);
2350  			if (err)
2351  				goto restore_opts;
2352  		}
2353  	}
2354  #endif
2355  	if (f2fs_lfs_mode(sbi) && !IS_F2FS_IPU_DISABLE(sbi)) {
2356  		err = -EINVAL;
2357  		f2fs_warn(sbi, "LFS is not compatible with IPU");
2358  		goto restore_opts;
2359  	}
2360  
2361  	/* disallow enable atgc dynamically */
2362  	if (no_atgc == !!test_opt(sbi, ATGC)) {
2363  		err = -EINVAL;
2364  		f2fs_warn(sbi, "switch atgc option is not allowed");
2365  		goto restore_opts;
2366  	}
2367  
2368  	/* disallow enable/disable extent_cache dynamically */
2369  	if (no_read_extent_cache == !!test_opt(sbi, READ_EXTENT_CACHE)) {
2370  		err = -EINVAL;
2371  		f2fs_warn(sbi, "switch extent_cache option is not allowed");
2372  		goto restore_opts;
2373  	}
2374  	/* disallow enable/disable age extent_cache dynamically */
2375  	if (no_age_extent_cache == !!test_opt(sbi, AGE_EXTENT_CACHE)) {
2376  		err = -EINVAL;
2377  		f2fs_warn(sbi, "switch age_extent_cache option is not allowed");
2378  		goto restore_opts;
2379  	}
2380  
2381  	if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) {
2382  		err = -EINVAL;
2383  		f2fs_warn(sbi, "switch io_bits option is not allowed");
2384  		goto restore_opts;
2385  	}
2386  
2387  	if (no_compress_cache == !!test_opt(sbi, COMPRESS_CACHE)) {
2388  		err = -EINVAL;
2389  		f2fs_warn(sbi, "switch compress_cache option is not allowed");
2390  		goto restore_opts;
2391  	}
2392  
2393  	if (block_unit_discard != f2fs_block_unit_discard(sbi)) {
2394  		err = -EINVAL;
2395  		f2fs_warn(sbi, "switch discard_unit option is not allowed");
2396  		goto restore_opts;
2397  	}
2398  
2399  	if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
2400  		err = -EINVAL;
2401  		f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
2402  		goto restore_opts;
2403  	}
2404  
2405  	/*
2406  	 * We stop the GC thread if the FS is mounted as RO
2407  	 * or if background_gc=off is passed in the mount
2408  	 * options. Also sync the filesystem.
2409  	 */
2410  	if ((*flags & SB_RDONLY) ||
2411  			(F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF &&
2412  			!test_opt(sbi, GC_MERGE))) {
2413  		if (sbi->gc_thread) {
2414  			f2fs_stop_gc_thread(sbi);
2415  			need_restart_gc = true;
2416  		}
2417  	} else if (!sbi->gc_thread) {
2418  		err = f2fs_start_gc_thread(sbi);
2419  		if (err)
2420  			goto restore_opts;
2421  		need_stop_gc = true;
2422  	}
2423  
2424  	if (*flags & SB_RDONLY) {
2425  		sync_inodes_sb(sb);
2426  
2427  		set_sbi_flag(sbi, SBI_IS_DIRTY);
2428  		set_sbi_flag(sbi, SBI_IS_CLOSE);
2429  		f2fs_sync_fs(sb, 1);
2430  		clear_sbi_flag(sbi, SBI_IS_CLOSE);
2431  	}
2432  
2433  	if ((*flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) ||
2434  			!test_opt(sbi, MERGE_CHECKPOINT)) {
2435  		f2fs_stop_ckpt_thread(sbi);
2436  		need_restart_ckpt = true;
2437  	} else {
2438  		/* Flush the previous checkpoint, if one exists. */
2439  		f2fs_flush_ckpt_thread(sbi);
2440  
2441  		err = f2fs_start_ckpt_thread(sbi);
2442  		if (err) {
2443  			f2fs_err(sbi,
2444  			    "Failed to start F2FS issue_checkpoint_thread (%d)",
2445  			    err);
2446  			goto restore_gc;
2447  		}
2448  		need_stop_ckpt = true;
2449  	}
2450  
2451  	/*
2452  	 * We stop the issue_flush thread if the FS is mounted as RO
2453  	 * or if flush_merge is not passed in the mount options.
2454  	 */
2455  	if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
2456  		clear_opt(sbi, FLUSH_MERGE);
2457  		f2fs_destroy_flush_cmd_control(sbi, false);
2458  		need_restart_flush = true;
2459  	} else {
2460  		err = f2fs_create_flush_cmd_control(sbi);
2461  		if (err)
2462  			goto restore_ckpt;
2463  		need_stop_flush = true;
2464  	}
2465  
2466  	if (no_discard == !!test_opt(sbi, DISCARD)) {
2467  		if (test_opt(sbi, DISCARD)) {
2468  			err = f2fs_start_discard_thread(sbi);
2469  			if (err)
2470  				goto restore_flush;
2471  			need_stop_discard = true;
2472  		} else {
2473  			f2fs_stop_discard_thread(sbi);
2474  			f2fs_issue_discard_timeout(sbi);
2475  			need_restart_discard = true;
2476  		}
2477  	}
2478  
2479  	if (enable_checkpoint == !!test_opt(sbi, DISABLE_CHECKPOINT)) {
2480  		if (test_opt(sbi, DISABLE_CHECKPOINT)) {
2481  			err = f2fs_disable_checkpoint(sbi);
2482  			if (err)
2483  				goto restore_discard;
2484  		} else {
2485  			f2fs_enable_checkpoint(sbi);
2486  		}
2487  	}
2488  
2489  skip:
2490  #ifdef CONFIG_QUOTA
2491  	/* Release old quota file names */
2492  	for (i = 0; i < MAXQUOTAS; i++)
2493  		kfree(org_mount_opt.s_qf_names[i]);
2494  #endif
2495  	/* Update the POSIXACL Flag */
2496  	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
2497  		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
2498  
2499  	limit_reserve_root(sbi);
2500  	adjust_unusable_cap_perc(sbi);
2501  	*flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
2502  	return 0;
2503  restore_discard:
2504  	if (need_restart_discard) {
2505  		if (f2fs_start_discard_thread(sbi))
2506  			f2fs_warn(sbi, "discard has been stopped");
2507  	} else if (need_stop_discard) {
2508  		f2fs_stop_discard_thread(sbi);
2509  	}
2510  restore_flush:
2511  	if (need_restart_flush) {
2512  		if (f2fs_create_flush_cmd_control(sbi))
2513  			f2fs_warn(sbi, "background flush thread has stopped");
2514  	} else if (need_stop_flush) {
2515  		clear_opt(sbi, FLUSH_MERGE);
2516  		f2fs_destroy_flush_cmd_control(sbi, false);
2517  	}
2518  restore_ckpt:
2519  	if (need_restart_ckpt) {
2520  		if (f2fs_start_ckpt_thread(sbi))
2521  			f2fs_warn(sbi, "background ckpt thread has stopped");
2522  	} else if (need_stop_ckpt) {
2523  		f2fs_stop_ckpt_thread(sbi);
2524  	}
2525  restore_gc:
2526  	if (need_restart_gc) {
2527  		if (f2fs_start_gc_thread(sbi))
2528  			f2fs_warn(sbi, "background gc thread has stopped");
2529  	} else if (need_stop_gc) {
2530  		f2fs_stop_gc_thread(sbi);
2531  	}
2532  restore_opts:
2533  #ifdef CONFIG_QUOTA
2534  	F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
2535  	for (i = 0; i < MAXQUOTAS; i++) {
2536  		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
2537  		F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
2538  	}
2539  #endif
2540  	sbi->mount_opt = org_mount_opt;
2541  	sb->s_flags = old_sb_flags;
2542  	return err;
2543  }
2544  
2545  #ifdef CONFIG_QUOTA
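/*
 * Recovery is needed when orphan inodes are present, or when the
 * filesystem was not cleanly unmounted and roll-forward recovery has not
 * been disabled via mount options.
 */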
2546  static bool f2fs_need_recovery(struct f2fs_sb_info *sbi)
2547  {
2548  	/* need to recover orphan inodes */
2549  	if (is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
2550  		return true;
2551  	/* need to recover data */
2552  	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
2553  		return false;
2554  	if (test_opt(sbi, NORECOVERY))
2555  		return false;
2556  	return !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG);
2557  }
2558  
2559  static bool f2fs_recover_quota_begin(struct f2fs_sb_info *sbi)
2560  {
2561  	bool readonly = f2fs_readonly(sbi->sb);
2562  
2563  	if (!f2fs_need_recovery(sbi))
2564  		return false;
2565  
2566  	/* no need to check f2fs_sb_has_readonly() */
2567  	if (f2fs_hw_is_readonly(sbi))
2568  		return false;
2569  
2570  	if (readonly) {
2571  		sbi->sb->s_flags &= ~SB_RDONLY;
2572  		set_sbi_flag(sbi, SBI_IS_WRITABLE);
2573  	}
2574  
2575  	/*
2576  	 * Turn on quotas which were not enabled for read-only mounts if
2577  	 * the filesystem has the quota feature, so that they are updated correctly.
2578  	 */
2579  	return f2fs_enable_quota_files(sbi, readonly);
2580  }
2581  
2582  static void f2fs_recover_quota_end(struct f2fs_sb_info *sbi,
2583  						bool quota_enabled)
2584  {
2585  	if (quota_enabled)
2586  		f2fs_quota_off_umount(sbi->sb);
2587  
2588  	if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE)) {
2589  		clear_sbi_flag(sbi, SBI_IS_WRITABLE);
2590  		sbi->sb->s_flags |= SB_RDONLY;
2591  	}
2592  }
2593  
2594  /* Read data from quotafile */
2595  static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
2596  			       size_t len, loff_t off)
2597  {
2598  	struct inode *inode = sb_dqopt(sb)->files[type];
2599  	struct address_space *mapping = inode->i_mapping;
2600  	block_t blkidx = F2FS_BYTES_TO_BLK(off);
2601  	int offset = off & (sb->s_blocksize - 1);
2602  	int tocopy;
2603  	size_t toread;
2604  	loff_t i_size = i_size_read(inode);
2605  	struct page *page;
2606  
2607  	if (off > i_size)
2608  		return 0;
2609  
2610  	if (off + len > i_size)
2611  		len = i_size - off;
2612  	toread = len;
2613  	while (toread > 0) {
2614  		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
2615  repeat:
2616  		page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
2617  		if (IS_ERR(page)) {
2618  			if (PTR_ERR(page) == -ENOMEM) {
2619  				congestion_wait(BLK_RW_ASYNC,
2620  						DEFAULT_IO_TIMEOUT);
2621  				goto repeat;
2622  			}
2623  			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2624  			return PTR_ERR(page);
2625  		}
2626  
2627  		lock_page(page);
2628  
2629  		if (unlikely(page->mapping != mapping)) {
2630  			f2fs_put_page(page, 1);
2631  			goto repeat;
2632  		}
2633  		if (unlikely(!PageUptodate(page))) {
2634  			f2fs_put_page(page, 1);
2635  			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2636  			return -EIO;
2637  		}
2638  
2639  		memcpy_from_page(data, page, offset, tocopy);
2640  		f2fs_put_page(page, 1);
2641  
2642  		offset = 0;
2643  		toread -= tocopy;
2644  		data += tocopy;
2645  		blkidx++;
2646  	}
2647  	return len;
2648  }
2649  
2650  /* Write to quotafile */
2651  static ssize_t f2fs_quota_write(struct super_block *sb, int type,
2652  				const char *data, size_t len, loff_t off)
2653  {
2654  	struct inode *inode = sb_dqopt(sb)->files[type];
2655  	struct address_space *mapping = inode->i_mapping;
2656  	const struct address_space_operations *a_ops = mapping->a_ops;
2657  	int offset = off & (sb->s_blocksize - 1);
2658  	size_t towrite = len;
2659  	struct page *page;
2660  	void *fsdata = NULL;
2661  	int err = 0;
2662  	int tocopy;
2663  
2664  	while (towrite > 0) {
2665  		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
2666  								towrite);
2667  retry:
2668  		err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
2669  							&page, &fsdata);
2670  		if (unlikely(err)) {
2671  			if (err == -ENOMEM) {
2672  				f2fs_io_schedule_timeout(DEFAULT_IO_TIMEOUT);
2673  				goto retry;
2674  			}
2675  			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2676  			break;
2677  		}
2678  
2679  		memcpy_to_page(page, offset, data, tocopy);
2680  
2681  		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
2682  						page, fsdata);
2683  		offset = 0;
2684  		towrite -= tocopy;
2685  		off += tocopy;
2686  		data += tocopy;
2687  		cond_resched();
2688  	}
2689  
2690  	if (len == towrite)
2691  		return err;
2692  	inode->i_mtime = inode->i_ctime = current_time(inode);
2693  	f2fs_mark_inode_dirty_sync(inode, false);
2694  	return len - towrite;
2695  }
2696  
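/*
 * Wrapper around dquot_initialize() that honours FAULT_DQUOT_INIT fault
 * injection.
 */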
2697  int f2fs_dquot_initialize(struct inode *inode)
2698  {
2699  	if (time_to_inject(F2FS_I_SB(inode), FAULT_DQUOT_INIT))
2700  		return -ESRCH;
2701  
2702  	return dquot_initialize(inode);
2703  }
2704  
2705  static struct dquot **f2fs_get_dquots(struct inode *inode)
2706  {
2707  	return F2FS_I(inode)->i_dquot;
2708  }
2709  
2710  static qsize_t *f2fs_get_reserved_space(struct inode *inode)
2711  {
2712  	return &F2FS_I(inode)->i_reserved_quota;
2713  }
2714  
2715  static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
2716  {
2717  	if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
2718  		f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it");
2719  		return 0;
2720  	}
2721  
2722  	return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
2723  					F2FS_OPTION(sbi).s_jquota_fmt, type);
2724  }
2725  
2726  int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
2727  {
2728  	int enabled = 0;
2729  	int i, err;
2730  
2731  	if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
2732  		err = f2fs_enable_quotas(sbi->sb);
2733  		if (err) {
2734  			f2fs_err(sbi, "Cannot turn on quota_ino: %d", err);
2735  			return 0;
2736  		}
2737  		return 1;
2738  	}
2739  
2740  	for (i = 0; i < MAXQUOTAS; i++) {
2741  		if (F2FS_OPTION(sbi).s_qf_names[i]) {
2742  			err = f2fs_quota_on_mount(sbi, i);
2743  			if (!err) {
2744  				enabled = 1;
2745  				continue;
2746  			}
2747  			f2fs_err(sbi, "Cannot turn on quotas: %d on %d",
2748  				 err, i);
2749  		}
2750  	}
2751  	return enabled;
2752  }
2753  
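/*
 * Enable quota tracking from the hidden quota inode (quota_ino feature)
 * instead of a user-visible quota file; the inode is flagged S_NOQUOTA
 * so quota usage is not accounted recursively against itself.
 */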
2754  static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
2755  			     unsigned int flags)
2756  {
2757  	struct inode *qf_inode;
2758  	unsigned long qf_inum;
2759  	int err;
2760  
2761  	BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));
2762  
2763  	qf_inum = f2fs_qf_ino(sb, type);
2764  	if (!qf_inum)
2765  		return -EPERM;
2766  
2767  	qf_inode = f2fs_iget(sb, qf_inum);
2768  	if (IS_ERR(qf_inode)) {
2769  		f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum);
2770  		return PTR_ERR(qf_inode);
2771  	}
2772  
2773  	/* Don't account quota for quota files to avoid recursion */
2774  	qf_inode->i_flags |= S_NOQUOTA;
2775  	err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
2776  	iput(qf_inode);
2777  	return err;
2778  }
2779  
2780  static int f2fs_enable_quotas(struct super_block *sb)
2781  {
2782  	struct f2fs_sb_info *sbi = F2FS_SB(sb);
2783  	int type, err = 0;
2784  	unsigned long qf_inum;
2785  	bool quota_mopt[MAXQUOTAS] = {
2786  		test_opt(sbi, USRQUOTA),
2787  		test_opt(sbi, GRPQUOTA),
2788  		test_opt(sbi, PRJQUOTA),
2789  	};
2790  
2791  	if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
2792  		f2fs_err(sbi, "quota file may be corrupted, skip loading it");
2793  		return 0;
2794  	}
2795  
2796  	sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
2797  
2798  	for (type = 0; type < MAXQUOTAS; type++) {
2799  		qf_inum = f2fs_qf_ino(sb, type);
2800  		if (qf_inum) {
2801  			err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
2802  				DQUOT_USAGE_ENABLED |
2803  				(quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
2804  			if (err) {
2805  				f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
2806  					 type, err);
2807  				for (type--; type >= 0; type--)
2808  					dquot_quota_off(sb, type);
2809  				set_sbi_flag(F2FS_SB(sb),
2810  						SBI_QUOTA_NEED_REPAIR);
2811  				return err;
2812  			}
2813  		}
2814  	}
2815  	return 0;
2816  }
2817  
2818  static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type)
2819  {
2820  	struct quota_info *dqopt = sb_dqopt(sbi->sb);
2821  	struct address_space *mapping = dqopt->files[type]->i_mapping;
2822  	int ret = 0;
2823  
2824  	ret = dquot_writeback_dquots(sbi->sb, type);
2825  	if (ret)
2826  		goto out;
2827  
2828  	ret = filemap_fdatawrite(mapping);
2829  	if (ret)
2830  		goto out;
2831  
2832  	/* if we are using journalled quota, skip waiting for writeback */
2833  	if (is_journalled_quota(sbi))
2834  		goto out;
2835  
2836  	ret = filemap_fdatawait(mapping);
2837  
2838  	truncate_inode_pages(&dqopt->files[type]->i_data, 0);
2839  out:
2840  	if (ret)
2841  		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2842  	return ret;
2843  }
2844  
2845  int f2fs_quota_sync(struct super_block *sb, int type)
2846  {
2847  	struct f2fs_sb_info *sbi = F2FS_SB(sb);
2848  	struct quota_info *dqopt = sb_dqopt(sb);
2849  	int cnt;
2850  	int ret = 0;
2851  
2852  	/*
2853  	 * Now when everything is written we can discard the pagecache so
2854  	 * that userspace sees the changes.
2855  	 */
2856  	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
2857  
2858  		if (type != -1 && cnt != type)
2859  			continue;
2860  
2861  		if (!sb_has_quota_active(sb, cnt))
2862  			continue;
2863  
2864  		if (!f2fs_sb_has_quota_ino(sbi))
2865  			inode_lock(dqopt->files[cnt]);
2866  
2867  		/*
2868  		 * do_quotactl
2869  		 *  f2fs_quota_sync
2870  		 *  f2fs_down_read(quota_sem)
2871  		 *  dquot_writeback_dquots()
2872  		 *  f2fs_dquot_commit
2873  		 *			      block_operation
2874  		 *			      f2fs_down_read(quota_sem)
2875  		 */
2876  		f2fs_lock_op(sbi);
2877  		f2fs_down_read(&sbi->quota_sem);
2878  
2879  		ret = f2fs_quota_sync_file(sbi, cnt);
2880  
2881  		f2fs_up_read(&sbi->quota_sem);
2882  		f2fs_unlock_op(sbi);
2883  
2884  		if (!f2fs_sb_has_quota_ino(sbi))
2885  			inode_unlock(dqopt->files[cnt]);
2886  
2887  		if (ret)
2888  			break;
2889  	}
2890  	return ret;
2891  }
2892  
2893  static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
2894  							const struct path *path)
2895  {
2896  	struct inode *inode;
2897  	int err;
2898  
2899  	/* if the quota sysfile exists, deny enabling quota with a specific file */
2900  	if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
2901  		f2fs_err(F2FS_SB(sb), "quota sysfile already exists");
2902  		return -EBUSY;
2903  	}
2904  
2905  	err = f2fs_quota_sync(sb, type);
2906  	if (err)
2907  		return err;
2908  
2909  	err = dquot_quota_on(sb, type, format_id, path);
2910  	if (err)
2911  		return err;
2912  
2913  	inode = d_inode(path->dentry);
2914  
2915  	inode_lock(inode);
2916  	F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
2917  	f2fs_set_inode_flags(inode);
2918  	inode_unlock(inode);
2919  	f2fs_mark_inode_dirty_sync(inode, false);
2920  
2921  	return 0;
2922  }
2923  
2924  static int __f2fs_quota_off(struct super_block *sb, int type)
2925  {
2926  	struct inode *inode = sb_dqopt(sb)->files[type];
2927  	int err;
2928  
2929  	if (!inode || !igrab(inode))
2930  		return dquot_quota_off(sb, type);
2931  
2932  	err = f2fs_quota_sync(sb, type);
2933  	if (err)
2934  		goto out_put;
2935  
2936  	err = dquot_quota_off(sb, type);
2937  	if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
2938  		goto out_put;
2939  
2940  	inode_lock(inode);
2941  	F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
2942  	f2fs_set_inode_flags(inode);
2943  	inode_unlock(inode);
2944  	f2fs_mark_inode_dirty_sync(inode, false);
2945  out_put:
2946  	iput(inode);
2947  	return err;
2948  }
2949  
2950  static int f2fs_quota_off(struct super_block *sb, int type)
2951  {
2952  	struct f2fs_sb_info *sbi = F2FS_SB(sb);
2953  	int err;
2954  
2955  	err = __f2fs_quota_off(sb, type);
2956  
2957  	/*
2958  	 * quotactl can shut down journalled quota, resulting in inconsistency
2959  	 * between quota records and fs data due to subsequent updates; set the
2960  	 * flag so that fsck is aware of it.
2961  	 */
2962  	if (is_journalled_quota(sbi))
2963  		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2964  	return err;
2965  }
2966  
2967  void f2fs_quota_off_umount(struct super_block *sb)
2968  {
2969  	int type;
2970  	int err;
2971  
2972  	for (type = 0; type < MAXQUOTAS; type++) {
2973  		err = __f2fs_quota_off(sb, type);
2974  		if (err) {
2975  			int ret = dquot_quota_off(sb, type);
2976  
2977  			f2fs_err(F2FS_SB(sb), "Fail to turn off disk quota (type: %d, err: %d, ret:%d), Please run fsck to fix it.",
2978  				 type, err, ret);
2979  			set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2980  		}
2981  	}
2982  	/*
2983  	 * In case of checkpoint=disable, we must flush quota blocks.
2984  	 * Flushing them later could cause a NULL dereference on node_inode
2985  	 * in end_io, since put_super has already dropped it.
2986  	 */
2987  	sync_filesystem(sb);
2988  }
2989  
2990  static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
2991  {
2992  	struct quota_info *dqopt = sb_dqopt(sb);
2993  	int type;
2994  
2995  	for (type = 0; type < MAXQUOTAS; type++) {
2996  		if (!dqopt->files[type])
2997  			continue;
2998  		f2fs_inode_synced(dqopt->files[type]);
2999  	}
3000  }
3001  
3002  static int f2fs_dquot_commit(struct dquot *dquot)
3003  {
3004  	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
3005  	int ret;
3006  
3007  	f2fs_down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
3008  	ret = dquot_commit(dquot);
3009  	if (ret < 0)
3010  		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3011  	f2fs_up_read(&sbi->quota_sem);
3012  	return ret;
3013  }
3014  
3015  static int f2fs_dquot_acquire(struct dquot *dquot)
3016  {
3017  	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
3018  	int ret;
3019  
3020  	f2fs_down_read(&sbi->quota_sem);
3021  	ret = dquot_acquire(dquot);
3022  	if (ret < 0)
3023  		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3024  	f2fs_up_read(&sbi->quota_sem);
3025  	return ret;
3026  }
3027  
3028  static int f2fs_dquot_release(struct dquot *dquot)
3029  {
3030  	struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
3031  	int ret = dquot_release(dquot);
3032  
3033  	if (ret < 0)
3034  		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3035  	return ret;
3036  }
3037  
3038  static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
3039  {
3040  	struct super_block *sb = dquot->dq_sb;
3041  	struct f2fs_sb_info *sbi = F2FS_SB(sb);
3042  	int ret = dquot_mark_dquot_dirty(dquot);
3043  
3044  	/* if we are using journalled quota */
3045  	if (is_journalled_quota(sbi))
3046  		set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
3047  
3048  	return ret;
3049  }
3050  
3051  static int f2fs_dquot_commit_info(struct super_block *sb, int type)
3052  {
3053  	struct f2fs_sb_info *sbi = F2FS_SB(sb);
3054  	int ret = dquot_commit_info(sb, type);
3055  
3056  	if (ret < 0)
3057  		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
3058  	return ret;
3059  }
3060  
3061  static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
3062  {
3063  	*projid = F2FS_I(inode)->i_projid;
3064  	return 0;
3065  }
3066  
3067  static const struct dquot_operations f2fs_quota_operations = {
3068  	.get_reserved_space = f2fs_get_reserved_space,
3069  	.write_dquot	= f2fs_dquot_commit,
3070  	.acquire_dquot	= f2fs_dquot_acquire,
3071  	.release_dquot	= f2fs_dquot_release,
3072  	.mark_dirty	= f2fs_dquot_mark_dquot_dirty,
3073  	.write_info	= f2fs_dquot_commit_info,
3074  	.alloc_dquot	= dquot_alloc,
3075  	.destroy_dquot	= dquot_destroy,
3076  	.get_projid	= f2fs_get_projid,
3077  	.get_next_id	= dquot_get_next_id,
3078  };
3079  
3080  static const struct quotactl_ops f2fs_quotactl_ops = {
3081  	.quota_on	= f2fs_quota_on,
3082  	.quota_off	= f2fs_quota_off,
3083  	.quota_sync	= f2fs_quota_sync,
3084  	.get_state	= dquot_get_state,
3085  	.set_info	= dquot_set_dqinfo,
3086  	.get_dqblk	= dquot_get_dqblk,
3087  	.set_dqblk	= dquot_set_dqblk,
3088  	.get_nextdqblk	= dquot_get_next_dqblk,
3089  };
3090  #else
3091  int f2fs_dquot_initialize(struct inode *inode)
3092  {
3093  	return 0;
3094  }
3095  
3096  int f2fs_quota_sync(struct super_block *sb, int type)
3097  {
3098  	return 0;
3099  }
3100  
3101  void f2fs_quota_off_umount(struct super_block *sb)
3102  {
3103  }
3104  #endif
3105  
3106  static const struct super_operations f2fs_sops = {
3107  	.alloc_inode	= f2fs_alloc_inode,
3108  	.free_inode	= f2fs_free_inode,
3109  	.drop_inode	= f2fs_drop_inode,
3110  	.write_inode	= f2fs_write_inode,
3111  	.dirty_inode	= f2fs_dirty_inode,
3112  	.show_options	= f2fs_show_options,
3113  #ifdef CONFIG_QUOTA
3114  	.quota_read	= f2fs_quota_read,
3115  	.quota_write	= f2fs_quota_write,
3116  	.get_dquots	= f2fs_get_dquots,
3117  #endif
3118  	.evict_inode	= f2fs_evict_inode,
3119  	.put_super	= f2fs_put_super,
3120  	.sync_fs	= f2fs_sync_fs,
3121  	.freeze_fs	= f2fs_freeze,
3122  	.unfreeze_fs	= f2fs_unfreeze,
3123  	.statfs		= f2fs_statfs,
3124  	.remount_fs	= f2fs_remount,
3125  };
3126  
3127  #ifdef CONFIG_FS_ENCRYPTION
3128  static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
3129  {
3130  	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
3131  				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
3132  				ctx, len, NULL);
3133  }
3134  
3135  static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
3136  							void *fs_data)
3137  {
3138  	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3139  
3140  	/*
3141  	 * Encrypting the root directory is not allowed because fsck
3142  	 * expects the lost+found directory to exist and remain unencrypted
3143  	 * if the LOST_FOUND feature is enabled.
3144  	 */
3146  	if (f2fs_sb_has_lost_found(sbi) &&
3147  			inode->i_ino == F2FS_ROOT_INO(sbi))
3148  		return -EPERM;
3149  
3150  	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
3151  				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
3152  				ctx, len, fs_data, XATTR_CREATE);
3153  }
3154  
3155  static const union fscrypt_policy *f2fs_get_dummy_policy(struct super_block *sb)
3156  {
3157  	return F2FS_OPTION(F2FS_SB(sb)).dummy_enc_policy.policy;
3158  }
3159  
3160  static bool f2fs_has_stable_inodes(struct super_block *sb)
3161  {
3162  	return true;
3163  }
3164  
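/*
 * Both nid_t and block_t are 32-bit, so report 32 ino/lblk bits; this
 * lets fscrypt decide whether IV_INO_LBLK_{64,32} policies are usable.
 */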
3165  static void f2fs_get_ino_and_lblk_bits(struct super_block *sb,
3166  				       int *ino_bits_ret, int *lblk_bits_ret)
3167  {
3168  	*ino_bits_ret = 8 * sizeof(nid_t);
3169  	*lblk_bits_ret = 8 * sizeof(block_t);
3170  }
3171  
3172  static struct block_device **f2fs_get_devices(struct super_block *sb,
3173  					      unsigned int *num_devs)
3174  {
3175  	struct f2fs_sb_info *sbi = F2FS_SB(sb);
3176  	struct block_device **devs;
3177  	int i;
3178  
3179  	if (!f2fs_is_multi_device(sbi))
3180  		return NULL;
3181  
3182  	devs = kmalloc_array(sbi->s_ndevs, sizeof(*devs), GFP_KERNEL);
3183  	if (!devs)
3184  		return ERR_PTR(-ENOMEM);
3185  
3186  	for (i = 0; i < sbi->s_ndevs; i++)
3187  		devs[i] = FDEV(i).bdev;
3188  	*num_devs = sbi->s_ndevs;
3189  	return devs;
3190  }
3191  
3192  static const struct fscrypt_operations f2fs_cryptops = {
3193  	.flags			= FS_CFLG_SUPPORTS_SUBBLOCK_DATA_UNITS,
3194  	.key_prefix		= "f2fs:",
3195  	.get_context		= f2fs_get_context,
3196  	.set_context		= f2fs_set_context,
3197  	.get_dummy_policy	= f2fs_get_dummy_policy,
3198  	.empty_dir		= f2fs_empty_dir,
3199  	.has_stable_inodes	= f2fs_has_stable_inodes,
3200  	.get_ino_and_lblk_bits	= f2fs_get_ino_and_lblk_bits,
3201  	.get_devices		= f2fs_get_devices,
3202  };
3203  #endif
3204  
3205  static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
3206  		u64 ino, u32 generation)
3207  {
3208  	struct f2fs_sb_info *sbi = F2FS_SB(sb);
3209  	struct inode *inode;
3210  
3211  	if (f2fs_check_nid_range(sbi, ino))
3212  		return ERR_PTR(-ESTALE);
3213  
3214  	/*
3215  	 * f2fs_iget isn't quite right if the inode is currently unallocated!
3216  	 * However f2fs_iget currently does appropriate checks to handle stale
3217  	 * inodes so everything is OK.
3218  	 */
3219  	inode = f2fs_iget(sb, ino);
3220  	if (IS_ERR(inode))
3221  		return ERR_CAST(inode);
3222  	if (unlikely(generation && inode->i_generation != generation)) {
3223  		/* we didn't find the right inode.. */
3224  		iput(inode);
3225  		return ERR_PTR(-ESTALE);
3226  	}
3227  	return inode;
3228  }
3229  
3230  static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
3231  		int fh_len, int fh_type)
3232  {
3233  	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
3234  				    f2fs_nfs_get_inode);
3235  }
3236  
3237  static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
3238  		int fh_len, int fh_type)
3239  {
3240  	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
3241  				    f2fs_nfs_get_inode);
3242  }
3243  
3244  static const struct export_operations f2fs_export_ops = {
3245  	.fh_to_dentry = f2fs_fh_to_dentry,
3246  	.fh_to_parent = f2fs_fh_to_parent,
3247  	.get_parent = f2fs_get_parent,
3248  };
3249  
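/*
 * Worked example (assuming the common 4KB block geometry, where
 * DEF_ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018): the mapping tree below
 * yields 2*1018 + 2*1018^2 + 1018^3 ~= 1.06e9 blocks, i.e. roughly
 * 3.9TiB, before the crypto-related clamp at the end.
 */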
3250  loff_t max_file_blocks(struct inode *inode)
3251  {
3252  	loff_t result = 0;
3253  	loff_t leaf_count;
3254  
3255  	/*
3256  	 * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
3257  	 * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
3258  	 * space in inode.i_addr, so it is safer to initialize
3259  	 * result to zero.
3260  	 */
3261  
3262  	if (inode && f2fs_compressed_file(inode))
3263  		leaf_count = ADDRS_PER_BLOCK(inode);
3264  	else
3265  		leaf_count = DEF_ADDRS_PER_BLOCK;
3266  
3267  	/* two direct node blocks */
3268  	result += (leaf_count * 2);
3269  
3270  	/* two indirect node blocks */
3271  	leaf_count *= NIDS_PER_BLOCK;
3272  	result += (leaf_count * 2);
3273  
3274  	/* one double indirect node block */
3275  	leaf_count *= NIDS_PER_BLOCK;
3276  	result += leaf_count;
3277  
3278  	/*
3279  	 * For compatibility with FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{64,32} with
3280  	 * a 4K crypto data unit, we must restrict the max filesize to what can
3281  	 * fit within U32_MAX + 1 data units.
3282  	 */
3283  
3284  	result = min(result, (((loff_t)U32_MAX + 1) * 4096) >> F2FS_BLKSIZE_BITS);
3285  
3286  	return result;
3287  }
3288  
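/*
 * Copy the (optionally updated) raw superblock into the buffer head and
 * write it out synchronously with a preflush + FUA, so it reaches stable
 * media before we proceed.
 */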
3289  static int __f2fs_commit_super(struct buffer_head *bh,
3290  			struct f2fs_super_block *super)
3291  {
3292  	lock_buffer(bh);
3293  	if (super)
3294  		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
3295  	set_buffer_dirty(bh);
3296  	unlock_buffer(bh);
3297  
3298  	/* it's a rare case, so we can do FUA all the time */
3299  	return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
3300  }
3301  
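/*
 * Verify that the CP/SIT/NAT/SSA/MAIN areas recorded in the superblock
 * are laid out back to back; returns true if the layout is broken. A
 * MAIN area ending short of the last segment is fixed up in memory (and
 * written back when the device is writable).
 */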
3302  static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
3303  					struct buffer_head *bh)
3304  {
3305  	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
3306  					(bh->b_data + F2FS_SUPER_OFFSET);
3307  	struct super_block *sb = sbi->sb;
3308  	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
3309  	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
3310  	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
3311  	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
3312  	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
3313  	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
3314  	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
3315  	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
3316  	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
3317  	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
3318  	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
3319  	u32 segment_count = le32_to_cpu(raw_super->segment_count);
3320  	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
3321  	u64 main_end_blkaddr = main_blkaddr +
3322  				(segment_count_main << log_blocks_per_seg);
3323  	u64 seg_end_blkaddr = segment0_blkaddr +
3324  				(segment_count << log_blocks_per_seg);
3325  
3326  	if (segment0_blkaddr != cp_blkaddr) {
3327  		f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
3328  			  segment0_blkaddr, cp_blkaddr);
3329  		return true;
3330  	}
3331  
3332  	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
3333  							sit_blkaddr) {
3334  		f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
3335  			  cp_blkaddr, sit_blkaddr,
3336  			  segment_count_ckpt << log_blocks_per_seg);
3337  		return true;
3338  	}
3339  
3340  	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
3341  							nat_blkaddr) {
3342  		f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
3343  			  sit_blkaddr, nat_blkaddr,
3344  			  segment_count_sit << log_blocks_per_seg);
3345  		return true;
3346  	}
3347  
3348  	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
3349  							ssa_blkaddr) {
3350  		f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
3351  			  nat_blkaddr, ssa_blkaddr,
3352  			  segment_count_nat << log_blocks_per_seg);
3353  		return true;
3354  	}
3355  
3356  	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
3357  							main_blkaddr) {
3358  		f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
3359  			  ssa_blkaddr, main_blkaddr,
3360  			  segment_count_ssa << log_blocks_per_seg);
3361  		return true;
3362  	}
3363  
3364  	if (main_end_blkaddr > seg_end_blkaddr) {
3365  		f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%llu) block(%u)",
3366  			  main_blkaddr, seg_end_blkaddr,
3367  			  segment_count_main << log_blocks_per_seg);
3368  		return true;
3369  	} else if (main_end_blkaddr < seg_end_blkaddr) {
3370  		int err = 0;
3371  		char *res;
3372  
3373  		/* fix in-memory information all the time */
3374  		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
3375  				segment0_blkaddr) >> log_blocks_per_seg);
3376  
3377  		if (f2fs_readonly(sb) || f2fs_hw_is_readonly(sbi)) {
3378  			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
3379  			res = "internally";
3380  		} else {
3381  			err = __f2fs_commit_super(bh, NULL);
3382  			res = err ? "failed" : "done";
3383  		}
3384  		f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)",
3385  			  res, main_blkaddr, seg_end_blkaddr,
3386  			  segment_count_main << log_blocks_per_seg);
3387  		if (err)
3388  			return true;
3389  	}
3390  	return false;
3391  }
3392  
3393  static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
3394  				struct buffer_head *bh)
3395  {
3396  	block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main;
3397  	block_t total_sections, blocks_per_seg;
3398  	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
3399  					(bh->b_data + F2FS_SUPER_OFFSET);
3400  	size_t crc_offset = 0;
3401  	__u32 crc = 0;
3402  
3403  	if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
3404  		f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
3405  			  F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
3406  		return -EINVAL;
3407  	}
3408  
3409  	/* Check checksum_offset and crc in superblock */
3410  	if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
3411  		crc_offset = le32_to_cpu(raw_super->checksum_offset);
3412  		if (crc_offset !=
3413  			offsetof(struct f2fs_super_block, crc)) {
3414  			f2fs_info(sbi, "Invalid SB checksum offset: %zu",
3415  				  crc_offset);
3416  			return -EFSCORRUPTED;
3417  		}
3418  		crc = le32_to_cpu(raw_super->crc);
3419  		if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
3420  			f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
3421  			return -EFSCORRUPTED;
3422  		}
3423  	}
3424  
3425  	/* Currently, support only 4KB block size */
3426  	if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) {
3427  		f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u",
3428  			  le32_to_cpu(raw_super->log_blocksize),
3429  			  F2FS_BLKSIZE_BITS);
3430  		return -EFSCORRUPTED;
3431  	}
3432  
3433  	/* check log blocks per segment */
3434  	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
3435  		f2fs_info(sbi, "Invalid log blocks per segment (%u)",
3436  			  le32_to_cpu(raw_super->log_blocks_per_seg));
3437  		return -EFSCORRUPTED;
3438  	}
3439  
3440  	/* Currently, support sector sizes of 512/1024/2048/4096/16K bytes */
3441  	if (le32_to_cpu(raw_super->log_sectorsize) >
3442  				F2FS_MAX_LOG_SECTOR_SIZE ||
3443  		le32_to_cpu(raw_super->log_sectorsize) <
3444  				F2FS_MIN_LOG_SECTOR_SIZE) {
3445  		f2fs_info(sbi, "Invalid log sectorsize (%u)",
3446  			  le32_to_cpu(raw_super->log_sectorsize));
3447  		return -EFSCORRUPTED;
3448  	}
3449  	if (le32_to_cpu(raw_super->log_sectors_per_block) +
3450  		le32_to_cpu(raw_super->log_sectorsize) !=
3451  			F2FS_MAX_LOG_SECTOR_SIZE) {
3452  		f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
3453  			  le32_to_cpu(raw_super->log_sectors_per_block),
3454  			  le32_to_cpu(raw_super->log_sectorsize));
3455  		return -EFSCORRUPTED;
3456  	}
3457  
3458  	segment_count = le32_to_cpu(raw_super->segment_count);
3459  	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
3460  	segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
3461  	secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
3462  	total_sections = le32_to_cpu(raw_super->section_count);
3463  
3464  	/* blocks_per_seg should be 512, given the above check */
3465  	blocks_per_seg = BIT(le32_to_cpu(raw_super->log_blocks_per_seg));
3466  
3467  	if (segment_count > F2FS_MAX_SEGMENT ||
3468  				segment_count < F2FS_MIN_SEGMENTS) {
3469  		f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
3470  		return -EFSCORRUPTED;
3471  	}
3472  
3473  	if (total_sections > segment_count_main || total_sections < 1 ||
3474  			segs_per_sec > segment_count || !segs_per_sec) {
3475  		f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
3476  			  segment_count, total_sections, segs_per_sec);
3477  		return -EFSCORRUPTED;
3478  	}
3479  
3480  	if (segment_count_main != total_sections * segs_per_sec) {
3481  		f2fs_info(sbi, "Invalid segment/section count (%u != %u * %u)",
3482  			  segment_count_main, total_sections, segs_per_sec);
3483  		return -EFSCORRUPTED;
3484  	}
3485  
3486  	if ((segment_count / segs_per_sec) < total_sections) {
3487  		f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
3488  			  segment_count, segs_per_sec, total_sections);
3489  		return -EFSCORRUPTED;
3490  	}
3491  
3492  	if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
3493  		f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
3494  			  segment_count, le64_to_cpu(raw_super->block_count));
3495  		return -EFSCORRUPTED;
3496  	}
3497  
3498  	if (RDEV(0).path[0]) {
3499  		block_t dev_seg_count = le32_to_cpu(RDEV(0).total_segments);
3500  		int i = 1;
3501  
3502  		while (i < MAX_DEVICES && RDEV(i).path[0]) {
3503  			dev_seg_count += le32_to_cpu(RDEV(i).total_segments);
3504  			i++;
3505  		}
3506  		if (segment_count != dev_seg_count) {
3507  			f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)",
3508  					segment_count, dev_seg_count);
3509  			return -EFSCORRUPTED;
3510  		}
3511  	} else {
3512  		if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_BLKZONED) &&
3513  					!bdev_is_zoned(sbi->sb->s_bdev)) {
3514  			f2fs_info(sbi, "Zoned block device path is missing");
3515  			return -EFSCORRUPTED;
3516  		}
3517  	}
3518  
3519  	if (secs_per_zone > total_sections || !secs_per_zone) {
3520  		f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
3521  			  secs_per_zone, total_sections);
3522  		return -EFSCORRUPTED;
3523  	}
3524  	if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
3525  			raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
3526  			(le32_to_cpu(raw_super->extension_count) +
3527  			raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
3528  		f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
3529  			  le32_to_cpu(raw_super->extension_count),
3530  			  raw_super->hot_ext_count,
3531  			  F2FS_MAX_EXTENSION);
3532  		return -EFSCORRUPTED;
3533  	}
3534  
3535  	if (le32_to_cpu(raw_super->cp_payload) >=
3536  				(blocks_per_seg - F2FS_CP_PACKS -
3537  				NR_CURSEG_PERSIST_TYPE)) {
3538  		f2fs_info(sbi, "Insane cp_payload (%u >= %u)",
3539  			  le32_to_cpu(raw_super->cp_payload),
3540  			  blocks_per_seg - F2FS_CP_PACKS -
3541  			  NR_CURSEG_PERSIST_TYPE);
3542  		return -EFSCORRUPTED;
3543  	}
3544  
3545  	/* check reserved ino info */
3546  	if (le32_to_cpu(raw_super->node_ino) != 1 ||
3547  		le32_to_cpu(raw_super->meta_ino) != 2 ||
3548  		le32_to_cpu(raw_super->root_ino) != 3) {
3549  		f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
3550  			  le32_to_cpu(raw_super->node_ino),
3551  			  le32_to_cpu(raw_super->meta_ino),
3552  			  le32_to_cpu(raw_super->root_ino));
3553  		return -EFSCORRUPTED;
3554  	}
3555  
3556  	/* check CP/SIT/NAT/SSA/MAIN area boundaries */
3557  	if (sanity_check_area_boundary(sbi, bh))
3558  		return -EFSCORRUPTED;
3559  
3560  	return 0;
3561  }
3562  
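/*
 * Note: sanity_check_raw_super() only validates static geometry stored
 * in the superblock. Mutable state (current segments, block counts,
 * bitmap sizes) lives in the checkpoint pack and is validated
 * separately by f2fs_sanity_check_ckpt() below, once the checkpoint
 * has been read.
 */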
3563  int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
3564  {
3565  	unsigned int total, fsmeta;
3566  	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
3567  	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3568  	unsigned int ovp_segments, reserved_segments;
3569  	unsigned int main_segs, blocks_per_seg;
3570  	unsigned int sit_segs, nat_segs;
3571  	unsigned int sit_bitmap_size, nat_bitmap_size;
3572  	unsigned int log_blocks_per_seg;
3573  	unsigned int segment_count_main;
3574  	unsigned int cp_pack_start_sum, cp_payload;
3575  	block_t user_block_count, valid_user_blocks;
3576  	block_t avail_node_count, valid_node_count;
3577  	unsigned int nat_blocks, nat_bits_bytes, nat_bits_blocks;
3578  	int i, j;
3579  
3580  	total = le32_to_cpu(raw_super->segment_count);
3581  	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
3582  	sit_segs = le32_to_cpu(raw_super->segment_count_sit);
3583  	fsmeta += sit_segs;
3584  	nat_segs = le32_to_cpu(raw_super->segment_count_nat);
3585  	fsmeta += nat_segs;
3586  	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
3587  	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
3588  
3589  	if (unlikely(fsmeta >= total))
3590  		return 1;
3591  
3592  	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
3593  	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
3594  
3595  	if (!f2fs_sb_has_readonly(sbi) &&
3596  			unlikely(fsmeta < F2FS_MIN_META_SEGMENTS ||
3597  			ovp_segments == 0 || reserved_segments == 0)) {
3598  		f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
3599  		return 1;
3600  	}
3601  	user_block_count = le64_to_cpu(ckpt->user_block_count);
3602  	segment_count_main = le32_to_cpu(raw_super->segment_count_main) +
3603  			(f2fs_sb_has_readonly(sbi) ? 1 : 0);
3604  	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
3605  	if (!user_block_count || user_block_count >=
3606  			segment_count_main << log_blocks_per_seg) {
3607  		f2fs_err(sbi, "Wrong user_block_count: %u",
3608  			 user_block_count);
3609  		return 1;
3610  	}
3611  
3612  	valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
3613  	if (valid_user_blocks > user_block_count) {
3614  		f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
3615  			 valid_user_blocks, user_block_count);
3616  		return 1;
3617  	}
3618  
3619  	valid_node_count = le32_to_cpu(ckpt->valid_node_count);
3620  	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
3621  	if (valid_node_count > avail_node_count) {
3622  		f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
3623  			 valid_node_count, avail_node_count);
3624  		return 1;
3625  	}
3626  
3627  	main_segs = le32_to_cpu(raw_super->segment_count_main);
3628  	blocks_per_seg = sbi->blocks_per_seg;
3629  
3630  	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
3631  		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
3632  			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
3633  			return 1;
3634  
3635  		if (f2fs_sb_has_readonly(sbi))
3636  			goto check_data;
3637  
3638  		for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
3639  			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
3640  				le32_to_cpu(ckpt->cur_node_segno[j])) {
3641  				f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u",
3642  					 i, j,
3643  					 le32_to_cpu(ckpt->cur_node_segno[i]));
3644  				return 1;
3645  			}
3646  		}
3647  	}
3648  check_data:
3649  	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
3650  		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
3651  			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
3652  			return 1;
3653  
3654  		if (f2fs_sb_has_readonly(sbi))
3655  			goto skip_cross;
3656  
3657  		for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
3658  			if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
3659  				le32_to_cpu(ckpt->cur_data_segno[j])) {
3660  				f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u",
3661  					 i, j,
3662  					 le32_to_cpu(ckpt->cur_data_segno[i]));
3663  				return 1;
3664  			}
3665  		}
3666  	}
3667  	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
3668  		for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
3669  			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
3670  				le32_to_cpu(ckpt->cur_data_segno[j])) {
3671  				f2fs_err(sbi, "Node segment (%u) and Data segment (%u) have the same segno: %u",
3672  					 i, j,
3673  					 le32_to_cpu(ckpt->cur_node_segno[i]));
3674  				return 1;
3675  			}
3676  		}
3677  	}
3678  skip_cross:
3679  	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
3680  	nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
3681  
3682  	if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
3683  		nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
3684  		f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat: %u",
3685  			 sit_bitmap_size, nat_bitmap_size);
3686  		return 1;
3687  	}
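	/*
	 * Worked example for the check above (assumed values): with
	 * sit_segs == 2 and log_blocks_per_seg == 9, one SIT copy spans
	 * (2 / 2) << 9 == 512 blocks, so its version bitmap must be
	 * exactly 512 / 8 == 64 bytes; the NAT bitmap follows the same
	 * arithmetic.
	 */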
3688  
3689  	cp_pack_start_sum = __start_sum_addr(sbi);
3690  	cp_payload = __cp_payload(sbi);
3691  	if (cp_pack_start_sum < cp_payload + 1 ||
3692  		cp_pack_start_sum > blocks_per_seg - 1 -
3693  			NR_CURSEG_PERSIST_TYPE) {
3694  		f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
3695  			 cp_pack_start_sum);
3696  		return 1;
3697  	}
3698  
3699  	if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
3700  		le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
3701  		f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, "
3702  			  "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
3703  			  "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
3704  			  le32_to_cpu(ckpt->checksum_offset));
3705  		return 1;
3706  	}
3707  
3708  	nat_blocks = nat_segs << log_blocks_per_seg;
3709  	nat_bits_bytes = nat_blocks / BITS_PER_BYTE;
3710  	nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
3711  	if (__is_set_ckpt_flags(ckpt, CP_NAT_BITS_FLAG) &&
3712  		(cp_payload + F2FS_CP_PACKS +
3713  		NR_CURSEG_PERSIST_TYPE + nat_bits_blocks >= blocks_per_seg)) {
3714  		f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u",
3715  			  cp_payload, nat_bits_blocks);
3716  		return 1;
3717  	}
3718  
3719  	if (unlikely(f2fs_cp_error(sbi))) {
3720  		f2fs_err(sbi, "A bug case: need to run fsck");
3721  		return 1;
3722  	}
3723  	return 0;
3724  }
3725  
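/*
 * Example of the total_node_count derivation in init_sb_info() below
 * (assumed values): with segment_count_nat == 4, blocks_per_seg == 512
 * and NAT_ENTRY_PER_BLOCK == 455 (4KB block / 9-byte entry), half of
 * the NAT segments hold one copy, so
 * total_node_count = (4 / 2) * 512 * 455 = 465920 addressable nodes.
 */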
3726  static void init_sb_info(struct f2fs_sb_info *sbi)
3727  {
3728  	struct f2fs_super_block *raw_super = sbi->raw_super;
3729  	int i;
3730  
3731  	sbi->log_sectors_per_block =
3732  		le32_to_cpu(raw_super->log_sectors_per_block);
3733  	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
3734  	sbi->blocksize = BIT(sbi->log_blocksize);
3735  	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
3736  	sbi->blocks_per_seg = BIT(sbi->log_blocks_per_seg);
3737  	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
3738  	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
3739  	sbi->total_sections = le32_to_cpu(raw_super->section_count);
3740  	sbi->total_node_count =
3741  		(le32_to_cpu(raw_super->segment_count_nat) / 2)
3742  			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
3743  	F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino);
3744  	F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino);
3745  	F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino);
3746  	sbi->cur_victim_sec = NULL_SECNO;
3747  	sbi->gc_mode = GC_NORMAL;
3748  	sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
3749  	sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
3750  	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
3751  	sbi->migration_granularity = sbi->segs_per_sec;
3752  	sbi->seq_file_ra_mul = MIN_RA_MUL;
3753  	sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE;
3754  	sbi->max_fragment_hole = DEF_FRAGMENT_SIZE;
3755  	spin_lock_init(&sbi->gc_remaining_trials_lock);
3756  	atomic64_set(&sbi->current_atomic_write, 0);
3757  
3758  	sbi->dir_level = DEF_DIR_LEVEL;
3759  	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
3760  	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
3761  	sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
3762  	sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
3763  	sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
3764  	sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
3765  				DEF_UMOUNT_DISCARD_TIMEOUT;
3766  	clear_sbi_flag(sbi, SBI_NEED_FSCK);
3767  
3768  	for (i = 0; i < NR_COUNT_TYPE; i++)
3769  		atomic_set(&sbi->nr_pages[i], 0);
3770  
3771  	for (i = 0; i < META; i++)
3772  		atomic_set(&sbi->wb_sync_req[i], 0);
3773  
3774  	INIT_LIST_HEAD(&sbi->s_list);
3775  	mutex_init(&sbi->umount_mutex);
3776  	init_f2fs_rwsem(&sbi->io_order_lock);
3777  	spin_lock_init(&sbi->cp_lock);
3778  
3779  	sbi->dirty_device = 0;
3780  	spin_lock_init(&sbi->dev_lock);
3781  
3782  	init_f2fs_rwsem(&sbi->sb_lock);
3783  	init_f2fs_rwsem(&sbi->pin_sem);
3784  }
3785  
3786  static int init_percpu_info(struct f2fs_sb_info *sbi)
3787  {
3788  	int err;
3789  
3790  	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
3791  	if (err)
3792  		return err;
3793  
3794  	err = percpu_counter_init(&sbi->rf_node_block_count, 0, GFP_KERNEL);
3795  	if (err)
3796  		goto err_valid_block;
3797  
3798  	err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
3799  								GFP_KERNEL);
3800  	if (err)
3801  		goto err_node_block;
3802  	return 0;
3803  
3804  err_node_block:
3805  	percpu_counter_destroy(&sbi->rf_node_block_count);
3806  err_valid_block:
3807  	percpu_counter_destroy(&sbi->alloc_valid_block_count);
3808  	return err;
3809  }
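/*
 * The unwind ladder above follows the usual kernel idiom: each
 * successful percpu_counter_init() gets a matching
 * percpu_counter_destroy() on the error path, in reverse order of
 * initialization, so a partial failure leaves nothing allocated.
 */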
3810  
3811  #ifdef CONFIG_BLK_DEV_ZONED
3812  
3813  struct f2fs_report_zones_args {
3814  	struct f2fs_sb_info *sbi;
3815  	struct f2fs_dev_info *dev;
3816  };
3817  
3818  static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
3819  			      void *data)
3820  {
3821  	struct f2fs_report_zones_args *rz_args = data;
3822  	block_t unusable_blocks = (zone->len - zone->capacity) >>
3823  					F2FS_LOG_SECTORS_PER_BLOCK;
3824  
3825  	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
3826  		return 0;
3827  
3828  	set_bit(idx, rz_args->dev->blkz_seq);
3829  	if (!rz_args->sbi->unusable_blocks_per_sec) {
3830  		rz_args->sbi->unusable_blocks_per_sec = unusable_blocks;
3831  		return 0;
3832  	}
3833  	if (rz_args->sbi->unusable_blocks_per_sec != unusable_blocks) {
3834  		f2fs_err(rz_args->sbi, "F2FS supports single zone capacity");
3835  		return -EINVAL;
3836  	}
3837  	return 0;
3838  }
3839  
3840  static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
3841  {
3842  	struct block_device *bdev = FDEV(devi).bdev;
3843  	sector_t nr_sectors = bdev_nr_sectors(bdev);
3844  	struct f2fs_report_zones_args rep_zone_arg;
3845  	u64 zone_sectors;
3846  	int ret;
3847  
3848  	if (!f2fs_sb_has_blkzoned(sbi))
3849  		return 0;
3850  
3851  	zone_sectors = bdev_zone_sectors(bdev);
3852  
3853  	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
3854  				SECTOR_TO_BLOCK(zone_sectors))
3855  		return -EINVAL;
3856  	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(zone_sectors);
3857  	FDEV(devi).nr_blkz = div_u64(SECTOR_TO_BLOCK(nr_sectors),
3858  					sbi->blocks_per_blkz);
3859  	if (!bdev_is_zone_start(bdev, nr_sectors))
3860  		FDEV(devi).nr_blkz++;
3861  
3862  	FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi,
3863  					BITS_TO_LONGS(FDEV(devi).nr_blkz)
3864  					* sizeof(unsigned long),
3865  					GFP_KERNEL);
3866  	if (!FDEV(devi).blkz_seq)
3867  		return -ENOMEM;
3868  
3869  	rep_zone_arg.sbi = sbi;
3870  	rep_zone_arg.dev = &FDEV(devi);
3871  
3872  	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
3873  				  &rep_zone_arg);
3874  	if (ret < 0)
3875  		return ret;
3876  	return 0;
3877  }
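/*
 * Zone bookkeeping example (assumed geometry): a 256 MiB zone is
 * 524288 512-byte sectors, i.e. SECTOR_TO_BLOCK() == 65536 4KB blocks,
 * so a 32 GiB zoned device yields nr_blkz == 128 and, on a 64-bit
 * kernel, a two-word (BITS_TO_LONGS(128) == 2) blkz_seq bitmap.
 */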
3878  #endif
3879  
3880  /*
3881   * Read f2fs raw super block.
3882   * Because we keep two copies of the super block, read both of them
3883   * to get the first valid one. If either copy is broken, pass a
3884   * recovery flag back to the caller.
3885   */
3886  static int read_raw_super_block(struct f2fs_sb_info *sbi,
3887  			struct f2fs_super_block **raw_super,
3888  			int *valid_super_block, int *recovery)
3889  {
3890  	struct super_block *sb = sbi->sb;
3891  	int block;
3892  	struct buffer_head *bh;
3893  	struct f2fs_super_block *super;
3894  	int err = 0;
3895  
3896  	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
3897  	if (!super)
3898  		return -ENOMEM;
3899  
3900  	for (block = 0; block < 2; block++) {
3901  		bh = sb_bread(sb, block);
3902  		if (!bh) {
3903  			f2fs_err(sbi, "Unable to read superblock %d",
3904  				 block + 1);
3905  			err = -EIO;
3906  			*recovery = 1;
3907  			continue;
3908  		}
3909  
3910  		/* sanity checking of raw super */
3911  		err = sanity_check_raw_super(sbi, bh);
3912  		if (err) {
3913  			f2fs_err(sbi, "Can't find valid F2FS filesystem in superblock %d",
3914  				 block + 1);
3915  			brelse(bh);
3916  			*recovery = 1;
3917  			continue;
3918  		}
3919  
3920  		if (!*raw_super) {
3921  			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
3922  							sizeof(*super));
3923  			*valid_super_block = block;
3924  			*raw_super = super;
3925  		}
3926  		brelse(bh);
3927  	}
3928  
3929  	/* No valid superblock */
3930  	if (!*raw_super)
3931  		kfree(super);
3932  	else
3933  		err = 0;
3934  
3935  	return err;
3936  }
3937  
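/*
 * Both superblock copies live in the first two 4KB blocks of the
 * device, each at F2FS_SUPER_OFFSET (1KB) into its block, which is why
 * the loop in read_raw_super_block() simply reads blocks 0 and 1.
 */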
3938  int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
3939  {
3940  	struct buffer_head *bh;
3941  	__u32 crc = 0;
3942  	int err;
3943  
3944  	if ((recover && f2fs_readonly(sbi->sb)) ||
3945  				f2fs_hw_is_readonly(sbi)) {
3946  		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
3947  		return -EROFS;
3948  	}
3949  
3950  	/* we should update superblock crc here */
3951  	if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
3952  		crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
3953  				offsetof(struct f2fs_super_block, crc));
3954  		F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
3955  	}
3956  
3957  	/* write back-up superblock first */
3958  	bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
3959  	if (!bh)
3960  		return -EIO;
3961  	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
3962  	brelse(bh);
3963  
3964  	/* if we are in recovery path, skip writing valid superblock */
3965  	if (recover || err)
3966  		return err;
3967  
3968  	/* write current valid superblock */
3969  	bh = sb_bread(sbi->sb, sbi->valid_super_block);
3970  	if (!bh)
3971  		return -EIO;
3972  	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
3973  	brelse(bh);
3974  	return err;
3975  }
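/*
 * Ordering note for f2fs_commit_super() above: the backup copy is
 * written before the currently valid one, so a crash in the middle of
 * an update still leaves at least one copy that read_raw_super_block()
 * can validate on the next mount.
 */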
3976  
3977  void f2fs_handle_stop(struct f2fs_sb_info *sbi, unsigned char reason)
3978  {
3979  	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
3980  	int err;
3981  
3982  	f2fs_down_write(&sbi->sb_lock);
3983  
3984  	if (raw_super->s_stop_reason[reason] < GENMASK(BITS_PER_BYTE - 1, 0))
3985  		raw_super->s_stop_reason[reason]++;
3986  
3987  	err = f2fs_commit_super(sbi, false);
3988  	if (err)
3989  		f2fs_err(sbi, "f2fs_commit_super fails to record reason:%u err:%d",
3990  								reason, err);
3991  	f2fs_up_write(&sbi->sb_lock);
3992  }
3993  
3994  void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag)
3995  {
3996  	spin_lock(&sbi->error_lock);
3997  	if (!test_bit(flag, (unsigned long *)sbi->errors)) {
3998  		set_bit(flag, (unsigned long *)sbi->errors);
3999  		sbi->error_dirty = true;
4000  	}
4001  	spin_unlock(&sbi->error_lock);
4002  }
4003  
4004  static bool f2fs_update_errors(struct f2fs_sb_info *sbi)
4005  {
4006  	bool need_update = false;
4007  
4008  	spin_lock(&sbi->error_lock);
4009  	if (sbi->error_dirty) {
4010  		memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors,
4011  							MAX_F2FS_ERRORS);
4012  		sbi->error_dirty = false;
4013  		need_update = true;
4014  	}
4015  	spin_unlock(&sbi->error_lock);
4016  
4017  	return need_update;
4018  }
4019  
4020  void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error)
4021  {
4022  	int err;
4023  
4024  	f2fs_save_errors(sbi, error);
4025  
4026  	f2fs_down_write(&sbi->sb_lock);
4027  
4028  	if (!f2fs_update_errors(sbi))
4029  		goto out_unlock;
4030  
4031  	err = f2fs_commit_super(sbi, false);
4032  	if (err)
4033  		f2fs_err(sbi, "f2fs_commit_super fails to record errors:%u, err:%d",
4034  								error, err);
4035  out_unlock:
4036  	f2fs_up_write(&sbi->sb_lock);
4037  }
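/*
 * Error persistence flow: f2fs_save_errors() latches an error bit in
 * memory under error_lock, f2fs_update_errors() copies any dirty bits
 * into the raw superblock, and f2fs_commit_super() then writes both
 * superblock copies so the record survives a reboot (e.g. for a later
 * fsck run to inspect).
 */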
4038  
4039  static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
4040  {
4041  	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4042  	unsigned int max_devices = MAX_DEVICES;
4043  	unsigned int logical_blksize;
4044  	int i;
4045  
4046  	/* Initialize single device information */
4047  	if (!RDEV(0).path[0]) {
4048  		if (!bdev_is_zoned(sbi->sb->s_bdev))
4049  			return 0;
4050  		max_devices = 1;
4051  	}
4052  
4053  	/*
4054  	 * Initialize multiple devices information, or single
4055  	 * zoned block device information.
4056  	 */
4057  	sbi->devs = f2fs_kzalloc(sbi,
4058  				 array_size(max_devices,
4059  					    sizeof(struct f2fs_dev_info)),
4060  				 GFP_KERNEL);
4061  	if (!sbi->devs)
4062  		return -ENOMEM;
4063  
4064  	logical_blksize = bdev_logical_block_size(sbi->sb->s_bdev);
4065  	sbi->aligned_blksize = true;
4066  
4067  	for (i = 0; i < max_devices; i++) {
4068  
4069  		if (i > 0 && !RDEV(i).path[0])
4070  			break;
4071  
4072  		if (max_devices == 1) {
4073  			/* Single zoned block device mount */
4074  			FDEV(0).bdev =
4075  				blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
4076  					sbi->sb->s_mode, sbi->sb->s_type);
4077  		} else {
4078  			/* Multi-device mount */
4079  			memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
4080  			FDEV(i).total_segments =
4081  				le32_to_cpu(RDEV(i).total_segments);
4082  			if (i == 0) {
4083  				FDEV(i).start_blk = 0;
4084  				FDEV(i).end_blk = FDEV(i).start_blk +
4085  				    (FDEV(i).total_segments <<
4086  				    sbi->log_blocks_per_seg) - 1 +
4087  				    le32_to_cpu(raw_super->segment0_blkaddr);
4088  			} else {
4089  				FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
4090  				FDEV(i).end_blk = FDEV(i).start_blk +
4091  					(FDEV(i).total_segments <<
4092  					sbi->log_blocks_per_seg) - 1;
4093  			}
4094  			FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
4095  					sbi->sb->s_mode, sbi->sb->s_type);
4096  		}
4097  		if (IS_ERR(FDEV(i).bdev))
4098  			return PTR_ERR(FDEV(i).bdev);
4099  
4100  		/* to release errored devices */
4101  		sbi->s_ndevs = i + 1;
4102  
4103  		if (logical_blksize != bdev_logical_block_size(FDEV(i).bdev))
4104  			sbi->aligned_blksize = false;
4105  
4106  #ifdef CONFIG_BLK_DEV_ZONED
4107  		if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
4108  				!f2fs_sb_has_blkzoned(sbi)) {
4109  			f2fs_err(sbi, "Zoned block device feature not enabled");
4110  			return -EINVAL;
4111  		}
4112  		if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
4113  			if (init_blkz_info(sbi, i)) {
4114  				f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
4115  				return -EINVAL;
4116  			}
4117  			if (max_devices == 1)
4118  				break;
4119  			f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
4120  				  i, FDEV(i).path,
4121  				  FDEV(i).total_segments,
4122  				  FDEV(i).start_blk, FDEV(i).end_blk,
4123  				  bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
4124  				  "Host-aware" : "Host-managed");
4125  			continue;
4126  		}
4127  #endif
4128  		f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
4129  			  i, FDEV(i).path,
4130  			  FDEV(i).total_segments,
4131  			  FDEV(i).start_blk, FDEV(i).end_blk);
4132  	}
4133  	f2fs_info(sbi,
4134  		  "IO Block Size: %8ld KB", F2FS_IO_SIZE_KB(sbi));
4135  	return 0;
4136  }
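/*
 * Multi-device range example for f2fs_scan_devices() (assumed values):
 * with segment0_blkaddr == 512, log_blocks_per_seg == 9 and
 * RDEV(0).total_segments == 1024,
 *
 *	FDEV(0): start_blk = 0
 *	         end_blk   = (1024 << 9) - 1 + 512 = 524799
 *	FDEV(1): start_blk = 524800, ...
 *
 * so the devices tile the filesystem's block address space back to
 * back.
 */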
4137  
4138  static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
4139  {
4140  #ifdef CONFIG_UNICODE
4141  	if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) {
4142  		const struct f2fs_sb_encodings *encoding_info;
4143  		struct unicode_map *encoding;
4144  		__u16 encoding_flags;
4145  
4146  		if (f2fs_sb_read_encoding(sbi->raw_super, &encoding_info,
4147  					  &encoding_flags)) {
4148  			f2fs_err(sbi,
4149  				 "Encoding requested by superblock is unknown");
4150  			return -EINVAL;
4151  		}
4152  
4153  		encoding = utf8_load(encoding_info->version);
4154  		if (IS_ERR(encoding)) {
4155  			f2fs_err(sbi,
4156  				 "can't mount with superblock charset: %s-%s "
4157  				 "not supported by the kernel. flags: 0x%x.",
4158  				 encoding_info->name, encoding_info->version,
4159  				 encoding_flags);
4160  			return PTR_ERR(encoding);
4161  		}
4162  		f2fs_info(sbi, "Using encoding defined by superblock: "
4163  			 "%s-%s with flags 0x%hx", encoding_info->name,
4164  			 encoding_info->version ?: "\b", encoding_flags);
4165  
4166  		sbi->sb->s_encoding = encoding;
4167  		sbi->sb->s_encoding_flags = encoding_flags;
4168  	}
4169  #else
4170  	if (f2fs_sb_has_casefold(sbi)) {
4171  		f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
4172  		return -EINVAL;
4173  	}
4174  #endif
4175  	return 0;
4176  }
4177  
4178  static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
4179  {
4180  	/* adjust parameters according to the volume size */
4181  	if (MAIN_SEGS(sbi) <= SMALL_VOLUME_SEGMENTS) {
4182  		if (f2fs_block_unit_discard(sbi))
4183  			SM_I(sbi)->dcc_info->discard_granularity =
4184  						MIN_DISCARD_GRANULARITY;
4185  		if (!f2fs_lfs_mode(sbi))
4186  			SM_I(sbi)->ipu_policy = BIT(F2FS_IPU_FORCE) |
4187  						BIT(F2FS_IPU_HONOR_OPU_WRITE);
4188  	}
4189  
4190  	sbi->readdir_ra = true;
4191  }
4192  
4193  static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
4194  {
4195  	struct f2fs_sb_info *sbi;
4196  	struct f2fs_super_block *raw_super;
4197  	struct inode *root;
4198  	int err;
4199  	bool skip_recovery = false, need_fsck = false;
4200  	char *options = NULL;
4201  	int recovery, i, valid_super_block;
4202  	struct curseg_info *seg_i;
4203  	int retry_cnt = 1;
4204  #ifdef CONFIG_QUOTA
4205  	bool quota_enabled = false;
4206  #endif
4207  
4208  try_onemore:
4209  	err = -EINVAL;
4210  	raw_super = NULL;
4211  	valid_super_block = -1;
4212  	recovery = 0;
4213  
4214  	/* allocate memory for f2fs-specific super block info */
4215  	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
4216  	if (!sbi)
4217  		return -ENOMEM;
4218  
4219  	sbi->sb = sb;
4220  
4221  	/* initialize locks within allocated memory */
4222  	init_f2fs_rwsem(&sbi->gc_lock);
4223  	mutex_init(&sbi->writepages);
4224  	init_f2fs_rwsem(&sbi->cp_global_sem);
4225  	init_f2fs_rwsem(&sbi->node_write);
4226  	init_f2fs_rwsem(&sbi->node_change);
4227  	spin_lock_init(&sbi->stat_lock);
4228  	init_f2fs_rwsem(&sbi->cp_rwsem);
4229  	init_f2fs_rwsem(&sbi->quota_sem);
4230  	init_waitqueue_head(&sbi->cp_wait);
4231  	spin_lock_init(&sbi->error_lock);
4232  
4233  	for (i = 0; i < NR_INODE_TYPE; i++) {
4234  		INIT_LIST_HEAD(&sbi->inode_list[i]);
4235  		spin_lock_init(&sbi->inode_lock[i]);
4236  	}
4237  	mutex_init(&sbi->flush_lock);
4238  
4239  	/* Load the checksum driver */
4240  	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
4241  	if (IS_ERR(sbi->s_chksum_driver)) {
4242  		f2fs_err(sbi, "Cannot load crc32 driver.");
4243  		err = PTR_ERR(sbi->s_chksum_driver);
4244  		sbi->s_chksum_driver = NULL;
4245  		goto free_sbi;
4246  	}
4247  
4248  	/* set a block size */
4249  	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
4250  		f2fs_err(sbi, "unable to set blocksize");
4251  		goto free_sbi;
4252  	}
4253  
4254  	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
4255  								&recovery);
4256  	if (err)
4257  		goto free_sbi;
4258  
4259  	sb->s_fs_info = sbi;
4260  	sbi->raw_super = raw_super;
4261  
4262  	memcpy(sbi->errors, raw_super->s_errors, MAX_F2FS_ERRORS);
4263  
4264  	/* precompute checksum seed for metadata */
4265  	if (f2fs_sb_has_inode_chksum(sbi))
4266  		sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
4267  						sizeof(raw_super->uuid));
4268  
4269  	default_options(sbi);
4270  	/* parse mount options */
4271  	options = kstrdup((const char *)data, GFP_KERNEL);
4272  	if (data && !options) {
4273  		err = -ENOMEM;
4274  		goto free_sb_buf;
4275  	}
4276  
4277  	err = parse_options(sb, options, false);
4278  	if (err)
4279  		goto free_options;
4280  
4281  	sb->s_maxbytes = max_file_blocks(NULL) <<
4282  				le32_to_cpu(raw_super->log_blocksize);
4283  	sb->s_max_links = F2FS_LINK_MAX;
4284  
4285  	err = f2fs_setup_casefold(sbi);
4286  	if (err)
4287  		goto free_options;
4288  
4289  #ifdef CONFIG_QUOTA
4290  	sb->dq_op = &f2fs_quota_operations;
4291  	sb->s_qcop = &f2fs_quotactl_ops;
4292  	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
4293  
4294  	if (f2fs_sb_has_quota_ino(sbi)) {
4295  		for (i = 0; i < MAXQUOTAS; i++) {
4296  			if (f2fs_qf_ino(sbi->sb, i))
4297  				sbi->nquota_files++;
4298  		}
4299  	}
4300  #endif
4301  
4302  	sb->s_op = &f2fs_sops;
4303  #ifdef CONFIG_FS_ENCRYPTION
4304  	sb->s_cop = &f2fs_cryptops;
4305  #endif
4306  #ifdef CONFIG_FS_VERITY
4307  	sb->s_vop = &f2fs_verityops;
4308  #endif
4309  	sb->s_xattr = f2fs_xattr_handlers;
4310  	sb->s_export_op = &f2fs_export_ops;
4311  	sb->s_magic = F2FS_SUPER_MAGIC;
4312  	sb->s_time_gran = 1;
4313  	sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
4314  		(test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
4315  	memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
4316  	sb->s_iflags |= SB_I_CGROUPWB;
4317  
4318  	/* init f2fs-specific super block info */
4319  	sbi->valid_super_block = valid_super_block;
4320  
4321  	/* disallow all the data/node/meta page writes */
4322  	set_sbi_flag(sbi, SBI_POR_DOING);
4323  
4324  	err = f2fs_init_write_merge_io(sbi);
4325  	if (err)
4326  		goto free_bio_info;
4327  
4328  	init_sb_info(sbi);
4329  
4330  	err = f2fs_init_iostat(sbi);
4331  	if (err)
4332  		goto free_bio_info;
4333  
4334  	err = init_percpu_info(sbi);
4335  	if (err)
4336  		goto free_iostat;
4337  
4338  	if (F2FS_IO_ALIGNED(sbi)) {
4339  		sbi->write_io_dummy =
4340  			mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
4341  		if (!sbi->write_io_dummy) {
4342  			err = -ENOMEM;
4343  			goto free_percpu;
4344  		}
4345  	}
4346  
4347  	/* init per sbi slab cache */
4348  	err = f2fs_init_xattr_caches(sbi);
4349  	if (err)
4350  		goto free_io_dummy;
4351  	err = f2fs_init_page_array_cache(sbi);
4352  	if (err)
4353  		goto free_xattr_cache;
4354  
4355  	/* get an inode for meta space */
4356  	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
4357  	if (IS_ERR(sbi->meta_inode)) {
4358  		f2fs_err(sbi, "Failed to read F2FS meta data inode");
4359  		err = PTR_ERR(sbi->meta_inode);
4360  		goto free_page_array_cache;
4361  	}
4362  
4363  	err = f2fs_get_valid_checkpoint(sbi);
4364  	if (err) {
4365  		f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
4366  		goto free_meta_inode;
4367  	}
4368  
4369  	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
4370  		set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
4371  	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
4372  		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
4373  		sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
4374  	}
4375  
4376  	if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
4377  		set_sbi_flag(sbi, SBI_NEED_FSCK);
4378  
4379  	/* Initialize device list */
4380  	err = f2fs_scan_devices(sbi);
4381  	if (err) {
4382  		f2fs_err(sbi, "Failed to find devices");
4383  		goto free_devices;
4384  	}
4385  
4386  	err = f2fs_init_post_read_wq(sbi);
4387  	if (err) {
4388  		f2fs_err(sbi, "Failed to initialize post read workqueue");
4389  		goto free_devices;
4390  	}
4391  
4392  	sbi->total_valid_node_count =
4393  				le32_to_cpu(sbi->ckpt->valid_node_count);
4394  	percpu_counter_set(&sbi->total_valid_inode_count,
4395  				le32_to_cpu(sbi->ckpt->valid_inode_count));
4396  	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
4397  	sbi->total_valid_block_count =
4398  				le64_to_cpu(sbi->ckpt->valid_block_count);
4399  	sbi->last_valid_block_count = sbi->total_valid_block_count;
4400  	sbi->reserved_blocks = 0;
4401  	sbi->current_reserved_blocks = 0;
4402  	limit_reserve_root(sbi);
4403  	adjust_unusable_cap_perc(sbi);
4404  
4405  	f2fs_init_extent_cache_info(sbi);
4406  
4407  	f2fs_init_ino_entry_info(sbi);
4408  
4409  	f2fs_init_fsync_node_info(sbi);
4410  
4411  	/* setup checkpoint request control and start checkpoint issue thread */
4412  	f2fs_init_ckpt_req_control(sbi);
4413  	if (!f2fs_readonly(sb) && !test_opt(sbi, DISABLE_CHECKPOINT) &&
4414  			test_opt(sbi, MERGE_CHECKPOINT)) {
4415  		err = f2fs_start_ckpt_thread(sbi);
4416  		if (err) {
4417  			f2fs_err(sbi,
4418  			    "Failed to start F2FS issue_checkpoint_thread (%d)",
4419  			    err);
4420  			goto stop_ckpt_thread;
4421  		}
4422  	}
4423  
4424  	/* setup f2fs internal modules */
4425  	err = f2fs_build_segment_manager(sbi);
4426  	if (err) {
4427  		f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
4428  			 err);
4429  		goto free_sm;
4430  	}
4431  	err = f2fs_build_node_manager(sbi);
4432  	if (err) {
4433  		f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
4434  			 err);
4435  		goto free_nm;
4436  	}
4437  
4438  	err = adjust_reserved_segment(sbi);
4439  	if (err)
4440  		goto free_nm;
4441  
4442  	/* For write statistics */
4443  	sbi->sectors_written_start = f2fs_get_sectors_written(sbi);
4444  
4445  	/* Read accumulated write IO statistics if they exist */
4446  	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
4447  	if (__exist_node_summaries(sbi))
4448  		sbi->kbytes_written =
4449  			le64_to_cpu(seg_i->journal->info.kbytes_written);
4450  
4451  	f2fs_build_gc_manager(sbi);
4452  
4453  	err = f2fs_build_stats(sbi);
4454  	if (err)
4455  		goto free_nm;
4456  
4457  	/* get an inode for node space */
4458  	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
4459  	if (IS_ERR(sbi->node_inode)) {
4460  		f2fs_err(sbi, "Failed to read node inode");
4461  		err = PTR_ERR(sbi->node_inode);
4462  		goto free_stats;
4463  	}
4464  
4465  	/* read root inode and dentry */
4466  	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
4467  	if (IS_ERR(root)) {
4468  		f2fs_err(sbi, "Failed to read root inode");
4469  		err = PTR_ERR(root);
4470  		goto free_node_inode;
4471  	}
4472  	if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
4473  			!root->i_size || !root->i_nlink) {
4474  		iput(root);
4475  		err = -EINVAL;
4476  		goto free_node_inode;
4477  	}
4478  
4479  	sb->s_root = d_make_root(root); /* allocate root dentry */
4480  	if (!sb->s_root) {
4481  		err = -ENOMEM;
4482  		goto free_node_inode;
4483  	}
4484  
4485  	err = f2fs_init_compress_inode(sbi);
4486  	if (err)
4487  		goto free_root_inode;
4488  
4489  	err = f2fs_register_sysfs(sbi);
4490  	if (err)
4491  		goto free_compress_inode;
4492  
4493  #ifdef CONFIG_QUOTA
4494  	/* Enable quota usage during mount */
4495  	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
4496  		err = f2fs_enable_quotas(sb);
4497  		if (err)
4498  			f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
4499  	}
4500  
4501  	quota_enabled = f2fs_recover_quota_begin(sbi);
4502  #endif
4503  	/* if there are any orphan inodes, free them */
4504  	err = f2fs_recover_orphan_inodes(sbi);
4505  	if (err)
4506  		goto free_meta;
4507  
4508  	if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
4509  		goto reset_checkpoint;
4510  
4511  	/* recover fsynced data */
4512  	if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
4513  			!test_opt(sbi, NORECOVERY)) {
4514  		/*
4515  		 * mount should be failed, when device has readonly mode, and
4516  		 * previous checkpoint was not done by clean system shutdown.
4517  		 */
4518  		if (f2fs_hw_is_readonly(sbi)) {
4519  			if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
4520  				err = f2fs_recover_fsync_data(sbi, true);
4521  				if (err > 0) {
4522  					err = -EROFS;
4523  					f2fs_err(sbi, "Need to recover fsync data, but "
4524  						"write access unavailable, please try "
4525  						"mount w/ disable_roll_forward or norecovery");
4526  				}
4527  				if (err < 0)
4528  					goto free_meta;
4529  			}
4530  			f2fs_info(sbi, "write access unavailable, skipping recovery");
4531  			goto reset_checkpoint;
4532  		}
4533  
4534  		if (need_fsck)
4535  			set_sbi_flag(sbi, SBI_NEED_FSCK);
4536  
4537  		if (skip_recovery)
4538  			goto reset_checkpoint;
4539  
4540  		err = f2fs_recover_fsync_data(sbi, false);
4541  		if (err < 0) {
4542  			if (err != -ENOMEM)
4543  				skip_recovery = true;
4544  			need_fsck = true;
4545  			f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
4546  				 err);
4547  			goto free_meta;
4548  		}
4549  	} else {
4550  		err = f2fs_recover_fsync_data(sbi, true);
4551  
4552  		if (!f2fs_readonly(sb) && err > 0) {
4553  			err = -EINVAL;
4554  			f2fs_err(sbi, "Need to recover fsync data");
4555  			goto free_meta;
4556  		}
4557  	}
4558  
4559  #ifdef CONFIG_QUOTA
4560  	f2fs_recover_quota_end(sbi, quota_enabled);
4561  #endif
4562  
4563  	/*
4564  	 * If the f2fs is not readonly and fsync data recovery succeeds,
4565  	 * check zoned block devices' write pointer consistency.
4566  	 */
4567  	if (!err && !f2fs_readonly(sb) && f2fs_sb_has_blkzoned(sbi)) {
4568  		err = f2fs_check_write_pointer(sbi);
4569  		if (err)
4570  			goto free_meta;
4571  	}
4572  
4573  reset_checkpoint:
4574  	f2fs_init_inmem_curseg(sbi);
4575  
4576  	/* f2fs_recover_fsync_data() cleared this already */
4577  	clear_sbi_flag(sbi, SBI_POR_DOING);
4578  
4579  	if (test_opt(sbi, DISABLE_CHECKPOINT)) {
4580  		err = f2fs_disable_checkpoint(sbi);
4581  		if (err)
4582  			goto sync_free_meta;
4583  	} else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
4584  		f2fs_enable_checkpoint(sbi);
4585  	}
4586  
4587  	/*
4588  	 * If the filesystem is not mounted read-only, then
4589  	 * start the GC thread.
4590  	 */
4591  	if ((F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF ||
4592  		test_opt(sbi, GC_MERGE)) && !f2fs_readonly(sb)) {
4593  		/* After POR, we can run the background GC thread. */
4594  		err = f2fs_start_gc_thread(sbi);
4595  		if (err)
4596  			goto sync_free_meta;
4597  	}
4598  	kvfree(options);
4599  
4600  	/* recover broken superblock */
4601  	if (recovery) {
4602  		err = f2fs_commit_super(sbi, true);
4603  		f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
4604  			  sbi->valid_super_block ? 1 : 2, err);
4605  	}
4606  
4607  	f2fs_join_shrinker(sbi);
4608  
4609  	f2fs_tuning_parameters(sbi);
4610  
4611  	f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
4612  		    cur_cp_version(F2FS_CKPT(sbi)));
4613  	f2fs_update_time(sbi, CP_TIME);
4614  	f2fs_update_time(sbi, REQ_TIME);
4615  	clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
4616  
4617  	cleancache_init_fs(sb);
4618  	return 0;
4619  
4620  sync_free_meta:
4621  	/* safe to flush all the data */
4622  	sync_filesystem(sbi->sb);
4623  	retry_cnt = 0;
4624  
4625  free_meta:
4626  #ifdef CONFIG_QUOTA
4627  	f2fs_truncate_quota_inode_pages(sb);
4628  	if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
4629  		f2fs_quota_off_umount(sbi->sb);
4630  #endif
4631  	/*
4632  	 * Some dirty meta pages can be left behind when f2fs_recover_orphan_inodes()
4633  	 * fails with EIO. Then iput(node_inode) can trigger balance_fs_bg()
4634  	 * followed by f2fs_write_checkpoint() through f2fs_write_node_pages(), which
4635  	 * falls into an infinite loop in f2fs_sync_meta_pages().
4636  	 */
4637  	truncate_inode_pages_final(META_MAPPING(sbi));
4638  	/* evict some inodes being cached by GC */
4639  	evict_inodes(sb);
4640  	f2fs_unregister_sysfs(sbi);
4641  free_compress_inode:
4642  	f2fs_destroy_compress_inode(sbi);
4643  free_root_inode:
4644  	dput(sb->s_root);
4645  	sb->s_root = NULL;
4646  free_node_inode:
4647  	f2fs_release_ino_entry(sbi, true);
4648  	truncate_inode_pages_final(NODE_MAPPING(sbi));
4649  	iput(sbi->node_inode);
4650  	sbi->node_inode = NULL;
4651  free_stats:
4652  	f2fs_destroy_stats(sbi);
4653  free_nm:
4654  	/* stop discard thread before destroying node manager */
4655  	f2fs_stop_discard_thread(sbi);
4656  	f2fs_destroy_node_manager(sbi);
4657  free_sm:
4658  	f2fs_destroy_segment_manager(sbi);
4659  stop_ckpt_thread:
4660  	f2fs_stop_ckpt_thread(sbi);
4661  	f2fs_destroy_post_read_wq(sbi);
4662  free_devices:
4663  	destroy_device_list(sbi);
4664  	kvfree(sbi->ckpt);
4665  free_meta_inode:
4666  	make_bad_inode(sbi->meta_inode);
4667  	iput(sbi->meta_inode);
4668  	sbi->meta_inode = NULL;
4669  free_page_array_cache:
4670  	f2fs_destroy_page_array_cache(sbi);
4671  free_xattr_cache:
4672  	f2fs_destroy_xattr_caches(sbi);
4673  free_io_dummy:
4674  	mempool_destroy(sbi->write_io_dummy);
4675  free_percpu:
4676  	destroy_percpu_info(sbi);
4677  free_iostat:
4678  	f2fs_destroy_iostat(sbi);
4679  free_bio_info:
4680  	for (i = 0; i < NR_PAGE_TYPE; i++)
4681  		kvfree(sbi->write_io[i]);
4682  
4683  #ifdef CONFIG_UNICODE
4684  	utf8_unload(sb->s_encoding);
4685  	sb->s_encoding = NULL;
4686  #endif
4687  free_options:
4688  #ifdef CONFIG_QUOTA
4689  	for (i = 0; i < MAXQUOTAS; i++)
4690  		kfree(F2FS_OPTION(sbi).s_qf_names[i]);
4691  #endif
4692  	fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
4693  	kvfree(options);
4694  free_sb_buf:
4695  	kfree(raw_super);
4696  free_sbi:
4697  	if (sbi->s_chksum_driver)
4698  		crypto_free_shash(sbi->s_chksum_driver);
4699  	kfree(sbi);
4700  
4701  	/* give only one more chance */
4702  	if (retry_cnt > 0 && skip_recovery) {
4703  		retry_cnt--;
4704  		shrink_dcache_sb(sb);
4705  		goto try_onemore;
4706  	}
4707  	return err;
4708  }
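/*
 * Note on the retry in f2fs_fill_super() above: if roll-forward
 * recovery failed part-way (skip_recovery was set), the whole fill is
 * retried exactly once from try_onemore with recovery skipped, rather
 * than leaving a half-initialized superblock behind.
 */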
4709  
4710  static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
4711  			const char *dev_name, void *data)
4712  {
4713  	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
4714  }
4715  
4716  static void kill_f2fs_super(struct super_block *sb)
4717  {
4718  	if (sb->s_root) {
4719  		struct f2fs_sb_info *sbi = F2FS_SB(sb);
4720  
4721  		set_sbi_flag(sbi, SBI_IS_CLOSE);
4722  		f2fs_stop_gc_thread(sbi);
4723  		f2fs_stop_discard_thread(sbi);
4724  
4725  #ifdef CONFIG_F2FS_FS_COMPRESSION
4726  		/*
4727  		 * a later evict_inode() can bypass checking and invalidating
4728  		 * the compress inode cache.
4729  		 */
4730  		if (test_opt(sbi, COMPRESS_CACHE))
4731  			truncate_inode_pages_final(COMPRESS_MAPPING(sbi));
4732  #endif
4733  
4734  		if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
4735  				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
4736  			struct cp_control cpc = {
4737  				.reason = CP_UMOUNT,
4738  			};
4739  			f2fs_write_checkpoint(sbi, &cpc);
4740  		}
4741  
4742  		if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
4743  			sb->s_flags &= ~SB_RDONLY;
4744  	}
4745  	kill_block_super(sb);
4746  }
4747  
4748  static struct file_system_type f2fs_fs_type = {
4749  	.owner		= THIS_MODULE,
4750  	.name		= "f2fs",
4751  	.mount		= f2fs_mount,
4752  	.kill_sb	= kill_f2fs_super,
4753  	.fs_flags	= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
4754  };
4755  MODULE_ALIAS_FS("f2fs");
4756  
4757  static int __init init_inodecache(void)
4758  {
4759  	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
4760  			sizeof(struct f2fs_inode_info), 0,
4761  			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
4762  	return f2fs_inode_cachep ? 0 : -ENOMEM;
4763  }
4764  
4765  static void destroy_inodecache(void)
4766  {
4767  	/*
4768  	 * Make sure all delayed rcu free inodes are flushed before we
4769  	 * destroy cache.
4770  	 */
4771  	rcu_barrier();
4772  	kmem_cache_destroy(f2fs_inode_cachep);
4773  }
4774  
4775  static int __init init_f2fs_fs(void)
4776  {
4777  	int err;
4778  
4779  	if (PAGE_SIZE != F2FS_BLKSIZE) {
4780  		printk(KERN_ERR "F2FS not supported on PAGE_SIZE(%lu) != BLOCK_SIZE(%lu)\n",
4781  				PAGE_SIZE, F2FS_BLKSIZE);
4782  		return -EINVAL;
4783  	}
4784  
4785  	err = init_inodecache();
4786  	if (err)
4787  		goto fail;
4788  	err = f2fs_create_node_manager_caches();
4789  	if (err)
4790  		goto free_inodecache;
4791  	err = f2fs_create_segment_manager_caches();
4792  	if (err)
4793  		goto free_node_manager_caches;
4794  	err = f2fs_create_checkpoint_caches();
4795  	if (err)
4796  		goto free_segment_manager_caches;
4797  	err = f2fs_create_recovery_cache();
4798  	if (err)
4799  		goto free_checkpoint_caches;
4800  	err = f2fs_create_extent_cache();
4801  	if (err)
4802  		goto free_recovery_cache;
4803  	err = f2fs_create_garbage_collection_cache();
4804  	if (err)
4805  		goto free_extent_cache;
4806  	err = f2fs_init_sysfs();
4807  	if (err)
4808  		goto free_garbage_collection_cache;
4809  	err = register_shrinker(&f2fs_shrinker_info);
4810  	if (err)
4811  		goto free_sysfs;
4812  	err = register_filesystem(&f2fs_fs_type);
4813  	if (err)
4814  		goto free_shrinker;
4815  	f2fs_create_root_stats();
4816  	err = f2fs_init_post_read_processing();
4817  	if (err)
4818  		goto free_root_stats;
4819  	err = f2fs_init_iostat_processing();
4820  	if (err)
4821  		goto free_post_read;
4822  	err = f2fs_init_bio_entry_cache();
4823  	if (err)
4824  		goto free_iostat;
4825  	err = f2fs_init_bioset();
4826  	if (err)
4827  		goto free_bio_entry_cache;
4828  	err = f2fs_init_compress_mempool();
4829  	if (err)
4830  		goto free_bioset;
4831  	err = f2fs_init_compress_cache();
4832  	if (err)
4833  		goto free_compress_mempool;
4834  	err = f2fs_create_casefold_cache();
4835  	if (err)
4836  		goto free_compress_cache;
4837  	return 0;
4838  free_compress_cache:
4839  	f2fs_destroy_compress_cache();
4840  free_compress_mempool:
4841  	f2fs_destroy_compress_mempool();
4842  free_bioset:
4843  	f2fs_destroy_bioset();
4844  free_bio_entry_cache:
4845  	f2fs_destroy_bio_entry_cache();
4846  free_iostat:
4847  	f2fs_destroy_iostat_processing();
4848  free_post_read:
4849  	f2fs_destroy_post_read_processing();
4850  free_root_stats:
4851  	f2fs_destroy_root_stats();
4852  	unregister_filesystem(&f2fs_fs_type);
4853  free_shrinker:
4854  	unregister_shrinker(&f2fs_shrinker_info);
4855  free_sysfs:
4856  	f2fs_exit_sysfs();
4857  free_garbage_collection_cache:
4858  	f2fs_destroy_garbage_collection_cache();
4859  free_extent_cache:
4860  	f2fs_destroy_extent_cache();
4861  free_recovery_cache:
4862  	f2fs_destroy_recovery_cache();
4863  free_checkpoint_caches:
4864  	f2fs_destroy_checkpoint_caches();
4865  free_segment_manager_caches:
4866  	f2fs_destroy_segment_manager_caches();
4867  free_node_manager_caches:
4868  	f2fs_destroy_node_manager_caches();
4869  free_inodecache:
4870  	destroy_inodecache();
4871  fail:
4872  	return err;
4873  }
4874  
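/*
 * exit_f2fs_fs() below intentionally mirrors the init ladder above in
 * reverse: every cache creation and registration gets a matching
 * destroy/unregister call, so partial-failure unwinding during init
 * and full teardown at module exit share one ordering.
 */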
4875  static void __exit exit_f2fs_fs(void)
4876  {
4877  	f2fs_destroy_casefold_cache();
4878  	f2fs_destroy_compress_cache();
4879  	f2fs_destroy_compress_mempool();
4880  	f2fs_destroy_bioset();
4881  	f2fs_destroy_bio_entry_cache();
4882  	f2fs_destroy_iostat_processing();
4883  	f2fs_destroy_post_read_processing();
4884  	f2fs_destroy_root_stats();
4885  	unregister_filesystem(&f2fs_fs_type);
4886  	unregister_shrinker(&f2fs_shrinker_info);
4887  	f2fs_exit_sysfs();
4888  	f2fs_destroy_garbage_collection_cache();
4889  	f2fs_destroy_extent_cache();
4890  	f2fs_destroy_recovery_cache();
4891  	f2fs_destroy_checkpoint_caches();
4892  	f2fs_destroy_segment_manager_caches();
4893  	f2fs_destroy_node_manager_caches();
4894  	destroy_inodecache();
4895  }
4896  
4897  module_init(init_f2fs_fs)
4898  module_exit(exit_f2fs_fs)
4899  
4900  MODULE_AUTHOR("Samsung Electronics's Praesto Team");
4901  MODULE_DESCRIPTION("Flash Friendly File System");
4902  MODULE_LICENSE("GPL");
4903  MODULE_IMPORT_NS(ANDROID_GKI_VFS_EXPORT_ONLY);
4904  MODULE_SOFTDEP("pre: crc32");
4905  
4906