Lines matching full:sbi (identifier search results; all matches below are from fs/f2fs/super.c)
67 void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate, in f2fs_build_fault_attr() argument
70 struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info; in f2fs_build_fault_attr()
251 void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...) in f2fs_printk() argument
263 KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf); in f2fs_printk()
307 static inline void limit_reserve_root(struct f2fs_sb_info *sbi) in limit_reserve_root() argument
309 block_t limit = min((sbi->user_block_count >> 3), in limit_reserve_root()
310 sbi->user_block_count - sbi->reserved_blocks); in limit_reserve_root()
313 if (test_opt(sbi, RESERVE_ROOT) && in limit_reserve_root()
314 F2FS_OPTION(sbi).root_reserved_blocks > limit) { in limit_reserve_root()
315 F2FS_OPTION(sbi).root_reserved_blocks = limit; in limit_reserve_root()
316 f2fs_info(sbi, "Reduce reserved blocks for root = %u", in limit_reserve_root()
317 F2FS_OPTION(sbi).root_reserved_blocks); in limit_reserve_root()
319 if (!test_opt(sbi, RESERVE_ROOT) && in limit_reserve_root()
320 (!uid_eq(F2FS_OPTION(sbi).s_resuid, in limit_reserve_root()
322 !gid_eq(F2FS_OPTION(sbi).s_resgid, in limit_reserve_root()
324 f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root", in limit_reserve_root()
326 F2FS_OPTION(sbi).s_resuid), in limit_reserve_root()
328 F2FS_OPTION(sbi).s_resgid)); in limit_reserve_root()
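
    A minimal userspace sketch (illustrative only, not kernel code; block_t and the
    helper names below are simplified stand-ins) of the clamping rule the
    limit_reserve_root() matches above show: the root reservation is capped at the
    smaller of 1/8 of the user blocks and the blocks left after the filesystem's
    own reservation.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t block_t;    /* stand-in for the kernel's block_t */

    static block_t min_block(block_t a, block_t b)
    {
            return a < b ? a : b;
    }

    /* Clamp a requested root reservation the same way the matched lines do. */
    static block_t clamp_root_reserved(block_t user_block_count,
                                       block_t reserved_blocks,
                                       block_t root_reserved_blocks)
    {
            block_t limit = min_block(user_block_count >> 3,
                                      user_block_count - reserved_blocks);

            if (root_reserved_blocks > limit)
                    root_reserved_blocks = limit;
            return root_reserved_blocks;
    }

    int main(void)
    {
            /* 1M user blocks, 4K reserved by the fs, root asks for 200K. */
            block_t clamped = clamp_root_reserved(1 << 20, 4096, 200000);

            printf("root_reserved_blocks clamped to %u\n", clamped);
            return 0;
    }
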
331 static inline int adjust_reserved_segment(struct f2fs_sb_info *sbi) in adjust_reserved_segment() argument
333 unsigned int sec_blks = sbi->blocks_per_seg * sbi->segs_per_sec; in adjust_reserved_segment()
338 if (!F2FS_IO_ALIGNED(sbi)) in adjust_reserved_segment()
342 avg_vblocks = sec_blks / F2FS_IO_SIZE(sbi); in adjust_reserved_segment()
347 wanted_reserved_segments = (F2FS_IO_SIZE(sbi) / avg_vblocks) * in adjust_reserved_segment()
348 reserved_segments(sbi); in adjust_reserved_segment()
349 wanted_reserved_segments -= reserved_segments(sbi); in adjust_reserved_segment()
351 avail_user_block_count = sbi->user_block_count - in adjust_reserved_segment()
352 sbi->current_reserved_blocks - in adjust_reserved_segment()
353 F2FS_OPTION(sbi).root_reserved_blocks; in adjust_reserved_segment()
355 if (wanted_reserved_segments * sbi->blocks_per_seg > in adjust_reserved_segment()
357 …f2fs_err(sbi, "IO align feature can't grab additional reserved segment: %u, available segments: %u… in adjust_reserved_segment()
359 avail_user_block_count >> sbi->log_blocks_per_seg); in adjust_reserved_segment()
363 SM_I(sbi)->additional_reserved_segments = wanted_reserved_segments; in adjust_reserved_segment()
365 f2fs_info(sbi, "IO align feature needs additional reserved segment: %u", in adjust_reserved_segment()
371 static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi) in adjust_unusable_cap_perc() argument
373 if (!F2FS_OPTION(sbi).unusable_cap_perc) in adjust_unusable_cap_perc()
376 if (F2FS_OPTION(sbi).unusable_cap_perc == 100) in adjust_unusable_cap_perc()
377 F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count; in adjust_unusable_cap_perc()
379 F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) * in adjust_unusable_cap_perc()
380 F2FS_OPTION(sbi).unusable_cap_perc; in adjust_unusable_cap_perc()
382 f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%", in adjust_unusable_cap_perc()
383 F2FS_OPTION(sbi).unusable_cap, in adjust_unusable_cap_perc()
384 F2FS_OPTION(sbi).unusable_cap_perc); in adjust_unusable_cap_perc()
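
    A small standalone sketch (hypothetical function name, not the kernel helper)
    of the arithmetic in the adjust_unusable_cap_perc() matches above: a percentage
    of 100 maps to the full user block count, otherwise the cap is
    user_block_count / 100 times the percentage, with the integer division first.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint32_t block_t;

    /* Translate an "unusable capacity" percentage into a block count. */
    static block_t unusable_cap_from_perc(block_t user_block_count, unsigned int perc)
    {
            if (perc == 0)
                    return 0;               /* option not set: no cap derived here */
            if (perc == 100)
                    return user_block_count;
            /* integer division first, as in the matched lines */
            return (user_block_count / 100) * perc;
    }

    int main(void)
    {
            printf("cap = %u\n", unusable_cap_from_perc(1000000, 30));
            return 0;
    }
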
400 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_set_qf_name() local
404 if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) { in f2fs_set_qf_name()
405 f2fs_err(sbi, "Cannot change journaled quota options when quota turned on"); in f2fs_set_qf_name()
408 if (f2fs_sb_has_quota_ino(sbi)) { in f2fs_set_qf_name()
409 f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name"); in f2fs_set_qf_name()
415 f2fs_err(sbi, "Not enough memory for storing quotafile name"); in f2fs_set_qf_name()
418 if (F2FS_OPTION(sbi).s_qf_names[qtype]) { in f2fs_set_qf_name()
419 if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0) in f2fs_set_qf_name()
422 f2fs_err(sbi, "%s quota file already specified", in f2fs_set_qf_name()
427 f2fs_err(sbi, "quotafile must be on filesystem root"); in f2fs_set_qf_name()
430 F2FS_OPTION(sbi).s_qf_names[qtype] = qname; in f2fs_set_qf_name()
431 set_opt(sbi, QUOTA); in f2fs_set_qf_name()
440 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_clear_qf_name() local
442 if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) { in f2fs_clear_qf_name()
443 f2fs_err(sbi, "Cannot change journaled quota options when quota turned on"); in f2fs_clear_qf_name()
446 kfree(F2FS_OPTION(sbi).s_qf_names[qtype]); in f2fs_clear_qf_name()
447 F2FS_OPTION(sbi).s_qf_names[qtype] = NULL; in f2fs_clear_qf_name()
451 static int f2fs_check_quota_options(struct f2fs_sb_info *sbi) in f2fs_check_quota_options() argument
458 if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) { in f2fs_check_quota_options()
459 f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement."); in f2fs_check_quota_options()
462 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] || in f2fs_check_quota_options()
463 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] || in f2fs_check_quota_options()
464 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) { in f2fs_check_quota_options()
465 if (test_opt(sbi, USRQUOTA) && in f2fs_check_quota_options()
466 F2FS_OPTION(sbi).s_qf_names[USRQUOTA]) in f2fs_check_quota_options()
467 clear_opt(sbi, USRQUOTA); in f2fs_check_quota_options()
469 if (test_opt(sbi, GRPQUOTA) && in f2fs_check_quota_options()
470 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]) in f2fs_check_quota_options()
471 clear_opt(sbi, GRPQUOTA); in f2fs_check_quota_options()
473 if (test_opt(sbi, PRJQUOTA) && in f2fs_check_quota_options()
474 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) in f2fs_check_quota_options()
475 clear_opt(sbi, PRJQUOTA); in f2fs_check_quota_options()
477 if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) || in f2fs_check_quota_options()
478 test_opt(sbi, PRJQUOTA)) { in f2fs_check_quota_options()
479 f2fs_err(sbi, "old and new quota format mixing"); in f2fs_check_quota_options()
483 if (!F2FS_OPTION(sbi).s_jquota_fmt) { in f2fs_check_quota_options()
484 f2fs_err(sbi, "journaled quota format not specified"); in f2fs_check_quota_options()
489 if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) { in f2fs_check_quota_options()
490 f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt"); in f2fs_check_quota_options()
491 F2FS_OPTION(sbi).s_jquota_fmt = 0; in f2fs_check_quota_options()
502 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_set_test_dummy_encryption() local
508 &F2FS_OPTION(sbi).dummy_enc_policy; in f2fs_set_test_dummy_encryption()
512 f2fs_warn(sbi, "test_dummy_encryption option not supported"); in f2fs_set_test_dummy_encryption()
516 if (!f2fs_sb_has_encrypt(sbi)) { in f2fs_set_test_dummy_encryption()
517 f2fs_err(sbi, "Encrypt feature is off"); in f2fs_set_test_dummy_encryption()
528 f2fs_warn(sbi, "Can't set test_dummy_encryption on remount"); in f2fs_set_test_dummy_encryption()
535 f2fs_warn(sbi, in f2fs_set_test_dummy_encryption()
538 f2fs_warn(sbi, "Value of option \"%s\" is unrecognized", in f2fs_set_test_dummy_encryption()
541 f2fs_warn(sbi, "Error processing option \"%s\" [%d]", in f2fs_set_test_dummy_encryption()
545 f2fs_warn(sbi, "Test dummy encryption mode enabled"); in f2fs_set_test_dummy_encryption()
550 static bool is_compress_extension_exist(struct f2fs_sb_info *sbi, in is_compress_extension_exist() argument
558 ext = F2FS_OPTION(sbi).extensions; in is_compress_extension_exist()
559 ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt; in is_compress_extension_exist()
561 ext = F2FS_OPTION(sbi).noextensions; in is_compress_extension_exist()
562 ext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt; in is_compress_extension_exist()
580 static int f2fs_test_compress_extension(struct f2fs_sb_info *sbi) in f2fs_test_compress_extension() argument
586 ext = F2FS_OPTION(sbi).extensions; in f2fs_test_compress_extension()
587 ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt; in f2fs_test_compress_extension()
588 noext = F2FS_OPTION(sbi).noextensions; in f2fs_test_compress_extension()
589 noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt; in f2fs_test_compress_extension()
596 f2fs_info(sbi, "Don't allow the nocompress extension specifies all files"); in f2fs_test_compress_extension()
601 …f2fs_info(sbi, "Don't allow the same extension %s appear in both compress and nocompress extension… in f2fs_test_compress_extension()
611 static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str) in f2fs_set_lz4hc_level() argument
617 F2FS_OPTION(sbi).compress_level = 0; in f2fs_set_lz4hc_level()
624 f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>"); in f2fs_set_lz4hc_level()
631 f2fs_info(sbi, "invalid lz4hc compress level: %d", level); in f2fs_set_lz4hc_level()
635 F2FS_OPTION(sbi).compress_level = level; in f2fs_set_lz4hc_level()
639 F2FS_OPTION(sbi).compress_level = 0; in f2fs_set_lz4hc_level()
642 f2fs_info(sbi, "kernel doesn't support lz4hc compression"); in f2fs_set_lz4hc_level()
649 static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str) in f2fs_set_zstd_level() argument
655 F2FS_OPTION(sbi).compress_level = F2FS_ZSTD_DEFAULT_CLEVEL; in f2fs_set_zstd_level()
662 f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>"); in f2fs_set_zstd_level()
669 f2fs_info(sbi, "invalid zstd compress level: %d", level); in f2fs_set_zstd_level()
673 F2FS_OPTION(sbi).compress_level = level; in f2fs_set_zstd_level()
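
    The two level-setting helpers matched above (f2fs_set_lz4hc_level() and
    f2fs_set_zstd_level()) both split a "<alg_name>:<compr_level>" mount string.
    A rough userspace sketch of that parsing (illustrative only; MIN_LEVEL and
    MAX_LEVEL are placeholder bounds, not values taken from the kernel) could
    look like:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define MIN_LEVEL 1     /* placeholder lower bound for the compressor */
    #define MAX_LEVEL 22    /* placeholder upper bound for the compressor */

    /*
     * Parse "alg:level" and return the level, or -1 on a malformed string
     * or an out-of-range value.
     */
    static int parse_compress_level(const char *opt)
    {
            const char *colon = strchr(opt, ':');
            char *end;
            long level;

            if (!colon)
                    return -1;      /* wrong format, e.g. <alg_name>:<compr_level> */

            level = strtol(colon + 1, &end, 10);
            if (*end != '\0' || level < MIN_LEVEL || level > MAX_LEVEL)
                    return -1;      /* invalid compress level */

            return (int)level;
    }

    int main(void)
    {
            printf("%d\n", parse_compress_level("zstd:3"));   /* prints 3 */
            printf("%d\n", parse_compress_level("lz4hc"));    /* prints -1 */
            return 0;
    }
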
681 struct f2fs_sb_info *sbi = F2FS_SB(sb); in parse_options() local
716 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON; in parse_options()
718 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF; in parse_options()
720 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC; in parse_options()
728 set_opt(sbi, DISABLE_ROLL_FORWARD); in parse_options()
732 set_opt(sbi, NORECOVERY); in parse_options()
737 if (!f2fs_hw_support_discard(sbi)) { in parse_options()
738 f2fs_warn(sbi, "device does not support discard"); in parse_options()
741 set_opt(sbi, DISCARD); in parse_options()
744 if (f2fs_hw_should_discard(sbi)) { in parse_options()
745 f2fs_warn(sbi, "discard is required for zoned block devices"); in parse_options()
748 clear_opt(sbi, DISCARD); in parse_options()
751 set_opt(sbi, NOHEAP); in parse_options()
754 clear_opt(sbi, NOHEAP); in parse_options()
758 set_opt(sbi, XATTR_USER); in parse_options()
761 clear_opt(sbi, XATTR_USER); in parse_options()
764 set_opt(sbi, INLINE_XATTR); in parse_options()
767 clear_opt(sbi, INLINE_XATTR); in parse_options()
772 set_opt(sbi, INLINE_XATTR_SIZE); in parse_options()
773 F2FS_OPTION(sbi).inline_xattr_size = arg; in parse_options()
777 f2fs_info(sbi, "user_xattr options not supported"); in parse_options()
780 f2fs_info(sbi, "nouser_xattr options not supported"); in parse_options()
783 f2fs_info(sbi, "inline_xattr options not supported"); in parse_options()
786 f2fs_info(sbi, "noinline_xattr options not supported"); in parse_options()
791 set_opt(sbi, POSIX_ACL); in parse_options()
794 clear_opt(sbi, POSIX_ACL); in parse_options()
798 f2fs_info(sbi, "acl options not supported"); in parse_options()
801 f2fs_info(sbi, "noacl options not supported"); in parse_options()
810 F2FS_OPTION(sbi).active_logs = arg; in parse_options()
813 set_opt(sbi, DISABLE_EXT_IDENTIFY); in parse_options()
816 set_opt(sbi, INLINE_DATA); in parse_options()
819 set_opt(sbi, INLINE_DENTRY); in parse_options()
822 clear_opt(sbi, INLINE_DENTRY); in parse_options()
825 set_opt(sbi, FLUSH_MERGE); in parse_options()
828 clear_opt(sbi, FLUSH_MERGE); in parse_options()
831 set_opt(sbi, NOBARRIER); in parse_options()
834 clear_opt(sbi, NOBARRIER); in parse_options()
837 set_opt(sbi, FASTBOOT); in parse_options()
840 set_opt(sbi, READ_EXTENT_CACHE); in parse_options()
843 clear_opt(sbi, READ_EXTENT_CACHE); in parse_options()
846 clear_opt(sbi, INLINE_DATA); in parse_options()
849 set_opt(sbi, DATA_FLUSH); in parse_options()
854 if (test_opt(sbi, RESERVE_ROOT)) { in parse_options()
855 f2fs_info(sbi, "Preserve previous reserve_root=%u", in parse_options()
856 F2FS_OPTION(sbi).root_reserved_blocks); in parse_options()
858 F2FS_OPTION(sbi).root_reserved_blocks = arg; in parse_options()
859 set_opt(sbi, RESERVE_ROOT); in parse_options()
867 f2fs_err(sbi, "Invalid uid value %d", arg); in parse_options()
870 F2FS_OPTION(sbi).s_resuid = uid; in parse_options()
877 f2fs_err(sbi, "Invalid gid value %d", arg); in parse_options()
880 F2FS_OPTION(sbi).s_resgid = gid; in parse_options()
888 F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE; in parse_options()
890 F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS; in parse_options()
892 F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_SEG; in parse_options()
894 F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_BLK; in parse_options()
905 f2fs_warn(sbi, "Not support %ld, larger than %d", in parse_options()
909 F2FS_OPTION(sbi).write_io_size_bits = arg; in parse_options()
915 f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE); in parse_options()
916 set_opt(sbi, FAULT_INJECTION); in parse_options()
922 f2fs_build_fault_attr(sbi, 0, arg); in parse_options()
923 set_opt(sbi, FAULT_INJECTION); in parse_options()
927 f2fs_info(sbi, "fault_injection options not supported"); in parse_options()
931 f2fs_info(sbi, "fault_type options not supported"); in parse_options()
943 set_opt(sbi, USRQUOTA); in parse_options()
946 set_opt(sbi, GRPQUOTA); in parse_options()
949 set_opt(sbi, PRJQUOTA); in parse_options()
982 F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD; in parse_options()
985 F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0; in parse_options()
988 F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1; in parse_options()
991 clear_opt(sbi, QUOTA); in parse_options()
992 clear_opt(sbi, USRQUOTA); in parse_options()
993 clear_opt(sbi, GRPQUOTA); in parse_options()
994 clear_opt(sbi, PRJQUOTA); in parse_options()
1011 f2fs_info(sbi, "quota operations not supported"); in parse_options()
1020 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT; in parse_options()
1022 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE; in parse_options()
1034 F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX; in parse_options()
1036 F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT; in parse_options()
1038 F2FS_OPTION(sbi).fsync_mode = in parse_options()
1056 f2fs_info(sbi, "inline encryption not supported"); in parse_options()
1064 F2FS_OPTION(sbi).unusable_cap_perc = arg; in parse_options()
1065 set_opt(sbi, DISABLE_CHECKPOINT); in parse_options()
1070 F2FS_OPTION(sbi).unusable_cap = arg; in parse_options()
1071 set_opt(sbi, DISABLE_CHECKPOINT); in parse_options()
1074 set_opt(sbi, DISABLE_CHECKPOINT); in parse_options()
1077 clear_opt(sbi, DISABLE_CHECKPOINT); in parse_options()
1080 set_opt(sbi, MERGE_CHECKPOINT); in parse_options()
1083 clear_opt(sbi, MERGE_CHECKPOINT); in parse_options()
1087 if (!f2fs_sb_has_compression(sbi)) { in parse_options()
1088 f2fs_info(sbi, "Image doesn't support compression"); in parse_options()
1096 F2FS_OPTION(sbi).compress_level = 0; in parse_options()
1097 F2FS_OPTION(sbi).compress_algorithm = in parse_options()
1100 f2fs_info(sbi, "kernel doesn't support lzo compression"); in parse_options()
1104 ret = f2fs_set_lz4hc_level(sbi, name); in parse_options()
1109 F2FS_OPTION(sbi).compress_algorithm = in parse_options()
1112 f2fs_info(sbi, "kernel doesn't support lz4 compression"); in parse_options()
1116 ret = f2fs_set_zstd_level(sbi, name); in parse_options()
1121 F2FS_OPTION(sbi).compress_algorithm = in parse_options()
1124 f2fs_info(sbi, "kernel doesn't support zstd compression"); in parse_options()
1128 F2FS_OPTION(sbi).compress_level = 0; in parse_options()
1129 F2FS_OPTION(sbi).compress_algorithm = in parse_options()
1132 f2fs_info(sbi, "kernel doesn't support lzorle compression"); in parse_options()
1141 if (!f2fs_sb_has_compression(sbi)) { in parse_options()
1142 f2fs_info(sbi, "Image doesn't support compression"); in parse_options()
1149 f2fs_err(sbi, in parse_options()
1153 F2FS_OPTION(sbi).compress_log_size = arg; in parse_options()
1156 if (!f2fs_sb_has_compression(sbi)) { in parse_options()
1157 f2fs_info(sbi, "Image doesn't support compression"); in parse_options()
1164 ext = F2FS_OPTION(sbi).extensions; in parse_options()
1165 ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt; in parse_options()
1169 f2fs_err(sbi, in parse_options()
1175 if (is_compress_extension_exist(sbi, name, true)) { in parse_options()
1181 F2FS_OPTION(sbi).compress_ext_cnt++; in parse_options()
1185 if (!f2fs_sb_has_compression(sbi)) { in parse_options()
1186 f2fs_info(sbi, "Image doesn't support compression"); in parse_options()
1193 noext = F2FS_OPTION(sbi).noextensions; in parse_options()
1194 noext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt; in parse_options()
1198 f2fs_err(sbi, in parse_options()
1204 if (is_compress_extension_exist(sbi, name, false)) { in parse_options()
1210 F2FS_OPTION(sbi).nocompress_ext_cnt++; in parse_options()
1214 if (!f2fs_sb_has_compression(sbi)) { in parse_options()
1215 f2fs_info(sbi, "Image doesn't support compression"); in parse_options()
1218 F2FS_OPTION(sbi).compress_chksum = true; in parse_options()
1221 if (!f2fs_sb_has_compression(sbi)) { in parse_options()
1222 f2fs_info(sbi, "Image doesn't support compression"); in parse_options()
1229 F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS; in parse_options()
1231 F2FS_OPTION(sbi).compress_mode = COMPR_MODE_USER; in parse_options()
1239 if (!f2fs_sb_has_compression(sbi)) { in parse_options()
1240 f2fs_info(sbi, "Image doesn't support compression"); in parse_options()
1243 set_opt(sbi, COMPRESS_CACHE); in parse_options()
1253 f2fs_info(sbi, "compression options not supported"); in parse_options()
1257 set_opt(sbi, ATGC); in parse_options()
1260 set_opt(sbi, GC_MERGE); in parse_options()
1263 clear_opt(sbi, GC_MERGE); in parse_options()
1270 F2FS_OPTION(sbi).discard_unit = in parse_options()
1273 F2FS_OPTION(sbi).discard_unit = in parse_options()
1276 F2FS_OPTION(sbi).discard_unit = in parse_options()
1289 F2FS_OPTION(sbi).memory_mode = in parse_options()
1292 F2FS_OPTION(sbi).memory_mode = in parse_options()
1301 set_opt(sbi, AGE_EXTENT_CACHE); in parse_options()
1308 F2FS_OPTION(sbi).errors = in parse_options()
1311 F2FS_OPTION(sbi).errors = in parse_options()
1314 F2FS_OPTION(sbi).errors = in parse_options()
1323 f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value", in parse_options()
1330 if (f2fs_check_quota_options(sbi)) in parse_options()
1333 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) { in parse_options()
1334 f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA"); in parse_options()
1337 if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) { in parse_options()
1338 …f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA"); in parse_options()
1343 if (f2fs_sb_has_casefold(sbi)) { in parse_options()
1344 f2fs_err(sbi, in parse_options()
1354 if (f2fs_sb_has_blkzoned(sbi)) { in parse_options()
1356 if (F2FS_OPTION(sbi).discard_unit != in parse_options()
1358 …f2fs_info(sbi, "Zoned block device doesn't need small discard, set discard_unit=section by default… in parse_options()
1359 F2FS_OPTION(sbi).discard_unit = in parse_options()
1363 if (F2FS_OPTION(sbi).fs_mode != FS_MODE_LFS) { in parse_options()
1364 f2fs_info(sbi, "Only lfs mode is allowed with zoned block device feature"); in parse_options()
1368 f2fs_err(sbi, "Zoned block device support is not enabled"); in parse_options()
1374 if (f2fs_test_compress_extension(sbi)) { in parse_options()
1375 f2fs_err(sbi, "invalid compress or nocompress extension"); in parse_options()
1380 if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) { in parse_options()
1381 f2fs_err(sbi, "Should set mode=lfs with %luKB-sized IO", in parse_options()
1382 F2FS_IO_SIZE_KB(sbi)); in parse_options()
1386 if (test_opt(sbi, INLINE_XATTR_SIZE)) { in parse_options()
1389 if (!f2fs_sb_has_extra_attr(sbi) || in parse_options()
1390 !f2fs_sb_has_flexible_inline_xattr(sbi)) { in parse_options()
1391 f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off"); in parse_options()
1394 if (!test_opt(sbi, INLINE_XATTR)) { in parse_options()
1395 f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option"); in parse_options()
1402 if (F2FS_OPTION(sbi).inline_xattr_size < min_size || in parse_options()
1403 F2FS_OPTION(sbi).inline_xattr_size > max_size) { in parse_options()
1404 f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d", in parse_options()
1410 if (test_opt(sbi, DISABLE_CHECKPOINT) && f2fs_lfs_mode(sbi)) { in parse_options()
1411 f2fs_err(sbi, "LFS is not compatible with checkpoint=disable"); in parse_options()
1415 if (test_opt(sbi, ATGC) && f2fs_lfs_mode(sbi)) { in parse_options()
1416 f2fs_err(sbi, "LFS is not compatible with ATGC"); in parse_options()
1420 if (f2fs_is_readonly(sbi) && test_opt(sbi, FLUSH_MERGE)) { in parse_options()
1421 f2fs_err(sbi, "FLUSH_MERGE not compatible with readonly mode"); in parse_options()
1425 if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) { in parse_options()
1426 f2fs_err(sbi, "Allow to mount readonly mode only"); in parse_options()
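
    The parse_options() matches above walk a comma-separated mount option string
    and set or clear per-option state, then cross-check the resulting combination.
    A toy userspace parser in the same spirit (heavily simplified; the option names
    and struct here are illustrative, not the kernel's option table) might look like:

    #include <stdio.h>
    #include <stdbool.h>
    #include <string.h>

    struct mount_opts {
            bool discard;
            bool inline_data;
            bool flush_merge;
    };

    /* Walk "opt1,opt2,..." and set/clear flags; unknown tokens are rejected. */
    static int parse_mount_options(char *options, struct mount_opts *o)
    {
            char *p;

            for (p = strtok(options, ","); p; p = strtok(NULL, ",")) {
                    if (!strcmp(p, "discard"))
                            o->discard = true;
                    else if (!strcmp(p, "nodiscard"))
                            o->discard = false;
                    else if (!strcmp(p, "inline_data"))
                            o->inline_data = true;
                    else if (!strcmp(p, "flush_merge"))
                            o->flush_merge = true;
                    else {
                            fprintf(stderr, "Unrecognized mount option \"%s\"\n", p);
                            return -1;
                    }
            }
            return 0;
    }

    int main(void)
    {
            char opts[] = "discard,inline_data,flush_merge";
            struct mount_opts o = {0};

            if (parse_mount_options(opts, &o) == 0)
                    printf("discard=%d inline_data=%d flush_merge=%d\n",
                           o.discard, o.inline_data, o.flush_merge);
            return 0;
    }
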
1464 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_drop_inode() local
1471 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { in f2fs_drop_inode()
1472 if (inode->i_ino == F2FS_NODE_INO(sbi) || in f2fs_drop_inode()
1473 inode->i_ino == F2FS_META_INO(sbi)) { in f2fs_drop_inode()
1522 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_inode_dirtied() local
1525 spin_lock(&sbi->inode_lock[DIRTY_META]); in f2fs_inode_dirtied()
1530 stat_inc_dirty_inode(sbi, DIRTY_META); in f2fs_inode_dirtied()
1534 &sbi->inode_list[DIRTY_META]); in f2fs_inode_dirtied()
1535 inc_page_count(sbi, F2FS_DIRTY_IMETA); in f2fs_inode_dirtied()
1537 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_inode_dirtied()
1543 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_inode_synced() local
1545 spin_lock(&sbi->inode_lock[DIRTY_META]); in f2fs_inode_synced()
1547 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_inode_synced()
1552 dec_page_count(sbi, F2FS_DIRTY_IMETA); in f2fs_inode_synced()
1557 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_inode_synced()
1567 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_dirty_inode() local
1569 if (inode->i_ino == F2FS_NODE_INO(sbi) || in f2fs_dirty_inode()
1570 inode->i_ino == F2FS_META_INO(sbi)) in f2fs_dirty_inode()
1585 static void destroy_percpu_info(struct f2fs_sb_info *sbi) in destroy_percpu_info() argument
1587 percpu_counter_destroy(&sbi->total_valid_inode_count); in destroy_percpu_info()
1588 percpu_counter_destroy(&sbi->rf_node_block_count); in destroy_percpu_info()
1589 percpu_counter_destroy(&sbi->alloc_valid_block_count); in destroy_percpu_info()
1592 static void destroy_device_list(struct f2fs_sb_info *sbi) in destroy_device_list() argument
1596 for (i = 0; i < sbi->s_ndevs; i++) { in destroy_device_list()
1598 blkdev_put(FDEV(i).bdev, sbi->sb); in destroy_device_list()
1603 kvfree(sbi->devs); in destroy_device_list()
1608 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_put_super() local
1614 f2fs_unregister_sysfs(sbi); in f2fs_put_super()
1619 mutex_lock(&sbi->umount_mutex); in f2fs_put_super()
1625 f2fs_stop_ckpt_thread(sbi); in f2fs_put_super()
1632 if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) || in f2fs_put_super()
1633 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) { in f2fs_put_super()
1637 stat_inc_cp_call_count(sbi, TOTAL_CALL); in f2fs_put_super()
1638 err = f2fs_write_checkpoint(sbi, &cpc); in f2fs_put_super()
1642 done = f2fs_issue_discard_timeout(sbi); in f2fs_put_super()
1643 if (f2fs_realtime_discard_enable(sbi) && !sbi->discard_blks && done) { in f2fs_put_super()
1647 stat_inc_cp_call_count(sbi, TOTAL_CALL); in f2fs_put_super()
1648 err = f2fs_write_checkpoint(sbi, &cpc); in f2fs_put_super()
1655 f2fs_release_ino_entry(sbi, true); in f2fs_put_super()
1657 f2fs_leave_shrinker(sbi); in f2fs_put_super()
1658 mutex_unlock(&sbi->umount_mutex); in f2fs_put_super()
1661 f2fs_flush_merged_writes(sbi); in f2fs_put_super()
1663 f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA); in f2fs_put_super()
1665 if (err || f2fs_cp_error(sbi)) { in f2fs_put_super()
1666 truncate_inode_pages_final(NODE_MAPPING(sbi)); in f2fs_put_super()
1667 truncate_inode_pages_final(META_MAPPING(sbi)); in f2fs_put_super()
1671 if (!get_pages(sbi, i)) in f2fs_put_super()
1673 f2fs_err(sbi, "detect filesystem reference count leak during " in f2fs_put_super()
1674 "umount, type: %d, count: %lld", i, get_pages(sbi, i)); in f2fs_put_super()
1675 f2fs_bug_on(sbi, 1); in f2fs_put_super()
1678 f2fs_bug_on(sbi, sbi->fsync_node_num); in f2fs_put_super()
1680 f2fs_destroy_compress_inode(sbi); in f2fs_put_super()
1682 iput(sbi->node_inode); in f2fs_put_super()
1683 sbi->node_inode = NULL; in f2fs_put_super()
1685 iput(sbi->meta_inode); in f2fs_put_super()
1686 sbi->meta_inode = NULL; in f2fs_put_super()
1692 f2fs_destroy_stats(sbi); in f2fs_put_super()
1695 f2fs_destroy_node_manager(sbi); in f2fs_put_super()
1696 f2fs_destroy_segment_manager(sbi); in f2fs_put_super()
1698 /* flush s_error_work before sbi destroy */ in f2fs_put_super()
1699 flush_work(&sbi->s_error_work); in f2fs_put_super()
1701 f2fs_destroy_post_read_wq(sbi); in f2fs_put_super()
1703 kvfree(sbi->ckpt); in f2fs_put_super()
1706 if (sbi->s_chksum_driver) in f2fs_put_super()
1707 crypto_free_shash(sbi->s_chksum_driver); in f2fs_put_super()
1708 kfree(sbi->raw_super); in f2fs_put_super()
1710 destroy_device_list(sbi); in f2fs_put_super()
1711 f2fs_destroy_page_array_cache(sbi); in f2fs_put_super()
1712 f2fs_destroy_xattr_caches(sbi); in f2fs_put_super()
1713 mempool_destroy(sbi->write_io_dummy); in f2fs_put_super()
1716 kfree(F2FS_OPTION(sbi).s_qf_names[i]); in f2fs_put_super()
1718 fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy); in f2fs_put_super()
1719 destroy_percpu_info(sbi); in f2fs_put_super()
1720 f2fs_destroy_iostat(sbi); in f2fs_put_super()
1722 kvfree(sbi->write_io[i]); in f2fs_put_super()
1726 kfree(sbi); in f2fs_put_super()
1731 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_sync_fs() local
1734 if (unlikely(f2fs_cp_error(sbi))) in f2fs_sync_fs()
1736 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) in f2fs_sync_fs()
1741 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) in f2fs_sync_fs()
1745 stat_inc_cp_call_count(sbi, TOTAL_CALL); in f2fs_sync_fs()
1746 err = f2fs_issue_checkpoint(sbi); in f2fs_sync_fs()
1827 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_statfs() local
1833 total_count = le64_to_cpu(sbi->raw_super->block_count); in f2fs_statfs()
1834 start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr); in f2fs_statfs()
1836 buf->f_bsize = sbi->blocksize; in f2fs_statfs()
1840 spin_lock(&sbi->stat_lock); in f2fs_statfs()
1842 user_block_count = sbi->user_block_count; in f2fs_statfs()
1843 total_valid_node_count = valid_node_count(sbi); in f2fs_statfs()
1844 avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM; in f2fs_statfs()
1845 buf->f_bfree = user_block_count - valid_user_blocks(sbi) - in f2fs_statfs()
1846 sbi->current_reserved_blocks; in f2fs_statfs()
1848 if (unlikely(buf->f_bfree <= sbi->unusable_block_count)) in f2fs_statfs()
1851 buf->f_bfree -= sbi->unusable_block_count; in f2fs_statfs()
1852 spin_unlock(&sbi->stat_lock); in f2fs_statfs()
1854 if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks) in f2fs_statfs()
1856 F2FS_OPTION(sbi).root_reserved_blocks; in f2fs_statfs()
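
    A compact userspace sketch (illustrative struct and names) of the statfs
    accounting the f2fs_statfs() matches above perform: free blocks are user
    blocks minus valid and currently reserved blocks, reduced by any unusable
    blocks, and the blocks available to non-root additionally subtract the root
    reservation.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t block_t;

    struct fs_counters {
            block_t user_block_count;
            block_t valid_user_blocks;
            block_t current_reserved_blocks;
            block_t unusable_block_count;
            block_t root_reserved_blocks;
    };

    static void account_free_blocks(const struct fs_counters *c,
                                    block_t *bfree, block_t *bavail)
    {
            block_t free_blks = c->user_block_count - c->valid_user_blocks -
                                c->current_reserved_blocks;

            /* Blocks made unusable by checkpoint=disable cannot be handed out. */
            free_blks = (free_blks <= c->unusable_block_count) ?
                            0 : free_blks - c->unusable_block_count;
            *bfree = free_blks;

            /* Non-root callers also lose the root reservation. */
            *bavail = (free_blks > c->root_reserved_blocks) ?
                            free_blks - c->root_reserved_blocks : 0;
    }

    int main(void)
    {
            struct fs_counters c = {
                    .user_block_count = 1000000,
                    .valid_user_blocks = 600000,
                    .current_reserved_blocks = 1000,
                    .unusable_block_count = 0,
                    .root_reserved_blocks = 5000,
            };
            block_t bfree, bavail;

            account_free_blocks(&c, &bfree, &bavail);
            printf("bfree=%llu bavail=%llu\n",
                   (unsigned long long)bfree, (unsigned long long)bavail);
            return 0;
    }
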
1885 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_show_quota_options() local
1887 if (F2FS_OPTION(sbi).s_jquota_fmt) { in f2fs_show_quota_options()
1890 switch (F2FS_OPTION(sbi).s_jquota_fmt) { in f2fs_show_quota_options()
1904 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA]) in f2fs_show_quota_options()
1906 F2FS_OPTION(sbi).s_qf_names[USRQUOTA]); in f2fs_show_quota_options()
1908 if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]) in f2fs_show_quota_options()
1910 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]); in f2fs_show_quota_options()
1912 if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) in f2fs_show_quota_options()
1914 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]); in f2fs_show_quota_options()
1922 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_show_compress_options() local
1926 if (!f2fs_sb_has_compression(sbi)) in f2fs_show_compress_options()
1929 switch (F2FS_OPTION(sbi).compress_algorithm) { in f2fs_show_compress_options()
1945 if (F2FS_OPTION(sbi).compress_level) in f2fs_show_compress_options()
1946 seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level); in f2fs_show_compress_options()
1949 F2FS_OPTION(sbi).compress_log_size); in f2fs_show_compress_options()
1951 for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) { in f2fs_show_compress_options()
1953 F2FS_OPTION(sbi).extensions[i]); in f2fs_show_compress_options()
1956 for (i = 0; i < F2FS_OPTION(sbi).nocompress_ext_cnt; i++) { in f2fs_show_compress_options()
1958 F2FS_OPTION(sbi).noextensions[i]); in f2fs_show_compress_options()
1961 if (F2FS_OPTION(sbi).compress_chksum) in f2fs_show_compress_options()
1964 if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_FS) in f2fs_show_compress_options()
1966 else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER) in f2fs_show_compress_options()
1969 if (test_opt(sbi, COMPRESS_CACHE)) in f2fs_show_compress_options()
1976 struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb); in f2fs_show_options() local
1978 if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) in f2fs_show_options()
1980 else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON) in f2fs_show_options()
1982 else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF) in f2fs_show_options()
1985 if (test_opt(sbi, GC_MERGE)) in f2fs_show_options()
1990 if (test_opt(sbi, DISABLE_ROLL_FORWARD)) in f2fs_show_options()
1992 if (test_opt(sbi, NORECOVERY)) in f2fs_show_options()
1994 if (test_opt(sbi, DISCARD)) { in f2fs_show_options()
1996 if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK) in f2fs_show_options()
1998 else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT) in f2fs_show_options()
2000 else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION) in f2fs_show_options()
2005 if (test_opt(sbi, NOHEAP)) in f2fs_show_options()
2010 if (test_opt(sbi, XATTR_USER)) in f2fs_show_options()
2014 if (test_opt(sbi, INLINE_XATTR)) in f2fs_show_options()
2018 if (test_opt(sbi, INLINE_XATTR_SIZE)) in f2fs_show_options()
2020 F2FS_OPTION(sbi).inline_xattr_size); in f2fs_show_options()
2023 if (test_opt(sbi, POSIX_ACL)) in f2fs_show_options()
2028 if (test_opt(sbi, DISABLE_EXT_IDENTIFY)) in f2fs_show_options()
2030 if (test_opt(sbi, INLINE_DATA)) in f2fs_show_options()
2034 if (test_opt(sbi, INLINE_DENTRY)) in f2fs_show_options()
2038 if (test_opt(sbi, FLUSH_MERGE)) in f2fs_show_options()
2042 if (test_opt(sbi, NOBARRIER)) in f2fs_show_options()
2046 if (test_opt(sbi, FASTBOOT)) in f2fs_show_options()
2048 if (test_opt(sbi, READ_EXTENT_CACHE)) in f2fs_show_options()
2052 if (test_opt(sbi, AGE_EXTENT_CACHE)) in f2fs_show_options()
2054 if (test_opt(sbi, DATA_FLUSH)) in f2fs_show_options()
2058 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE) in f2fs_show_options()
2060 else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS) in f2fs_show_options()
2062 else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG) in f2fs_show_options()
2064 else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK) in f2fs_show_options()
2066 seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs); in f2fs_show_options()
2067 if (test_opt(sbi, RESERVE_ROOT)) in f2fs_show_options()
2069 F2FS_OPTION(sbi).root_reserved_blocks, in f2fs_show_options()
2071 F2FS_OPTION(sbi).s_resuid), in f2fs_show_options()
2073 F2FS_OPTION(sbi).s_resgid)); in f2fs_show_options()
2074 if (F2FS_IO_SIZE_BITS(sbi)) in f2fs_show_options()
2076 F2FS_OPTION(sbi).write_io_size_bits); in f2fs_show_options()
2078 if (test_opt(sbi, FAULT_INJECTION)) { in f2fs_show_options()
2080 F2FS_OPTION(sbi).fault_info.inject_rate); in f2fs_show_options()
2082 F2FS_OPTION(sbi).fault_info.inject_type); in f2fs_show_options()
2086 if (test_opt(sbi, QUOTA)) in f2fs_show_options()
2088 if (test_opt(sbi, USRQUOTA)) in f2fs_show_options()
2090 if (test_opt(sbi, GRPQUOTA)) in f2fs_show_options()
2092 if (test_opt(sbi, PRJQUOTA)) in f2fs_show_options()
2095 f2fs_show_quota_options(seq, sbi->sb); in f2fs_show_options()
2097 fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb); in f2fs_show_options()
2099 if (sbi->sb->s_flags & SB_INLINECRYPT) in f2fs_show_options()
2102 if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT) in f2fs_show_options()
2104 else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE) in f2fs_show_options()
2107 if (test_opt(sbi, DISABLE_CHECKPOINT)) in f2fs_show_options()
2109 F2FS_OPTION(sbi).unusable_cap); in f2fs_show_options()
2110 if (test_opt(sbi, MERGE_CHECKPOINT)) in f2fs_show_options()
2114 if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX) in f2fs_show_options()
2116 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT) in f2fs_show_options()
2118 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER) in f2fs_show_options()
2122 f2fs_show_compress_options(seq, sbi->sb); in f2fs_show_options()
2125 if (test_opt(sbi, ATGC)) in f2fs_show_options()
2128 if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_NORMAL) in f2fs_show_options()
2130 else if (F2FS_OPTION(sbi).memory_mode == MEMORY_MODE_LOW) in f2fs_show_options()
2133 if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY) in f2fs_show_options()
2135 else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE) in f2fs_show_options()
2137 else if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC) in f2fs_show_options()
2143 static void default_options(struct f2fs_sb_info *sbi, bool remount) in default_options() argument
2147 set_opt(sbi, READ_EXTENT_CACHE); in default_options()
2148 clear_opt(sbi, DISABLE_CHECKPOINT); in default_options()
2150 if (f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) in default_options()
2151 set_opt(sbi, DISCARD); in default_options()
2153 if (f2fs_sb_has_blkzoned(sbi)) in default_options()
2154 F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_SECTION; in default_options()
2156 F2FS_OPTION(sbi).discard_unit = DISCARD_UNIT_BLOCK; in default_options()
2159 if (f2fs_sb_has_readonly(sbi)) in default_options()
2160 F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE; in default_options()
2162 F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE; in default_options()
2164 F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS; in default_options()
2165 if (le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count_main) <= in default_options()
2167 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE; in default_options()
2169 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT; in default_options()
2170 F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX; in default_options()
2171 F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID); in default_options()
2172 F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID); in default_options()
2173 if (f2fs_sb_has_compression(sbi)) { in default_options()
2174 F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4; in default_options()
2175 F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE; in default_options()
2176 F2FS_OPTION(sbi).compress_ext_cnt = 0; in default_options()
2177 F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS; in default_options()
2179 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON; in default_options()
2180 F2FS_OPTION(sbi).memory_mode = MEMORY_MODE_NORMAL; in default_options()
2181 F2FS_OPTION(sbi).errors = MOUNT_ERRORS_CONTINUE; in default_options()
2183 sbi->sb->s_flags &= ~SB_INLINECRYPT; in default_options()
2185 set_opt(sbi, INLINE_XATTR); in default_options()
2186 set_opt(sbi, INLINE_DATA); in default_options()
2187 set_opt(sbi, INLINE_DENTRY); in default_options()
2188 set_opt(sbi, NOHEAP); in default_options()
2189 set_opt(sbi, MERGE_CHECKPOINT); in default_options()
2190 F2FS_OPTION(sbi).unusable_cap = 0; in default_options()
2191 sbi->sb->s_flags |= SB_LAZYTIME; in default_options()
2192 if (!f2fs_is_readonly(sbi)) in default_options()
2193 set_opt(sbi, FLUSH_MERGE); in default_options()
2194 if (f2fs_sb_has_blkzoned(sbi)) in default_options()
2195 F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS; in default_options()
2197 F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE; in default_options()
2200 set_opt(sbi, XATTR_USER); in default_options()
2203 set_opt(sbi, POSIX_ACL); in default_options()
2206 f2fs_build_fault_attr(sbi, 0, 0); in default_options()
2213 static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi) in f2fs_disable_checkpoint() argument
2215 unsigned int s_flags = sbi->sb->s_flags; in f2fs_disable_checkpoint()
2217 unsigned int gc_mode = sbi->gc_mode; in f2fs_disable_checkpoint()
2223 f2fs_err(sbi, "checkpoint=disable on readonly fs"); in f2fs_disable_checkpoint()
2226 sbi->sb->s_flags |= SB_ACTIVE; in f2fs_disable_checkpoint()
2229 unusable = f2fs_get_unusable_blocks(sbi); in f2fs_disable_checkpoint()
2230 if (!f2fs_disable_cp_again(sbi, unusable)) in f2fs_disable_checkpoint()
2233 f2fs_update_time(sbi, DISABLE_TIME); in f2fs_disable_checkpoint()
2235 sbi->gc_mode = GC_URGENT_HIGH; in f2fs_disable_checkpoint()
2237 while (!f2fs_time_over(sbi, DISABLE_TIME)) { in f2fs_disable_checkpoint()
2245 f2fs_down_write(&sbi->gc_lock); in f2fs_disable_checkpoint()
2246 stat_inc_gc_call_count(sbi, FOREGROUND); in f2fs_disable_checkpoint()
2247 err = f2fs_gc(sbi, &gc_control); in f2fs_disable_checkpoint()
2256 ret = sync_filesystem(sbi->sb); in f2fs_disable_checkpoint()
2262 unusable = f2fs_get_unusable_blocks(sbi); in f2fs_disable_checkpoint()
2263 if (f2fs_disable_cp_again(sbi, unusable)) { in f2fs_disable_checkpoint()
2269 f2fs_down_write(&sbi->gc_lock); in f2fs_disable_checkpoint()
2271 set_sbi_flag(sbi, SBI_CP_DISABLED); in f2fs_disable_checkpoint()
2272 stat_inc_cp_call_count(sbi, TOTAL_CALL); in f2fs_disable_checkpoint()
2273 err = f2fs_write_checkpoint(sbi, &cpc); in f2fs_disable_checkpoint()
2277 spin_lock(&sbi->stat_lock); in f2fs_disable_checkpoint()
2278 sbi->unusable_block_count = unusable; in f2fs_disable_checkpoint()
2279 spin_unlock(&sbi->stat_lock); in f2fs_disable_checkpoint()
2282 f2fs_up_write(&sbi->gc_lock); in f2fs_disable_checkpoint()
2284 sbi->gc_mode = gc_mode; in f2fs_disable_checkpoint()
2285 sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */ in f2fs_disable_checkpoint()
2289 static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi) in f2fs_enable_checkpoint() argument
2295 sync_inodes_sb(sbi->sb); in f2fs_enable_checkpoint()
2297 } while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--); in f2fs_enable_checkpoint()
2300 f2fs_warn(sbi, "checkpoint=enable has some unwritten data."); in f2fs_enable_checkpoint()
2302 f2fs_down_write(&sbi->gc_lock); in f2fs_enable_checkpoint()
2303 f2fs_dirty_to_prefree(sbi); in f2fs_enable_checkpoint()
2305 clear_sbi_flag(sbi, SBI_CP_DISABLED); in f2fs_enable_checkpoint()
2306 set_sbi_flag(sbi, SBI_IS_DIRTY); in f2fs_enable_checkpoint()
2307 f2fs_up_write(&sbi->gc_lock); in f2fs_enable_checkpoint()
2309 f2fs_sync_fs(sbi->sb, 1); in f2fs_enable_checkpoint()
2312 f2fs_flush_ckpt_thread(sbi); in f2fs_enable_checkpoint()
2317 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_remount() local
2325 bool no_read_extent_cache = !test_opt(sbi, READ_EXTENT_CACHE); in f2fs_remount()
2326 bool no_age_extent_cache = !test_opt(sbi, AGE_EXTENT_CACHE); in f2fs_remount()
2327 bool enable_checkpoint = !test_opt(sbi, DISABLE_CHECKPOINT); in f2fs_remount()
2328 bool no_io_align = !F2FS_IO_ALIGNED(sbi); in f2fs_remount()
2329 bool no_atgc = !test_opt(sbi, ATGC); in f2fs_remount()
2330 bool no_discard = !test_opt(sbi, DISCARD); in f2fs_remount()
2331 bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE); in f2fs_remount()
2332 bool block_unit_discard = f2fs_block_unit_discard(sbi); in f2fs_remount()
2341 org_mount_opt = sbi->mount_opt; in f2fs_remount()
2345 org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt; in f2fs_remount()
2347 if (F2FS_OPTION(sbi).s_qf_names[i]) { in f2fs_remount()
2349 kstrdup(F2FS_OPTION(sbi).s_qf_names[i], in f2fs_remount()
2363 if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) { in f2fs_remount()
2364 err = f2fs_commit_super(sbi, false); in f2fs_remount()
2365 f2fs_info(sbi, "Try to recover all the superblocks, ret: %d", in f2fs_remount()
2368 clear_sbi_flag(sbi, SBI_NEED_SB_WRITE); in f2fs_remount()
2371 default_options(sbi, true); in f2fs_remount()
2379 flush_work(&sbi->s_error_work); in f2fs_remount()
2388 if (f2fs_dev_is_readonly(sbi) && !(*flags & SB_RDONLY)) { in f2fs_remount()
2403 } else if (f2fs_sb_has_quota_ino(sbi)) { in f2fs_remount()
2410 if (f2fs_lfs_mode(sbi) && !IS_F2FS_IPU_DISABLE(sbi)) { in f2fs_remount()
2412 f2fs_warn(sbi, "LFS is not compatible with IPU"); in f2fs_remount()
2417 if (no_atgc == !!test_opt(sbi, ATGC)) { in f2fs_remount()
2419 f2fs_warn(sbi, "switch atgc option is not allowed"); in f2fs_remount()
2424 if (no_read_extent_cache == !!test_opt(sbi, READ_EXTENT_CACHE)) { in f2fs_remount()
2426 f2fs_warn(sbi, "switch extent_cache option is not allowed"); in f2fs_remount()
2430 if (no_age_extent_cache == !!test_opt(sbi, AGE_EXTENT_CACHE)) { in f2fs_remount()
2432 f2fs_warn(sbi, "switch age_extent_cache option is not allowed"); in f2fs_remount()
2436 if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) { in f2fs_remount()
2438 f2fs_warn(sbi, "switch io_bits option is not allowed"); in f2fs_remount()
2442 if (no_compress_cache == !!test_opt(sbi, COMPRESS_CACHE)) { in f2fs_remount()
2444 f2fs_warn(sbi, "switch compress_cache option is not allowed"); in f2fs_remount()
2448 if (block_unit_discard != f2fs_block_unit_discard(sbi)) { in f2fs_remount()
2450 f2fs_warn(sbi, "switch discard_unit option is not allowed"); in f2fs_remount()
2454 if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) { in f2fs_remount()
2456 f2fs_warn(sbi, "disabling checkpoint not compatible with read-only"); in f2fs_remount()
2466 (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF && in f2fs_remount()
2467 !test_opt(sbi, GC_MERGE))) { in f2fs_remount()
2468 if (sbi->gc_thread) { in f2fs_remount()
2469 f2fs_stop_gc_thread(sbi); in f2fs_remount()
2472 } else if (!sbi->gc_thread) { in f2fs_remount()
2473 err = f2fs_start_gc_thread(sbi); in f2fs_remount()
2482 set_sbi_flag(sbi, SBI_IS_DIRTY); in f2fs_remount()
2483 set_sbi_flag(sbi, SBI_IS_CLOSE); in f2fs_remount()
2485 clear_sbi_flag(sbi, SBI_IS_CLOSE); in f2fs_remount()
2488 if ((*flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) || in f2fs_remount()
2489 !test_opt(sbi, MERGE_CHECKPOINT)) { in f2fs_remount()
2490 f2fs_stop_ckpt_thread(sbi); in f2fs_remount()
2494 f2fs_flush_ckpt_thread(sbi); in f2fs_remount()
2496 err = f2fs_start_ckpt_thread(sbi); in f2fs_remount()
2498 f2fs_err(sbi, in f2fs_remount()
2510 if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) { in f2fs_remount()
2511 clear_opt(sbi, FLUSH_MERGE); in f2fs_remount()
2512 f2fs_destroy_flush_cmd_control(sbi, false); in f2fs_remount()
2515 err = f2fs_create_flush_cmd_control(sbi); in f2fs_remount()
2521 if (no_discard == !!test_opt(sbi, DISCARD)) { in f2fs_remount()
2522 if (test_opt(sbi, DISCARD)) { in f2fs_remount()
2523 err = f2fs_start_discard_thread(sbi); in f2fs_remount()
2528 f2fs_stop_discard_thread(sbi); in f2fs_remount()
2529 f2fs_issue_discard_timeout(sbi); in f2fs_remount()
2534 if (enable_checkpoint == !!test_opt(sbi, DISABLE_CHECKPOINT)) { in f2fs_remount()
2535 if (test_opt(sbi, DISABLE_CHECKPOINT)) { in f2fs_remount()
2536 err = f2fs_disable_checkpoint(sbi); in f2fs_remount()
2540 f2fs_enable_checkpoint(sbi); in f2fs_remount()
2552 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0); in f2fs_remount()
2554 limit_reserve_root(sbi); in f2fs_remount()
2555 adjust_unusable_cap_perc(sbi); in f2fs_remount()
2560 if (f2fs_start_discard_thread(sbi)) in f2fs_remount()
2561 f2fs_warn(sbi, "discard has been stopped"); in f2fs_remount()
2563 f2fs_stop_discard_thread(sbi); in f2fs_remount()
2567 if (f2fs_create_flush_cmd_control(sbi)) in f2fs_remount()
2568 f2fs_warn(sbi, "background flush thread has stopped"); in f2fs_remount()
2570 clear_opt(sbi, FLUSH_MERGE); in f2fs_remount()
2571 f2fs_destroy_flush_cmd_control(sbi, false); in f2fs_remount()
2575 if (f2fs_start_ckpt_thread(sbi)) in f2fs_remount()
2576 f2fs_warn(sbi, "background ckpt thread has stopped"); in f2fs_remount()
2578 f2fs_stop_ckpt_thread(sbi); in f2fs_remount()
2582 if (f2fs_start_gc_thread(sbi)) in f2fs_remount()
2583 f2fs_warn(sbi, "background gc thread has stopped"); in f2fs_remount()
2585 f2fs_stop_gc_thread(sbi); in f2fs_remount()
2589 F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt; in f2fs_remount()
2591 kfree(F2FS_OPTION(sbi).s_qf_names[i]); in f2fs_remount()
2592 F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i]; in f2fs_remount()
2595 sbi->mount_opt = org_mount_opt; in f2fs_remount()
2601 static bool f2fs_need_recovery(struct f2fs_sb_info *sbi) in f2fs_need_recovery() argument
2604 if (is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG)) in f2fs_need_recovery()
2607 if (test_opt(sbi, DISABLE_ROLL_FORWARD)) in f2fs_need_recovery()
2609 if (test_opt(sbi, NORECOVERY)) in f2fs_need_recovery()
2611 return !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG); in f2fs_need_recovery()
2614 static bool f2fs_recover_quota_begin(struct f2fs_sb_info *sbi) in f2fs_recover_quota_begin() argument
2616 bool readonly = f2fs_readonly(sbi->sb); in f2fs_recover_quota_begin()
2618 if (!f2fs_need_recovery(sbi)) in f2fs_recover_quota_begin()
2622 if (f2fs_hw_is_readonly(sbi)) in f2fs_recover_quota_begin()
2626 sbi->sb->s_flags &= ~SB_RDONLY; in f2fs_recover_quota_begin()
2627 set_sbi_flag(sbi, SBI_IS_WRITABLE); in f2fs_recover_quota_begin()
2634 return f2fs_enable_quota_files(sbi, readonly); in f2fs_recover_quota_begin()
2637 static void f2fs_recover_quota_end(struct f2fs_sb_info *sbi, in f2fs_recover_quota_end() argument
2641 f2fs_quota_off_umount(sbi->sb); in f2fs_recover_quota_end()
2643 if (is_sbi_flag_set(sbi, SBI_IS_WRITABLE)) { in f2fs_recover_quota_end()
2644 clear_sbi_flag(sbi, SBI_IS_WRITABLE); in f2fs_recover_quota_end()
2645 sbi->sb->s_flags |= SB_RDONLY; in f2fs_recover_quota_end()
2769 static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type) in f2fs_quota_on_mount() argument
2771 if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) { in f2fs_quota_on_mount()
2772 f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it"); in f2fs_quota_on_mount()
2776 return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type], in f2fs_quota_on_mount()
2777 F2FS_OPTION(sbi).s_jquota_fmt, type); in f2fs_quota_on_mount()
2780 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly) in f2fs_enable_quota_files() argument
2785 if (f2fs_sb_has_quota_ino(sbi) && rdonly) { in f2fs_enable_quota_files()
2786 err = f2fs_enable_quotas(sbi->sb); in f2fs_enable_quota_files()
2788 f2fs_err(sbi, "Cannot turn on quota_ino: %d", err); in f2fs_enable_quota_files()
2795 if (F2FS_OPTION(sbi).s_qf_names[i]) { in f2fs_enable_quota_files()
2796 err = f2fs_quota_on_mount(sbi, i); in f2fs_enable_quota_files()
2801 f2fs_err(sbi, "Cannot turn on quotas: %d on %d", in f2fs_enable_quota_files()
2845 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_enable_quotas() local
2849 test_opt(sbi, USRQUOTA), in f2fs_enable_quotas()
2850 test_opt(sbi, GRPQUOTA), in f2fs_enable_quotas()
2851 test_opt(sbi, PRJQUOTA), in f2fs_enable_quotas()
2855 f2fs_err(sbi, "quota file may be corrupted, skip loading it"); in f2fs_enable_quotas()
2868 f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.", in f2fs_enable_quotas()
2881 static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type) in f2fs_quota_sync_file() argument
2883 struct quota_info *dqopt = sb_dqopt(sbi->sb); in f2fs_quota_sync_file()
2887 ret = dquot_writeback_dquots(sbi->sb, type); in f2fs_quota_sync_file()
2896 if (is_journalled_quota(sbi)) in f2fs_quota_sync_file()
2904 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_quota_sync_file()
2910 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_quota_sync() local
2927 if (!f2fs_sb_has_quota_ino(sbi)) in f2fs_quota_sync()
2939 f2fs_lock_op(sbi); in f2fs_quota_sync()
2940 f2fs_down_read(&sbi->quota_sem); in f2fs_quota_sync()
2942 ret = f2fs_quota_sync_file(sbi, cnt); in f2fs_quota_sync()
2944 f2fs_up_read(&sbi->quota_sem); in f2fs_quota_sync()
2945 f2fs_unlock_op(sbi); in f2fs_quota_sync()
2947 if (!f2fs_sb_has_quota_ino(sbi)) in f2fs_quota_sync()
3026 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_quota_off() local
3036 if (is_journalled_quota(sbi)) in f2fs_quota_off()
3037 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_quota_off()
3078 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb); in f2fs_dquot_commit() local
3081 f2fs_down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING); in f2fs_dquot_commit()
3084 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_dquot_commit()
3085 f2fs_up_read(&sbi->quota_sem); in f2fs_dquot_commit()
3091 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb); in f2fs_dquot_acquire() local
3094 f2fs_down_read(&sbi->quota_sem); in f2fs_dquot_acquire()
3097 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_dquot_acquire()
3098 f2fs_up_read(&sbi->quota_sem); in f2fs_dquot_acquire()
3104 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb); in f2fs_dquot_release() local
3108 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_dquot_release()
3115 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_dquot_mark_dquot_dirty() local
3119 if (is_journalled_quota(sbi)) in f2fs_dquot_mark_dquot_dirty()
3120 set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH); in f2fs_dquot_mark_dquot_dirty()
3127 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_dquot_commit_info() local
3131 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_dquot_commit_info()
3212 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_set_context() local
3220 if (f2fs_sb_has_lost_found(sbi) && in f2fs_set_context()
3221 inode->i_ino == F2FS_ROOT_INO(sbi)) in f2fs_set_context()
3249 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_get_devices() local
3253 if (!f2fs_is_multi_device(sbi)) in f2fs_get_devices()
3256 devs = kmalloc_array(sbi->s_ndevs, sizeof(*devs), GFP_KERNEL); in f2fs_get_devices()
3260 for (i = 0; i < sbi->s_ndevs; i++) in f2fs_get_devices()
3262 *num_devs = sbi->s_ndevs; in f2fs_get_devices()
3281 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_nfs_get_inode() local
3284 if (f2fs_check_nid_range(sbi, ino)) in f2fs_nfs_get_inode()
3367 static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi, in sanity_check_area_boundary() argument
3372 struct super_block *sb = sbi->sb; in sanity_check_area_boundary()
3392 f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)", in sanity_check_area_boundary()
3399 f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)", in sanity_check_area_boundary()
3407 f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)", in sanity_check_area_boundary()
3415 f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)", in sanity_check_area_boundary()
3423 f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)", in sanity_check_area_boundary()
3430 f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%llu) block(%u)", in sanity_check_area_boundary()
3442 if (f2fs_readonly(sb) || f2fs_hw_is_readonly(sbi)) { in sanity_check_area_boundary()
3443 set_sbi_flag(sbi, SBI_NEED_SB_WRITE); in sanity_check_area_boundary()
3449 f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)", in sanity_check_area_boundary()
3458 static int sanity_check_raw_super(struct f2fs_sb_info *sbi, in sanity_check_raw_super() argument
3469 f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)", in sanity_check_raw_super()
3479 f2fs_info(sbi, "Invalid SB checksum offset: %zu", in sanity_check_raw_super()
3484 if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) { in sanity_check_raw_super()
3485 f2fs_info(sbi, "Invalid SB checksum value: %u", crc); in sanity_check_raw_super()
3492 f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u", in sanity_check_raw_super()
3500 f2fs_info(sbi, "Invalid log blocks per segment (%u)", in sanity_check_raw_super()
3510 f2fs_info(sbi, "Invalid log sectorsize (%u)", in sanity_check_raw_super()
3517 f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)", in sanity_check_raw_super()
3534 f2fs_info(sbi, "Invalid segment count (%u)", segment_count); in sanity_check_raw_super()
3540 f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)", in sanity_check_raw_super()
3546 f2fs_info(sbi, "Invalid segment/section count (%u != %u * %u)", in sanity_check_raw_super()
3552 f2fs_info(sbi, "Small segment_count (%u < %u * %u)", in sanity_check_raw_super()
3558 f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)", in sanity_check_raw_super()
3572 f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)", in sanity_check_raw_super()
3578 !bdev_is_zoned(sbi->sb->s_bdev)) { in sanity_check_raw_super()
3579 f2fs_info(sbi, "Zoned block device path is missing"); in sanity_check_raw_super()
3585 f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)", in sanity_check_raw_super()
3593 f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)", in sanity_check_raw_super()
3603 f2fs_info(sbi, "Insane cp_payload (%u >= %u)", in sanity_check_raw_super()
3614 f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)", in sanity_check_raw_super()
3622 if (sanity_check_area_boundary(sbi, bh)) in sanity_check_raw_super()
3628 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi) in f2fs_sanity_check_ckpt() argument
3631 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); in f2fs_sanity_check_ckpt()
3632 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in f2fs_sanity_check_ckpt()
3660 if (!f2fs_sb_has_readonly(sbi) && in f2fs_sanity_check_ckpt()
3663 f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version"); in f2fs_sanity_check_ckpt()
3668 (f2fs_sb_has_readonly(sbi) ? 1 : 0); in f2fs_sanity_check_ckpt()
3672 f2fs_err(sbi, "Wrong user_block_count: %u", in f2fs_sanity_check_ckpt()
3679 f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u", in f2fs_sanity_check_ckpt()
3685 avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM; in f2fs_sanity_check_ckpt()
3687 f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u", in f2fs_sanity_check_ckpt()
3693 blocks_per_seg = sbi->blocks_per_seg; in f2fs_sanity_check_ckpt()
3700 if (f2fs_sb_has_readonly(sbi)) in f2fs_sanity_check_ckpt()
3706 f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u", in f2fs_sanity_check_ckpt()
3719 if (f2fs_sb_has_readonly(sbi)) in f2fs_sanity_check_ckpt()
3725 f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u", in f2fs_sanity_check_ckpt()
3736 f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u", in f2fs_sanity_check_ckpt()
3749 f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u", in f2fs_sanity_check_ckpt()
3754 cp_pack_start_sum = __start_sum_addr(sbi); in f2fs_sanity_check_ckpt()
3755 cp_payload = __cp_payload(sbi); in f2fs_sanity_check_ckpt()
3759 f2fs_err(sbi, "Wrong cp_pack_start_sum: %u", in f2fs_sanity_check_ckpt()
3766 f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, " in f2fs_sanity_check_ckpt()
3779 f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u)", in f2fs_sanity_check_ckpt()
3784 if (unlikely(f2fs_cp_error(sbi))) { in f2fs_sanity_check_ckpt()
3785 f2fs_err(sbi, "A bug case: need to run fsck"); in f2fs_sanity_check_ckpt()
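
    Among the f2fs_sanity_check_ckpt() matches above are checks that no two
    current node/data segments share a segment number. A tiny standalone sketch
    of that kind of pairwise duplicate detection (hypothetical array, not the
    checkpoint layout itself) is:

    #include <stdio.h>
    #include <stdbool.h>

    /* Return true if any two entries in segnos[] collide. */
    static bool has_duplicate_segno(const unsigned int *segnos, int count)
    {
            for (int i = 0; i < count; i++)
                    for (int j = i + 1; j < count; j++)
                            if (segnos[i] == segnos[j]) {
                                    fprintf(stderr,
                                            "segments %d and %d share segno %u\n",
                                            i, j, segnos[i]);
                                    return true;
                            }
            return false;
    }

    int main(void)
    {
            unsigned int cur_segs[] = { 12, 40, 73, 40 };   /* entries 1 and 3 collide */

            printf("duplicate: %d\n", has_duplicate_segno(cur_segs, 4));
            return 0;
    }
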
3791 static void init_sb_info(struct f2fs_sb_info *sbi) in init_sb_info() argument
3793 struct f2fs_super_block *raw_super = sbi->raw_super; in init_sb_info()
3796 sbi->log_sectors_per_block = in init_sb_info()
3798 sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize); in init_sb_info()
3799 sbi->blocksize = BIT(sbi->log_blocksize); in init_sb_info()
3800 sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg); in init_sb_info()
3801 sbi->blocks_per_seg = BIT(sbi->log_blocks_per_seg); in init_sb_info()
3802 sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec); in init_sb_info()
3803 sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone); in init_sb_info()
3804 sbi->total_sections = le32_to_cpu(raw_super->section_count); in init_sb_info()
3805 sbi->total_node_count = in init_sb_info()
3807 * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK; in init_sb_info()
3808 F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino); in init_sb_info()
3809 F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino); in init_sb_info()
3810 F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino); in init_sb_info()
3811 sbi->cur_victim_sec = NULL_SECNO; in init_sb_info()
3812 sbi->gc_mode = GC_NORMAL; in init_sb_info()
3813 sbi->next_victim_seg[BG_GC] = NULL_SEGNO; in init_sb_info()
3814 sbi->next_victim_seg[FG_GC] = NULL_SEGNO; in init_sb_info()
3815 sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH; in init_sb_info()
3816 sbi->migration_granularity = sbi->segs_per_sec; in init_sb_info()
3817 sbi->seq_file_ra_mul = MIN_RA_MUL; in init_sb_info()
3818 sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE; in init_sb_info()
3819 sbi->max_fragment_hole = DEF_FRAGMENT_SIZE; in init_sb_info()
3820 spin_lock_init(&sbi->gc_remaining_trials_lock); in init_sb_info()
3821 atomic64_set(&sbi->current_atomic_write, 0); in init_sb_info()
3823 sbi->dir_level = DEF_DIR_LEVEL; in init_sb_info()
3824 sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL; in init_sb_info()
3825 sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL; in init_sb_info()
3826 sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL; in init_sb_info()
3827 sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL; in init_sb_info()
3828 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL; in init_sb_info()
3829 sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] = in init_sb_info()
3831 clear_sbi_flag(sbi, SBI_NEED_FSCK); in init_sb_info()
3834 atomic_set(&sbi->nr_pages[i], 0); in init_sb_info()
3837 atomic_set(&sbi->wb_sync_req[i], 0); in init_sb_info()
3839 INIT_LIST_HEAD(&sbi->s_list); in init_sb_info()
3840 mutex_init(&sbi->umount_mutex); in init_sb_info()
3841 init_f2fs_rwsem(&sbi->io_order_lock); in init_sb_info()
3842 spin_lock_init(&sbi->cp_lock); in init_sb_info()
3844 sbi->dirty_device = 0; in init_sb_info()
3845 spin_lock_init(&sbi->dev_lock); in init_sb_info()
3847 init_f2fs_rwsem(&sbi->sb_lock); in init_sb_info()
3848 init_f2fs_rwsem(&sbi->pin_sem); in init_sb_info()
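
init_sb_info() mostly copies little-endian geometry out of the raw superblock and expands the log2-encoded fields (BIT(log_blocksize), BIT(log_blocks_per_seg)). A userspace sketch of that arithmetic, where le32toh() stands in for le32_to_cpu() and the segment_count_nat factor is a guess at the part elided in the hits:

#include <endian.h>
#include <stdint.h>

/* a few on-disk (little-endian) fields, mirroring struct f2fs_super_block */
struct raw_geom {
	uint32_t log_blocksize;
	uint32_t log_blocks_per_seg;
	uint32_t segment_count_nat;
};

#define NAT_ENTRY_PER_BLOCK 455		/* assumption: 4096-byte block / 9-byte NAT entry */

struct geom { uint32_t blocksize, blocks_per_seg, total_node_count; };

static void fill_geom(struct geom *g, const struct raw_geom *raw)
{
	g->blocksize      = 1u << le32toh(raw->log_blocksize);		/* BIT(log_blocksize) */
	g->blocks_per_seg = 1u << le32toh(raw->log_blocks_per_seg);
	/* the NAT area is kept in two copies, so only half its segments hold
	 * live entries -- this factor is elided above and assumed here */
	g->total_node_count = (le32toh(raw->segment_count_nat) / 2) *
			      g->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
}
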
3851 static int init_percpu_info(struct f2fs_sb_info *sbi) in init_percpu_info() argument
3855 err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL); in init_percpu_info()
3859 err = percpu_counter_init(&sbi->rf_node_block_count, 0, GFP_KERNEL); in init_percpu_info()
3863 err = percpu_counter_init(&sbi->total_valid_inode_count, 0, in init_percpu_info()
3870 percpu_counter_destroy(&sbi->rf_node_block_count); in init_percpu_info()
3872 percpu_counter_destroy(&sbi->alloc_valid_block_count); in init_percpu_info()
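
init_percpu_info() is a textbook init-then-unwind sequence: the counters are created in order and, when a later one fails, the earlier ones are destroyed in reverse before the error is returned. A compact analogue of that goto-based cleanup, using plain allocations since the percpu_counter API is kernel-only:

#include <stdlib.h>

struct counters { long *alloc_valid, *rf_node, *valid_inode; };

static int init_counters(struct counters *c)
{
	c->alloc_valid = calloc(1, sizeof(long));
	if (!c->alloc_valid)
		return -1;
	c->rf_node = calloc(1, sizeof(long));
	if (!c->rf_node)
		goto err_alloc_valid;
	c->valid_inode = calloc(1, sizeof(long));
	if (!c->valid_inode)
		goto err_rf_node;
	return 0;

err_rf_node:		/* mirrors percpu_counter_destroy(&sbi->rf_node_block_count) */
	free(c->rf_node);
err_alloc_valid:	/* mirrors percpu_counter_destroy(&sbi->alloc_valid_block_count) */
	free(c->alloc_valid);
	return -1;
}
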
3879 struct f2fs_sb_info *sbi; member
3894 if (!rz_args->sbi->unusable_blocks_per_sec) { in f2fs_report_zone_cb()
3895 rz_args->sbi->unusable_blocks_per_sec = unusable_blocks; in f2fs_report_zone_cb()
3898 if (rz_args->sbi->unusable_blocks_per_sec != unusable_blocks) { in f2fs_report_zone_cb()
3899 f2fs_err(rz_args->sbi, "F2FS supports single zone capacity\n"); in f2fs_report_zone_cb()
3905 static int init_blkz_info(struct f2fs_sb_info *sbi, int devi) in init_blkz_info() argument
3913 if (!f2fs_sb_has_blkzoned(sbi)) in init_blkz_info()
3918 f2fs_err(sbi, "F2FS does not support non power of 2 zone sizes\n"); in init_blkz_info()
3922 if (sbi->blocks_per_blkz && sbi->blocks_per_blkz != in init_blkz_info()
3925 sbi->blocks_per_blkz = SECTOR_TO_BLOCK(zone_sectors); in init_blkz_info()
3927 sbi->blocks_per_blkz); in init_blkz_info()
3931 FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi, in init_blkz_info()
3938 rep_zone_arg.sbi = sbi; in init_blkz_info()
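
init_blkz_info() and its f2fs_report_zone_cb() callback enforce two zoned-device constraints visible above: the zone size must be a power of two (in sectors), and every zone must report the same unusable tail, since only a single unusable_blocks_per_sec value is kept. A sketch of those checks, assuming SECTOR_TO_BLOCK() is a shift from 512-byte sectors to 4 KiB blocks:

#include <stdbool.h>
#include <stdint.h>

#define SECTORS_PER_BLOCK_SHIFT 3	/* assumption: 4 KiB blocks, 512 B sectors */

static bool zone_size_ok(uint64_t zone_sectors)
{
	/* "F2FS does not support non power of 2 zone sizes" */
	return zone_sectors && !(zone_sectors & (zone_sectors - 1));
}

/* called once per zone: all zones must expose the same unusable tail */
static int check_zone_capacity(uint32_t *saved_unusable, uint32_t unusable_blocks)
{
	if (!*saved_unusable) {
		*saved_unusable = unusable_blocks;
		return 0;
	}
	return (*saved_unusable == unusable_blocks) ? 0 : -1;	/* "single zone capacity" */
}

static uint64_t sectors_to_blocks(uint64_t sectors)
{
	return sectors >> SECTORS_PER_BLOCK_SHIFT;	/* SECTOR_TO_BLOCK() analogue */
}
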
3955 static int read_raw_super_block(struct f2fs_sb_info *sbi, in read_raw_super_block() argument
3959 struct super_block *sb = sbi->sb; in read_raw_super_block()
3972 f2fs_err(sbi, "Unable to read %dth superblock", in read_raw_super_block()
3980 err = sanity_check_raw_super(sbi, bh); in read_raw_super_block()
3982 f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock", in read_raw_super_block()
4007 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover) in f2fs_commit_super() argument
4013 if ((recover && f2fs_readonly(sbi->sb)) || in f2fs_commit_super()
4014 f2fs_hw_is_readonly(sbi)) { in f2fs_commit_super()
4015 set_sbi_flag(sbi, SBI_NEED_SB_WRITE); in f2fs_commit_super()
4020 if (!recover && f2fs_sb_has_sb_chksum(sbi)) { in f2fs_commit_super()
4021 crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi), in f2fs_commit_super()
4023 F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc); in f2fs_commit_super()
4027 bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1); in f2fs_commit_super()
4030 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi)); in f2fs_commit_super()
4038 bh = sb_bread(sbi->sb, sbi->valid_super_block); in f2fs_commit_super()
4041 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi)); in f2fs_commit_super()
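
f2fs_commit_super() refreshes the CRC when the sb_chksum feature is on and then writes the two superblock copies in a fixed order: the copy that is not currently trusted first, the valid one last, so a failure in between still leaves one good copy. A hedged userspace sketch of that ordering (the offsets assume the usual layout of both copies in the first two 4 KiB blocks, each 1 KiB into its block):

#include <sys/types.h>
#include <unistd.h>

#define SB0_OFF 1024UL			/* assumed offset of copy 0 */
#define SB1_OFF (4096UL + 1024UL)	/* assumed offset of copy 1 */

static int commit_super_sketch(int fd, const void *sb, size_t len, int valid_copy)
{
	off_t first  = valid_copy ? SB0_OFF : SB1_OFF;	/* non-current copy first */
	off_t second = valid_copy ? SB1_OFF : SB0_OFF;	/* trusted copy last */

	if (pwrite(fd, sb, len, first) != (ssize_t)len || fsync(fd))
		return -1;
	/* only overwrite the trusted copy once the other one is durable */
	if (pwrite(fd, sb, len, second) != (ssize_t)len || fsync(fd))
		return -1;
	return 0;
}
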
4046 static void save_stop_reason(struct f2fs_sb_info *sbi, unsigned char reason) in save_stop_reason() argument
4050 spin_lock_irqsave(&sbi->error_lock, flags); in save_stop_reason()
4051 if (sbi->stop_reason[reason] < GENMASK(BITS_PER_BYTE - 1, 0)) in save_stop_reason()
4052 sbi->stop_reason[reason]++; in save_stop_reason()
4053 spin_unlock_irqrestore(&sbi->error_lock, flags); in save_stop_reason()
4056 static void f2fs_record_stop_reason(struct f2fs_sb_info *sbi) in f2fs_record_stop_reason() argument
4058 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); in f2fs_record_stop_reason()
4062 f2fs_down_write(&sbi->sb_lock); in f2fs_record_stop_reason()
4064 spin_lock_irqsave(&sbi->error_lock, flags); in f2fs_record_stop_reason()
4065 if (sbi->error_dirty) { in f2fs_record_stop_reason()
4066 memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors, in f2fs_record_stop_reason()
4068 sbi->error_dirty = false; in f2fs_record_stop_reason()
4070 memcpy(raw_super->s_stop_reason, sbi->stop_reason, MAX_STOP_REASON); in f2fs_record_stop_reason()
4071 spin_unlock_irqrestore(&sbi->error_lock, flags); in f2fs_record_stop_reason()
4073 err = f2fs_commit_super(sbi, false); in f2fs_record_stop_reason()
4075 f2fs_up_write(&sbi->sb_lock); in f2fs_record_stop_reason()
4077 f2fs_err(sbi, "f2fs_commit_super fails to record err:%d", err); in f2fs_record_stop_reason()
4080 void f2fs_save_errors(struct f2fs_sb_info *sbi, unsigned char flag) in f2fs_save_errors() argument
4084 spin_lock_irqsave(&sbi->error_lock, flags); in f2fs_save_errors()
4085 if (!test_bit(flag, (unsigned long *)sbi->errors)) { in f2fs_save_errors()
4086 set_bit(flag, (unsigned long *)sbi->errors); in f2fs_save_errors()
4087 sbi->error_dirty = true; in f2fs_save_errors()
4089 spin_unlock_irqrestore(&sbi->error_lock, flags); in f2fs_save_errors()
4092 static bool f2fs_update_errors(struct f2fs_sb_info *sbi) in f2fs_update_errors() argument
4097 spin_lock_irqsave(&sbi->error_lock, flags); in f2fs_update_errors()
4098 if (sbi->error_dirty) { in f2fs_update_errors()
4099 memcpy(F2FS_RAW_SUPER(sbi)->s_errors, sbi->errors, in f2fs_update_errors()
4101 sbi->error_dirty = false; in f2fs_update_errors()
4104 spin_unlock_irqrestore(&sbi->error_lock, flags); in f2fs_update_errors()
4109 static void f2fs_record_errors(struct f2fs_sb_info *sbi, unsigned char error) in f2fs_record_errors() argument
4113 f2fs_down_write(&sbi->sb_lock); in f2fs_record_errors()
4115 if (!f2fs_update_errors(sbi)) in f2fs_record_errors()
4118 err = f2fs_commit_super(sbi, false); in f2fs_record_errors()
4120 f2fs_err(sbi, "f2fs_commit_super fails to record errors:%u, err:%d", in f2fs_record_errors()
4123 f2fs_up_write(&sbi->sb_lock); in f2fs_record_errors()
4126 void f2fs_handle_error(struct f2fs_sb_info *sbi, unsigned char error) in f2fs_handle_error() argument
4128 f2fs_save_errors(sbi, error); in f2fs_handle_error()
4129 f2fs_record_errors(sbi, error); in f2fs_handle_error()
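
f2fs_save_errors(), f2fs_update_errors(), f2fs_record_errors() and f2fs_handle_error() form one pattern: a lock-protected in-memory error bitmap plus a dirty flag. Saving only marks the bitmap; recording copies it into the raw superblock and commits it, skipping the commit entirely when nothing is dirty. A minimal pthread sketch of that dirty-bitmap idea, with the superblock write reduced to a memcpy:

#include <pthread.h>
#include <stdbool.h>
#include <string.h>

#define MAX_ERRORS 16	/* bytes; stand-in for MAX_F2FS_ERRORS */

struct err_state {
	pthread_mutex_t lock;
	unsigned char errors[MAX_ERRORS];	/* in-memory bitmap */
	unsigned char raw[MAX_ERRORS];		/* "superblock" copy */
	bool dirty;
};

static void save_error(struct err_state *s, unsigned int bit)
{
	pthread_mutex_lock(&s->lock);
	if (!(s->errors[bit / 8] & (1u << (bit % 8)))) {
		s->errors[bit / 8] |= 1u << (bit % 8);
		s->dirty = true;
	}
	pthread_mutex_unlock(&s->lock);
}

/* returns true only if something new had to be written back */
static bool record_errors(struct err_state *s)
{
	bool need_update = false;

	pthread_mutex_lock(&s->lock);
	if (s->dirty) {
		memcpy(s->raw, s->errors, MAX_ERRORS);
		s->dirty = false;
		need_update = true;
	}
	pthread_mutex_unlock(&s->lock);
	return need_update;	/* the caller would commit the superblock here */
}
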
4132 void f2fs_handle_error_async(struct f2fs_sb_info *sbi, unsigned char error) in f2fs_handle_error_async() argument
4134 f2fs_save_errors(sbi, error); in f2fs_handle_error_async()
4136 if (!sbi->error_dirty) in f2fs_handle_error_async()
4138 if (!test_bit(error, (unsigned long *)sbi->errors)) in f2fs_handle_error_async()
4140 schedule_work(&sbi->s_error_work); in f2fs_handle_error_async()
4149 void f2fs_handle_critical_error(struct f2fs_sb_info *sbi, unsigned char reason, in f2fs_handle_critical_error() argument
4152 struct super_block *sb = sbi->sb; in f2fs_handle_critical_error()
4155 F2FS_OPTION(sbi).errors == MOUNT_ERRORS_CONTINUE; in f2fs_handle_critical_error()
4157 set_ckpt_flags(sbi, CP_ERROR_FLAG); in f2fs_handle_critical_error()
4159 if (!f2fs_hw_is_readonly(sbi)) { in f2fs_handle_critical_error()
4160 save_stop_reason(sbi, reason); in f2fs_handle_critical_error()
4163 schedule_work(&sbi->s_error_work); in f2fs_handle_critical_error()
4165 f2fs_record_stop_reason(sbi); in f2fs_handle_critical_error()
4173 if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_PANIC && in f2fs_handle_critical_error()
4175 !is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) in f2fs_handle_critical_error()
4180 set_sbi_flag(sbi, SBI_IS_SHUTDOWN); in f2fs_handle_critical_error()
4186 f2fs_warn(sbi, "Remounting filesystem read-only"); in f2fs_handle_critical_error()
4197 struct f2fs_sb_info *sbi = container_of(work, in f2fs_record_error_work() local
4200 f2fs_record_stop_reason(sbi); in f2fs_record_error_work()
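
f2fs_handle_error_async() defers the same superblock update to the s_error_work item serviced by f2fs_record_error_work(), while f2fs_handle_critical_error() additionally bumps a per-reason stop counter before deciding whether to panic, shut the filesystem down, or remount read-only. The counter in save_stop_reason() simply saturates at one byte, roughly:

#include <limits.h>

#define MAX_STOP_REASON 32	/* stand-in for the kernel constant */

/* one saturating 8-bit counter per stop reason;
 * GENMASK(BITS_PER_BYTE - 1, 0) in the hit above is just 0xff */
static void save_stop_reason_sketch(unsigned char stop_reason[MAX_STOP_REASON],
				    unsigned int reason)
{
	if (stop_reason[reason] < UCHAR_MAX)
		stop_reason[reason]++;
}
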
4203 static int f2fs_scan_devices(struct f2fs_sb_info *sbi) in f2fs_scan_devices() argument
4205 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); in f2fs_scan_devices()
4208 blk_mode_t mode = sb_open_mode(sbi->sb->s_flags); in f2fs_scan_devices()
4213 if (!bdev_is_zoned(sbi->sb->s_bdev)) in f2fs_scan_devices()
4222 sbi->devs = f2fs_kzalloc(sbi, in f2fs_scan_devices()
4226 if (!sbi->devs) in f2fs_scan_devices()
4229 logical_blksize = bdev_logical_block_size(sbi->sb->s_bdev); in f2fs_scan_devices()
4230 sbi->aligned_blksize = true; in f2fs_scan_devices()
4234 FDEV(0).bdev = sbi->sb->s_bdev; in f2fs_scan_devices()
4247 sbi->log_blocks_per_seg) - 1 + in f2fs_scan_devices()
4253 sbi->log_blocks_per_seg) - 1; in f2fs_scan_devices()
4255 mode, sbi->sb, NULL); in f2fs_scan_devices()
4262 sbi->s_ndevs = i + 1; in f2fs_scan_devices()
4265 sbi->aligned_blksize = false; in f2fs_scan_devices()
4269 !f2fs_sb_has_blkzoned(sbi)) { in f2fs_scan_devices()
4270 f2fs_err(sbi, "Zoned block device feature not enabled"); in f2fs_scan_devices()
4274 if (init_blkz_info(sbi, i)) { in f2fs_scan_devices()
4275 f2fs_err(sbi, "Failed to initialize F2FS blkzone information"); in f2fs_scan_devices()
4280 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)", in f2fs_scan_devices()
4289 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x", in f2fs_scan_devices()
4294 f2fs_info(sbi, in f2fs_scan_devices()
4295 "IO Block Size: %8ld KB", F2FS_IO_SIZE_KB(sbi)); in f2fs_scan_devices()
4299 static int f2fs_setup_casefold(struct f2fs_sb_info *sbi) in f2fs_setup_casefold() argument
4302 if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) { in f2fs_setup_casefold()
4307 encoding_info = f2fs_sb_read_encoding(sbi->raw_super); in f2fs_setup_casefold()
4309 f2fs_err(sbi, in f2fs_setup_casefold()
4314 encoding_flags = le16_to_cpu(sbi->raw_super->s_encoding_flags); in f2fs_setup_casefold()
4317 f2fs_err(sbi, in f2fs_setup_casefold()
4327 f2fs_info(sbi, "Using encoding defined by superblock: " in f2fs_setup_casefold()
4334 sbi->sb->s_encoding = encoding; in f2fs_setup_casefold()
4335 sbi->sb->s_encoding_flags = encoding_flags; in f2fs_setup_casefold()
4338 if (f2fs_sb_has_casefold(sbi)) { in f2fs_setup_casefold()
4339 f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE"); in f2fs_setup_casefold()
4346 static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi) in f2fs_tuning_parameters() argument
4349 if (MAIN_SEGS(sbi) <= SMALL_VOLUME_SEGMENTS) { in f2fs_tuning_parameters()
4350 if (f2fs_block_unit_discard(sbi)) in f2fs_tuning_parameters()
4351 SM_I(sbi)->dcc_info->discard_granularity = in f2fs_tuning_parameters()
4353 if (!f2fs_lfs_mode(sbi)) in f2fs_tuning_parameters()
4354 SM_I(sbi)->ipu_policy = BIT(F2FS_IPU_FORCE) | in f2fs_tuning_parameters()
4358 sbi->readdir_ra = true; in f2fs_tuning_parameters()
4363 struct f2fs_sb_info *sbi; in f2fs_fill_super() local
4383 sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL); in f2fs_fill_super()
4384 if (!sbi) in f2fs_fill_super()
4387 sbi->sb = sb; in f2fs_fill_super()
4390 init_f2fs_rwsem(&sbi->gc_lock); in f2fs_fill_super()
4391 mutex_init(&sbi->writepages); in f2fs_fill_super()
4392 init_f2fs_rwsem(&sbi->cp_global_sem); in f2fs_fill_super()
4393 init_f2fs_rwsem(&sbi->node_write); in f2fs_fill_super()
4394 init_f2fs_rwsem(&sbi->node_change); in f2fs_fill_super()
4395 spin_lock_init(&sbi->stat_lock); in f2fs_fill_super()
4396 init_f2fs_rwsem(&sbi->cp_rwsem); in f2fs_fill_super()
4397 init_f2fs_rwsem(&sbi->quota_sem); in f2fs_fill_super()
4398 init_waitqueue_head(&sbi->cp_wait); in f2fs_fill_super()
4399 spin_lock_init(&sbi->error_lock); in f2fs_fill_super()
4402 INIT_LIST_HEAD(&sbi->inode_list[i]); in f2fs_fill_super()
4403 spin_lock_init(&sbi->inode_lock[i]); in f2fs_fill_super()
4405 mutex_init(&sbi->flush_lock); in f2fs_fill_super()
4408 sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0); in f2fs_fill_super()
4409 if (IS_ERR(sbi->s_chksum_driver)) { in f2fs_fill_super()
4410 f2fs_err(sbi, "Cannot load crc32 driver."); in f2fs_fill_super()
4411 err = PTR_ERR(sbi->s_chksum_driver); in f2fs_fill_super()
4412 sbi->s_chksum_driver = NULL; in f2fs_fill_super()
4418 f2fs_err(sbi, "unable to set blocksize"); in f2fs_fill_super()
4422 err = read_raw_super_block(sbi, &raw_super, &valid_super_block, in f2fs_fill_super()
4427 sb->s_fs_info = sbi; in f2fs_fill_super()
4428 sbi->raw_super = raw_super; in f2fs_fill_super()
4430 INIT_WORK(&sbi->s_error_work, f2fs_record_error_work); in f2fs_fill_super()
4431 memcpy(sbi->errors, raw_super->s_errors, MAX_F2FS_ERRORS); in f2fs_fill_super()
4432 memcpy(sbi->stop_reason, raw_super->s_stop_reason, MAX_STOP_REASON); in f2fs_fill_super()
4435 if (f2fs_sb_has_inode_chksum(sbi)) in f2fs_fill_super()
4436 sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid, in f2fs_fill_super()
4439 default_options(sbi, false); in f2fs_fill_super()
4455 err = f2fs_setup_casefold(sbi); in f2fs_fill_super()
4464 if (f2fs_sb_has_quota_ino(sbi)) { in f2fs_fill_super()
4466 if (f2fs_qf_ino(sbi->sb, i)) in f2fs_fill_super()
4467 sbi->nquota_files++; in f2fs_fill_super()
4484 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0); in f2fs_fill_super()
4489 sbi->valid_super_block = valid_super_block; in f2fs_fill_super()
4492 set_sbi_flag(sbi, SBI_POR_DOING); in f2fs_fill_super()
4494 err = f2fs_init_write_merge_io(sbi); in f2fs_fill_super()
4498 init_sb_info(sbi); in f2fs_fill_super()
4500 err = f2fs_init_iostat(sbi); in f2fs_fill_super()
4504 err = init_percpu_info(sbi); in f2fs_fill_super()
4508 if (F2FS_IO_ALIGNED(sbi)) { in f2fs_fill_super()
4509 sbi->write_io_dummy = in f2fs_fill_super()
4510 mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0); in f2fs_fill_super()
4511 if (!sbi->write_io_dummy) { in f2fs_fill_super()
4517 /* init per sbi slab cache */ in f2fs_fill_super()
4518 err = f2fs_init_xattr_caches(sbi); in f2fs_fill_super()
4521 err = f2fs_init_page_array_cache(sbi); in f2fs_fill_super()
4526 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi)); in f2fs_fill_super()
4527 if (IS_ERR(sbi->meta_inode)) { in f2fs_fill_super()
4528 f2fs_err(sbi, "Failed to read F2FS meta data inode"); in f2fs_fill_super()
4529 err = PTR_ERR(sbi->meta_inode); in f2fs_fill_super()
4533 err = f2fs_get_valid_checkpoint(sbi); in f2fs_fill_super()
4535 f2fs_err(sbi, "Failed to get valid F2FS checkpoint"); in f2fs_fill_super()
4539 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG)) in f2fs_fill_super()
4540 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_fill_super()
4541 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) { in f2fs_fill_super()
4542 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK); in f2fs_fill_super()
4543 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL; in f2fs_fill_super()
4546 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG)) in f2fs_fill_super()
4547 set_sbi_flag(sbi, SBI_NEED_FSCK); in f2fs_fill_super()
4550 err = f2fs_scan_devices(sbi); in f2fs_fill_super()
4552 f2fs_err(sbi, "Failed to find devices"); in f2fs_fill_super()
4556 err = f2fs_init_post_read_wq(sbi); in f2fs_fill_super()
4558 f2fs_err(sbi, "Failed to initialize post read workqueue"); in f2fs_fill_super()
4562 sbi->total_valid_node_count = in f2fs_fill_super()
4563 le32_to_cpu(sbi->ckpt->valid_node_count); in f2fs_fill_super()
4564 percpu_counter_set(&sbi->total_valid_inode_count, in f2fs_fill_super()
4565 le32_to_cpu(sbi->ckpt->valid_inode_count)); in f2fs_fill_super()
4566 sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count); in f2fs_fill_super()
4567 sbi->total_valid_block_count = in f2fs_fill_super()
4568 le64_to_cpu(sbi->ckpt->valid_block_count); in f2fs_fill_super()
4569 sbi->last_valid_block_count = sbi->total_valid_block_count; in f2fs_fill_super()
4570 sbi->reserved_blocks = 0; in f2fs_fill_super()
4571 sbi->current_reserved_blocks = 0; in f2fs_fill_super()
4572 limit_reserve_root(sbi); in f2fs_fill_super()
4573 adjust_unusable_cap_perc(sbi); in f2fs_fill_super()
4575 f2fs_init_extent_cache_info(sbi); in f2fs_fill_super()
4577 f2fs_init_ino_entry_info(sbi); in f2fs_fill_super()
4579 f2fs_init_fsync_node_info(sbi); in f2fs_fill_super()
4582 f2fs_init_ckpt_req_control(sbi); in f2fs_fill_super()
4583 if (!f2fs_readonly(sb) && !test_opt(sbi, DISABLE_CHECKPOINT) && in f2fs_fill_super()
4584 test_opt(sbi, MERGE_CHECKPOINT)) { in f2fs_fill_super()
4585 err = f2fs_start_ckpt_thread(sbi); in f2fs_fill_super()
4587 f2fs_err(sbi, in f2fs_fill_super()
4595 err = f2fs_build_segment_manager(sbi); in f2fs_fill_super()
4597 f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)", in f2fs_fill_super()
4601 err = f2fs_build_node_manager(sbi); in f2fs_fill_super()
4603 f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)", in f2fs_fill_super()
4608 err = adjust_reserved_segment(sbi); in f2fs_fill_super()
4613 sbi->sectors_written_start = f2fs_get_sectors_written(sbi); in f2fs_fill_super()
4616 seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE); in f2fs_fill_super()
4617 if (__exist_node_summaries(sbi)) in f2fs_fill_super()
4618 sbi->kbytes_written = in f2fs_fill_super()
4621 f2fs_build_gc_manager(sbi); in f2fs_fill_super()
4623 err = f2fs_build_stats(sbi); in f2fs_fill_super()
4628 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi)); in f2fs_fill_super()
4629 if (IS_ERR(sbi->node_inode)) { in f2fs_fill_super()
4630 f2fs_err(sbi, "Failed to read node inode"); in f2fs_fill_super()
4631 err = PTR_ERR(sbi->node_inode); in f2fs_fill_super()
4636 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi)); in f2fs_fill_super()
4638 f2fs_err(sbi, "Failed to read root inode"); in f2fs_fill_super()
4655 err = f2fs_init_compress_inode(sbi); in f2fs_fill_super()
4659 err = f2fs_register_sysfs(sbi); in f2fs_fill_super()
4665 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) { in f2fs_fill_super()
4668 f2fs_err(sbi, "Cannot turn on quotas: error %d", err); in f2fs_fill_super()
4671 quota_enabled = f2fs_recover_quota_begin(sbi); in f2fs_fill_super()
4674 err = f2fs_recover_orphan_inodes(sbi); in f2fs_fill_super()
4678 if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG))) in f2fs_fill_super()
4682 if (!test_opt(sbi, DISABLE_ROLL_FORWARD) && in f2fs_fill_super()
4683 !test_opt(sbi, NORECOVERY)) { in f2fs_fill_super()
4688 if (f2fs_hw_is_readonly(sbi)) { in f2fs_fill_super()
4689 if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) { in f2fs_fill_super()
4690 err = f2fs_recover_fsync_data(sbi, true); in f2fs_fill_super()
4693 f2fs_err(sbi, "Need to recover fsync data, but " in f2fs_fill_super()
4700 f2fs_info(sbi, "write access unavailable, skipping recovery"); in f2fs_fill_super()
4705 set_sbi_flag(sbi, SBI_NEED_FSCK); in f2fs_fill_super()
4710 err = f2fs_recover_fsync_data(sbi, false); in f2fs_fill_super()
4715 f2fs_err(sbi, "Cannot recover all fsync data errno=%d", in f2fs_fill_super()
4720 err = f2fs_recover_fsync_data(sbi, true); in f2fs_fill_super()
4724 f2fs_err(sbi, "Need to recover fsync data"); in f2fs_fill_super()
4730 f2fs_recover_quota_end(sbi, quota_enabled); in f2fs_fill_super()
4737 if (!err && !f2fs_readonly(sb) && f2fs_sb_has_blkzoned(sbi)) { in f2fs_fill_super()
4738 err = f2fs_check_write_pointer(sbi); in f2fs_fill_super()
4744 f2fs_init_inmem_curseg(sbi); in f2fs_fill_super()
4747 clear_sbi_flag(sbi, SBI_POR_DOING); in f2fs_fill_super()
4749 if (test_opt(sbi, DISABLE_CHECKPOINT)) { in f2fs_fill_super()
4750 err = f2fs_disable_checkpoint(sbi); in f2fs_fill_super()
4753 } else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) { in f2fs_fill_super()
4754 f2fs_enable_checkpoint(sbi); in f2fs_fill_super()
4761 if ((F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF || in f2fs_fill_super()
4762 test_opt(sbi, GC_MERGE)) && !f2fs_readonly(sb)) { in f2fs_fill_super()
4764 err = f2fs_start_gc_thread(sbi); in f2fs_fill_super()
4772 err = f2fs_commit_super(sbi, true); in f2fs_fill_super()
4773 f2fs_info(sbi, "Try to recover %dth superblock, ret: %d", in f2fs_fill_super()
4774 sbi->valid_super_block ? 1 : 2, err); in f2fs_fill_super()
4777 f2fs_join_shrinker(sbi); in f2fs_fill_super()
4779 f2fs_tuning_parameters(sbi); in f2fs_fill_super()
4781 f2fs_notice(sbi, "Mounted with checkpoint version = %llx", in f2fs_fill_super()
4782 cur_cp_version(F2FS_CKPT(sbi))); in f2fs_fill_super()
4783 f2fs_update_time(sbi, CP_TIME); in f2fs_fill_super()
4784 f2fs_update_time(sbi, REQ_TIME); in f2fs_fill_super()
4785 clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK); in f2fs_fill_super()
4790 sync_filesystem(sbi->sb); in f2fs_fill_super()
4796 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) in f2fs_fill_super()
4797 f2fs_quota_off_umount(sbi->sb); in f2fs_fill_super()
4805 truncate_inode_pages_final(META_MAPPING(sbi)); in f2fs_fill_super()
4808 f2fs_unregister_sysfs(sbi); in f2fs_fill_super()
4810 f2fs_destroy_compress_inode(sbi); in f2fs_fill_super()
4815 f2fs_release_ino_entry(sbi, true); in f2fs_fill_super()
4816 truncate_inode_pages_final(NODE_MAPPING(sbi)); in f2fs_fill_super()
4817 iput(sbi->node_inode); in f2fs_fill_super()
4818 sbi->node_inode = NULL; in f2fs_fill_super()
4820 f2fs_destroy_stats(sbi); in f2fs_fill_super()
4823 f2fs_stop_discard_thread(sbi); in f2fs_fill_super()
4824 f2fs_destroy_node_manager(sbi); in f2fs_fill_super()
4826 f2fs_destroy_segment_manager(sbi); in f2fs_fill_super()
4828 f2fs_stop_ckpt_thread(sbi); in f2fs_fill_super()
4829 /* flush s_error_work before sbi destroy */ in f2fs_fill_super()
4830 flush_work(&sbi->s_error_work); in f2fs_fill_super()
4831 f2fs_destroy_post_read_wq(sbi); in f2fs_fill_super()
4833 destroy_device_list(sbi); in f2fs_fill_super()
4834 kvfree(sbi->ckpt); in f2fs_fill_super()
4836 make_bad_inode(sbi->meta_inode); in f2fs_fill_super()
4837 iput(sbi->meta_inode); in f2fs_fill_super()
4838 sbi->meta_inode = NULL; in f2fs_fill_super()
4840 f2fs_destroy_page_array_cache(sbi); in f2fs_fill_super()
4842 f2fs_destroy_xattr_caches(sbi); in f2fs_fill_super()
4844 mempool_destroy(sbi->write_io_dummy); in f2fs_fill_super()
4846 destroy_percpu_info(sbi); in f2fs_fill_super()
4848 f2fs_destroy_iostat(sbi); in f2fs_fill_super()
4851 kvfree(sbi->write_io[i]); in f2fs_fill_super()
4860 kfree(F2FS_OPTION(sbi).s_qf_names[i]); in f2fs_fill_super()
4862 fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy); in f2fs_fill_super()
4867 if (sbi->s_chksum_driver) in f2fs_fill_super()
4868 crypto_free_shash(sbi->s_chksum_driver); in f2fs_fill_super()
4869 kfree(sbi); in f2fs_fill_super()
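
Near the end of the f2fs_fill_super() hits, the roll-forward recovery branch picks between a full replay, a check-only pass, and skipping recovery, depending on the disable_roll_forward/norecovery options, whether the device is hardware read-only, and whether the last checkpoint was a clean unmount. A loose reconstruction of that decision (the enum and helper names here are illustrative, not the kernel's):

#include <stdbool.h>

enum recovery_action { RECOVER_FULL, RECOVER_CHECK_ONLY, RECOVER_SKIP };

static enum recovery_action pick_recovery(bool disable_roll_forward,
					  bool norecovery,
					  bool hw_readonly,
					  bool clean_umount)
{
	if (!disable_roll_forward && !norecovery) {
		if (hw_readonly)
			/* cannot write: at most verify there is nothing to replay */
			return clean_umount ? RECOVER_SKIP : RECOVER_CHECK_ONLY;
		return RECOVER_FULL;		/* f2fs_recover_fsync_data(sbi, false) */
	}
	/* recovery disabled: dry-run so data needing recovery is still reported */
	return RECOVER_CHECK_ONLY;		/* f2fs_recover_fsync_data(sbi, true) */
}
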
4889 struct f2fs_sb_info *sbi = F2FS_SB(sb); in kill_f2fs_super() local
4891 set_sbi_flag(sbi, SBI_IS_CLOSE); in kill_f2fs_super()
4892 f2fs_stop_gc_thread(sbi); in kill_f2fs_super()
4893 f2fs_stop_discard_thread(sbi); in kill_f2fs_super()
4900 if (test_opt(sbi, COMPRESS_CACHE)) in kill_f2fs_super()
4901 truncate_inode_pages_final(COMPRESS_MAPPING(sbi)); in kill_f2fs_super()
4904 if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) || in kill_f2fs_super()
4905 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) { in kill_f2fs_super()
4909 stat_inc_cp_call_count(sbi, TOTAL_CALL); in kill_f2fs_super()
4910 f2fs_write_checkpoint(sbi, &cpc); in kill_f2fs_super()
4913 if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb)) in kill_f2fs_super()