
Lines Matching full:sbi

74 void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,  in f2fs_build_fault_attr()  argument
77 struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info; in f2fs_build_fault_attr()
238 void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...) in f2fs_printk() argument
250 KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf); in f2fs_printk()
285 static inline void limit_reserve_root(struct f2fs_sb_info *sbi) in limit_reserve_root() argument
287 block_t limit = min((sbi->user_block_count >> 3), in limit_reserve_root()
288 sbi->user_block_count - sbi->reserved_blocks); in limit_reserve_root()
291 if (test_opt(sbi, RESERVE_ROOT) && in limit_reserve_root()
292 F2FS_OPTION(sbi).root_reserved_blocks > limit) { in limit_reserve_root()
293 F2FS_OPTION(sbi).root_reserved_blocks = limit; in limit_reserve_root()
294 f2fs_info(sbi, "Reduce reserved blocks for root = %u", in limit_reserve_root()
295 F2FS_OPTION(sbi).root_reserved_blocks); in limit_reserve_root()
297 if (!test_opt(sbi, RESERVE_ROOT) && in limit_reserve_root()
298 (!uid_eq(F2FS_OPTION(sbi).s_resuid, in limit_reserve_root()
300 !gid_eq(F2FS_OPTION(sbi).s_resgid, in limit_reserve_root()
302 f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root", in limit_reserve_root()
304 F2FS_OPTION(sbi).s_resuid), in limit_reserve_root()
306 F2FS_OPTION(sbi).s_resgid)); in limit_reserve_root()
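
The limit_reserve_root() matches above cap the reserve_root mount option at min(user_block_count >> 3, user_block_count - reserved_blocks), logging when the requested value is reduced. A minimal standalone sketch of that clamp follows; the function and parameter names are illustrative stand-ins, not the kernel's.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t block_t;

/* Illustrative clamp: at most 1/8 of the user blocks, and never more
 * than what is left after the filesystem's own reservation. */
static block_t clamp_root_reserved(block_t user_blocks, block_t fs_reserved,
                                   block_t requested)
{
        block_t limit = user_blocks >> 3;

        if (user_blocks - fs_reserved < limit)
                limit = user_blocks - fs_reserved;

        return requested > limit ? limit : requested;
}

int main(void)
{
        /* 1,000,000 user blocks, 4,096 already reserved, root asks for 200,000 */
        printf("root reserved clamped to %u blocks\n",
               (unsigned)clamp_root_reserved(1000000, 4096, 200000));
        return 0;
}

Run as-is this prints 125000, i.e. the one-eighth limit wins over the requested reservation.
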
309 static inline int adjust_reserved_segment(struct f2fs_sb_info *sbi) in adjust_reserved_segment() argument
311 unsigned int sec_blks = sbi->blocks_per_seg * sbi->segs_per_sec; in adjust_reserved_segment()
316 if (!F2FS_IO_ALIGNED(sbi)) in adjust_reserved_segment()
320 avg_vblocks = sec_blks / F2FS_IO_SIZE(sbi); in adjust_reserved_segment()
325 wanted_reserved_segments = (F2FS_IO_SIZE(sbi) / avg_vblocks) * in adjust_reserved_segment()
326 reserved_segments(sbi); in adjust_reserved_segment()
327 wanted_reserved_segments -= reserved_segments(sbi); in adjust_reserved_segment()
329 avail_user_block_count = sbi->user_block_count - in adjust_reserved_segment()
330 sbi->current_reserved_blocks - in adjust_reserved_segment()
331 F2FS_OPTION(sbi).root_reserved_blocks; in adjust_reserved_segment()
333 if (wanted_reserved_segments * sbi->blocks_per_seg > in adjust_reserved_segment()
335 …f2fs_err(sbi, "IO align feature can't grab additional reserved segment: %u, available segments: %u… in adjust_reserved_segment()
337 avail_user_block_count >> sbi->log_blocks_per_seg); in adjust_reserved_segment()
341 SM_I(sbi)->additional_reserved_segments = wanted_reserved_segments; in adjust_reserved_segment()
343 f2fs_info(sbi, "IO align feature needs additional reserved segment: %u", in adjust_reserved_segment()
349 static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi) in adjust_unusable_cap_perc() argument
351 if (!F2FS_OPTION(sbi).unusable_cap_perc) in adjust_unusable_cap_perc()
354 if (F2FS_OPTION(sbi).unusable_cap_perc == 100) in adjust_unusable_cap_perc()
355 F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count; in adjust_unusable_cap_perc()
357 F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) * in adjust_unusable_cap_perc()
358 F2FS_OPTION(sbi).unusable_cap_perc; in adjust_unusable_cap_perc()
360 f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%", in adjust_unusable_cap_perc()
361 F2FS_OPTION(sbi).unusable_cap, in adjust_unusable_cap_perc()
362 F2FS_OPTION(sbi).unusable_cap_perc); in adjust_unusable_cap_perc()
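
The adjust_unusable_cap_perc() matches above turn the checkpoint=disable percentage into an absolute block cap: 100% selects the whole user area, any other value computes (user_block_count / 100) * perc, dividing before multiplying so the result stays within the 32-bit block counter. A hedged sketch of that conversion, with illustrative names:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t block_t;

/* A zero percentage means the option was not given, so any explicitly
 * configured cap is left untouched. */
static block_t unusable_cap_from_perc(block_t user_blocks, unsigned int perc,
                                      block_t current_cap)
{
        if (!perc)
                return current_cap;
        if (perc == 100)
                return user_blocks;
        return (user_blocks / 100) * perc;      /* divide first, as above */
}

int main(void)
{
        /* ~100 GiB worth of 4 KiB blocks, capped at 20% */
        printf("unusable cap = %u blocks\n",
               (unsigned)unusable_cap_from_perc(26214400, 20, 0));
        return 0;
}
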
378 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_set_qf_name() local
382 if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) { in f2fs_set_qf_name()
383 f2fs_err(sbi, "Cannot change journaled quota options when quota turned on"); in f2fs_set_qf_name()
386 if (f2fs_sb_has_quota_ino(sbi)) { in f2fs_set_qf_name()
387 f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name"); in f2fs_set_qf_name()
393 f2fs_err(sbi, "Not enough memory for storing quotafile name"); in f2fs_set_qf_name()
396 if (F2FS_OPTION(sbi).s_qf_names[qtype]) { in f2fs_set_qf_name()
397 if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0) in f2fs_set_qf_name()
400 f2fs_err(sbi, "%s quota file already specified", in f2fs_set_qf_name()
405 f2fs_err(sbi, "quotafile must be on filesystem root"); in f2fs_set_qf_name()
408 F2FS_OPTION(sbi).s_qf_names[qtype] = qname; in f2fs_set_qf_name()
409 set_opt(sbi, QUOTA); in f2fs_set_qf_name()
418 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_clear_qf_name() local
420 if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) { in f2fs_clear_qf_name()
421 f2fs_err(sbi, "Cannot change journaled quota options when quota turned on"); in f2fs_clear_qf_name()
424 kfree(F2FS_OPTION(sbi).s_qf_names[qtype]); in f2fs_clear_qf_name()
425 F2FS_OPTION(sbi).s_qf_names[qtype] = NULL; in f2fs_clear_qf_name()
429 static int f2fs_check_quota_options(struct f2fs_sb_info *sbi) in f2fs_check_quota_options() argument
436 if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) { in f2fs_check_quota_options()
437 f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement."); in f2fs_check_quota_options()
440 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] || in f2fs_check_quota_options()
441 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] || in f2fs_check_quota_options()
442 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) { in f2fs_check_quota_options()
443 if (test_opt(sbi, USRQUOTA) && in f2fs_check_quota_options()
444 F2FS_OPTION(sbi).s_qf_names[USRQUOTA]) in f2fs_check_quota_options()
445 clear_opt(sbi, USRQUOTA); in f2fs_check_quota_options()
447 if (test_opt(sbi, GRPQUOTA) && in f2fs_check_quota_options()
448 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]) in f2fs_check_quota_options()
449 clear_opt(sbi, GRPQUOTA); in f2fs_check_quota_options()
451 if (test_opt(sbi, PRJQUOTA) && in f2fs_check_quota_options()
452 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) in f2fs_check_quota_options()
453 clear_opt(sbi, PRJQUOTA); in f2fs_check_quota_options()
455 if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) || in f2fs_check_quota_options()
456 test_opt(sbi, PRJQUOTA)) { in f2fs_check_quota_options()
457 f2fs_err(sbi, "old and new quota format mixing"); in f2fs_check_quota_options()
461 if (!F2FS_OPTION(sbi).s_jquota_fmt) { in f2fs_check_quota_options()
462 f2fs_err(sbi, "journaled quota format not specified"); in f2fs_check_quota_options()
467 if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) { in f2fs_check_quota_options()
468 f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt"); in f2fs_check_quota_options()
469 F2FS_OPTION(sbi).s_jquota_fmt = 0; in f2fs_check_quota_options()
480 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_set_test_dummy_encryption() local
484 if (!f2fs_sb_has_encrypt(sbi)) { in f2fs_set_test_dummy_encryption()
485 f2fs_err(sbi, "Encrypt feature is off"); in f2fs_set_test_dummy_encryption()
495 if (is_remount && !F2FS_OPTION(sbi).dummy_enc_policy.policy) { in f2fs_set_test_dummy_encryption()
496 f2fs_warn(sbi, "Can't set test_dummy_encryption on remount"); in f2fs_set_test_dummy_encryption()
500 sb, arg->from, &F2FS_OPTION(sbi).dummy_enc_policy); in f2fs_set_test_dummy_encryption()
503 f2fs_warn(sbi, in f2fs_set_test_dummy_encryption()
506 f2fs_warn(sbi, "Value of option \"%s\" is unrecognized", in f2fs_set_test_dummy_encryption()
509 f2fs_warn(sbi, "Error processing option \"%s\" [%d]", in f2fs_set_test_dummy_encryption()
513 f2fs_warn(sbi, "Test dummy encryption mode enabled"); in f2fs_set_test_dummy_encryption()
515 f2fs_warn(sbi, "Test dummy encryption mount option ignored"); in f2fs_set_test_dummy_encryption()
522 struct f2fs_sb_info *sbi = F2FS_SB(sb); in parse_options() local
555 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON; in parse_options()
557 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF; in parse_options()
559 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC; in parse_options()
567 set_opt(sbi, DISABLE_ROLL_FORWARD); in parse_options()
571 set_opt(sbi, NORECOVERY); in parse_options()
576 set_opt(sbi, DISCARD); in parse_options()
579 if (f2fs_sb_has_blkzoned(sbi)) { in parse_options()
580 f2fs_warn(sbi, "discard is required for zoned block devices"); in parse_options()
583 clear_opt(sbi, DISCARD); in parse_options()
586 set_opt(sbi, NOHEAP); in parse_options()
589 clear_opt(sbi, NOHEAP); in parse_options()
593 set_opt(sbi, XATTR_USER); in parse_options()
596 clear_opt(sbi, XATTR_USER); in parse_options()
599 set_opt(sbi, INLINE_XATTR); in parse_options()
602 clear_opt(sbi, INLINE_XATTR); in parse_options()
607 set_opt(sbi, INLINE_XATTR_SIZE); in parse_options()
608 F2FS_OPTION(sbi).inline_xattr_size = arg; in parse_options()
612 f2fs_info(sbi, "user_xattr options not supported"); in parse_options()
615 f2fs_info(sbi, "nouser_xattr options not supported"); in parse_options()
618 f2fs_info(sbi, "inline_xattr options not supported"); in parse_options()
621 f2fs_info(sbi, "noinline_xattr options not supported"); in parse_options()
626 set_opt(sbi, POSIX_ACL); in parse_options()
629 clear_opt(sbi, POSIX_ACL); in parse_options()
633 f2fs_info(sbi, "acl options not supported"); in parse_options()
636 f2fs_info(sbi, "noacl options not supported"); in parse_options()
645 F2FS_OPTION(sbi).active_logs = arg; in parse_options()
648 set_opt(sbi, DISABLE_EXT_IDENTIFY); in parse_options()
651 set_opt(sbi, INLINE_DATA); in parse_options()
654 set_opt(sbi, INLINE_DENTRY); in parse_options()
657 clear_opt(sbi, INLINE_DENTRY); in parse_options()
660 set_opt(sbi, FLUSH_MERGE); in parse_options()
663 clear_opt(sbi, FLUSH_MERGE); in parse_options()
666 set_opt(sbi, NOBARRIER); in parse_options()
669 set_opt(sbi, FASTBOOT); in parse_options()
672 set_opt(sbi, EXTENT_CACHE); in parse_options()
675 clear_opt(sbi, EXTENT_CACHE); in parse_options()
678 clear_opt(sbi, INLINE_DATA); in parse_options()
681 set_opt(sbi, DATA_FLUSH); in parse_options()
686 if (test_opt(sbi, RESERVE_ROOT)) { in parse_options()
687 f2fs_info(sbi, "Preserve previous reserve_root=%u", in parse_options()
688 F2FS_OPTION(sbi).root_reserved_blocks); in parse_options()
690 F2FS_OPTION(sbi).root_reserved_blocks = arg; in parse_options()
691 set_opt(sbi, RESERVE_ROOT); in parse_options()
699 f2fs_err(sbi, "Invalid uid value %d", arg); in parse_options()
702 F2FS_OPTION(sbi).s_resuid = uid; in parse_options()
709 f2fs_err(sbi, "Invalid gid value %d", arg); in parse_options()
712 F2FS_OPTION(sbi).s_resgid = gid; in parse_options()
720 if (f2fs_sb_has_blkzoned(sbi)) { in parse_options()
721 f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature"); in parse_options()
725 F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE; in parse_options()
727 F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS; in parse_options()
738 f2fs_warn(sbi, "Not support %d, larger than %d", in parse_options()
742 F2FS_OPTION(sbi).write_io_size_bits = arg; in parse_options()
748 f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE); in parse_options()
749 set_opt(sbi, FAULT_INJECTION); in parse_options()
755 f2fs_build_fault_attr(sbi, 0, arg); in parse_options()
756 set_opt(sbi, FAULT_INJECTION); in parse_options()
760 f2fs_info(sbi, "fault_injection options not supported"); in parse_options()
764 f2fs_info(sbi, "fault_type options not supported"); in parse_options()
776 set_opt(sbi, USRQUOTA); in parse_options()
779 set_opt(sbi, GRPQUOTA); in parse_options()
782 set_opt(sbi, PRJQUOTA); in parse_options()
815 F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD; in parse_options()
818 F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0; in parse_options()
821 F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1; in parse_options()
824 clear_opt(sbi, QUOTA); in parse_options()
825 clear_opt(sbi, USRQUOTA); in parse_options()
826 clear_opt(sbi, GRPQUOTA); in parse_options()
827 clear_opt(sbi, PRJQUOTA); in parse_options()
844 f2fs_info(sbi, "quota operations not supported"); in parse_options()
852 F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER; in parse_options()
854 F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF; in parse_options()
856 F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS; in parse_options()
869 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT; in parse_options()
871 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE; in parse_options()
883 F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX; in parse_options()
885 F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT; in parse_options()
887 F2FS_OPTION(sbi).fsync_mode = in parse_options()
905 f2fs_info(sbi, "inline encryption not supported"); in parse_options()
913 F2FS_OPTION(sbi).unusable_cap_perc = arg; in parse_options()
914 set_opt(sbi, DISABLE_CHECKPOINT); in parse_options()
919 F2FS_OPTION(sbi).unusable_cap = arg; in parse_options()
920 set_opt(sbi, DISABLE_CHECKPOINT); in parse_options()
923 set_opt(sbi, DISABLE_CHECKPOINT); in parse_options()
926 clear_opt(sbi, DISABLE_CHECKPOINT); in parse_options()
930 if (!f2fs_sb_has_compression(sbi)) { in parse_options()
931 f2fs_info(sbi, "Image doesn't support compression"); in parse_options()
938 F2FS_OPTION(sbi).compress_algorithm = in parse_options()
941 F2FS_OPTION(sbi).compress_algorithm = in parse_options()
944 F2FS_OPTION(sbi).compress_algorithm = in parse_options()
947 F2FS_OPTION(sbi).compress_algorithm = in parse_options()
956 if (!f2fs_sb_has_compression(sbi)) { in parse_options()
957 f2fs_info(sbi, "Image doesn't support compression"); in parse_options()
964 f2fs_err(sbi, in parse_options()
968 F2FS_OPTION(sbi).compress_log_size = arg; in parse_options()
971 if (!f2fs_sb_has_compression(sbi)) { in parse_options()
972 f2fs_info(sbi, "Image doesn't support compression"); in parse_options()
979 ext = F2FS_OPTION(sbi).extensions; in parse_options()
980 ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt; in parse_options()
984 f2fs_err(sbi, in parse_options()
991 F2FS_OPTION(sbi).compress_ext_cnt++; in parse_options()
998 f2fs_info(sbi, "compression options not supported"); in parse_options()
1002 set_opt(sbi, ATGC); in parse_options()
1005 set_opt(sbi, GC_MERGE); in parse_options()
1008 clear_opt(sbi, GC_MERGE); in parse_options()
1011 f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value", in parse_options()
1017 if (f2fs_check_quota_options(sbi)) in parse_options()
1020 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) { in parse_options()
1021 f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA"); in parse_options()
1024 if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) { in parse_options()
1025 …f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA"); in parse_options()
1030 if (f2fs_sb_has_casefold(sbi)) { in parse_options()
1031 f2fs_err(sbi, in parse_options()
1042 if (f2fs_sb_has_blkzoned(sbi)) { in parse_options()
1043 f2fs_err(sbi, "Zoned block device support is not enabled"); in parse_options()
1048 if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) { in parse_options()
1049 f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO", in parse_options()
1050 F2FS_IO_SIZE_KB(sbi)); in parse_options()
1054 if (test_opt(sbi, INLINE_XATTR_SIZE)) { in parse_options()
1057 if (!f2fs_sb_has_extra_attr(sbi) || in parse_options()
1058 !f2fs_sb_has_flexible_inline_xattr(sbi)) { in parse_options()
1059 f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off"); in parse_options()
1062 if (!test_opt(sbi, INLINE_XATTR)) { in parse_options()
1063 f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option"); in parse_options()
1070 if (F2FS_OPTION(sbi).inline_xattr_size < min_size || in parse_options()
1071 F2FS_OPTION(sbi).inline_xattr_size > max_size) { in parse_options()
1072 f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d", in parse_options()
1078 if (test_opt(sbi, DISABLE_CHECKPOINT) && f2fs_lfs_mode(sbi)) { in parse_options()
1079 f2fs_err(sbi, "LFS not compatible with checkpoint=disable\n"); in parse_options()
1086 if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_PERSIST_TYPE) in parse_options()
1087 F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF; in parse_options()
1126 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_drop_inode() local
1133 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { in f2fs_drop_inode()
1134 if (inode->i_ino == F2FS_NODE_INO(sbi) || in f2fs_drop_inode()
1135 inode->i_ino == F2FS_META_INO(sbi)) { in f2fs_drop_inode()
1188 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_inode_dirtied() local
1191 spin_lock(&sbi->inode_lock[DIRTY_META]); in f2fs_inode_dirtied()
1196 stat_inc_dirty_inode(sbi, DIRTY_META); in f2fs_inode_dirtied()
1200 &sbi->inode_list[DIRTY_META]); in f2fs_inode_dirtied()
1201 inc_page_count(sbi, F2FS_DIRTY_IMETA); in f2fs_inode_dirtied()
1203 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_inode_dirtied()
1209 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_inode_synced() local
1211 spin_lock(&sbi->inode_lock[DIRTY_META]); in f2fs_inode_synced()
1213 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_inode_synced()
1218 dec_page_count(sbi, F2FS_DIRTY_IMETA); in f2fs_inode_synced()
1223 spin_unlock(&sbi->inode_lock[DIRTY_META]); in f2fs_inode_synced()
1233 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_dirty_inode() local
1235 if (inode->i_ino == F2FS_NODE_INO(sbi) || in f2fs_dirty_inode()
1236 inode->i_ino == F2FS_META_INO(sbi)) in f2fs_dirty_inode()
1254 static void destroy_percpu_info(struct f2fs_sb_info *sbi) in destroy_percpu_info() argument
1256 percpu_counter_destroy(&sbi->alloc_valid_block_count); in destroy_percpu_info()
1257 percpu_counter_destroy(&sbi->total_valid_inode_count); in destroy_percpu_info()
1260 static void destroy_device_list(struct f2fs_sb_info *sbi) in destroy_device_list() argument
1264 for (i = 0; i < sbi->s_ndevs; i++) { in destroy_device_list()
1270 kvfree(sbi->devs); in destroy_device_list()
1275 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_put_super() local
1280 f2fs_unregister_sysfs(sbi); in f2fs_put_super()
1285 mutex_lock(&sbi->umount_mutex); in f2fs_put_super()
1292 if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) || in f2fs_put_super()
1293 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) { in f2fs_put_super()
1297 f2fs_write_checkpoint(sbi, &cpc); in f2fs_put_super()
1301 dropped = f2fs_issue_discard_timeout(sbi); in f2fs_put_super()
1303 if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) && in f2fs_put_super()
1304 !sbi->discard_blks && !dropped) { in f2fs_put_super()
1308 f2fs_write_checkpoint(sbi, &cpc); in f2fs_put_super()
1315 f2fs_release_ino_entry(sbi, true); in f2fs_put_super()
1317 f2fs_leave_shrinker(sbi); in f2fs_put_super()
1318 mutex_unlock(&sbi->umount_mutex); in f2fs_put_super()
1321 f2fs_flush_merged_writes(sbi); in f2fs_put_super()
1323 f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA); in f2fs_put_super()
1325 f2fs_bug_on(sbi, sbi->fsync_node_num); in f2fs_put_super()
1327 iput(sbi->node_inode); in f2fs_put_super()
1328 sbi->node_inode = NULL; in f2fs_put_super()
1330 iput(sbi->meta_inode); in f2fs_put_super()
1331 sbi->meta_inode = NULL; in f2fs_put_super()
1337 f2fs_destroy_stats(sbi); in f2fs_put_super()
1340 f2fs_destroy_node_manager(sbi); in f2fs_put_super()
1341 f2fs_destroy_segment_manager(sbi); in f2fs_put_super()
1343 f2fs_destroy_post_read_wq(sbi); in f2fs_put_super()
1345 kvfree(sbi->ckpt); in f2fs_put_super()
1348 if (sbi->s_chksum_driver) in f2fs_put_super()
1349 crypto_free_shash(sbi->s_chksum_driver); in f2fs_put_super()
1350 kfree(sbi->raw_super); in f2fs_put_super()
1352 destroy_device_list(sbi); in f2fs_put_super()
1353 f2fs_destroy_page_array_cache(sbi); in f2fs_put_super()
1354 f2fs_destroy_xattr_caches(sbi); in f2fs_put_super()
1355 mempool_destroy(sbi->write_io_dummy); in f2fs_put_super()
1358 kfree(F2FS_OPTION(sbi).s_qf_names[i]); in f2fs_put_super()
1360 fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy); in f2fs_put_super()
1361 destroy_percpu_info(sbi); in f2fs_put_super()
1363 kvfree(sbi->write_io[i]); in f2fs_put_super()
1367 kfree(sbi); in f2fs_put_super()
1372 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_sync_fs() local
1375 if (unlikely(f2fs_cp_error(sbi))) in f2fs_sync_fs()
1377 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) in f2fs_sync_fs()
1382 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) in f2fs_sync_fs()
1388 cpc.reason = __get_cp_reason(sbi); in f2fs_sync_fs()
1390 down_write(&sbi->gc_lock); in f2fs_sync_fs()
1391 err = f2fs_write_checkpoint(sbi, &cpc); in f2fs_sync_fs()
1392 up_write(&sbi->gc_lock); in f2fs_sync_fs()
1467 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_statfs() local
1472 total_count = le64_to_cpu(sbi->raw_super->block_count); in f2fs_statfs()
1473 user_block_count = sbi->user_block_count; in f2fs_statfs()
1474 start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr); in f2fs_statfs()
1476 buf->f_bsize = sbi->blocksize; in f2fs_statfs()
1479 buf->f_bfree = user_block_count - valid_user_blocks(sbi) - in f2fs_statfs()
1480 sbi->current_reserved_blocks; in f2fs_statfs()
1482 spin_lock(&sbi->stat_lock); in f2fs_statfs()
1483 if (unlikely(buf->f_bfree <= sbi->unusable_block_count)) in f2fs_statfs()
1486 buf->f_bfree -= sbi->unusable_block_count; in f2fs_statfs()
1487 spin_unlock(&sbi->stat_lock); in f2fs_statfs()
1489 if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks) in f2fs_statfs()
1491 F2FS_OPTION(sbi).root_reserved_blocks; in f2fs_statfs()
1495 avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM; in f2fs_statfs()
1502 buf->f_ffree = min(avail_node_count - valid_node_count(sbi), in f2fs_statfs()
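
The f2fs_statfs() matches above derive the free and available counts by walking down from user_block_count: subtract the valid blocks and the current reservation, then the unusable blocks tracked for checkpoint=disable, and finally the root reservation for f_bavail. A self-contained sketch of that arithmetic; the struct and field names below are stand-ins for the sbi counters, not the kernel's types.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t block_t;

struct counts {                 /* illustrative stand-ins for the sbi counters */
        block_t user_blocks;
        block_t valid_blocks;
        block_t current_reserved;
        block_t unusable;
        block_t root_reserved;
};

static void fill_statfs(const struct counts *c, block_t *bfree, block_t *bavail)
{
        *bfree = c->user_blocks - c->valid_blocks - c->current_reserved;
        *bfree = (*bfree <= c->unusable) ? 0 : *bfree - c->unusable;
        *bavail = (*bfree > c->root_reserved) ? *bfree - c->root_reserved : 0;
}

int main(void)
{
        struct counts c = {
                .user_blocks = 1000000, .valid_blocks = 600000,
                .current_reserved = 10000, .unusable = 0, .root_reserved = 25000,
        };
        block_t bfree, bavail;

        fill_statfs(&c, &bfree, &bavail);
        printf("f_bfree=%llu f_bavail=%llu\n",
               (unsigned long long)bfree, (unsigned long long)bavail);
        return 0;
}
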
1522 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_show_quota_options() local
1524 if (F2FS_OPTION(sbi).s_jquota_fmt) { in f2fs_show_quota_options()
1527 switch (F2FS_OPTION(sbi).s_jquota_fmt) { in f2fs_show_quota_options()
1541 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA]) in f2fs_show_quota_options()
1543 F2FS_OPTION(sbi).s_qf_names[USRQUOTA]); in f2fs_show_quota_options()
1545 if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]) in f2fs_show_quota_options()
1547 F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]); in f2fs_show_quota_options()
1549 if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) in f2fs_show_quota_options()
1551 F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]); in f2fs_show_quota_options()
1558 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_show_compress_options() local
1562 if (!f2fs_sb_has_compression(sbi)) in f2fs_show_compress_options()
1565 switch (F2FS_OPTION(sbi).compress_algorithm) { in f2fs_show_compress_options()
1582 F2FS_OPTION(sbi).compress_log_size); in f2fs_show_compress_options()
1584 for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) { in f2fs_show_compress_options()
1586 F2FS_OPTION(sbi).extensions[i]); in f2fs_show_compress_options()
1592 struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb); in f2fs_show_options() local
1594 if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC) in f2fs_show_options()
1596 else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON) in f2fs_show_options()
1598 else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF) in f2fs_show_options()
1601 if (test_opt(sbi, GC_MERGE)) in f2fs_show_options()
1604 if (test_opt(sbi, DISABLE_ROLL_FORWARD)) in f2fs_show_options()
1606 if (test_opt(sbi, NORECOVERY)) in f2fs_show_options()
1608 if (test_opt(sbi, DISCARD)) in f2fs_show_options()
1612 if (test_opt(sbi, NOHEAP)) in f2fs_show_options()
1617 if (test_opt(sbi, XATTR_USER)) in f2fs_show_options()
1621 if (test_opt(sbi, INLINE_XATTR)) in f2fs_show_options()
1625 if (test_opt(sbi, INLINE_XATTR_SIZE)) in f2fs_show_options()
1627 F2FS_OPTION(sbi).inline_xattr_size); in f2fs_show_options()
1630 if (test_opt(sbi, POSIX_ACL)) in f2fs_show_options()
1635 if (test_opt(sbi, DISABLE_EXT_IDENTIFY)) in f2fs_show_options()
1637 if (test_opt(sbi, INLINE_DATA)) in f2fs_show_options()
1641 if (test_opt(sbi, INLINE_DENTRY)) in f2fs_show_options()
1645 if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE)) in f2fs_show_options()
1647 if (test_opt(sbi, NOBARRIER)) in f2fs_show_options()
1649 if (test_opt(sbi, FASTBOOT)) in f2fs_show_options()
1651 if (test_opt(sbi, EXTENT_CACHE)) in f2fs_show_options()
1655 if (test_opt(sbi, DATA_FLUSH)) in f2fs_show_options()
1659 if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE) in f2fs_show_options()
1661 else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS) in f2fs_show_options()
1663 seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs); in f2fs_show_options()
1664 if (test_opt(sbi, RESERVE_ROOT)) in f2fs_show_options()
1666 F2FS_OPTION(sbi).root_reserved_blocks, in f2fs_show_options()
1668 F2FS_OPTION(sbi).s_resuid), in f2fs_show_options()
1670 F2FS_OPTION(sbi).s_resgid)); in f2fs_show_options()
1671 if (F2FS_IO_SIZE_BITS(sbi)) in f2fs_show_options()
1673 F2FS_OPTION(sbi).write_io_size_bits); in f2fs_show_options()
1675 if (test_opt(sbi, FAULT_INJECTION)) { in f2fs_show_options()
1677 F2FS_OPTION(sbi).fault_info.inject_rate); in f2fs_show_options()
1679 F2FS_OPTION(sbi).fault_info.inject_type); in f2fs_show_options()
1683 if (test_opt(sbi, QUOTA)) in f2fs_show_options()
1685 if (test_opt(sbi, USRQUOTA)) in f2fs_show_options()
1687 if (test_opt(sbi, GRPQUOTA)) in f2fs_show_options()
1689 if (test_opt(sbi, PRJQUOTA)) in f2fs_show_options()
1692 f2fs_show_quota_options(seq, sbi->sb); in f2fs_show_options()
1693 if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER) in f2fs_show_options()
1695 else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS) in f2fs_show_options()
1698 fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb); in f2fs_show_options()
1700 if (sbi->sb->s_flags & SB_INLINECRYPT) in f2fs_show_options()
1703 if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT) in f2fs_show_options()
1705 else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE) in f2fs_show_options()
1708 if (test_opt(sbi, DISABLE_CHECKPOINT)) in f2fs_show_options()
1710 F2FS_OPTION(sbi).unusable_cap); in f2fs_show_options()
1711 if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX) in f2fs_show_options()
1713 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT) in f2fs_show_options()
1715 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER) in f2fs_show_options()
1719 f2fs_show_compress_options(seq, sbi->sb); in f2fs_show_options()
1722 if (test_opt(sbi, ATGC)) in f2fs_show_options()
1727 static void default_options(struct f2fs_sb_info *sbi) in default_options() argument
1730 F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE; in default_options()
1731 F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS; in default_options()
1732 F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF; in default_options()
1733 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT; in default_options()
1734 F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX; in default_options()
1735 F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID); in default_options()
1736 F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID); in default_options()
1737 F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4; in default_options()
1738 F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE; in default_options()
1739 F2FS_OPTION(sbi).compress_ext_cnt = 0; in default_options()
1740 F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON; in default_options()
1742 sbi->sb->s_flags &= ~SB_INLINECRYPT; in default_options()
1744 set_opt(sbi, INLINE_XATTR); in default_options()
1745 set_opt(sbi, INLINE_DATA); in default_options()
1746 set_opt(sbi, INLINE_DENTRY); in default_options()
1747 set_opt(sbi, EXTENT_CACHE); in default_options()
1748 set_opt(sbi, NOHEAP); in default_options()
1749 clear_opt(sbi, DISABLE_CHECKPOINT); in default_options()
1750 F2FS_OPTION(sbi).unusable_cap = 0; in default_options()
1751 sbi->sb->s_flags |= SB_LAZYTIME; in default_options()
1752 set_opt(sbi, FLUSH_MERGE); in default_options()
1753 set_opt(sbi, DISCARD); in default_options()
1754 if (f2fs_sb_has_blkzoned(sbi)) in default_options()
1755 F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS; in default_options()
1757 F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE; in default_options()
1760 set_opt(sbi, XATTR_USER); in default_options()
1763 set_opt(sbi, POSIX_ACL); in default_options()
1766 f2fs_build_fault_attr(sbi, 0, 0); in default_options()
1773 static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi) in f2fs_disable_checkpoint() argument
1775 unsigned int s_flags = sbi->sb->s_flags; in f2fs_disable_checkpoint()
1782 f2fs_err(sbi, "checkpoint=disable on readonly fs"); in f2fs_disable_checkpoint()
1785 sbi->sb->s_flags |= SB_ACTIVE; in f2fs_disable_checkpoint()
1787 f2fs_update_time(sbi, DISABLE_TIME); in f2fs_disable_checkpoint()
1789 while (!f2fs_time_over(sbi, DISABLE_TIME)) { in f2fs_disable_checkpoint()
1790 down_write(&sbi->gc_lock); in f2fs_disable_checkpoint()
1791 err = f2fs_gc(sbi, true, false, false, NULL_SEGNO); in f2fs_disable_checkpoint()
1800 ret = sync_filesystem(sbi->sb); in f2fs_disable_checkpoint()
1806 unusable = f2fs_get_unusable_blocks(sbi); in f2fs_disable_checkpoint()
1807 if (f2fs_disable_cp_again(sbi, unusable)) { in f2fs_disable_checkpoint()
1812 down_write(&sbi->gc_lock); in f2fs_disable_checkpoint()
1814 set_sbi_flag(sbi, SBI_CP_DISABLED); in f2fs_disable_checkpoint()
1815 err = f2fs_write_checkpoint(sbi, &cpc); in f2fs_disable_checkpoint()
1819 spin_lock(&sbi->stat_lock); in f2fs_disable_checkpoint()
1820 sbi->unusable_block_count = unusable; in f2fs_disable_checkpoint()
1821 spin_unlock(&sbi->stat_lock); in f2fs_disable_checkpoint()
1824 up_write(&sbi->gc_lock); in f2fs_disable_checkpoint()
1826 sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */ in f2fs_disable_checkpoint()
1830 static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi) in f2fs_enable_checkpoint() argument
1836 sync_inodes_sb(sbi->sb); in f2fs_enable_checkpoint()
1839 } while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--); in f2fs_enable_checkpoint()
1842 f2fs_warn(sbi, "checkpoint=enable has some unwritten data."); in f2fs_enable_checkpoint()
1844 down_write(&sbi->gc_lock); in f2fs_enable_checkpoint()
1845 f2fs_dirty_to_prefree(sbi); in f2fs_enable_checkpoint()
1847 clear_sbi_flag(sbi, SBI_CP_DISABLED); in f2fs_enable_checkpoint()
1848 set_sbi_flag(sbi, SBI_IS_DIRTY); in f2fs_enable_checkpoint()
1849 up_write(&sbi->gc_lock); in f2fs_enable_checkpoint()
1851 f2fs_sync_fs(sbi->sb, 1); in f2fs_enable_checkpoint()
1856 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_remount() local
1862 bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE); in f2fs_remount()
1863 bool disable_checkpoint = test_opt(sbi, DISABLE_CHECKPOINT); in f2fs_remount()
1864 bool no_io_align = !F2FS_IO_ALIGNED(sbi); in f2fs_remount()
1865 bool no_atgc = !test_opt(sbi, ATGC); in f2fs_remount()
1875 org_mount_opt = sbi->mount_opt; in f2fs_remount()
1879 org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt; in f2fs_remount()
1881 if (F2FS_OPTION(sbi).s_qf_names[i]) { in f2fs_remount()
1883 kstrdup(F2FS_OPTION(sbi).s_qf_names[i], in f2fs_remount()
1897 if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) { in f2fs_remount()
1898 err = f2fs_commit_super(sbi, false); in f2fs_remount()
1899 f2fs_info(sbi, "Try to recover all the superblocks, ret: %d", in f2fs_remount()
1902 clear_sbi_flag(sbi, SBI_NEED_SB_WRITE); in f2fs_remount()
1905 default_options(sbi); in f2fs_remount()
1912 disable_checkpoint != test_opt(sbi, DISABLE_CHECKPOINT); in f2fs_remount()
1931 } else if (f2fs_sb_has_quota_ino(sbi)) { in f2fs_remount()
1939 if (no_atgc == !!test_opt(sbi, ATGC)) { in f2fs_remount()
1941 f2fs_warn(sbi, "switch atgc option is not allowed"); in f2fs_remount()
1946 if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) { in f2fs_remount()
1948 f2fs_warn(sbi, "switch extent_cache option is not allowed"); in f2fs_remount()
1952 if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) { in f2fs_remount()
1954 f2fs_warn(sbi, "switch io_bits option is not allowed"); in f2fs_remount()
1958 if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) { in f2fs_remount()
1960 f2fs_warn(sbi, "disabling checkpoint not compatible with read-only"); in f2fs_remount()
1970 (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF && in f2fs_remount()
1971 !test_opt(sbi, GC_MERGE))) { in f2fs_remount()
1972 if (sbi->gc_thread) { in f2fs_remount()
1973 f2fs_stop_gc_thread(sbi); in f2fs_remount()
1976 } else if (!sbi->gc_thread) { in f2fs_remount()
1977 err = f2fs_start_gc_thread(sbi); in f2fs_remount()
1984 F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) { in f2fs_remount()
1988 set_sbi_flag(sbi, SBI_IS_DIRTY); in f2fs_remount()
1989 set_sbi_flag(sbi, SBI_IS_CLOSE); in f2fs_remount()
1991 clear_sbi_flag(sbi, SBI_IS_CLOSE); in f2fs_remount()
1995 if (test_opt(sbi, DISABLE_CHECKPOINT)) { in f2fs_remount()
1996 err = f2fs_disable_checkpoint(sbi); in f2fs_remount()
2000 f2fs_enable_checkpoint(sbi); in f2fs_remount()
2008 if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) { in f2fs_remount()
2009 clear_opt(sbi, FLUSH_MERGE); in f2fs_remount()
2010 f2fs_destroy_flush_cmd_control(sbi, false); in f2fs_remount()
2012 err = f2fs_create_flush_cmd_control(sbi); in f2fs_remount()
2024 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0); in f2fs_remount()
2026 limit_reserve_root(sbi); in f2fs_remount()
2027 adjust_unusable_cap_perc(sbi); in f2fs_remount()
2032 if (f2fs_start_gc_thread(sbi)) in f2fs_remount()
2033 f2fs_warn(sbi, "background gc thread has stopped"); in f2fs_remount()
2035 f2fs_stop_gc_thread(sbi); in f2fs_remount()
2039 F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt; in f2fs_remount()
2041 kfree(F2FS_OPTION(sbi).s_qf_names[i]); in f2fs_remount()
2042 F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i]; in f2fs_remount()
2045 sbi->mount_opt = org_mount_opt; in f2fs_remount()
2165 static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type) in f2fs_quota_on_mount() argument
2167 if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) { in f2fs_quota_on_mount()
2168 f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it"); in f2fs_quota_on_mount()
2172 return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type], in f2fs_quota_on_mount()
2173 F2FS_OPTION(sbi).s_jquota_fmt, type); in f2fs_quota_on_mount()
2176 int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly) in f2fs_enable_quota_files() argument
2181 if (f2fs_sb_has_quota_ino(sbi) && rdonly) { in f2fs_enable_quota_files()
2182 err = f2fs_enable_quotas(sbi->sb); in f2fs_enable_quota_files()
2184 f2fs_err(sbi, "Cannot turn on quota_ino: %d", err); in f2fs_enable_quota_files()
2191 if (F2FS_OPTION(sbi).s_qf_names[i]) { in f2fs_enable_quota_files()
2192 err = f2fs_quota_on_mount(sbi, i); in f2fs_enable_quota_files()
2197 f2fs_err(sbi, "Cannot turn on quotas: %d on %d", in f2fs_enable_quota_files()
2232 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_enable_quotas() local
2236 test_opt(sbi, USRQUOTA), in f2fs_enable_quotas()
2237 test_opt(sbi, GRPQUOTA), in f2fs_enable_quotas()
2238 test_opt(sbi, PRJQUOTA), in f2fs_enable_quotas()
2242 f2fs_err(sbi, "quota file may be corrupted, skip loading it"); in f2fs_enable_quotas()
2255 f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.", in f2fs_enable_quotas()
2268 static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type) in f2fs_quota_sync_file() argument
2270 struct quota_info *dqopt = sb_dqopt(sbi->sb); in f2fs_quota_sync_file()
2274 ret = dquot_writeback_dquots(sbi->sb, type); in f2fs_quota_sync_file()
2283 if (is_journalled_quota(sbi)) in f2fs_quota_sync_file()
2291 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_quota_sync_file()
2297 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_quota_sync() local
2314 if (!f2fs_sb_has_quota_ino(sbi)) in f2fs_quota_sync()
2326 f2fs_lock_op(sbi); in f2fs_quota_sync()
2327 down_read(&sbi->quota_sem); in f2fs_quota_sync()
2329 ret = f2fs_quota_sync_file(sbi, cnt); in f2fs_quota_sync()
2331 up_read(&sbi->quota_sem); in f2fs_quota_sync()
2332 f2fs_unlock_op(sbi); in f2fs_quota_sync()
2334 if (!f2fs_sb_has_quota_ino(sbi)) in f2fs_quota_sync()
2402 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_quota_off() local
2412 if (is_journalled_quota(sbi)) in f2fs_quota_off()
2413 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_quota_off()
2454 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb); in f2fs_dquot_commit() local
2457 down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING); in f2fs_dquot_commit()
2460 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_dquot_commit()
2461 up_read(&sbi->quota_sem); in f2fs_dquot_commit()
2467 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb); in f2fs_dquot_acquire() local
2470 down_read(&sbi->quota_sem); in f2fs_dquot_acquire()
2473 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_dquot_acquire()
2474 up_read(&sbi->quota_sem); in f2fs_dquot_acquire()
2480 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb); in f2fs_dquot_release() local
2484 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_dquot_release()
2491 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_dquot_mark_dquot_dirty() local
2495 if (is_journalled_quota(sbi)) in f2fs_dquot_mark_dquot_dirty()
2496 set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH); in f2fs_dquot_mark_dquot_dirty()
2503 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_dquot_commit_info() local
2507 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_dquot_commit_info()
2583 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); in f2fs_set_context() local
2591 if (f2fs_sb_has_lost_found(sbi) && in f2fs_set_context()
2592 inode->i_ino == F2FS_ROOT_INO(sbi)) in f2fs_set_context()
2619 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_get_num_devices() local
2621 if (f2fs_is_multi_device(sbi)) in f2fs_get_num_devices()
2622 return sbi->s_ndevs; in f2fs_get_num_devices()
2629 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_get_devices() local
2632 for (i = 0; i < sbi->s_ndevs; i++) in f2fs_get_devices()
2653 struct f2fs_sb_info *sbi = F2FS_SB(sb); in f2fs_nfs_get_inode() local
2656 if (f2fs_check_nid_range(sbi, ino)) in f2fs_nfs_get_inode()
2734 static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi, in sanity_check_area_boundary() argument
2739 struct super_block *sb = sbi->sb; in sanity_check_area_boundary()
2759 f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)", in sanity_check_area_boundary()
2766 f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)", in sanity_check_area_boundary()
2774 f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)", in sanity_check_area_boundary()
2782 f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)", in sanity_check_area_boundary()
2790 f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)", in sanity_check_area_boundary()
2797 f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%llu) block(%u)", in sanity_check_area_boundary()
2810 set_sbi_flag(sbi, SBI_NEED_SB_WRITE); in sanity_check_area_boundary()
2816 f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)", in sanity_check_area_boundary()
2825 static int sanity_check_raw_super(struct f2fs_sb_info *sbi, in sanity_check_raw_super() argument
2836 f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)", in sanity_check_raw_super()
2846 f2fs_info(sbi, "Invalid SB checksum offset: %zu", in sanity_check_raw_super()
2851 if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) { in sanity_check_raw_super()
2852 f2fs_info(sbi, "Invalid SB checksum value: %u", crc); in sanity_check_raw_super()
2859 f2fs_info(sbi, "Invalid page_cache_size (%lu), supports only 4KB", in sanity_check_raw_super()
2866 f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u", in sanity_check_raw_super()
2874 f2fs_info(sbi, "Invalid log blocks per segment (%u)", in sanity_check_raw_super()
2884 f2fs_info(sbi, "Invalid log sectorsize (%u)", in sanity_check_raw_super()
2891 f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)", in sanity_check_raw_super()
2908 f2fs_info(sbi, "Invalid segment count (%u)", segment_count); in sanity_check_raw_super()
2914 f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)", in sanity_check_raw_super()
2920 f2fs_info(sbi, "Invalid segment/section count (%u != %u * %u)", in sanity_check_raw_super()
2926 f2fs_info(sbi, "Small segment_count (%u < %u * %u)", in sanity_check_raw_super()
2932 f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)", in sanity_check_raw_super()
2946 f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)", in sanity_check_raw_super()
2952 !bdev_is_zoned(sbi->sb->s_bdev)) { in sanity_check_raw_super()
2953 f2fs_info(sbi, "Zoned block device path is missing"); in sanity_check_raw_super()
2959 f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)", in sanity_check_raw_super()
2967 f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)", in sanity_check_raw_super()
2977 f2fs_info(sbi, "Insane cp_payload (%u >= %u)", in sanity_check_raw_super()
2988 f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)", in sanity_check_raw_super()
2996 if (sanity_check_area_boundary(sbi, bh)) in sanity_check_raw_super()
3002 int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi) in f2fs_sanity_check_ckpt() argument
3005 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); in f2fs_sanity_check_ckpt()
3006 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi); in f2fs_sanity_check_ckpt()
3036 f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version"); in f2fs_sanity_check_ckpt()
3045 f2fs_err(sbi, "Wrong user_block_count: %u", in f2fs_sanity_check_ckpt()
3052 f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u", in f2fs_sanity_check_ckpt()
3058 avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM; in f2fs_sanity_check_ckpt()
3060 f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u", in f2fs_sanity_check_ckpt()
3066 blocks_per_seg = sbi->blocks_per_seg; in f2fs_sanity_check_ckpt()
3075 f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u", in f2fs_sanity_check_ckpt()
3089 f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u", in f2fs_sanity_check_ckpt()
3100 f2fs_err(sbi, "Node segment (%u) and Data segment (%u) has the same segno: %u", in f2fs_sanity_check_ckpt()
3113 f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u", in f2fs_sanity_check_ckpt()
3118 cp_pack_start_sum = __start_sum_addr(sbi); in f2fs_sanity_check_ckpt()
3119 cp_payload = __cp_payload(sbi); in f2fs_sanity_check_ckpt()
3123 f2fs_err(sbi, "Wrong cp_pack_start_sum: %u", in f2fs_sanity_check_ckpt()
3130 f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, " in f2fs_sanity_check_ckpt()
3143 f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u)", in f2fs_sanity_check_ckpt()
3148 if (unlikely(f2fs_cp_error(sbi))) { in f2fs_sanity_check_ckpt()
3149 f2fs_err(sbi, "A bug case: need to run fsck"); in f2fs_sanity_check_ckpt()
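
Several of the f2fs_sanity_check_ckpt() matches above reject a checkpoint in which two active logs claim the same segment number (node vs. node, data vs. data, and node vs. data). The idea reduces to a pairwise duplicate check, sketched below in simplified standalone form; the real code also validates the in-segment block offsets, which is omitted here.

#include <stdbool.h>
#include <stdio.h>

/* Simplified pairwise check: no two current segments may share a segno. */
static bool has_duplicate_segno(const unsigned int *segno, int count)
{
        for (int i = 0; i < count; i++)
                for (int j = i + 1; j < count; j++)
                        if (segno[i] == segno[j])
                                return true;
        return false;
}

int main(void)
{
        unsigned int cur_segs[] = { 10, 11, 12, 10 };   /* 10 appears twice */

        printf("checkpoint layout %s\n",
               has_duplicate_segno(cur_segs, 4) ? "is corrupt" : "looks sane");
        return 0;
}
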
3155 static void init_sb_info(struct f2fs_sb_info *sbi) in init_sb_info() argument
3157 struct f2fs_super_block *raw_super = sbi->raw_super; in init_sb_info()
3160 sbi->log_sectors_per_block = in init_sb_info()
3162 sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize); in init_sb_info()
3163 sbi->blocksize = 1 << sbi->log_blocksize; in init_sb_info()
3164 sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg); in init_sb_info()
3165 sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg; in init_sb_info()
3166 sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec); in init_sb_info()
3167 sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone); in init_sb_info()
3168 sbi->total_sections = le32_to_cpu(raw_super->section_count); in init_sb_info()
3169 sbi->total_node_count = in init_sb_info()
3171 * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK; in init_sb_info()
3172 sbi->root_ino_num = le32_to_cpu(raw_super->root_ino); in init_sb_info()
3173 sbi->node_ino_num = le32_to_cpu(raw_super->node_ino); in init_sb_info()
3174 sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino); in init_sb_info()
3175 sbi->cur_victim_sec = NULL_SECNO; in init_sb_info()
3176 sbi->next_victim_seg[BG_GC] = NULL_SEGNO; in init_sb_info()
3177 sbi->next_victim_seg[FG_GC] = NULL_SEGNO; in init_sb_info()
3178 sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH; in init_sb_info()
3179 sbi->migration_granularity = sbi->segs_per_sec; in init_sb_info()
3181 sbi->dir_level = DEF_DIR_LEVEL; in init_sb_info()
3182 sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL; in init_sb_info()
3183 sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL; in init_sb_info()
3184 sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL; in init_sb_info()
3185 sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL; in init_sb_info()
3186 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL; in init_sb_info()
3187 sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] = in init_sb_info()
3189 clear_sbi_flag(sbi, SBI_NEED_FSCK); in init_sb_info()
3192 atomic_set(&sbi->nr_pages[i], 0); in init_sb_info()
3195 atomic_set(&sbi->wb_sync_req[i], 0); in init_sb_info()
3197 INIT_LIST_HEAD(&sbi->s_list); in init_sb_info()
3198 mutex_init(&sbi->umount_mutex); in init_sb_info()
3199 init_rwsem(&sbi->io_order_lock); in init_sb_info()
3200 spin_lock_init(&sbi->cp_lock); in init_sb_info()
3202 sbi->dirty_device = 0; in init_sb_info()
3203 spin_lock_init(&sbi->dev_lock); in init_sb_info()
3205 init_rwsem(&sbi->sb_lock); in init_sb_info()
3206 init_rwsem(&sbi->pin_sem); in init_sb_info()
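
The init_sb_info() matches above expand the log-scaled geometry fields read from the on-disk superblock into the block, segment, section and zone sizes used everywhere else. A tiny standalone illustration of that expansion, using the common defaults of 4 KiB blocks and 512-block (2 MiB) segments; the struct below is a hypothetical stand-in, not the on-disk layout.

#include <stdint.h>
#include <stdio.h>

struct geometry {                       /* illustrative, not the raw superblock */
        uint32_t log_blocksize;         /* 12 -> 4 KiB blocks */
        uint32_t log_blocks_per_seg;    /* 9  -> 512 blocks per segment */
        uint32_t segs_per_sec;
        uint32_t secs_per_zone;
};

int main(void)
{
        struct geometry g = { 12, 9, 1, 1 };
        uint32_t blocksize = 1u << g.log_blocksize;
        uint32_t blocks_per_seg = 1u << g.log_blocks_per_seg;

        printf("blocksize=%u  blocks/seg=%u  blocks/sec=%u  blocks/zone=%u\n",
               blocksize, blocks_per_seg,
               blocks_per_seg * g.segs_per_sec,
               blocks_per_seg * g.segs_per_sec * g.secs_per_zone);
        return 0;
}
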
3209 static int init_percpu_info(struct f2fs_sb_info *sbi) in init_percpu_info() argument
3213 err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL); in init_percpu_info()
3217 err = percpu_counter_init(&sbi->total_valid_inode_count, 0, in init_percpu_info()
3220 percpu_counter_destroy(&sbi->alloc_valid_block_count); in init_percpu_info()
3228 struct f2fs_sb_info *sbi; member
3243 if (!rz_args->sbi->unusable_blocks_per_sec) { in f2fs_report_zone_cb()
3244 rz_args->sbi->unusable_blocks_per_sec = unusable_blocks; in f2fs_report_zone_cb()
3247 if (rz_args->sbi->unusable_blocks_per_sec != unusable_blocks) { in f2fs_report_zone_cb()
3248 f2fs_err(rz_args->sbi, "F2FS supports single zone capacity\n"); in f2fs_report_zone_cb()
3254 static int init_blkz_info(struct f2fs_sb_info *sbi, int devi) in init_blkz_info() argument
3261 if (!f2fs_sb_has_blkzoned(sbi)) in init_blkz_info()
3264 if (sbi->blocks_per_blkz && sbi->blocks_per_blkz != in init_blkz_info()
3267 sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)); in init_blkz_info()
3268 if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz != in init_blkz_info()
3269 __ilog2_u32(sbi->blocks_per_blkz)) in init_blkz_info()
3271 sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz); in init_blkz_info()
3273 sbi->log_blocks_per_blkz; in init_blkz_info()
3277 FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi, in init_blkz_info()
3284 rep_zone_arg.sbi = sbi; in init_blkz_info()
3301 static int read_raw_super_block(struct f2fs_sb_info *sbi, in read_raw_super_block() argument
3305 struct super_block *sb = sbi->sb; in read_raw_super_block()
3318 f2fs_err(sbi, "Unable to read %dth superblock", in read_raw_super_block()
3326 err = sanity_check_raw_super(sbi, bh); in read_raw_super_block()
3328 f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock", in read_raw_super_block()
3353 int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover) in f2fs_commit_super() argument
3359 if ((recover && f2fs_readonly(sbi->sb)) || in f2fs_commit_super()
3360 bdev_read_only(sbi->sb->s_bdev)) { in f2fs_commit_super()
3361 set_sbi_flag(sbi, SBI_NEED_SB_WRITE); in f2fs_commit_super()
3366 if (!recover && f2fs_sb_has_sb_chksum(sbi)) { in f2fs_commit_super()
3367 crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi), in f2fs_commit_super()
3369 F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc); in f2fs_commit_super()
3373 bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1); in f2fs_commit_super()
3376 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi)); in f2fs_commit_super()
3384 bh = sb_bread(sbi->sb, sbi->valid_super_block); in f2fs_commit_super()
3387 err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi)); in f2fs_commit_super()
3392 static int f2fs_scan_devices(struct f2fs_sb_info *sbi) in f2fs_scan_devices() argument
3394 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi); in f2fs_scan_devices()
3400 if (!bdev_is_zoned(sbi->sb->s_bdev)) in f2fs_scan_devices()
3409 sbi->devs = f2fs_kzalloc(sbi, in f2fs_scan_devices()
3413 if (!sbi->devs) in f2fs_scan_devices()
3424 blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev, in f2fs_scan_devices()
3425 sbi->sb->s_mode, sbi->sb->s_type); in f2fs_scan_devices()
3435 sbi->log_blocks_per_seg) - 1 + in f2fs_scan_devices()
3441 sbi->log_blocks_per_seg) - 1; in f2fs_scan_devices()
3444 sbi->sb->s_mode, sbi->sb->s_type); in f2fs_scan_devices()
3450 sbi->s_ndevs = i + 1; in f2fs_scan_devices()
3454 !f2fs_sb_has_blkzoned(sbi)) { in f2fs_scan_devices()
3455 f2fs_err(sbi, "Zoned block device feature not enabled\n"); in f2fs_scan_devices()
3459 if (init_blkz_info(sbi, i)) { in f2fs_scan_devices()
3460 f2fs_err(sbi, "Failed to initialize F2FS blkzone information"); in f2fs_scan_devices()
3465 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)", in f2fs_scan_devices()
3474 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x", in f2fs_scan_devices()
3479 f2fs_info(sbi, in f2fs_scan_devices()
3480 "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi)); in f2fs_scan_devices()
3484 static int f2fs_setup_casefold(struct f2fs_sb_info *sbi) in f2fs_setup_casefold() argument
3487 if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) { in f2fs_setup_casefold()
3492 if (f2fs_sb_has_encrypt(sbi)) { in f2fs_setup_casefold()
3493 f2fs_err(sbi, in f2fs_setup_casefold()
3498 if (f2fs_sb_read_encoding(sbi->raw_super, &encoding_info, in f2fs_setup_casefold()
3500 f2fs_err(sbi, in f2fs_setup_casefold()
3507 f2fs_err(sbi, in f2fs_setup_casefold()
3514 f2fs_info(sbi, "Using encoding defined by superblock: " in f2fs_setup_casefold()
3518 sbi->sb->s_encoding = encoding; in f2fs_setup_casefold()
3519 sbi->sb->s_encoding_flags = encoding_flags; in f2fs_setup_casefold()
3520 sbi->sb->s_d_op = &f2fs_dentry_ops; in f2fs_setup_casefold()
3523 if (f2fs_sb_has_casefold(sbi)) { in f2fs_setup_casefold()
3524 f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE"); in f2fs_setup_casefold()
3531 static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi) in f2fs_tuning_parameters() argument
3533 struct f2fs_sm_info *sm_i = SM_I(sbi); in f2fs_tuning_parameters()
3537 F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE; in f2fs_tuning_parameters()
3542 sbi->readdir_ra = 1; in f2fs_tuning_parameters()
3546 static void f2fs_init_grading_ssr(struct f2fs_sb_info *sbi) in f2fs_init_grading_ssr() argument
3548 u32 total_blocks = le64_to_cpu(sbi->raw_super->block_count) >> 18; in f2fs_init_grading_ssr()
3551 sbi->hot_cold_params.hot_data_lower_limit = SSR_HD_SAPCE_LIMIT_128G; in f2fs_init_grading_ssr()
3552 sbi->hot_cold_params.hot_data_waterline = SSR_HD_WATERLINE_128G; in f2fs_init_grading_ssr()
3553 sbi->hot_cold_params.warm_data_lower_limit = SSR_WD_SAPCE_LIMIT_128G; in f2fs_init_grading_ssr()
3554 sbi->hot_cold_params.warm_data_waterline = SSR_WD_WATERLINE_128G; in f2fs_init_grading_ssr()
3555 sbi->hot_cold_params.hot_node_lower_limit = SSR_HD_SAPCE_LIMIT_128G; in f2fs_init_grading_ssr()
3556 sbi->hot_cold_params.hot_node_waterline = SSR_HN_WATERLINE_128G; in f2fs_init_grading_ssr()
3557 sbi->hot_cold_params.warm_node_lower_limit = SSR_WN_SAPCE_LIMIT_128G; in f2fs_init_grading_ssr()
3558 sbi->hot_cold_params.warm_node_waterline = SSR_WN_WATERLINE_128G; in f2fs_init_grading_ssr()
3559 sbi->hot_cold_params.enable = GRADING_SSR_OFF; in f2fs_init_grading_ssr()
3561 sbi->hot_cold_params.hot_data_lower_limit = SSR_DEFALT_SPACE_LIMIT; in f2fs_init_grading_ssr()
3562 sbi->hot_cold_params.hot_data_waterline = SSR_DEFALT_WATERLINE; in f2fs_init_grading_ssr()
3563 sbi->hot_cold_params.warm_data_lower_limit = SSR_DEFALT_SPACE_LIMIT; in f2fs_init_grading_ssr()
3564 sbi->hot_cold_params.warm_data_waterline = SSR_DEFALT_WATERLINE; in f2fs_init_grading_ssr()
3565 sbi->hot_cold_params.hot_node_lower_limit = SSR_DEFALT_SPACE_LIMIT; in f2fs_init_grading_ssr()
3566 sbi->hot_cold_params.hot_node_waterline = SSR_DEFALT_WATERLINE; in f2fs_init_grading_ssr()
3567 sbi->hot_cold_params.warm_node_lower_limit = SSR_DEFALT_SPACE_LIMIT; in f2fs_init_grading_ssr()
3568 sbi->hot_cold_params.warm_node_waterline = SSR_DEFALT_WATERLINE; in f2fs_init_grading_ssr()
3569 sbi->hot_cold_params.enable = GRADING_SSR_OFF; in f2fs_init_grading_ssr()
3576 struct f2fs_sb_info *sbi; in f2fs_fill_super() local
3593 sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL); in f2fs_fill_super()
3594 if (!sbi) in f2fs_fill_super()
3597 sbi->sb = sb; in f2fs_fill_super()
3600 sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0); in f2fs_fill_super()
3601 if (IS_ERR(sbi->s_chksum_driver)) { in f2fs_fill_super()
3602 f2fs_err(sbi, "Cannot load crc32 driver."); in f2fs_fill_super()
3603 err = PTR_ERR(sbi->s_chksum_driver); in f2fs_fill_super()
3604 sbi->s_chksum_driver = NULL; in f2fs_fill_super()
3610 f2fs_err(sbi, "unable to set blocksize"); in f2fs_fill_super()
3614 err = read_raw_super_block(sbi, &raw_super, &valid_super_block, in f2fs_fill_super()
3619 sb->s_fs_info = sbi; in f2fs_fill_super()
3620 sbi->raw_super = raw_super; in f2fs_fill_super()
3623 if (f2fs_sb_has_inode_chksum(sbi)) in f2fs_fill_super()
3624 sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid, in f2fs_fill_super()
3627 default_options(sbi); in f2fs_fill_super()
3639 sbi->max_file_blocks = max_file_blocks(); in f2fs_fill_super()
3640 sb->s_maxbytes = sbi->max_file_blocks << in f2fs_fill_super()
3644 err = f2fs_setup_casefold(sbi); in f2fs_fill_super()
3653 if (f2fs_sb_has_quota_ino(sbi)) { in f2fs_fill_super()
3655 if (f2fs_qf_ino(sbi->sb, i)) in f2fs_fill_super()
3656 sbi->nquota_files++; in f2fs_fill_super()
3673 (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0); in f2fs_fill_super()
3678 sbi->valid_super_block = valid_super_block; in f2fs_fill_super()
3679 init_rwsem(&sbi->gc_lock); in f2fs_fill_super()
3680 mutex_init(&sbi->writepages); in f2fs_fill_super()
3681 mutex_init(&sbi->cp_mutex); in f2fs_fill_super()
3682 init_rwsem(&sbi->node_write); in f2fs_fill_super()
3683 init_rwsem(&sbi->node_change); in f2fs_fill_super()
3686 set_sbi_flag(sbi, SBI_POR_DOING); in f2fs_fill_super()
3687 spin_lock_init(&sbi->stat_lock); in f2fs_fill_super()
3690 spin_lock_init(&sbi->iostat_lock); in f2fs_fill_super()
3691 sbi->iostat_enable = false; in f2fs_fill_super()
3692 sbi->iostat_period_ms = DEFAULT_IOSTAT_PERIOD_MS; in f2fs_fill_super()
3698 sbi->write_io[i] = in f2fs_fill_super()
3699 f2fs_kmalloc(sbi, in f2fs_fill_super()
3703 if (!sbi->write_io[i]) { in f2fs_fill_super()
3709 init_rwsem(&sbi->write_io[i][j].io_rwsem); in f2fs_fill_super()
3710 sbi->write_io[i][j].sbi = sbi; in f2fs_fill_super()
3711 sbi->write_io[i][j].bio = NULL; in f2fs_fill_super()
3712 spin_lock_init(&sbi->write_io[i][j].io_lock); in f2fs_fill_super()
3713 INIT_LIST_HEAD(&sbi->write_io[i][j].io_list); in f2fs_fill_super()
3714 INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list); in f2fs_fill_super()
3715 init_rwsem(&sbi->write_io[i][j].bio_list_lock); in f2fs_fill_super()
3719 init_rwsem(&sbi->cp_rwsem); in f2fs_fill_super()
3720 init_rwsem(&sbi->quota_sem); in f2fs_fill_super()
3721 init_waitqueue_head(&sbi->cp_wait); in f2fs_fill_super()
3722 init_sb_info(sbi); in f2fs_fill_super()
3724 err = init_percpu_info(sbi); in f2fs_fill_super()
3728 if (F2FS_IO_ALIGNED(sbi)) { in f2fs_fill_super()
3729 sbi->write_io_dummy = in f2fs_fill_super()
3730 mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0); in f2fs_fill_super()
3731 if (!sbi->write_io_dummy) { in f2fs_fill_super()
3737 /* init per sbi slab cache */ in f2fs_fill_super()
3738 err = f2fs_init_xattr_caches(sbi); in f2fs_fill_super()
3741 err = f2fs_init_page_array_cache(sbi); in f2fs_fill_super()
3746 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi)); in f2fs_fill_super()
3747 if (IS_ERR(sbi->meta_inode)) { in f2fs_fill_super()
3748 f2fs_err(sbi, "Failed to read F2FS meta data inode"); in f2fs_fill_super()
3749 err = PTR_ERR(sbi->meta_inode); in f2fs_fill_super()
3753 err = f2fs_get_valid_checkpoint(sbi); in f2fs_fill_super()
3755 f2fs_err(sbi, "Failed to get valid F2FS checkpoint"); in f2fs_fill_super()
3759 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG)) in f2fs_fill_super()
3760 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); in f2fs_fill_super()
3761 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) { in f2fs_fill_super()
3762 set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK); in f2fs_fill_super()
3763 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL; in f2fs_fill_super()
3766 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG)) in f2fs_fill_super()
3767 set_sbi_flag(sbi, SBI_NEED_FSCK); in f2fs_fill_super()
3770 err = f2fs_scan_devices(sbi); in f2fs_fill_super()
3772 f2fs_err(sbi, "Failed to find devices"); in f2fs_fill_super()
3776 err = f2fs_init_post_read_wq(sbi); in f2fs_fill_super()
3778 f2fs_err(sbi, "Failed to initialize post read workqueue"); in f2fs_fill_super()
3782 sbi->total_valid_node_count = in f2fs_fill_super()
3783 le32_to_cpu(sbi->ckpt->valid_node_count); in f2fs_fill_super()
3784 percpu_counter_set(&sbi->total_valid_inode_count, in f2fs_fill_super()
3785 le32_to_cpu(sbi->ckpt->valid_inode_count)); in f2fs_fill_super()
3786 sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count); in f2fs_fill_super()
3787 sbi->total_valid_block_count = in f2fs_fill_super()
3788 le64_to_cpu(sbi->ckpt->valid_block_count); in f2fs_fill_super()
3789 sbi->last_valid_block_count = sbi->total_valid_block_count; in f2fs_fill_super()
3790 sbi->reserved_blocks = 0; in f2fs_fill_super()
3791 sbi->current_reserved_blocks = 0; in f2fs_fill_super()
3792 limit_reserve_root(sbi); in f2fs_fill_super()
3793 adjust_unusable_cap_perc(sbi); in f2fs_fill_super()
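The counter setup above copies valid_node_count, valid_inode_count, user_block_count, and valid_block_count out of the checkpoint through le32_to_cpu()/le64_to_cpu(), since checkpoint fields are stored little-endian on disk. A self-contained illustration of that conversion step, using a byte-wise helper instead of the kernel macros (the demo_ name and the sample bytes are made up):

#include <stdint.h>
#include <stdio.h>

/* Portable little-endian -> host conversion, byte by byte. */
static uint32_t demo_le32_to_cpu(const uint8_t b[4])
{
        return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
               ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
        /* Pretend these four bytes were read from an on-disk checkpoint. */
        const uint8_t raw_valid_block_count[4] = { 0x10, 0x27, 0x00, 0x00 };

        /* 0x2710 == 10000 regardless of the host's byte order. */
        printf("valid_block_count = %u\n",
               demo_le32_to_cpu(raw_valid_block_count));
        return 0;
}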
3796 INIT_LIST_HEAD(&sbi->inode_list[i]); in f2fs_fill_super()
3797 spin_lock_init(&sbi->inode_lock[i]); in f2fs_fill_super()
3799 mutex_init(&sbi->flush_lock); in f2fs_fill_super()
3801 f2fs_init_extent_cache_info(sbi); in f2fs_fill_super()
3803 f2fs_init_ino_entry_info(sbi); in f2fs_fill_super()
3805 f2fs_init_fsync_node_info(sbi); in f2fs_fill_super()
3808 err = f2fs_build_segment_manager(sbi); in f2fs_fill_super()
3810 f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)", in f2fs_fill_super()
3814 err = f2fs_build_node_manager(sbi); in f2fs_fill_super()
3816 f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)", in f2fs_fill_super()
3821 err = adjust_reserved_segment(sbi); in f2fs_fill_super()
3827 sbi->sectors_written_start = in f2fs_fill_super()
3832 seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE); in f2fs_fill_super()
3833 if (__exist_node_summaries(sbi)) in f2fs_fill_super()
3834 sbi->kbytes_written = in f2fs_fill_super()
3837 f2fs_build_gc_manager(sbi); in f2fs_fill_super()
3839 err = f2fs_build_stats(sbi); in f2fs_fill_super()
3844 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi)); in f2fs_fill_super()
3845 if (IS_ERR(sbi->node_inode)) { in f2fs_fill_super()
3846 f2fs_err(sbi, "Failed to read node inode"); in f2fs_fill_super()
3847 err = PTR_ERR(sbi->node_inode); in f2fs_fill_super()
3852 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi)); in f2fs_fill_super()
3854 f2fs_err(sbi, "Failed to read root inode"); in f2fs_fill_super()
3871 f2fs_init_grading_ssr(sbi); in f2fs_fill_super()
3873 err = f2fs_register_sysfs(sbi); in f2fs_fill_super()
3879 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) { in f2fs_fill_super()
3882 f2fs_err(sbi, "Cannot turn on quotas: error %d", err); in f2fs_fill_super()
3886 err = f2fs_recover_orphan_inodes(sbi); in f2fs_fill_super()
3890 if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG))) in f2fs_fill_super()
3894 if (!test_opt(sbi, DISABLE_ROLL_FORWARD) && in f2fs_fill_super()
3895 !test_opt(sbi, NORECOVERY)) { in f2fs_fill_super()
3900 if (f2fs_hw_is_readonly(sbi)) { in f2fs_fill_super()
3901 if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) { in f2fs_fill_super()
3903 f2fs_err(sbi, "Need to recover fsync data, but write access unavailable"); in f2fs_fill_super()
3906 f2fs_info(sbi, "write access unavailable, skipping recovery"); in f2fs_fill_super()
3911 set_sbi_flag(sbi, SBI_NEED_FSCK); in f2fs_fill_super()
3916 err = f2fs_recover_fsync_data(sbi, false); in f2fs_fill_super()
3921 f2fs_err(sbi, "Cannot recover all fsync data errno=%d", in f2fs_fill_super()
3926 err = f2fs_recover_fsync_data(sbi, true); in f2fs_fill_super()
3930 f2fs_err(sbi, "Need to recover fsync data"); in f2fs_fill_super()
3939 if (!err && !f2fs_readonly(sb) && f2fs_sb_has_blkzoned(sbi)) { in f2fs_fill_super()
3940 err = f2fs_check_write_pointer(sbi); in f2fs_fill_super()
3946 f2fs_init_inmem_curseg(sbi); in f2fs_fill_super()
3949 clear_sbi_flag(sbi, SBI_POR_DOING); in f2fs_fill_super()
3951 if (test_opt(sbi, DISABLE_CHECKPOINT)) { in f2fs_fill_super()
3952 err = f2fs_disable_checkpoint(sbi); in f2fs_fill_super()
3955 } else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) { in f2fs_fill_super()
3956 f2fs_enable_checkpoint(sbi); in f2fs_fill_super()
3963 if ((F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF || in f2fs_fill_super()
3964 test_opt(sbi, GC_MERGE)) && !f2fs_readonly(sb)) { in f2fs_fill_super()
3966 err = f2fs_start_gc_thread(sbi); in f2fs_fill_super()
3974 err = f2fs_commit_super(sbi, true); in f2fs_fill_super()
3975 f2fs_info(sbi, "Try to recover %dth superblock, ret: %d", in f2fs_fill_super()
3976 sbi->valid_super_block ? 1 : 2, err); in f2fs_fill_super()
3979 f2fs_join_shrinker(sbi); in f2fs_fill_super()
3981 f2fs_tuning_parameters(sbi); in f2fs_fill_super()
3983 f2fs_notice(sbi, "Mounted with checkpoint version = %llx", in f2fs_fill_super()
3984 cur_cp_version(F2FS_CKPT(sbi))); in f2fs_fill_super()
3985 f2fs_update_time(sbi, CP_TIME); in f2fs_fill_super()
3986 f2fs_update_time(sbi, REQ_TIME); in f2fs_fill_super()
3987 clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK); in f2fs_fill_super()
3992 sync_filesystem(sbi->sb); in f2fs_fill_super()
3998 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) in f2fs_fill_super()
3999 f2fs_quota_off_umount(sbi->sb); in f2fs_fill_super()
4007 truncate_inode_pages_final(META_MAPPING(sbi)); in f2fs_fill_super()
4010 f2fs_unregister_sysfs(sbi); in f2fs_fill_super()
4015 f2fs_release_ino_entry(sbi, true); in f2fs_fill_super()
4016 truncate_inode_pages_final(NODE_MAPPING(sbi)); in f2fs_fill_super()
4017 iput(sbi->node_inode); in f2fs_fill_super()
4018 sbi->node_inode = NULL; in f2fs_fill_super()
4020 f2fs_destroy_stats(sbi); in f2fs_fill_super()
4022 f2fs_destroy_node_manager(sbi); in f2fs_fill_super()
4024 f2fs_destroy_segment_manager(sbi); in f2fs_fill_super()
4025 f2fs_destroy_post_read_wq(sbi); in f2fs_fill_super()
4027 destroy_device_list(sbi); in f2fs_fill_super()
4028 kvfree(sbi->ckpt); in f2fs_fill_super()
4030 make_bad_inode(sbi->meta_inode); in f2fs_fill_super()
4031 iput(sbi->meta_inode); in f2fs_fill_super()
4032 sbi->meta_inode = NULL; in f2fs_fill_super()
4034 f2fs_destroy_page_array_cache(sbi); in f2fs_fill_super()
4036 f2fs_destroy_xattr_caches(sbi); in f2fs_fill_super()
4038 mempool_destroy(sbi->write_io_dummy); in f2fs_fill_super()
4040 destroy_percpu_info(sbi); in f2fs_fill_super()
4043 kvfree(sbi->write_io[i]); in f2fs_fill_super()
4052 kfree(F2FS_OPTION(sbi).s_qf_names[i]); in f2fs_fill_super()
4054 fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy); in f2fs_fill_super()
4059 if (sbi->s_chksum_driver) in f2fs_fill_super()
4060 crypto_free_shash(sbi->s_chksum_driver); in f2fs_fill_super()
4061 kfree(sbi); in f2fs_fill_super()
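The failure-path matches above, from truncate_inode_pages_final(META_MAPPING(sbi)) down to kfree(sbi), follow the usual goto-ladder unwind: resources are released in the reverse of the order they were set up, and each failure jumps to a label that frees only what already exists. A compressed sketch of that pattern under invented names (the demo_ helpers and labels are placeholders, not the real f2fs labels, which this listing does not show):

#include <stdio.h>
#include <stdlib.h>

struct demo_sb_info {
        void *write_io;
        void *ckpt;
};

static int demo_fill_super(struct demo_sb_info *sbi)
{
        int err = -1;

        sbi->write_io = malloc(64);             /* earlier resource */
        if (!sbi->write_io)
                goto out;                       /* nothing to undo yet */

        sbi->ckpt = malloc(64);                 /* later resource */
        if (!sbi->ckpt)
                goto free_write_io;             /* undo only what succeeded */

        return 0;

free_write_io:
        free(sbi->write_io);
        sbi->write_io = NULL;
out:
        return err;
}

int main(void)
{
        struct demo_sb_info sbi = { 0 };
        int err = demo_fill_super(&sbi);

        printf("mount sketch: %s\n", err ? "failed" : "ok");
        free(sbi.ckpt);
        free(sbi.write_io);
        return 0;
}

The real function tears down far more (stats, node and segment managers, caches, quota names, the checksum driver), but each step in the listing follows this same release-in-reverse discipline.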
4081 struct f2fs_sb_info *sbi = F2FS_SB(sb); in kill_f2fs_super() local
4083 set_sbi_flag(sbi, SBI_IS_CLOSE); in kill_f2fs_super()
4084 f2fs_stop_gc_thread(sbi); in kill_f2fs_super()
4085 f2fs_stop_discard_thread(sbi); in kill_f2fs_super()
4087 if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) || in kill_f2fs_super()
4088 !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) { in kill_f2fs_super()
4092 f2fs_write_checkpoint(sbi, &cpc); in kill_f2fs_super()
4095 if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb)) in kill_f2fs_super()
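In the kill_f2fs_super() matches, the teardown first marks the sbi as closing and stops the GC and discard threads, then writes one last checkpoint only when the sbi is dirty or the existing checkpoint was not taken with CP_UMOUNT_FLAG. A tiny sketch of that decision as plain bit tests, assuming stand-in flag values and helper names (DEMO_*/demo_* are not the real f2fs definitions):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in flag bits; values are illustrative, not f2fs's. */
#define DEMO_SBI_IS_DIRTY       (1u << 0)
#define DEMO_CP_UMOUNT_FLAG     (1u << 1)

/* Write a closing checkpoint only when there is state worth persisting. */
static bool demo_needs_umount_checkpoint(unsigned int sbi_flags,
                                         unsigned int ckpt_flags)
{
        return (sbi_flags & DEMO_SBI_IS_DIRTY) ||
               !(ckpt_flags & DEMO_CP_UMOUNT_FLAG);
}

int main(void)
{
        /* Already checkpointed with the umount flag and not dirty: skip. */
        printf("%d\n", demo_needs_umount_checkpoint(0, DEMO_CP_UMOUNT_FLAG));
        /* Dirty in-memory state: write one more checkpoint. */
        printf("%d\n", demo_needs_umount_checkpoint(DEMO_SBI_IS_DIRTY, 0));
        return 0;
}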