/**
 * mount.c
 *
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include "fsck.h"
#include "node.h"
#include "xattr.h"
#include <locale.h>
#include <stdbool.h>
#ifdef HAVE_LINUX_POSIX_ACL_H
#include <linux/posix_acl.h>
#endif
#ifdef HAVE_SYS_ACL_H
#include <sys/acl.h>
#endif

#ifndef ACL_UNDEFINED_TAG
#define ACL_UNDEFINED_TAG	(0x00)
#define ACL_USER_OBJ		(0x01)
#define ACL_USER		(0x02)
#define ACL_GROUP_OBJ		(0x04)
#define ACL_GROUP		(0x08)
#define ACL_MASK		(0x10)
#define ACL_OTHER		(0x20)
#endif

u32 get_free_segments(struct f2fs_sb_info *sbi)
{
	u32 i, free_segs = 0;

	for (i = 0; i < TOTAL_SEGS(sbi); i++) {
		struct seg_entry *se = get_seg_entry(sbi, i);

		if (se->valid_blocks == 0x0 && !IS_CUR_SEGNO(sbi, i))
			free_segs++;
	}
	return free_segs;
}

void update_free_segments(struct f2fs_sb_info *sbi)
{
	char *progress = "-*|*-";
	static int i = 0;

	if (c.dbg_lv)
		return;

	MSG(0, "\r [ %c ] Free segments: 0x%x", progress[i % 5], get_free_segments(sbi));
	fflush(stdout);
	i++;
}

#if defined(HAVE_LINUX_POSIX_ACL_H) || defined(HAVE_SYS_ACL_H)
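/*
 * On-disk POSIX ACL layout, as decoded below: an f2fs_acl_header
 * (version word) followed by a packed array of entries.  ACL_USER_OBJ,
 * ACL_GROUP_OBJ, ACL_MASK and ACL_OTHER use the short form (tag and
 * perm only), while ACL_USER and ACL_GROUP carry an extra e_id, so the
 * entries are variable-sized and must be walked, not indexed.
 */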
static void print_acl(const u8 *value, int size)
{
	const struct f2fs_acl_header *hdr = (struct f2fs_acl_header *)value;
	const struct f2fs_acl_entry *entry = (struct f2fs_acl_entry *)(hdr + 1);
	const u8 *end = value + size;
	int i, count;

	if (hdr->a_version != cpu_to_le32(F2FS_ACL_VERSION)) {
		MSG(0, "Invalid ACL version [0x%x : 0x%x]\n",
				le32_to_cpu(hdr->a_version), F2FS_ACL_VERSION);
		return;
	}

	count = f2fs_acl_count(size);
	if (count <= 0) {
		MSG(0, "Invalid ACL value size %d\n", size);
		return;
	}

	for (i = 0; i < count; i++) {
		if ((u8 *)entry > end) {
			MSG(0, "Invalid ACL entries count %d\n", count);
			return;
		}

		switch (le16_to_cpu(entry->e_tag)) {
		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			MSG(0, "tag:0x%x perm:0x%x\n",
					le16_to_cpu(entry->e_tag),
					le16_to_cpu(entry->e_perm));
			entry = (struct f2fs_acl_entry *)((char *)entry +
					sizeof(struct f2fs_acl_entry_short));
			break;
		case ACL_USER:
			MSG(0, "tag:0x%x perm:0x%x uid:%u\n",
					le16_to_cpu(entry->e_tag),
					le16_to_cpu(entry->e_perm),
					le32_to_cpu(entry->e_id));
			entry = (struct f2fs_acl_entry *)((char *)entry +
					sizeof(struct f2fs_acl_entry));
			break;
		case ACL_GROUP:
			MSG(0, "tag:0x%x perm:0x%x gid:%u\n",
					le16_to_cpu(entry->e_tag),
					le16_to_cpu(entry->e_perm),
					le32_to_cpu(entry->e_id));
			entry = (struct f2fs_acl_entry *)((char *)entry +
					sizeof(struct f2fs_acl_entry));
			break;
		default:
			MSG(0, "Unknown ACL tag 0x%x\n",
					le16_to_cpu(entry->e_tag));
			return;
		}
	}
}
#endif /* HAVE_LINUX_POSIX_ACL_H || HAVE_SYS_ACL_H */

static void print_xattr_entry(const struct f2fs_xattr_entry *ent)
{
	const u8 *value = (const u8 *)&ent->e_name[ent->e_name_len];
	const int size = le16_to_cpu(ent->e_value_size);
	const struct fscrypt_context *ctx;
	int i;

	MSG(0, "\nxattr: e_name_index:%d e_name:", ent->e_name_index);
	for (i = 0; i < ent->e_name_len; i++)
		MSG(0, "%c", ent->e_name[i]);
	MSG(0, " e_name_len:%d e_value_size:%d e_value:\n",
			ent->e_name_len, size);

	switch (ent->e_name_index) {
#if defined(HAVE_LINUX_POSIX_ACL_H) || defined(HAVE_SYS_ACL_H)
	case F2FS_XATTR_INDEX_POSIX_ACL_ACCESS:
	case F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT:
		print_acl(value, size);
		return;
#endif
	case F2FS_XATTR_INDEX_ENCRYPTION:
		ctx = (const struct fscrypt_context *)value;
		if (size != sizeof(*ctx) ||
				ctx->format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
			break;
		MSG(0, "format: %d\n", ctx->format);
		MSG(0, "contents_encryption_mode: 0x%x\n", ctx->contents_encryption_mode);
		MSG(0, "filenames_encryption_mode: 0x%x\n", ctx->filenames_encryption_mode);
		MSG(0, "flags: 0x%x\n", ctx->flags);
		MSG(0, "master_key_descriptor: ");
		for (i = 0; i < FS_KEY_DESCRIPTOR_SIZE; i++)
			MSG(0, "%02X", ctx->master_key_descriptor[i]);
		MSG(0, "\nnonce: ");
		for (i = 0; i < FS_KEY_DERIVATION_NONCE_SIZE; i++)
			MSG(0, "%02X", ctx->nonce[i]);
		MSG(0, "\n");
		return;
	}
	for (i = 0; i < size; i++)
		MSG(0, "%02X", value[i]);
	MSG(0, "\n");
}

void print_inode_info(struct f2fs_sb_info *sbi,
			struct f2fs_node *node, int name)
{
	struct f2fs_inode *inode = &node->i;
	void *xattr_addr;
	struct f2fs_xattr_entry *ent;
	char en[F2FS_PRINT_NAMELEN];
	unsigned int i = 0;
	u32 namelen = le32_to_cpu(inode->i_namelen);
	int enc_name = file_enc_name(inode);
	int ofs = get_extra_isize(node);

	pretty_print_filename(inode->i_name, namelen, en, enc_name);
	if (name && en[0]) {
		MSG(0, " - File name : %s%s\n", en,
				enc_name ? " <encrypted>" : "");
		setlocale(LC_ALL, "");
		MSG(0, " - File size : %'llu (bytes)\n",
				le64_to_cpu(inode->i_size));
		return;
	}

	DISP_u32(inode, i_mode);
	DISP_u32(inode, i_advise);
	DISP_u32(inode, i_uid);
	DISP_u32(inode, i_gid);
	DISP_u32(inode, i_links);
	DISP_u64(inode, i_size);
	DISP_u64(inode, i_blocks);

	DISP_u64(inode, i_atime);
	DISP_u32(inode, i_atime_nsec);
	DISP_u64(inode, i_ctime);
	DISP_u32(inode, i_ctime_nsec);
	DISP_u64(inode, i_mtime);
	DISP_u32(inode, i_mtime_nsec);

	DISP_u32(inode, i_generation);
	DISP_u32(inode, i_current_depth);
	DISP_u32(inode, i_xattr_nid);
	DISP_u32(inode, i_flags);
	DISP_u32(inode, i_inline);
	DISP_u32(inode, i_pino);
	DISP_u32(inode, i_dir_level);

	if (en[0]) {
		DISP_u32(inode, i_namelen);
		printf("%-30s\t\t[%s]\n", "i_name", en);
	}

	printf("i_ext: fofs:%x blkaddr:%x len:%x\n",
			le32_to_cpu(inode->i_ext.fofs),
			le32_to_cpu(inode->i_ext.blk_addr),
			le32_to_cpu(inode->i_ext.len));

	if (c.feature & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
		DISP_u16(inode, i_extra_isize);
		if (c.feature & cpu_to_le32(F2FS_FEATURE_FLEXIBLE_INLINE_XATTR))
			DISP_u16(inode, i_inline_xattr_size);
		if (c.feature & cpu_to_le32(F2FS_FEATURE_PRJQUOTA))
			DISP_u32(inode, i_projid);
		if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
			DISP_u32(inode, i_inode_checksum);
		if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME)) {
			DISP_u64(inode, i_crtime);
			DISP_u32(inode, i_crtime_nsec);
		}
		if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
			DISP_u64(inode, i_compr_blocks);
			DISP_u32(inode, i_compress_algrithm);
			DISP_u32(inode, i_log_cluster_size);
			DISP_u32(inode, i_padding);
		}
	}

	for (i = 0; i < ADDRS_PER_INODE(inode); i++) {
		block_t blkaddr;
		char *flag = "";

		if (i + ofs >= DEF_ADDRS_PER_INODE)
			break;

		blkaddr = le32_to_cpu(inode->i_addr[i + ofs]);

		if (blkaddr == 0x0)
			continue;
		if (blkaddr == COMPRESS_ADDR)
			flag = "cluster flag";
		else if (blkaddr == NEW_ADDR)
			flag = "reserved flag";
		printf("i_addr[0x%x] %-16s\t\t[0x%8x : %u]\n", i + ofs, flag,
				blkaddr, blkaddr);
	}

	DISP_u32(inode, i_nid[0]);	/* direct */
	DISP_u32(inode, i_nid[1]);	/* direct */
	DISP_u32(inode, i_nid[2]);	/* indirect */
	DISP_u32(inode, i_nid[3]);	/* indirect */
	DISP_u32(inode, i_nid[4]);	/* double indirect */

	xattr_addr = read_all_xattrs(sbi, node);
	if (xattr_addr) {
		list_for_each_xattr(ent, xattr_addr) {
			print_xattr_entry(ent);
		}
		free(xattr_addr);
	}

	printf("\n");
}

void print_node_info(struct f2fs_sb_info *sbi,
			struct f2fs_node *node_block, int verbose)
{
	nid_t ino = le32_to_cpu(node_block->footer.ino);
	nid_t nid = le32_to_cpu(node_block->footer.nid);
	/* Is this an inode? */
	if (ino == nid) {
		DBG(verbose, "Node ID [0x%x:%u] is inode\n", nid, nid);
		print_inode_info(sbi, node_block, verbose);
	} else {
		int i;
		u32 *dump_blk = (u32 *)node_block;
		DBG(verbose,
			"Node ID [0x%x:%u] is direct node or indirect node.\n",
								nid, nid);
		for (i = 0; i < DEF_ADDRS_PER_BLOCK; i++)
			MSG(verbose, "[%d]\t\t\t[0x%8x : %d]\n",
						i, dump_blk[i], dump_blk[i]);
	}
}

static void DISP_label(u_int16_t *name)
{
	char buffer[MAX_VOLUME_NAME];

	utf16_to_utf8(buffer, name, MAX_VOLUME_NAME, MAX_VOLUME_NAME);
	printf("%-30s" "\t\t[%s]\n", "volume_name", buffer);
}

void print_raw_sb_info(struct f2fs_super_block *sb)
{
	if (!c.dbg_lv)
		return;

	printf("\n");
	printf("+--------------------------------------------------------+\n");
	printf("| Super block                                            |\n");
	printf("+--------------------------------------------------------+\n");

	DISP_u32(sb, magic);
	DISP_u32(sb, major_ver);

	DISP_label(sb->volume_name);

	DISP_u32(sb, minor_ver);
	DISP_u32(sb, log_sectorsize);
	DISP_u32(sb, log_sectors_per_block);

	DISP_u32(sb, log_blocksize);
	DISP_u32(sb, log_blocks_per_seg);
	DISP_u32(sb, segs_per_sec);
	DISP_u32(sb, secs_per_zone);
	DISP_u32(sb, checksum_offset);
	DISP_u64(sb, block_count);

	DISP_u32(sb, section_count);
	DISP_u32(sb, segment_count);
	DISP_u32(sb, segment_count_ckpt);
	DISP_u32(sb, segment_count_sit);
	DISP_u32(sb, segment_count_nat);

	DISP_u32(sb, segment_count_ssa);
	DISP_u32(sb, segment_count_main);
	DISP_u32(sb, segment0_blkaddr);

	DISP_u32(sb, cp_blkaddr);
	DISP_u32(sb, sit_blkaddr);
	DISP_u32(sb, nat_blkaddr);
	DISP_u32(sb, ssa_blkaddr);
	DISP_u32(sb, main_blkaddr);

	DISP_u32(sb, root_ino);
	DISP_u32(sb, node_ino);
	DISP_u32(sb, meta_ino);
	DISP_u32(sb, cp_payload);
	DISP_u32(sb, crc);
	DISP("%-.256s", sb, version);
	printf("\n");
}

void print_ckpt_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);

	if (!c.dbg_lv)
		return;

	printf("\n");
	printf("+--------------------------------------------------------+\n");
	printf("| Checkpoint                                             |\n");
	printf("+--------------------------------------------------------+\n");

	DISP_u64(cp, checkpoint_ver);
	DISP_u64(cp, user_block_count);
	DISP_u64(cp, valid_block_count);
	DISP_u32(cp, rsvd_segment_count);
	DISP_u32(cp, overprov_segment_count);
	DISP_u32(cp, free_segment_count);

	DISP_u32(cp, alloc_type[CURSEG_HOT_NODE]);
	DISP_u32(cp, alloc_type[CURSEG_WARM_NODE]);
	DISP_u32(cp, alloc_type[CURSEG_COLD_NODE]);
	DISP_u32(cp, cur_node_segno[0]);
	DISP_u32(cp, cur_node_segno[1]);
	DISP_u32(cp, cur_node_segno[2]);

	DISP_u32(cp, cur_node_blkoff[0]);
	DISP_u32(cp, cur_node_blkoff[1]);
	DISP_u32(cp, cur_node_blkoff[2]);


	DISP_u32(cp, alloc_type[CURSEG_HOT_DATA]);
	DISP_u32(cp, alloc_type[CURSEG_WARM_DATA]);
	DISP_u32(cp, alloc_type[CURSEG_COLD_DATA]);
	DISP_u32(cp, cur_data_segno[0]);
	DISP_u32(cp, cur_data_segno[1]);
	DISP_u32(cp, cur_data_segno[2]);

	DISP_u32(cp, cur_data_blkoff[0]);
	DISP_u32(cp, cur_data_blkoff[1]);
	DISP_u32(cp, cur_data_blkoff[2]);

	DISP_u32(cp, ckpt_flags);
	DISP_u32(cp, cp_pack_total_block_count);
	DISP_u32(cp, cp_pack_start_sum);
	DISP_u32(cp, valid_node_count);
	DISP_u32(cp, valid_inode_count);
	DISP_u32(cp, next_free_nid);
	DISP_u32(cp, sit_ver_bitmap_bytesize);
	DISP_u32(cp, nat_ver_bitmap_bytesize);
	DISP_u32(cp, checksum_offset);
	DISP_u64(cp, elapsed_time);

	DISP_u32(cp, sit_nat_version_bitmap[0]);
	printf("\n\n");
}

void print_cp_state(u32 flag)
{
	MSG(0, "Info: checkpoint state = %x : ", flag);
	if (flag & CP_QUOTA_NEED_FSCK_FLAG)
		MSG(0, "%s", " quota_need_fsck");
	if (flag & CP_LARGE_NAT_BITMAP_FLAG)
		MSG(0, "%s", " large_nat_bitmap");
	if (flag & CP_NOCRC_RECOVERY_FLAG)
		MSG(0, "%s", " allow_nocrc");
	if (flag & CP_TRIMMED_FLAG)
		MSG(0, "%s", " trimmed");
	if (flag & CP_NAT_BITS_FLAG)
		MSG(0, "%s", " nat_bits");
	if (flag & CP_CRC_RECOVERY_FLAG)
		MSG(0, "%s", " crc");
	if (flag & CP_FASTBOOT_FLAG)
		MSG(0, "%s", " fastboot");
	if (flag & CP_FSCK_FLAG)
		MSG(0, "%s", " fsck");
	if (flag & CP_ERROR_FLAG)
		MSG(0, "%s", " error");
	if (flag & CP_COMPACT_SUM_FLAG)
		MSG(0, "%s", " compacted_summary");
	if (flag & CP_ORPHAN_PRESENT_FLAG)
		MSG(0, "%s", " orphan_inodes");
	if (flag & CP_DISABLED_FLAG)
		MSG(0, "%s", " disabled");
	if (flag & CP_RESIZEFS_FLAG)
		MSG(0, "%s", " resizefs");
	if (flag & CP_UMOUNT_FLAG)
		MSG(0, "%s", " unmount");
	else
		MSG(0, "%s", " sudden-power-off");
	MSG(0, "\n");
}

void print_sb_state(struct f2fs_super_block *sb)
{
	__le32 f = sb->feature;
	int i;

	MSG(0, "Info: superblock features = %x : ", f);
	if (f & cpu_to_le32(F2FS_FEATURE_ENCRYPT)) {
		MSG(0, "%s", " encrypt");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_VERITY)) {
		MSG(0, "%s", " verity");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_BLKZONED)) {
		MSG(0, "%s", " blkzoned");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
		MSG(0, "%s", " extra_attr");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_PRJQUOTA)) {
		MSG(0, "%s", " project_quota");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM)) {
		MSG(0, "%s", " inode_checksum");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_FLEXIBLE_INLINE_XATTR)) {
		MSG(0, "%s", " flexible_inline_xattr");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_QUOTA_INO)) {
		MSG(0, "%s", " quota_ino");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME)) {
		MSG(0, "%s", " inode_crtime");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_LOST_FOUND)) {
		MSG(0, "%s", " lost_found");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_SB_CHKSUM)) {
		MSG(0, "%s", " sb_checksum");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_CASEFOLD)) {
		MSG(0, "%s", " casefold");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
		MSG(0, "%s", " compression");
	}
	MSG(0, "\n");
	MSG(0, "Info: superblock encrypt level = %d, salt = ",
					sb->encryption_level);
	for (i = 0; i < 16; i++)
		MSG(0, "%02x", sb->encrypt_pw_salt[i]);
	MSG(0, "\n");
}

static inline bool is_valid_data_blkaddr(block_t blkaddr)
{
	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR ||
				blkaddr == COMPRESS_ADDR)
		return 0;
	return 1;
}

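/*
 * Sanity ranges enforced below, restating the checks themselves:
 *   META_SIT: blkaddr <  SIT_BLK_CNT(sbi)  (a SIT block offset)
 *   META_SSA: ssa_blkaddr    <= blkaddr < MAIN_BLKADDR(sbi)
 *   META_CP:  __start_cp_addr <= blkaddr < sit_base_addr
 *   META_POR: MAIN_BLKADDR   <= blkaddr < MAX_BLKADDR(sbi)
 * META_NAT is accepted unconditionally.
 */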
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	switch (type) {
	case META_NAT:
		break;
	case META_SIT:
		if (blkaddr >= SIT_BLK_CNT(sbi))
			return 0;
		break;
	case META_SSA:
		if (blkaddr >= MAIN_BLKADDR(sbi) ||
				blkaddr < SM_I(sbi)->ssa_blkaddr)
			return 0;
		break;
	case META_CP:
		if (blkaddr >= SIT_I(sbi)->sit_base_addr ||
				blkaddr < __start_cp_addr(sbi))
			return 0;
		break;
	case META_POR:
		if (blkaddr >= MAX_BLKADDR(sbi) ||
				blkaddr < MAIN_BLKADDR(sbi))
			return 0;
		break;
	default:
		ASSERT(0);
	}

	return 1;
}

static inline block_t current_sit_addr(struct f2fs_sb_info *sbi,
						unsigned int start);

/*
 * Readahead CP/NAT/SIT/SSA pages
 */
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
							int type)
{
	block_t blkno = start;
	block_t blkaddr, start_blk = 0, len = 0;

	for (; nrpages-- > 0; blkno++) {

		if (!f2fs_is_valid_blkaddr(sbi, blkno, type))
			goto out;

		switch (type) {
		case META_NAT:
			if (blkno >= NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid))
				blkno = 0;
			/* get nat block addr */
			blkaddr = current_nat_addr(sbi,
					blkno * NAT_ENTRY_PER_BLOCK, NULL);
			break;
		case META_SIT:
			/* get sit block addr */
			blkaddr = current_sit_addr(sbi,
					blkno * SIT_ENTRY_PER_BLOCK);
			break;
		case META_SSA:
		case META_CP:
		case META_POR:
			blkaddr = blkno;
			break;
		default:
			ASSERT(0);
		}

		if (!len) {
			start_blk = blkaddr;
			len = 1;
		} else if (start_blk + len == blkaddr) {
			len++;
		} else {
			dev_readahead(start_blk << F2FS_BLKSIZE_BITS,
					len << F2FS_BLKSIZE_BITS);
			/* restart the run at the discontiguous block */
			start_blk = blkaddr;
			len = 1;
		}
	}
out:
	if (len)
		dev_readahead(start_blk << F2FS_BLKSIZE_BITS,
					len << F2FS_BLKSIZE_BITS);
	return blkno - start;
}

void update_superblock(struct f2fs_super_block *sb, int sb_mask)
{
	int addr, ret;
	u_int8_t *buf;
	u32 old_crc, new_crc;

	buf = calloc(BLOCK_SZ, 1);
	ASSERT(buf);

	if (get_sb(feature) & F2FS_FEATURE_SB_CHKSUM) {
		old_crc = get_sb(crc);
		new_crc = f2fs_cal_crc32(F2FS_SUPER_MAGIC, sb,
						SB_CHKSUM_OFFSET);
		set_sb(crc, new_crc);
		MSG(1, "Info: SB CRC is updated (0x%x -> 0x%x)\n",
							old_crc, new_crc);
	}

	memcpy(buf + F2FS_SUPER_OFFSET, sb, sizeof(*sb));
	for (addr = SB0_ADDR; addr < SB_MAX_ADDR; addr++) {
		if (SB_MASK(addr) & sb_mask) {
			ret = dev_write_block(buf, addr);
			ASSERT(ret >= 0);
		}
	}

	free(buf);
	DBG(0, "Info: Done to update superblock\n");
}

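/*
 * The on-disk metadata areas must be contiguous and in this order:
 *   segment 0: SB/CP | SIT | NAT | SSA | MAIN
 * Each check below verifies that one area's start address plus its
 * segment count lands exactly on the next area's start address, and
 * that the main area does not run past the end of the volume.
 */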
static inline int sanity_check_area_boundary(struct f2fs_super_block *sb,
							enum SB_ADDR sb_addr)
{
	u32 segment0_blkaddr = get_sb(segment0_blkaddr);
	u32 cp_blkaddr = get_sb(cp_blkaddr);
	u32 sit_blkaddr = get_sb(sit_blkaddr);
	u32 nat_blkaddr = get_sb(nat_blkaddr);
	u32 ssa_blkaddr = get_sb(ssa_blkaddr);
	u32 main_blkaddr = get_sb(main_blkaddr);
	u32 segment_count_ckpt = get_sb(segment_count_ckpt);
	u32 segment_count_sit = get_sb(segment_count_sit);
	u32 segment_count_nat = get_sb(segment_count_nat);
	u32 segment_count_ssa = get_sb(segment_count_ssa);
	u32 segment_count_main = get_sb(segment_count_main);
	u32 segment_count = get_sb(segment_count);
	u32 log_blocks_per_seg = get_sb(log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		MSG(0, "\tMismatch segment0(%u) cp_blkaddr(%u)\n",
				segment0_blkaddr, cp_blkaddr);
		return -1;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		MSG(0, "\tWrong CP boundary, start(%u) end(%u) blocks(%u)\n",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);
		return -1;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		MSG(0, "\tWrong SIT boundary, start(%u) end(%u) blocks(%u)\n",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);
		return -1;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		MSG(0, "\tWrong NAT boundary, start(%u) end(%u) blocks(%u)\n",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);
		return -1;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		MSG(0, "\tWrong SSA boundary, start(%u) end(%u) blocks(%u)\n",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);
		return -1;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		MSG(0, "\tWrong MAIN_AREA, start(%u) end(%u) block(%u)\n",
			main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		return -1;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		set_sb(segment_count, (main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		update_superblock(sb, SB_MASK(sb_addr));
		MSG(0, "Info: Fix alignment: start(%u) end(%u) block(%u)\n",
			main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
	}
	return 0;
}

static int verify_sb_chksum(struct f2fs_super_block *sb)
{
	if (SB_CHKSUM_OFFSET != get_sb(checksum_offset)) {
		MSG(0, "\tInvalid SB CRC offset: %u\n",
					get_sb(checksum_offset));
		return -1;
	}
	if (f2fs_crc_valid(get_sb(crc), sb,
			get_sb(checksum_offset))) {
		MSG(0, "\tInvalid SB CRC: 0x%x\n", get_sb(crc));
		return -1;
	}
	return 0;
}

int sanity_check_raw_super(struct f2fs_super_block *sb, enum SB_ADDR sb_addr)
{
	unsigned int blocksize;
	unsigned int segment_count, segs_per_sec, secs_per_zone;
	unsigned int total_sections, blocks_per_seg;

	if ((get_sb(feature) & F2FS_FEATURE_SB_CHKSUM) &&
					verify_sb_chksum(sb))
		return -1;

	if (F2FS_SUPER_MAGIC != get_sb(magic)) {
		MSG(0, "Magic Mismatch, valid(0x%x) - read(0x%x)\n",
			F2FS_SUPER_MAGIC, get_sb(magic));
		return -1;
	}

	if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
		MSG(0, "Invalid page_cache_size (%d), supports only 4KB\n",
			PAGE_CACHE_SIZE);
		return -1;
	}

	blocksize = 1 << get_sb(log_blocksize);
	if (F2FS_BLKSIZE != blocksize) {
		MSG(0, "Invalid blocksize (%u), supports only 4KB\n",
			blocksize);
		return -1;
	}

	/* check log blocks per segment */
	if (get_sb(log_blocks_per_seg) != 9) {
		MSG(0, "Invalid log blocks per segment (%u)\n",
			get_sb(log_blocks_per_seg));
		return -1;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (get_sb(log_sectorsize) > F2FS_MAX_LOG_SECTOR_SIZE ||
			get_sb(log_sectorsize) < F2FS_MIN_LOG_SECTOR_SIZE) {
		MSG(0, "Invalid log sectorsize (%u)\n", get_sb(log_sectorsize));
		return -1;
	}

	if (get_sb(log_sectors_per_block) + get_sb(log_sectorsize) !=
						F2FS_MAX_LOG_SECTOR_SIZE) {
		MSG(0, "Invalid log sectors per block(%u) log sectorsize(%u)\n",
			get_sb(log_sectors_per_block),
			get_sb(log_sectorsize));
		return -1;
	}

	segment_count = get_sb(segment_count);
	segs_per_sec = get_sb(segs_per_sec);
	secs_per_zone = get_sb(secs_per_zone);
	total_sections = get_sb(section_count);

	/* blocks_per_seg should be 512, given the above check */
	blocks_per_seg = 1 << get_sb(log_blocks_per_seg);

	if (segment_count > F2FS_MAX_SEGMENT ||
			segment_count < F2FS_MIN_SEGMENTS) {
		MSG(0, "\tInvalid segment count (%u)\n", segment_count);
		return -1;
	}

	if (total_sections > segment_count ||
			total_sections < F2FS_MIN_SEGMENTS ||
			segs_per_sec > segment_count || !segs_per_sec) {
		MSG(0, "\tInvalid segment/section count (%u, %u x %u)\n",
			segment_count, total_sections, segs_per_sec);
		return 1;
	}

	if ((segment_count / segs_per_sec) < total_sections) {
		MSG(0, "Small segment_count (%u < %u * %u)\n",
			segment_count, segs_per_sec, total_sections);
		return 1;
	}

	if (segment_count > (get_sb(block_count) >> 9)) {
		MSG(0, "Wrong segment_count / block_count (%u > %llu)\n",
			segment_count, get_sb(block_count));
		return 1;
	}

	if (sb->devs[0].path[0]) {
		unsigned int dev_segs = le32_to_cpu(sb->devs[0].total_segments);
		int i = 1;

		while (i < MAX_DEVICES && sb->devs[i].path[0]) {
			dev_segs += le32_to_cpu(sb->devs[i].total_segments);
			i++;
		}
		if (segment_count != dev_segs) {
			MSG(0, "Segment count (%u) mismatch with total segments from devices (%u)\n",
				segment_count, dev_segs);
			return 1;
		}
	}

	if (secs_per_zone > total_sections || !secs_per_zone) {
		MSG(0, "Wrong secs_per_zone / total_sections (%u, %u)\n",
			secs_per_zone, total_sections);
		return 1;
	}
	if (get_sb(extension_count) > F2FS_MAX_EXTENSION ||
			sb->hot_ext_count > F2FS_MAX_EXTENSION ||
			get_sb(extension_count) +
			sb->hot_ext_count > F2FS_MAX_EXTENSION) {
		MSG(0, "Corrupted extension count (%u + %u > %u)\n",
			get_sb(extension_count),
			sb->hot_ext_count,
			F2FS_MAX_EXTENSION);
		return 1;
	}

	if (get_sb(cp_payload) > (blocks_per_seg - F2FS_CP_PACKS)) {
		MSG(0, "Insane cp_payload (%u > %u)\n",
			get_sb(cp_payload), blocks_per_seg - F2FS_CP_PACKS);
		return 1;
	}

	/* check reserved ino info */
	if (get_sb(node_ino) != 1 || get_sb(meta_ino) != 2 ||
					get_sb(root_ino) != 3) {
		MSG(0, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)\n",
			get_sb(node_ino), get_sb(meta_ino), get_sb(root_ino));
		return -1;
	}

	/* Check zoned block device feature */
	if (c.devices[0].zoned_model == F2FS_ZONED_HM &&
			!(sb->feature & cpu_to_le32(F2FS_FEATURE_BLKZONED))) {
		MSG(0, "\tMissing zoned block device feature\n");
		return -1;
	}

	if (sanity_check_area_boundary(sb, sb_addr))
		return -1;
	return 0;
}

int validate_super_block(struct f2fs_sb_info *sbi, enum SB_ADDR sb_addr)
{
	char buf[F2FS_BLKSIZE];

	sbi->raw_super = malloc(sizeof(struct f2fs_super_block));
	if (!sbi->raw_super)
		return -ENOMEM;

	if (dev_read_block(buf, sb_addr))
		return -1;

	memcpy(sbi->raw_super, buf + F2FS_SUPER_OFFSET,
					sizeof(struct f2fs_super_block));

	if (!sanity_check_raw_super(sbi->raw_super, sb_addr)) {
		/* get kernel version */
		if (c.kd >= 0) {
			dev_read_version(c.version, 0, VERSION_LEN);
			get_kernel_version(c.version);
		} else {
			get_kernel_uname_version(c.version);
		}

		/* build sb version */
		memcpy(c.sb_version, sbi->raw_super->version, VERSION_LEN);
		get_kernel_version(c.sb_version);
		memcpy(c.init_version, sbi->raw_super->init_version, VERSION_LEN);
		get_kernel_version(c.init_version);

		MSG(0, "Info: MKFS version\n \"%s\"\n", c.init_version);
		MSG(0, "Info: FSCK version\n from \"%s\"\n to \"%s\"\n",
					c.sb_version, c.version);
		if (!c.no_kernel_check &&
				memcmp(c.sb_version, c.version, VERSION_LEN)) {
			memcpy(sbi->raw_super->version,
						c.version, VERSION_LEN);
			update_superblock(sbi->raw_super, SB_MASK(sb_addr));

			c.auto_fix = 0;
			c.fix_on = 1;
		}
		print_sb_state(sbi->raw_super);
		return 0;
	}

	free(sbi->raw_super);
	sbi->raw_super = NULL;
	MSG(0, "\tCan't find a valid F2FS superblock at 0x%x\n", sb_addr);

	return -EINVAL;
}

int init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	u64 total_sectors;
	int i;

	sbi->log_sectors_per_block = get_sb(log_sectors_per_block);
	sbi->log_blocksize = get_sb(log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = get_sb(log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = get_sb(segs_per_sec);
	sbi->secs_per_zone = get_sb(secs_per_zone);
	sbi->total_sections = get_sb(section_count);
	sbi->total_node_count = (get_sb(segment_count_nat) / 2) *
				sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = get_sb(root_ino);
	sbi->node_ino_num = get_sb(node_ino);
	sbi->meta_ino_num = get_sb(meta_ino);
	sbi->cur_victim_sec = NULL_SEGNO;

	for (i = 0; i < MAX_DEVICES; i++) {
		if (!sb->devs[i].path[0])
			break;

		if (i) {
			c.devices[i].path = strdup((char *)sb->devs[i].path);
			if (get_device_info(i))
				ASSERT(0);
		} else {
			ASSERT(!strcmp((char *)sb->devs[i].path,
						(char *)c.devices[i].path));
		}

		c.devices[i].total_segments =
			le32_to_cpu(sb->devs[i].total_segments);
		if (i)
			c.devices[i].start_blkaddr =
				c.devices[i - 1].end_blkaddr + 1;
		c.devices[i].end_blkaddr = c.devices[i].start_blkaddr +
			c.devices[i].total_segments *
			c.blks_per_seg - 1;
		if (i == 0)
			c.devices[i].end_blkaddr += get_sb(segment0_blkaddr);

		c.ndevs = i + 1;
		MSG(0, "Info: Device[%d] : %s blkaddr = %"PRIx64"--%"PRIx64"\n",
				i, c.devices[i].path,
				c.devices[i].start_blkaddr,
				c.devices[i].end_blkaddr);
	}

	total_sectors = get_sb(block_count) << sbi->log_sectors_per_block;
	MSG(0, "Info: total FS sectors = %"PRIu64" (%"PRIu64" MB)\n",
				total_sectors, total_sectors >>
						(20 - get_sb(log_sectorsize)));
	return 0;
}

static int verify_checksum_chksum(struct f2fs_checkpoint *cp)
{
	unsigned int chksum_offset = get_cp(checksum_offset);
	unsigned int crc, cal_crc;

	if (chksum_offset < CP_MIN_CHKSUM_OFFSET ||
			chksum_offset > CP_CHKSUM_OFFSET) {
		MSG(0, "\tInvalid CP CRC offset: %u\n", chksum_offset);
		return -1;
	}

	crc = le32_to_cpu(*(__le32 *)((unsigned char *)cp + chksum_offset));
	cal_crc = f2fs_checkpoint_chksum(cp);
	if (cal_crc != crc) {
		MSG(0, "\tInvalid CP CRC: offset:%u, crc:0x%x, calc:0x%x\n",
			chksum_offset, crc, cal_crc);
		return -1;
	}
	return 0;
}

static void *get_checkpoint_version(block_t cp_addr)
{
	void *cp_page;

	cp_page = malloc(PAGE_SIZE);
	ASSERT(cp_page);

	if (dev_read_block(cp_page, cp_addr) < 0)
		ASSERT(0);

	if (verify_checksum_chksum((struct f2fs_checkpoint *)cp_page))
		goto out;
	return cp_page;
out:
	free(cp_page);
	return NULL;
}

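/*
 * A CP pack is considered valid only when its first and last blocks
 * both pass their CRC checks and carry the same checkpoint_ver; a
 * mismatch means the pack was being written when power was cut, so
 * the caller falls back to the other pack.
 */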
void *validate_checkpoint(struct f2fs_sb_info *sbi, block_t cp_addr,
				unsigned long long *version)
{
	void *cp_page_1, *cp_page_2;
	struct f2fs_checkpoint *cp;
	unsigned long long cur_version = 0, pre_version = 0;

	/* Read the 1st cp block in this CP pack */
	cp_page_1 = get_checkpoint_version(cp_addr);
	if (!cp_page_1)
		return NULL;

	cp = (struct f2fs_checkpoint *)cp_page_1;
	if (get_cp(cp_pack_total_block_count) > sbi->blocks_per_seg)
		goto invalid_cp1;

	pre_version = get_cp(checkpoint_ver);

	/* Read the 2nd cp block in this CP pack */
	cp_addr += get_cp(cp_pack_total_block_count) - 1;
	cp_page_2 = get_checkpoint_version(cp_addr);
	if (!cp_page_2)
		goto invalid_cp1;

	cp = (struct f2fs_checkpoint *)cp_page_2;
	cur_version = get_cp(checkpoint_ver);

	if (cur_version == pre_version) {
		*version = cur_version;
		free(cp_page_2);
		return cp_page_1;
	}

	free(cp_page_2);
invalid_cp1:
	free(cp_page_1);
	return NULL;
}

int get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	void *cp1, *cp2, *cur_page;
	unsigned long blk_size = sbi->blocksize;
	unsigned long long cp1_version = 0, cp2_version = 0, version;
	unsigned long long cp_start_blk_no;
	unsigned int cp_payload, cp_blks;
	int ret;

	cp_payload = get_sb(cp_payload);
	if (cp_payload > F2FS_BLK_ALIGN(MAX_SIT_BITMAP_SIZE))
		return -EINVAL;

	cp_blks = 1 + cp_payload;
	sbi->ckpt = malloc(cp_blks * blk_size);
	if (!sbi->ckpt)
		return -ENOMEM;
	/*
	 * Finding the valid cp block involves reading both
	 * sets (cp pack 1 and cp pack 2)
	 */
	cp_start_blk_no = get_sb(cp_blkaddr);
	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

	/* The second checkpoint pack should start at the next segment */
	cp_start_blk_no += 1 << get_sb(log_blocks_per_seg);
	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

	if (cp1 && cp2) {
		if (ver_after(cp2_version, cp1_version)) {
			cur_page = cp2;
			sbi->cur_cp = 2;
			version = cp2_version;
		} else {
			cur_page = cp1;
			sbi->cur_cp = 1;
			version = cp1_version;
		}
	} else if (cp1) {
		cur_page = cp1;
		sbi->cur_cp = 1;
		version = cp1_version;
	} else if (cp2) {
		cur_page = cp2;
		sbi->cur_cp = 2;
		version = cp2_version;
	} else
		goto fail_no_cp;

	MSG(0, "Info: CKPT version = %llx\n", version);

	memcpy(sbi->ckpt, cur_page, blk_size);

	if (cp_blks > 1) {
		unsigned int i;
		unsigned long long cp_blk_no;

		cp_blk_no = get_sb(cp_blkaddr);
		if (cur_page == cp2)
			cp_blk_no += 1 << get_sb(log_blocks_per_seg);

		/* copy sit bitmap */
		for (i = 1; i < cp_blks; i++) {
			unsigned char *ckpt = (unsigned char *)sbi->ckpt;
			ret = dev_read_block(cur_page, cp_blk_no + i);
			ASSERT(ret >= 0);
			memcpy(ckpt + i * blk_size, cur_page, blk_size);
		}
	}
	if (cp1)
		free(cp1);
	if (cp2)
		free(cp2);
	return 0;

fail_no_cp:
	free(sbi->ckpt);
	sbi->ckpt = NULL;
	return -EINVAL;
}

/*
 * For a return value of 1, caller should further check for c.fix_on state
 * and take appropriate action.
 */
static int f2fs_should_proceed(struct f2fs_super_block *sb, u32 flag)
{
	if (!c.fix_on && (c.auto_fix || c.preen_mode)) {
		if (flag & CP_FSCK_FLAG ||
			flag & CP_QUOTA_NEED_FSCK_FLAG ||
			(exist_qf_ino(sb) && (flag & CP_ERROR_FLAG))) {
			c.fix_on = 1;
		} else if (!c.preen_mode) {
			print_cp_state(flag);
			return 0;
		}
	}
	return 1;
}

int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
	unsigned int flag = get_cp(ckpt_flags);
	unsigned int ovp_segments, reserved_segments;
	unsigned int main_segs, blocks_per_seg;
	unsigned int sit_segs, nat_segs;
	unsigned int sit_bitmap_size, nat_bitmap_size;
	unsigned int log_blocks_per_seg;
	unsigned int segment_count_main;
	unsigned int cp_pack_start_sum, cp_payload;
	block_t user_block_count;
	int i;

	total = get_sb(segment_count);
	fsmeta = get_sb(segment_count_ckpt);
	sit_segs = get_sb(segment_count_sit);
	fsmeta += sit_segs;
	nat_segs = get_sb(segment_count_nat);
	fsmeta += nat_segs;
	fsmeta += get_cp(rsvd_segment_count);
	fsmeta += get_sb(segment_count_ssa);

	if (fsmeta >= total)
		return 1;

	ovp_segments = get_cp(overprov_segment_count);
	reserved_segments = get_cp(rsvd_segment_count);

	if (fsmeta < F2FS_MIN_SEGMENT || ovp_segments == 0 ||
					reserved_segments == 0) {
		MSG(0, "\tWrong layout: check mkfs.f2fs version\n");
		return 1;
	}

	user_block_count = get_cp(user_block_count);
	segment_count_main = get_sb(segment_count_main);
	log_blocks_per_seg = get_sb(log_blocks_per_seg);
	if (!user_block_count || user_block_count >=
			segment_count_main << log_blocks_per_seg) {
		ASSERT_MSG("\tWrong user_block_count(%u)\n", user_block_count);

		if (!f2fs_should_proceed(sb, flag))
			return 1;
		if (!c.fix_on)
			return 1;

		if (flag & (CP_FSCK_FLAG | CP_RESIZEFS_FLAG)) {
			u32 valid_user_block_cnt;
			u32 seg_cnt_main = get_sb(segment_count) -
					(get_sb(segment_count_ckpt) +
					 get_sb(segment_count_sit) +
					 get_sb(segment_count_nat) +
					 get_sb(segment_count_ssa));

			/* validate segment_count_main in sb first */
			if (seg_cnt_main != get_sb(segment_count_main)) {
				MSG(0, "Inconsistent segment_count_main %u in sb\n",
						get_sb(segment_count_main));
				return 1;
			}
			valid_user_block_cnt = ((get_sb(segment_count_main) -
					get_cp(overprov_segment_count)) * c.blks_per_seg);
			MSG(0, "Info: Fix wrong user_block_count in CP: (%u) -> (%u)\n",
					user_block_count, valid_user_block_cnt);
			set_cp(user_block_count, valid_user_block_cnt);
			c.bug_on = 1;
		}
	}

	main_segs = get_sb(segment_count_main);
	blocks_per_seg = sbi->blocks_per_seg;

	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		if (get_cp(cur_node_segno[i]) >= main_segs ||
			get_cp(cur_node_blkoff[i]) >= blocks_per_seg)
			return 1;
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		if (get_cp(cur_data_segno[i]) >= main_segs ||
			get_cp(cur_data_blkoff[i]) >= blocks_per_seg)
			return 1;
	}

	sit_bitmap_size = get_cp(sit_ver_bitmap_bytesize);
	nat_bitmap_size = get_cp(nat_ver_bitmap_bytesize);

	if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
		nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
		MSG(0, "\tWrong bitmap size: sit(%u), nat(%u)\n",
				sit_bitmap_size, nat_bitmap_size);
		return 1;
	}

	cp_pack_start_sum = __start_sum_addr(sbi);
	cp_payload = __cp_payload(sbi);
	if (cp_pack_start_sum < cp_payload + 1 ||
		cp_pack_start_sum > blocks_per_seg - 1 -
			NR_CURSEG_TYPE) {
		MSG(0, "\tWrong cp_pack_start_sum(%u) or cp_payload(%u)\n",
			cp_pack_start_sum, cp_payload);
		if ((get_sb(feature) & F2FS_FEATURE_SB_CHKSUM))
			return 1;
		set_sb(cp_payload, cp_pack_start_sum - 1);
		update_superblock(sb, SB_MASK_ALL);
	}

	return 0;
}

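/*
 * The NAT is stored in pair segments: each NAT block has a primary and
 * a shadow copy one segment apart, and nm_i->nat_bitmap selects which
 * copy is live.  A worked example of the math below, assuming
 * log_blocks_per_seg == 9 (512-block segments, which
 * sanity_check_raw_super() enforces) and NAT_BLOCK_OFFSET(start) == 1000:
 *
 *   seg_off    = 1000 >> 9 = 1
 *   block_addr = nat_blkaddr + (1 << 9 << 1) + (1000 & 511)
 *              = nat_blkaddr + 1024 + 488
 *   if bit 1000 is set in nat_bitmap, add 512 to land in the second
 *   copy of the pair (*pack = 2).
 */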
pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start, int *pack)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off;

	block_off = NAT_BLOCK_OFFSET(start);
	seg_off = block_off >> sbi->log_blocks_per_seg;

	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
			(seg_off << sbi->log_blocks_per_seg << 1) +
			(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));
	if (pack)
		*pack = 1;

	if (f2fs_test_bit(block_off, nm_i->nat_bitmap)) {
		block_addr += sbi->blocks_per_seg;
		if (pack)
			*pack = 2;
	}

	return block_addr;
}

/* will not init nid_bitmap from nat */
static int f2fs_early_init_nid_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nid_bitmap_size = (nm_i->max_nid + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct f2fs_journal *journal = &sum->journal;
	nid_t nid;
	int i;

	if (!(c.func == SLOAD || c.func == FSCK))
		return 0;

	nm_i->nid_bitmap = (char *)calloc(nid_bitmap_size, 1);
	if (!nm_i->nid_bitmap)
		return -ENOMEM;

	/* arbitrarily set 0 bit */
	f2fs_set_bit(0, nm_i->nid_bitmap);

	if (nats_in_cursum(journal) > NAT_JOURNAL_ENTRIES) {
		MSG(0, "\tError: f2fs_init_nid_bitmap truncate n_nats(%u) to "
			"NAT_JOURNAL_ENTRIES(%lu)\n",
			nats_in_cursum(journal), NAT_JOURNAL_ENTRIES);
		journal->n_nats = cpu_to_le16(NAT_JOURNAL_ENTRIES);
		c.fix_on = 1;
	}

	for (i = 0; i < nats_in_cursum(journal); i++) {
		block_t addr;

		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
		if (!IS_VALID_BLK_ADDR(sbi, addr)) {
			MSG(0, "\tError: f2fs_init_nid_bitmap: addr(%u) is invalid!!!\n", addr);
			journal->n_nats = cpu_to_le16(i);
			c.fix_on = 1;
			continue;
		}

		nid = le32_to_cpu(nid_in_journal(journal, i));
		if (!IS_VALID_NID(sbi, nid)) {
			MSG(0, "\tError: f2fs_init_nid_bitmap: nid(%u) is invalid!!!\n", nid);
			journal->n_nats = cpu_to_le16(i);
			c.fix_on = 1;
			continue;
		}
		if (addr != NULL_ADDR)
			f2fs_set_bit(nid, nm_i->nid_bitmap);
	}
	return 0;
}

/* will init nid_bitmap from nat */
static int f2fs_late_init_nid_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_block;
	block_t start_blk;
	nid_t nid;

	if (!(c.func == SLOAD || c.func == FSCK))
		return 0;

	nat_block = malloc(F2FS_BLKSIZE);
	if (!nat_block) {
		free(nm_i->nid_bitmap);
		return -ENOMEM;
	}

	f2fs_ra_meta_pages(sbi, 0, NAT_BLOCK_OFFSET(nm_i->max_nid),
							META_NAT);
	for (nid = 0; nid < nm_i->max_nid; nid++) {
		if (!(nid % NAT_ENTRY_PER_BLOCK)) {
			int ret;

			start_blk = current_nat_addr(sbi, nid, NULL);
			ret = dev_read_block(nat_block, start_blk);
			ASSERT(ret >= 0);
		}

		if (nat_block->entries[nid % NAT_ENTRY_PER_BLOCK].block_addr)
			f2fs_set_bit(nid, nm_i->nid_bitmap);
	}

	free(nat_block);
	return 0;
}

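/*
 * Size derivation for the "<< 5" below: the NAT occupies
 * segment_count_nat / 2 live segments (pair layout), i.e.
 * (segment_count_nat / 2) * 512 = segment_count_nat << 8 NAT blocks
 * with the default 512-block segments; one bit per NAT block then
 * gives segment_count_nat << 5 bytes per full/empty bitmap.
 */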
u32 update_nat_bits_flags(struct f2fs_super_block *sb,
				struct f2fs_checkpoint *cp, u32 flags)
{
	u_int32_t nat_bits_bytes, nat_bits_blocks;

	nat_bits_bytes = get_sb(segment_count_nat) << 5;
	nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 +
						F2FS_BLKSIZE - 1);
	if (get_cp(cp_pack_total_block_count) <=
			(1 << get_sb(log_blocks_per_seg)) - nat_bits_blocks)
		flags |= CP_NAT_BITS_FLAG;
	else
		flags &= (~CP_NAT_BITS_FLAG);

	return flags;
}

/* should call flush_journal_entries() before this */
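/*
 * nat_bits layout, as assembled below: an 8-byte copy of the CP CRC,
 * then full_nat_bits (one bit per completely valid NAT block), then
 * empty_nat_bits (one bit per completely empty NAT block).  The pages
 * are written at the tail of the selected CP segment.
 */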
void write_nat_bits(struct f2fs_sb_info *sbi,
	struct f2fs_super_block *sb, struct f2fs_checkpoint *cp, int set)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	u_int32_t nat_blocks = get_sb(segment_count_nat) <<
				(get_sb(log_blocks_per_seg) - 1);
	u_int32_t nat_bits_bytes = nat_blocks >> 3;
	u_int32_t nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) +
					8 + F2FS_BLKSIZE - 1);
	unsigned char *nat_bits, *full_nat_bits, *empty_nat_bits;
	struct f2fs_nat_block *nat_block;
	u_int32_t i, j;
	block_t blkaddr;
	int ret;

	nat_bits = calloc(F2FS_BLKSIZE, nat_bits_blocks);
	ASSERT(nat_bits);

	nat_block = malloc(F2FS_BLKSIZE);
	ASSERT(nat_block);

	full_nat_bits = nat_bits + 8;
	empty_nat_bits = full_nat_bits + nat_bits_bytes;

	memset(full_nat_bits, 0, nat_bits_bytes);
	memset(empty_nat_bits, 0, nat_bits_bytes);

	for (i = 0; i < nat_blocks; i++) {
		int seg_off = i >> get_sb(log_blocks_per_seg);
		int valid = 0;

		blkaddr = (pgoff_t)(get_sb(nat_blkaddr) +
			(seg_off << get_sb(log_blocks_per_seg) << 1) +
			(i & ((1 << get_sb(log_blocks_per_seg)) - 1)));

		/*
		 * Consider that the new nat_blocks may be larger than the
		 * old nm_i->nat_blocks, since nm_i->nat_bitmap is based on
		 * the old one.
		 */
		if (i < nm_i->nat_blocks && f2fs_test_bit(i, nm_i->nat_bitmap))
			blkaddr += (1 << get_sb(log_blocks_per_seg));

		ret = dev_read_block(nat_block, blkaddr);
		ASSERT(ret >= 0);

		for (j = 0; j < NAT_ENTRY_PER_BLOCK; j++) {
			if ((i == 0 && j == 0) ||
				nat_block->entries[j].block_addr != NULL_ADDR)
				valid++;
		}
		if (valid == 0)
			test_and_set_bit_le(i, empty_nat_bits);
		else if (valid == NAT_ENTRY_PER_BLOCK)
			test_and_set_bit_le(i, full_nat_bits);
	}
	*(__le64 *)nat_bits = get_cp_crc(cp);
	free(nat_block);

	blkaddr = get_sb(segment0_blkaddr) + (set <<
				get_sb(log_blocks_per_seg)) - nat_bits_blocks;

	DBG(1, "\tWriting NAT bits pages, at offset 0x%08x\n", blkaddr);

	for (i = 0; i < nat_bits_blocks; i++) {
		if (dev_write_block(nat_bits + i * F2FS_BLKSIZE, blkaddr + i))
			ASSERT_MSG("\tError: write NAT bits to disk!!!\n");
	}
	MSG(0, "Info: Write valid nat_bits in checkpoint\n");

	free(nat_bits);
}

static int check_nat_bits(struct f2fs_sb_info *sbi,
	struct f2fs_super_block *sb, struct f2fs_checkpoint *cp)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	u_int32_t nat_blocks = get_sb(segment_count_nat) <<
				(get_sb(log_blocks_per_seg) - 1);
	u_int32_t nat_bits_bytes = nat_blocks >> 3;
	u_int32_t nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) +
					8 + F2FS_BLKSIZE - 1);
	unsigned char *nat_bits, *full_nat_bits, *empty_nat_bits;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = &curseg->sum_blk->journal;
	u_int32_t i, j;
	block_t blkaddr;
	int err = 0;

	nat_bits = calloc(F2FS_BLKSIZE, nat_bits_blocks);
	ASSERT(nat_bits);

	full_nat_bits = nat_bits + 8;
	empty_nat_bits = full_nat_bits + nat_bits_bytes;

	blkaddr = get_sb(segment0_blkaddr) + (sbi->cur_cp <<
				get_sb(log_blocks_per_seg)) - nat_bits_blocks;

	for (i = 0; i < nat_bits_blocks; i++) {
		if (dev_read_block(nat_bits + i * F2FS_BLKSIZE, blkaddr + i))
			ASSERT_MSG("\tError: read NAT bits from disk!!!\n");
	}

	if (*(__le64 *)nat_bits != get_cp_crc(cp) || nats_in_cursum(journal)) {
		/*
		 * If there is a journal, f2fs was not shut down cleanly.
		 * Let's flush the journal entries together with nat_bits.
		 */
		if (c.fix_on)
			err = -1;
		/* Otherwise, kernel will disable nat_bits */
		goto out;
	}

	for (i = 0; i < nat_blocks; i++) {
		u_int32_t start_nid = i * NAT_ENTRY_PER_BLOCK;
		u_int32_t valid = 0;
		int empty = test_bit_le(i, empty_nat_bits);
		int full = test_bit_le(i, full_nat_bits);

		for (j = 0; j < NAT_ENTRY_PER_BLOCK; j++) {
			if (f2fs_test_bit(start_nid + j, nm_i->nid_bitmap))
				valid++;
		}
		if (valid == 0) {
			if (!empty || full) {
				err = -1;
				goto out;
			}
		} else if (valid == NAT_ENTRY_PER_BLOCK) {
			if (empty || !full) {
				err = -1;
				goto out;
			}
		} else {
			if (empty || full) {
				err = -1;
				goto out;
			}
		}
	}
out:
	free(nat_bits);
	if (!err) {
		MSG(0, "Info: Checked valid nat_bits in checkpoint\n");
	} else {
		c.bug_nat_bits = 1;
		MSG(0, "Info: Corrupted valid nat_bits in checkpoint\n");
	}
	return err;
}

int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs;

	nm_i->nat_blkaddr = get_sb(nat_blkaddr);

	/* segment_count_nat includes pair segments, so divide by 2 */
	nat_segs = get_sb(segment_count_nat) >> 1;
	nm_i->nat_blocks = nat_segs << get_sb(log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;
	nm_i->init_scan_nid = get_cp(next_free_nid);
	nm_i->next_scan_nid = get_cp(next_free_nid);

	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);

	nm_i->nat_bitmap = malloc(nm_i->bitmap_size);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	/* copy version bitmap */
	memcpy(nm_i->nat_bitmap, version_bitmap, nm_i->bitmap_size);
	return f2fs_early_init_nid_bitmap(sbi);
}

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;
	sbi->nm_info = malloc(sizeof(struct f2fs_nm_info));
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	return 0;
}

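/*
 * build_sit_info() allocates one seg_entry per segment plus a shared
 * validity-bitmap arena: SIT_VBLOCK_MAP_SIZE bytes of cur_valid_map per
 * segment, doubled to also carve out a ckpt_valid_map per segment when
 * fsync'ed data records may need replay (need_fsync_data_record()).
 */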
int build_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
	struct sit_info *sit_i;
	unsigned int sit_segs;
	int start;
	char *src_bitmap, *dst_bitmap;
	unsigned char *bitmap;
	unsigned int bitmap_size;

	sit_i = malloc(sizeof(struct sit_info));
	if (!sit_i) {
		MSG(1, "\tError: Malloc failed for build_sit_info!\n");
		return -ENOMEM;
	}

	SM_I(sbi)->sit_info = sit_i;

	sit_i->sentries = calloc(TOTAL_SEGS(sbi) * sizeof(struct seg_entry), 1);
	if (!sit_i->sentries) {
		MSG(1, "\tError: Calloc failed for build_sit_info!\n");
		goto free_sit_info;
	}

	bitmap_size = TOTAL_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE;

	if (need_fsync_data_record(sbi))
		bitmap_size += bitmap_size;

	sit_i->bitmap = calloc(bitmap_size, 1);
	if (!sit_i->bitmap) {
		MSG(1, "\tError: Calloc failed for build_sit_info!!\n");
		goto free_sentries;
	}

	bitmap = sit_i->bitmap;

	for (start = 0; start < TOTAL_SEGS(sbi); start++) {
		sit_i->sentries[start].cur_valid_map = bitmap;
		bitmap += SIT_VBLOCK_MAP_SIZE;

		if (need_fsync_data_record(sbi)) {
			sit_i->sentries[start].ckpt_valid_map = bitmap;
			bitmap += SIT_VBLOCK_MAP_SIZE;
		}
	}

	sit_segs = get_sb(segment_count_sit) >> 1;
	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	dst_bitmap = malloc(bitmap_size);
	if (!dst_bitmap) {
		MSG(1, "\tError: Malloc failed for build_sit_info!!\n");
		goto free_validity_maps;
	}

	memcpy(dst_bitmap, src_bitmap, bitmap_size);

	sit_i->sit_base_addr = get_sb(sit_blkaddr);
	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
	sit_i->written_valid_blocks = get_cp(valid_block_count);
	sit_i->sit_bitmap = dst_bitmap;
	sit_i->bitmap_size = bitmap_size;
	sit_i->dirty_sentries = 0;
	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
	sit_i->elapsed_time = get_cp(elapsed_time);
	return 0;

free_validity_maps:
	free(sit_i->bitmap);
free_sentries:
	free(sit_i->sentries);
free_sit_info:
	free(sit_i);

	return -ENOMEM;
}

void reset_curseg(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;
	struct seg_entry *se;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));
	if (IS_DATASEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	se = get_seg_entry(sbi, curseg->segno);
	se->type = type;
	se->dirty = 1;
}

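/*
 * Compacted summary layout, as parsed below: the NAT journal, then the
 * SIT journal (SUM_JOURNAL_SIZE each), then the summary entries of the
 * three data cursegs packed back to back, spilling into the following
 * blocks whenever the footer area of a block would be overrun.
 */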
read_compacted_summaries(struct f2fs_sb_info * sbi)1694 static void read_compacted_summaries(struct f2fs_sb_info *sbi)
1695 {
1696 struct curseg_info *curseg;
1697 unsigned int i, j, offset;
1698 block_t start;
1699 char *kaddr;
1700 int ret;
1701
1702 start = start_sum_block(sbi);
1703
1704 kaddr = (char *)malloc(PAGE_SIZE);
1705 ASSERT(kaddr);
1706
1707 ret = dev_read_block(kaddr, start++);
1708 ASSERT(ret >= 0);
1709
1710 curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1711 memcpy(&curseg->sum_blk->journal.n_nats, kaddr, SUM_JOURNAL_SIZE);
1712
1713 curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1714 memcpy(&curseg->sum_blk->journal.n_sits, kaddr + SUM_JOURNAL_SIZE,
1715 SUM_JOURNAL_SIZE);
1716
1717 offset = 2 * SUM_JOURNAL_SIZE;
1718 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1719 unsigned short blk_off;
1720 struct curseg_info *curseg = CURSEG_I(sbi, i);
1721
1722 reset_curseg(sbi, i);
1723
1724 if (curseg->alloc_type == SSR)
1725 blk_off = sbi->blocks_per_seg;
1726 else
1727 blk_off = curseg->next_blkoff;
1728
1729 ASSERT(blk_off <= ENTRIES_IN_SUM);
1730
1731 for (j = 0; j < blk_off; j++) {
1732 struct f2fs_summary *s;
1733 s = (struct f2fs_summary *)(kaddr + offset);
1734 curseg->sum_blk->entries[j] = *s;
1735 offset += SUMMARY_SIZE;
1736 if (offset + SUMMARY_SIZE <=
1737 PAGE_CACHE_SIZE - SUM_FOOTER_SIZE)
1738 continue;
1739 memset(kaddr, 0, PAGE_SIZE);
1740 ret = dev_read_block(kaddr, start++);
1741 ASSERT(ret >= 0);
1742 offset = 0;
1743 }
1744 }
1745 free(kaddr);
1746 }
1747
restore_node_summary(struct f2fs_sb_info * sbi,unsigned int segno,struct f2fs_summary_block * sum_blk)1748 static void restore_node_summary(struct f2fs_sb_info *sbi,
1749 unsigned int segno, struct f2fs_summary_block *sum_blk)
1750 {
1751 struct f2fs_node *node_blk;
1752 struct f2fs_summary *sum_entry;
1753 block_t addr;
1754 unsigned int i;
1755 int ret;
1756
1757 node_blk = malloc(F2FS_BLKSIZE);
1758 ASSERT(node_blk);
1759
1760 /* scan the node segment */
1761 addr = START_BLOCK(sbi, segno);
1762 sum_entry = &sum_blk->entries[0];
1763
1764 for (i = 0; i < sbi->blocks_per_seg; i++, sum_entry++) {
1765 ret = dev_read_block(node_blk, addr);
1766 ASSERT(ret >= 0);
1767 sum_entry->nid = node_blk->footer.nid;
1768 addr++;
1769 }
1770 free(node_blk);
1771 }
1772
read_normal_summaries(struct f2fs_sb_info * sbi,int type)1773 static void read_normal_summaries(struct f2fs_sb_info *sbi, int type)
1774 {
1775 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
1776 struct f2fs_summary_block *sum_blk;
1777 struct curseg_info *curseg;
1778 unsigned int segno = 0;
1779 block_t blk_addr = 0;
1780 int ret;
1781
1782 if (IS_DATASEG(type)) {
1783 segno = get_cp(cur_data_segno[type]);
1784 if (is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
1785 blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
1786 else
1787 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
1788 } else {
1789 segno = get_cp(cur_node_segno[type - CURSEG_HOT_NODE]);
1790 if (is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
1791 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
1792 type - CURSEG_HOT_NODE);
1793 else
1794 blk_addr = GET_SUM_BLKADDR(sbi, segno);
1795 }
1796
1797 sum_blk = (struct f2fs_summary_block *)malloc(PAGE_SIZE);
1798 ASSERT(sum_blk);
1799
1800 ret = dev_read_block(sum_blk, blk_addr);
1801 ASSERT(ret >= 0);
1802
1803 if (IS_NODESEG(type) && !is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
1804 restore_node_summary(sbi, segno, sum_blk);
1805
1806 curseg = CURSEG_I(sbi, type);
1807 memcpy(curseg->sum_blk, sum_blk, PAGE_CACHE_SIZE);
1808 reset_curseg(sbi, type);
1809 free(sum_blk);
1810 }
1811
update_sum_entry(struct f2fs_sb_info * sbi,block_t blk_addr,struct f2fs_summary * sum)1812 void update_sum_entry(struct f2fs_sb_info *sbi, block_t blk_addr,
1813 struct f2fs_summary *sum)
1814 {
1815 struct f2fs_summary_block *sum_blk;
1816 u32 segno, offset;
1817 int type, ret;
1818 struct seg_entry *se;
1819
1820 segno = GET_SEGNO(sbi, blk_addr);
1821 offset = OFFSET_IN_SEG(sbi, blk_addr);
1822
1823 se = get_seg_entry(sbi, segno);
1824
1825 sum_blk = get_sum_block(sbi, segno, &type);
1826 memcpy(&sum_blk->entries[offset], sum, sizeof(*sum));
1827 sum_blk->footer.entry_type = IS_NODESEG(se->type) ? SUM_TYPE_NODE :
1828 SUM_TYPE_DATA;
1829
1830 /* write SSA all the time */
1831 ret = dev_write_block(sum_blk, GET_SUM_BLKADDR(sbi, segno));
1832 ASSERT(ret >= 0);
1833
1834 if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
1835 type == SEG_TYPE_MAX)
1836 free(sum_blk);
1837 }
1838
restore_curseg_summaries(struct f2fs_sb_info * sbi)1839 static void restore_curseg_summaries(struct f2fs_sb_info *sbi)
1840 {
1841 int type = CURSEG_HOT_DATA;
1842
1843 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
1844 read_compacted_summaries(sbi);
1845 type = CURSEG_HOT_NODE;
1846 }
1847
1848 for (; type <= CURSEG_COLD_NODE; type++)
1849 read_normal_summaries(sbi, type);
1850 }
1851
1852 static int build_curseg(struct f2fs_sb_info *sbi)
1853 {
1854 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
1855 struct curseg_info *array;
1856 unsigned short blk_off;
1857 unsigned int segno;
1858 int i;
1859
1860 array = malloc(sizeof(*array) * NR_CURSEG_TYPE);
1861 if (!array) {
1862 MSG(1, "\tError: Malloc failed for build_curseg!\n");
1863 return -ENOMEM;
1864 }
1865
1866 SM_I(sbi)->curseg_array = array;
1867
1868 for (i = 0; i < NR_CURSEG_TYPE; i++) {
1869 array[i].sum_blk = malloc(PAGE_CACHE_SIZE);
1870 if (!array[i].sum_blk) {
1871 MSG(1, "\tError: Malloc failed for build_curseg!!\n");
1872 goto seg_cleanup;
1873 }
1874
1875 if (i <= CURSEG_COLD_DATA) {
1876 blk_off = get_cp(cur_data_blkoff[i]);
1877 segno = get_cp(cur_data_segno[i]);
1878 } else {
1880 blk_off = get_cp(cur_node_blkoff[i - CURSEG_HOT_NODE]);
1881 segno = get_cp(cur_node_segno[i - CURSEG_HOT_NODE]);
1882 }
1883 ASSERT(segno < TOTAL_SEGS(sbi));
1884 ASSERT(blk_off < DEFAULT_BLOCKS_PER_SEGMENT);
1885
1886 array[i].segno = segno;
1887 array[i].zone = GET_ZONENO_FROM_SEGNO(sbi, segno);
1888 array[i].next_segno = NULL_SEGNO;
1889 array[i].next_blkoff = blk_off;
1890 array[i].alloc_type = cp->alloc_type[i];
1891 }
1892 restore_curseg_summaries(sbi);
1893 return 0;
1894
1895 seg_cleanup:
1896 for (--i; i >= 0; --i)
1897 free(array[i].sum_blk);
1898 free(array);
1899
1900 return -ENOMEM;
1901 }
1902
1903 static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
1904 {
1905 unsigned int end_segno = SM_I(sbi)->segment_count - 1;
1906 ASSERT(segno <= end_segno);
1907 }
1908
1909 static inline block_t current_sit_addr(struct f2fs_sb_info *sbi,
1910 unsigned int segno)
1911 {
1912 struct sit_info *sit_i = SIT_I(sbi);
1913 unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
1914 block_t blk_addr = sit_i->sit_base_addr + offset;
1915
1916 check_seg_range(sbi, segno);
1917
1918 /* calculate sit block address */
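/*
 * Each SIT block has two on-disk copies; the bit in sit_bitmap says
 * which copy is currently valid, the alternate one living sit_blocks
 * further into the SIT area.
 */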
1919 if (f2fs_test_bit(offset, sit_i->sit_bitmap))
1920 blk_addr += sit_i->sit_blocks;
1921
1922 return blk_addr;
1923 }
1924
1925 void get_current_sit_page(struct f2fs_sb_info *sbi,
1926 unsigned int segno, struct f2fs_sit_block *sit_blk)
1927 {
1928 block_t blk_addr = current_sit_addr(sbi, segno);
1929
1930 ASSERT(dev_read_block(sit_blk, blk_addr) >= 0);
1931 }
1932
1933 void rewrite_current_sit_page(struct f2fs_sb_info *sbi,
1934 unsigned int segno, struct f2fs_sit_block *sit_blk)
1935 {
1936 block_t blk_addr = current_sit_addr(sbi, segno);
1937
1938 ASSERT(dev_write_block(sit_blk, blk_addr) >= 0);
1939 }
1940
1941 void check_block_count(struct f2fs_sb_info *sbi,
1942 unsigned int segno, struct f2fs_sit_entry *raw_sit)
1943 {
1944 struct f2fs_sm_info *sm_info = SM_I(sbi);
1945 unsigned int end_segno = sm_info->segment_count - 1;
1946 int valid_blocks = 0;
1947 unsigned int i;
1948
1949 /* check segment usage */
1950 if (GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg)
1951 ASSERT_MSG("Invalid SIT vblocks: segno=0x%x, %u",
1952 segno, GET_SIT_VBLOCKS(raw_sit));
1953
1954 /* check boundary of a given segment number */
1955 if (segno > end_segno)
1956 ASSERT_MSG("Invalid SEGNO: 0x%x", segno);
1957
1958 /* check bitmap with valid block count */
1959 for (i = 0; i < SIT_VBLOCK_MAP_SIZE; i++)
1960 valid_blocks += get_bits_in_byte(raw_sit->valid_map[i]);
1961
1962 if (GET_SIT_VBLOCKS(raw_sit) != valid_blocks)
1963 ASSERT_MSG("Wrong SIT valid blocks: segno=0x%x, %u vs. %u",
1964 segno, GET_SIT_VBLOCKS(raw_sit), valid_blocks);
1965
1966 if (GET_SIT_TYPE(raw_sit) >= NO_CHECK_TYPE)
1967 ASSERT_MSG("Wrong SIT type: segno=0x%x, %u",
1968 segno, GET_SIT_TYPE(raw_sit));
1969 }
1970
1971 void __seg_info_from_raw_sit(struct seg_entry *se,
1972 struct f2fs_sit_entry *raw_sit)
1973 {
1974 se->valid_blocks = GET_SIT_VBLOCKS(raw_sit);
1975 memcpy(se->cur_valid_map, raw_sit->valid_map, SIT_VBLOCK_MAP_SIZE);
1976 se->type = GET_SIT_TYPE(raw_sit);
1977 se->orig_type = GET_SIT_TYPE(raw_sit);
1978 se->mtime = le64_to_cpu(raw_sit->mtime);
1979 }
1980
1981 void seg_info_from_raw_sit(struct f2fs_sb_info *sbi, struct seg_entry *se,
1982 struct f2fs_sit_entry *raw_sit)
1983 {
1984 __seg_info_from_raw_sit(se, raw_sit);
1985
1986 if (!need_fsync_data_record(sbi))
1987 return;
1988 se->ckpt_valid_blocks = se->valid_blocks;
1989 memcpy(se->ckpt_valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
1990 se->ckpt_type = se->type;
1991 }
1992
1993 struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
1994 unsigned int segno)
1995 {
1996 struct sit_info *sit_i = SIT_I(sbi);
1997 return &sit_i->sentries[segno];
1998 }
1999
2000 unsigned short get_seg_vblocks(struct f2fs_sb_info *sbi, struct seg_entry *se)
2001 {
2002 if (!need_fsync_data_record(sbi))
2003 return se->valid_blocks;
2004 else
2005 return se->ckpt_valid_blocks;
2006 }
2007
2008 unsigned char *get_seg_bitmap(struct f2fs_sb_info *sbi, struct seg_entry *se)
2009 {
2010 if (!need_fsync_data_record(sbi))
2011 return se->cur_valid_map;
2012 else
2013 return se->ckpt_valid_map;
2014 }
2015
2016 unsigned char get_seg_type(struct f2fs_sb_info *sbi, struct seg_entry *se)
2017 {
2018 if (!need_fsync_data_record(sbi))
2019 return se->type;
2020 else
2021 return se->ckpt_type;
2022 }
2023
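/*
 * Return the summary block covering @segno.  For a currently open
 * segment this is the live curseg copy (callers must not free it);
 * otherwise it is a freshly allocated block read from the SSA, which
 * the caller owns.  A negative *ret_type flags a node/data footer
 * mismatch for fsck to repair.
 */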
2024 struct f2fs_summary_block *get_sum_block(struct f2fs_sb_info *sbi,
2025 unsigned int segno, int *ret_type)
2026 {
2027 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
2028 struct f2fs_summary_block *sum_blk;
2029 struct curseg_info *curseg;
2030 int type, ret;
2031 u64 ssa_blk;
2032
2033 *ret_type = SEG_TYPE_MAX;
2034
2035 ssa_blk = GET_SUM_BLKADDR(sbi, segno);
2036 for (type = 0; type < NR_CURSEG_NODE_TYPE; type++) {
2037 if (segno == get_cp(cur_node_segno[type])) {
2038 curseg = CURSEG_I(sbi, CURSEG_HOT_NODE + type);
2039 if (!IS_SUM_NODE_SEG(curseg->sum_blk->footer)) {
2040 ASSERT_MSG("segno [0x%x] indicates a data "
2041 "segment, but should be node",
2042 segno);
2043 *ret_type = -SEG_TYPE_CUR_NODE;
2044 } else {
2045 *ret_type = SEG_TYPE_CUR_NODE;
2046 }
2047 return curseg->sum_blk;
2048 }
2049 }
2050
2051 for (type = 0; type < NR_CURSEG_DATA_TYPE; type++) {
2052 if (segno == get_cp(cur_data_segno[type])) {
2053 curseg = CURSEG_I(sbi, type);
2054 if (IS_SUM_NODE_SEG(curseg->sum_blk->footer)) {
2055 ASSERT_MSG("segno [0x%x] indicates a node "
2056 "segment, but should be data",
2057 segno);
2058 *ret_type = -SEG_TYPE_CUR_DATA;
2059 } else {
2060 *ret_type = SEG_TYPE_CUR_DATA;
2061 }
2062 return curseg->sum_blk;
2063 }
2064 }
2065
2066 sum_blk = calloc(BLOCK_SZ, 1);
2067 ASSERT(sum_blk);
2068
2069 ret = dev_read_block(sum_blk, ssa_blk);
2070 ASSERT(ret >= 0);
2071
2072 if (IS_SUM_NODE_SEG(sum_blk->footer))
2073 *ret_type = SEG_TYPE_NODE;
2074 else if (IS_SUM_DATA_SEG(sum_blk->footer))
2075 *ret_type = SEG_TYPE_DATA;
2076
2077 return sum_blk;
2078 }
2079
2080 int get_sum_entry(struct f2fs_sb_info *sbi, u32 blk_addr,
2081 struct f2fs_summary *sum_entry)
2082 {
2083 struct f2fs_summary_block *sum_blk;
2084 u32 segno, offset;
2085 int type;
2086
2087 segno = GET_SEGNO(sbi, blk_addr);
2088 offset = OFFSET_IN_SEG(sbi, blk_addr);
2089
2090 sum_blk = get_sum_block(sbi, segno, &type);
2091 memcpy(sum_entry, &(sum_blk->entries[offset]),
2092 sizeof(struct f2fs_summary));
2093 if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
2094 type == SEG_TYPE_MAX)
2095 free(sum_blk);
2096 return type;
2097 }
2098
2099 static void get_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
2100 struct f2fs_nat_entry *raw_nat)
2101 {
2102 struct f2fs_nat_block *nat_block;
2103 pgoff_t block_addr;
2104 int entry_off;
2105 int ret;
2106
2107 if (lookup_nat_in_journal(sbi, nid, raw_nat) >= 0)
2108 return;
2109
2110 nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
2111 ASSERT(nat_block);
2112
2113 entry_off = nid % NAT_ENTRY_PER_BLOCK;
2114 block_addr = current_nat_addr(sbi, nid, NULL);
2115
2116 ret = dev_read_block(nat_block, block_addr);
2117 ASSERT(ret >= 0);
2118
2119 memcpy(raw_nat, &nat_block->entries[entry_off],
2120 sizeof(struct f2fs_nat_entry));
2121 free(nat_block);
2122 }
2123
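/*
 * Rewire a single data block pointer: patch the address slot in the
 * owning inode (or direct node), then invalidate the inode's cached
 * extent if the old address fell inside it, since that extent no
 * longer maps a contiguous range.
 */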
2124 void update_data_blkaddr(struct f2fs_sb_info *sbi, nid_t nid,
2125 u16 ofs_in_node, block_t newaddr)
2126 {
2127 struct f2fs_node *node_blk = NULL;
2128 struct node_info ni;
2129 block_t oldaddr, startaddr, endaddr;
2130 int ret;
2131
2132 node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
2133 ASSERT(node_blk);
2134
2135 get_node_info(sbi, nid, &ni);
2136
2137 /* read node_block */
2138 ret = dev_read_block(node_blk, ni.blk_addr);
2139 ASSERT(ret >= 0);
2140
2141 /* patch the data pointer in the inode (i_addr) or the direct node */
2142 if (node_blk->footer.nid == node_blk->footer.ino) {
2143 int ofs = get_extra_isize(node_blk);
2144
2145 oldaddr = le32_to_cpu(node_blk->i.i_addr[ofs + ofs_in_node]);
2146 node_blk->i.i_addr[ofs + ofs_in_node] = cpu_to_le32(newaddr);
2147 ret = write_inode(node_blk, ni.blk_addr);
2148 ASSERT(ret >= 0);
2149 } else {
2150 oldaddr = le32_to_cpu(node_blk->dn.addr[ofs_in_node]);
2151 node_blk->dn.addr[ofs_in_node] = cpu_to_le32(newaddr);
2152 ret = dev_write_block(node_blk, ni.blk_addr);
2153 ASSERT(ret >= 0);
2154 }
2155
2156 /* check extent cache entry */
2157 if (node_blk->footer.nid != node_blk->footer.ino) {
2158 get_node_info(sbi, le32_to_cpu(node_blk->footer.ino), &ni);
2159
2160 /* read inode block */
2161 ret = dev_read_block(node_blk, ni.blk_addr);
2162 ASSERT(ret >= 0);
2163 }
2164
2165 startaddr = le32_to_cpu(node_blk->i.i_ext.blk_addr);
2166 endaddr = startaddr + le32_to_cpu(node_blk->i.i_ext.len);
2167 if (oldaddr >= startaddr && oldaddr < endaddr) {
2168 node_blk->i.i_ext.len = 0;
2169
2170 /* update inode block */
2171 ASSERT(write_inode(node_blk, ni.blk_addr) >= 0);
2172 }
2173 free(node_blk);
2174 }
2175
2176 void update_nat_blkaddr(struct f2fs_sb_info *sbi, nid_t ino,
2177 nid_t nid, block_t newaddr)
2178 {
2179 struct f2fs_nat_block *nat_block;
2180 pgoff_t block_addr;
2181 int entry_off;
2182 int ret;
2183
2184 nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
2185 ASSERT(nat_block);
2186
2187 entry_off = nid % NAT_ENTRY_PER_BLOCK;
2188 block_addr = current_nat_addr(sbi, nid, NULL);
2189
2190 ret = dev_read_block(nat_block, block_addr);
2191 ASSERT(ret >= 0);
2192
2193 if (ino)
2194 nat_block->entries[entry_off].ino = cpu_to_le32(ino);
2195 nat_block->entries[entry_off].block_addr = cpu_to_le32(newaddr);
2196 if (c.func == FSCK)
2197 F2FS_FSCK(sbi)->entries[nid] = nat_block->entries[entry_off];
2198
2199 ret = dev_write_block(nat_block, block_addr);
2200 ASSERT(ret >= 0);
2201 free(nat_block);
2202 }
2203
2204 void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
2205 {
2206 struct f2fs_nat_entry raw_nat;
2207
2208 ni->nid = nid;
2209 if (c.func == FSCK && F2FS_FSCK(sbi)->nr_nat_entries) {
2210 node_info_from_raw_nat(ni, &(F2FS_FSCK(sbi)->entries[nid]));
2211 if (ni->blk_addr)
2212 return;
2213 /* nat entry is not cached, read it */
2214 }
2215
2216 get_nat_entry(sbi, nid, &raw_nat);
2217 node_info_from_raw_nat(ni, &raw_nat);
2218 }
2219
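/*
 * Load all SIT entries in two passes: bulk-read the SIT area with
 * readahead, then overlay the newer entries cached in the cold-data
 * curseg journal.  Bogus journal counts or segment numbers are
 * truncated and flagged so fsck can repair them.
 */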
2220 static int build_sit_entries(struct f2fs_sb_info *sbi)
2221 {
2222 struct sit_info *sit_i = SIT_I(sbi);
2223 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2224 struct f2fs_journal *journal = &curseg->sum_blk->journal;
2225 struct f2fs_sit_block *sit_blk;
2226 struct seg_entry *se;
2227 struct f2fs_sit_entry sit;
2228 int sit_blk_cnt = SIT_BLK_CNT(sbi);
2229 unsigned int i, segno, end;
2230 unsigned int nr_read, start_blk = 0;
2231
2232 sit_blk = calloc(BLOCK_SZ, 1);
2233 if (!sit_blk) {
2234 MSG(1, "\tError: Calloc failed for build_sit_entries!\n");
2235 return -ENOMEM;
2236 }
2237
2238 do {
2239 nr_read = f2fs_ra_meta_pages(sbi, start_blk, MAX_RA_BLOCKS,
2240 META_SIT);
2241
2242 segno = start_blk * sit_i->sents_per_block;
2243 end = (start_blk + nr_read) * sit_i->sents_per_block;
2244
2245 for (; segno < end && segno < TOTAL_SEGS(sbi); segno++) {
2246 se = &sit_i->sentries[segno];
2247
2248 get_current_sit_page(sbi, segno, sit_blk);
2249 sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
2250
2251 check_block_count(sbi, segno, &sit);
2252 seg_info_from_raw_sit(sbi, se, &sit);
2253 }
2254 start_blk += nr_read;
2255 } while (start_blk < sit_blk_cnt);
2256
2258 free(sit_blk);
2259
2260 if (sits_in_cursum(journal) > SIT_JOURNAL_ENTRIES) {
2261 MSG(0, "\tError: build_sit_entries truncate n_sits(%u) to "
2262 "SIT_JOURNAL_ENTRIES(%lu)\n",
2263 sits_in_cursum(journal), SIT_JOURNAL_ENTRIES);
2264 journal->n_sits = cpu_to_le16(SIT_JOURNAL_ENTRIES);
2265 c.fix_on = 1;
2266 }
2267
2268 for (i = 0; i < sits_in_cursum(journal); i++) {
2269 segno = le32_to_cpu(segno_in_journal(journal, i));
2270
2271 if (segno >= TOTAL_SEGS(sbi)) {
2272 MSG(0, "\tError: build_sit_entries: segno(%u) is invalid!!!\n", segno);
2273 journal->n_sits = cpu_to_le16(i);
2274 c.fix_on = 1;
2275 continue;
2276 }
2277
2278 se = &sit_i->sentries[segno];
2279 sit = sit_in_journal(journal, i);
2280
2281 check_block_count(sbi, segno, &sit);
2282 seg_info_from_raw_sit(sbi, se, &sit);
2283 }
2284 return 0;
2285 }
2286
2287 static int early_build_segment_manager(struct f2fs_sb_info *sbi)
2288 {
2289 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2290 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
2291 struct f2fs_sm_info *sm_info;
2292
2293 sm_info = malloc(sizeof(struct f2fs_sm_info));
2294 if (!sm_info) {
2295 MSG(1, "\tError: Malloc failed for build_segment_manager!\n");
2296 return -ENOMEM;
2297 }
2298
2299 /* init sm info */
2300 sbi->sm_info = sm_info;
2301 sm_info->seg0_blkaddr = get_sb(segment0_blkaddr);
2302 sm_info->main_blkaddr = get_sb(main_blkaddr);
2303 sm_info->segment_count = get_sb(segment_count);
2304 sm_info->reserved_segments = get_cp(rsvd_segment_count);
2305 sm_info->ovp_segments = get_cp(overprov_segment_count);
2306 sm_info->main_segments = get_sb(segment_count_main);
2307 sm_info->ssa_blkaddr = get_sb(ssa_blkaddr);
2308
2309 if (build_sit_info(sbi) || build_curseg(sbi)) {
2310 free(sm_info);
2311 return -ENOMEM;
2312 }
2313
2314 return 0;
2315 }
2316
2317 static int late_build_segment_manager(struct f2fs_sb_info *sbi)
2318 {
2319 if (sbi->seg_manager_done)
2320 return 1; /* this function was already called */
2321
2322 sbi->seg_manager_done = true;
2323 if (build_sit_entries(sbi)) {
2324 free(sbi->sm_info);
2325 return -ENOMEM;
2326 }
2327
2328 return 0;
2329 }
2330
2331 void build_sit_area_bitmap(struct f2fs_sb_info *sbi)
2332 {
2333 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2334 struct f2fs_sm_info *sm_i = SM_I(sbi);
2335 unsigned int segno = 0;
2336 char *ptr = NULL;
2337 u32 sum_vblocks = 0;
2338 u32 free_segs = 0;
2339 struct seg_entry *se;
2340
2341 fsck->sit_area_bitmap_sz = sm_i->main_segments * SIT_VBLOCK_MAP_SIZE;
2342 fsck->sit_area_bitmap = calloc(1, fsck->sit_area_bitmap_sz);
2343 ASSERT(fsck->sit_area_bitmap);
2344 ptr = fsck->sit_area_bitmap;
2345
2346 ASSERT(fsck->sit_area_bitmap_sz == fsck->main_area_bitmap_sz);
2347
2348 for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
2349 se = get_seg_entry(sbi, segno);
2350
2351 memcpy(ptr, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
2352 ptr += SIT_VBLOCK_MAP_SIZE;
2353
2354 if (se->valid_blocks == 0x0) {
2355 if (le32_to_cpu(sbi->ckpt->cur_node_segno[0]) == segno ||
2356 le32_to_cpu(sbi->ckpt->cur_data_segno[0]) == segno ||
2357 le32_to_cpu(sbi->ckpt->cur_node_segno[1]) == segno ||
2358 le32_to_cpu(sbi->ckpt->cur_data_segno[1]) == segno ||
2359 le32_to_cpu(sbi->ckpt->cur_node_segno[2]) == segno ||
2360 le32_to_cpu(sbi->ckpt->cur_data_segno[2]) == segno) {
2361 continue;
2362 } else {
2363 free_segs++;
2364 }
2365 } else {
2366 sum_vblocks += se->valid_blocks;
2367 }
2368 }
2369 fsck->chk.sit_valid_blocks = sum_vblocks;
2370 fsck->chk.sit_free_segs = free_segs;
2371
2372 DBG(1, "Blocks [0x%x : %d] Free Segs [0x%x : %d]\n\n",
2373 sum_vblocks, sum_vblocks,
2374 free_segs, free_segs);
2375 }
2376
2377 void rewrite_sit_area_bitmap(struct f2fs_sb_info *sbi)
2378 {
2379 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2380 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2381 struct sit_info *sit_i = SIT_I(sbi);
2382 struct f2fs_sit_block *sit_blk;
2383 unsigned int segno = 0;
2384 struct f2fs_summary_block *sum = curseg->sum_blk;
2385 char *ptr = NULL;
2386
2387 sit_blk = calloc(BLOCK_SZ, 1);
2388 ASSERT(sit_blk);
2389 /* remove sit journal */
2390 sum->journal.n_sits = 0;
2391
2392 ptr = fsck->main_area_bitmap;
2393
2394 for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
2395 struct f2fs_sit_entry *sit;
2396 struct seg_entry *se;
2397 u16 valid_blocks = 0;
2398 u16 type;
2399 int i;
2400
2401 get_current_sit_page(sbi, segno, sit_blk);
2402 sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
2403 memcpy(sit->valid_map, ptr, SIT_VBLOCK_MAP_SIZE);
2404
2405 /* update valid block count */
2406 for (i = 0; i < SIT_VBLOCK_MAP_SIZE; i++)
2407 valid_blocks += get_bits_in_byte(sit->valid_map[i]);
2408
2409 se = get_seg_entry(sbi, segno);
2410 memcpy(se->cur_valid_map, ptr, SIT_VBLOCK_MAP_SIZE);
2411 se->valid_blocks = valid_blocks;
2412 type = se->type;
2413 if (type >= NO_CHECK_TYPE) {
2414 ASSERT_MSG("Invalide type and valid blocks=%x,%x",
2415 segno, valid_blocks);
2416 type = 0;
2417 }
2418 sit->vblocks = cpu_to_le16((type << SIT_VBLOCKS_SHIFT) |
2419 valid_blocks);
2420 rewrite_current_sit_page(sbi, segno, sit_blk);
2421
2422 ptr += SIT_VBLOCK_MAP_SIZE;
2423 }
2424
2425 free(sit_blk);
2426 }
2427
2428 static int flush_sit_journal_entries(struct f2fs_sb_info *sbi)
2429 {
2430 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2431 struct f2fs_journal *journal = &curseg->sum_blk->journal;
2432 struct sit_info *sit_i = SIT_I(sbi);
2433 struct f2fs_sit_block *sit_blk;
2434 unsigned int segno;
2435 int i;
2436
2437 sit_blk = calloc(BLOCK_SZ, 1);
2438 ASSERT(sit_blk);
2439 for (i = 0; i < sits_in_cursum(journal); i++) {
2440 struct f2fs_sit_entry *sit;
2441 struct seg_entry *se;
2442
2443 segno = segno_in_journal(journal, i);
2444 se = get_seg_entry(sbi, segno);
2445
2446 get_current_sit_page(sbi, segno, sit_blk);
2447 sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
2448
2449 memcpy(sit->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
2450 sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
2451 se->valid_blocks);
2452 sit->mtime = cpu_to_le64(se->mtime);
2453
2454 rewrite_current_sit_page(sbi, segno, sit_blk);
2455 }
2456
2457 free(sit_blk);
2458 journal->n_sits = 0;
2459 return i;
2460 }
2461
2462 static int flush_nat_journal_entries(struct f2fs_sb_info *sbi)
2463 {
2464 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2465 struct f2fs_journal *journal = &curseg->sum_blk->journal;
2466 struct f2fs_nat_block *nat_block;
2467 pgoff_t block_addr;
2468 int entry_off;
2469 nid_t nid;
2470 int ret;
2471 int i = 0;
2472
2473 nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
2474 ASSERT(nat_block);
2475 next:
2476 if (i >= nats_in_cursum(journal)) {
2477 free(nat_block);
2478 journal->n_nats = 0;
2479 return i;
2480 }
2481
2482 nid = le32_to_cpu(nid_in_journal(journal, i));
2483
2484 entry_off = nid % NAT_ENTRY_PER_BLOCK;
2485 block_addr = current_nat_addr(sbi, nid, NULL);
2486
2487 ret = dev_read_block(nat_block, block_addr);
2488 ASSERT(ret >= 0);
2489
2490 memcpy(&nat_block->entries[entry_off], &nat_in_journal(journal, i),
2491 sizeof(struct f2fs_nat_entry));
2492
2493 ret = dev_write_block(nat_block, block_addr);
2494 ASSERT(ret >= 0);
2495 i++;
2496 goto next;
2497 }
2498
2499 void flush_journal_entries(struct f2fs_sb_info *sbi)
2500 {
2501 int n_nats = flush_nat_journal_entries(sbi);
2502 int n_sits = flush_sit_journal_entries(sbi);
2503
2504 if (n_nats || n_sits)
2505 write_checkpoints(sbi);
2506 }
2507
2508 void flush_sit_entries(struct f2fs_sb_info *sbi)
2509 {
2510 struct sit_info *sit_i = SIT_I(sbi);
2511 struct f2fs_sit_block *sit_blk;
2512 unsigned int segno = 0;
2513
2514 sit_blk = calloc(BLOCK_SZ, 1);
2515 ASSERT(sit_blk);
2516 /* update free segments */
2517 for (segno = 0; segno < TOTAL_SEGS(sbi); segno++) {
2518 struct f2fs_sit_entry *sit;
2519 struct seg_entry *se;
2520
2521 se = get_seg_entry(sbi, segno);
2522
2523 if (!se->dirty)
2524 continue;
2525
2526 get_current_sit_page(sbi, segno, sit_blk);
2527 sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
2528 memcpy(sit->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
2529 sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
2530 se->valid_blocks);
2531 rewrite_current_sit_page(sbi, segno, sit_blk);
2532 }
2533
2534 free(sit_blk);
2535 }
2536
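/*
 * Point the current segment's allocator at the first hole in its own
 * validity bitmap and switch to SSR allocation, so new blocks reuse
 * holes instead of appending.  Host-managed zoned devices must append
 * sequentially, so this is refused there.
 */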
2537 int relocate_curseg_offset(struct f2fs_sb_info *sbi, int type)
2538 {
2539 struct curseg_info *curseg = CURSEG_I(sbi, type);
2540 struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
2541 unsigned int i;
2542
2543 if (c.zoned_model == F2FS_ZONED_HM)
2544 return -EINVAL;
2545
2546 for (i = 0; i < sbi->blocks_per_seg; i++) {
2547 if (!f2fs_test_bit(i, (const char *)se->cur_valid_map))
2548 break;
2549 }
2550
2551 if (i == sbi->blocks_per_seg)
2552 return -EINVAL;
2553
2554 DBG(1, "Update curseg[%d].next_blkoff %u -> %u, alloc_type %s -> SSR\n",
2555 type, curseg->next_blkoff, i,
2556 curseg->alloc_type == LFS ? "LFS" : "SSR");
2557
2558 curseg->next_blkoff = i;
2559 curseg->alloc_type = SSR;
2560
2561 return 0;
2562 }
2563
2564 void set_section_type(struct f2fs_sb_info *sbi, unsigned int segno, int type)
2565 {
2566 unsigned int i;
2567
2568 if (sbi->segs_per_sec == 1)
2569 return;
2570
2571 for (i = 0; i < sbi->segs_per_sec; i++) {
2572 struct seg_entry *se = get_seg_entry(sbi, segno + i);
2573
2574 se->type = type;
2575 }
2576 }
2577
2578 #ifdef HAVE_LINUX_BLKZONED_H
2579
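/*
 * On host-managed zoned devices a section may only be claimed for new
 * writes if its zone's write pointer still sits at the zone start;
 * conventional zones (and non-zoned models) always qualify.
 */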
2580 static bool write_pointer_at_zone_start(struct f2fs_sb_info *sbi,
2581 unsigned int zone_segno)
2582 {
2583 u_int64_t sector;
2584 struct blk_zone blkz;
2585 block_t block = START_BLOCK(sbi, zone_segno);
2586 int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
2587 int ret, j;
2588
2589 if (c.zoned_model != F2FS_ZONED_HM)
2590 return true;
2591
2592 for (j = 0; j < MAX_DEVICES; j++) {
2593 if (!c.devices[j].path)
2594 break;
2595 if (c.devices[j].start_blkaddr <= block &&
2596 block <= c.devices[j].end_blkaddr)
2597 break;
2598 }
2599
2600 if (j >= MAX_DEVICES)
2601 return false;
2602
2603 sector = (block - c.devices[j].start_blkaddr) << log_sectors_per_block;
2604 ret = f2fs_report_zone(j, sector, &blkz);
2605 if (ret)
2606 return false;
2607
2608 if (blk_zone_type(&blkz) != BLK_ZONE_TYPE_SEQWRITE_REQ)
2609 return true;
2610
2611 return blk_zone_sector(&blkz) == blk_zone_wp_sector(&blkz);
2612 }
2613
2614 #else
2615
2616 static bool write_pointer_at_zone_start(struct f2fs_sb_info *UNUSED(sbi),
2617 unsigned int UNUSED(zone_segno))
2618 {
2619 return true;
2620 }
2621
2622 #endif
2623
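/*
 * Scan the main area from *to, one block at a time in the direction
 * given by @left, for a block usable by @want_type.  Fully valid or
 * currently open segments are skipped a segment at a time; wholly free
 * segments are passed over when free space is scarce.  A completely
 * free section (with its write pointer at the zone start on zoned
 * devices) is retyped via set_section_type() and claimed outright.
 */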
2624 int find_next_free_block(struct f2fs_sb_info *sbi, u64 *to, int left,
2625 int want_type, bool new_sec)
2626 {
2627 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2628 struct seg_entry *se;
2629 u32 segno;
2630 u32 offset;
2631 int not_enough = 0;
2632 u64 end_blkaddr = (get_sb(segment_count_main) <<
2633 get_sb(log_blocks_per_seg)) + get_sb(main_blkaddr);
2634
2635 if (*to > 0)
2636 *to -= left;
2637 if (get_free_segments(sbi) <= SM_I(sbi)->reserved_segments + 1)
2638 not_enough = 1;
2639
2640 while (*to >= SM_I(sbi)->main_blkaddr && *to < end_blkaddr) {
2641 unsigned short vblocks;
2642 unsigned char *bitmap;
2643 unsigned char type;
2644
2645 segno = GET_SEGNO(sbi, *to);
2646 offset = OFFSET_IN_SEG(sbi, *to);
2647
2648 se = get_seg_entry(sbi, segno);
2649
2650 vblocks = get_seg_vblocks(sbi, se);
2651 bitmap = get_seg_bitmap(sbi, se);
2652 type = get_seg_type(sbi, se);
2653
2654 if (vblocks == sbi->blocks_per_seg ||
2655 IS_CUR_SEGNO(sbi, segno)) {
2656 *to = left ? START_BLOCK(sbi, segno) - 1 :
2657 START_BLOCK(sbi, segno + 1);
2658 continue;
2659 }
2660
2661 if (vblocks == 0 && not_enough) {
2662 *to = left ? START_BLOCK(sbi, segno) - 1 :
2663 START_BLOCK(sbi, segno + 1);
2664 continue;
2665 }
2666
2667 if (vblocks == 0 && !(segno % sbi->segs_per_sec)) {
2668 struct seg_entry *se2;
2669 unsigned int i;
2670
2671 for (i = 1; i < sbi->segs_per_sec; i++) {
2672 se2 = get_seg_entry(sbi, segno + i);
2673 if (get_seg_vblocks(sbi, se2))
2674 break;
2675 }
2676
2677 if (i == sbi->segs_per_sec &&
2678 write_pointer_at_zone_start(sbi, segno)) {
2679 set_section_type(sbi, segno, want_type);
2680 return 0;
2681 }
2682 }
2683
2684 if (type == want_type && !new_sec &&
2685 !f2fs_test_bit(offset, (const char *)bitmap))
2686 return 0;
2687
2688 *to = left ? *to - 1 : *to + 1;
2689 }
2690 return -1;
2691 }
2692
2693 static void move_one_curseg_info(struct f2fs_sb_info *sbi, u64 from, int left,
2694 int i)
2695 {
2696 struct curseg_info *curseg = CURSEG_I(sbi, i);
2697 struct f2fs_summary_block buf;
2698 u32 old_segno;
2699 u64 ssa_blk, to;
2700 int ret;
2701
2702 /* update original SSA too */
2703 ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
2704 ret = dev_write_block(curseg->sum_blk, ssa_blk);
2705 ASSERT(ret >= 0);
2706
2707 to = from;
2708 ret = find_next_free_block(sbi, &to, left, i,
2709 c.zoned_model == F2FS_ZONED_HM);
2710 ASSERT(ret == 0);
2711
2712 old_segno = curseg->segno;
2713 curseg->segno = GET_SEGNO(sbi, to);
2714 curseg->next_blkoff = OFFSET_IN_SEG(sbi, to);
2715 curseg->alloc_type = c.zoned_model == F2FS_ZONED_HM ? LFS : SSR;
2716
2717 /* update new segno */
2718 ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
2719 ret = dev_read_block(&buf, ssa_blk);
2720 ASSERT(ret >= 0);
2721
2722 memcpy(curseg->sum_blk, &buf, SUM_ENTRIES_SIZE);
2723
2724 /* update se->types */
2725 reset_curseg(sbi, i);
2726
2727 FIX_MSG("Move curseg[%d] %x -> %x after %"PRIx64"\n",
2728 i, old_segno, curseg->segno, from);
2729 }
2730
2731 void move_curseg_info(struct f2fs_sb_info *sbi, u64 from, int left)
2732 {
2733 int i;
2734
2735 /* update summary blocks having nullified journal entries */
2736 for (i = 0; i < NO_CHECK_TYPE; i++)
2737 move_one_curseg_info(sbi, from, left, i);
2738 }
2739
2740 void update_curseg_info(struct f2fs_sb_info *sbi, int type)
2741 {
2742 if (!relocate_curseg_offset(sbi, type))
2743 return;
2744 move_one_curseg_info(sbi, SM_I(sbi)->main_blkaddr, 0, type);
2745 }
2746
2747 void zero_journal_entries(struct f2fs_sb_info *sbi)
2748 {
2749 int i;
2750
2751 for (i = 0; i < NO_CHECK_TYPE; i++)
2752 CURSEG_I(sbi, i)->sum_blk->journal.n_nats = 0;
2753 }
2754
2755 void write_curseg_info(struct f2fs_sb_info *sbi)
2756 {
2757 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
2758 int i;
2759
2760 for (i = 0; i < NO_CHECK_TYPE; i++) {
2761 cp->alloc_type[i] = CURSEG_I(sbi, i)->alloc_type;
2762 if (i < CURSEG_HOT_NODE) {
2763 set_cp(cur_data_segno[i], CURSEG_I(sbi, i)->segno);
2764 set_cp(cur_data_blkoff[i],
2765 CURSEG_I(sbi, i)->next_blkoff);
2766 } else {
2767 int n = i - CURSEG_HOT_NODE;
2768
2769 set_cp(cur_node_segno[n], CURSEG_I(sbi, i)->segno);
2770 set_cp(cur_node_blkoff[n],
2771 CURSEG_I(sbi, i)->next_blkoff);
2772 }
2773 }
2774 }
2775
2776 int lookup_nat_in_journal(struct f2fs_sb_info *sbi, u32 nid,
2777 struct f2fs_nat_entry *raw_nat)
2778 {
2779 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2780 struct f2fs_journal *journal = &curseg->sum_blk->journal;
2781 int i = 0;
2782
2783 for (i = 0; i < nats_in_cursum(journal); i++) {
2784 if (le32_to_cpu(nid_in_journal(journal, i)) == nid) {
2785 memcpy(raw_nat, &nat_in_journal(journal, i),
2786 sizeof(struct f2fs_nat_entry));
2787 DBG(3, "==> Found nid [0x%x] in nat cache\n", nid);
2788 return i;
2789 }
2790 }
2791 return -1;
2792 }
2793
2794 void nullify_nat_entry(struct f2fs_sb_info *sbi, u32 nid)
2795 {
2796 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2797 struct f2fs_journal *journal = &curseg->sum_blk->journal;
2798 struct f2fs_nat_block *nat_block;
2799 pgoff_t block_addr;
2800 int entry_off;
2801 int ret;
2802 int i = 0;
2803
2804 /* check in journal */
2805 for (i = 0; i < nats_in_cursum(journal); i++) {
2806 if (le32_to_cpu(nid_in_journal(journal, i)) == nid) {
2807 memset(&nat_in_journal(journal, i), 0,
2808 sizeof(struct f2fs_nat_entry));
2809 FIX_MSG("Remove nid [0x%x] in nat journal", nid);
2810 return;
2811 }
2812 }
2813 nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
2814 ASSERT(nat_block);
2815
2816 entry_off = nid % NAT_ENTRY_PER_BLOCK;
2817 block_addr = current_nat_addr(sbi, nid, NULL);
2818
2819 ret = dev_read_block(nat_block, block_addr);
2820 ASSERT(ret >= 0);
2821
2822 if (nid == F2FS_NODE_INO(sbi) || nid == F2FS_META_INO(sbi)) {
2823 FIX_MSG("nid [0x%x] block_addr= 0x%x -> 0x1", nid,
2824 le32_to_cpu(nat_block->entries[entry_off].block_addr));
2825 nat_block->entries[entry_off].block_addr = cpu_to_le32(0x1);
2826 } else {
2827 memset(&nat_block->entries[entry_off], 0,
2828 sizeof(struct f2fs_nat_entry));
2829 FIX_MSG("Remove nid [0x%x] in NAT", nid);
2830 }
2831
2832 ret = dev_write_block(nat_block, block_addr);
2833 ASSERT(ret >= 0);
2834 free(nat_block);
2835 }
2836
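/*
 * f2fs keeps two checkpoint packs, one segment apart.  Before repairing
 * a checkpoint in place, copy the currently valid pack over its mirror
 * so that a crash mid-repair still leaves one valid checkpoint.
 */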
2837 void duplicate_checkpoint(struct f2fs_sb_info *sbi)
2838 {
2839 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2840 unsigned long long dst, src;
2841 void *buf;
2842 unsigned int seg_size = 1 << get_sb(log_blocks_per_seg);
2843 int ret;
2844
2845 if (sbi->cp_backuped)
2846 return;
2847
2848 buf = malloc(F2FS_BLKSIZE * seg_size);
2849 ASSERT(buf);
2850
2851 if (sbi->cur_cp == 1) {
2852 src = get_sb(cp_blkaddr);
2853 dst = src + seg_size;
2854 } else {
2855 dst = get_sb(cp_blkaddr);
2856 src = dst + seg_size;
2857 }
2858
2859 ret = dev_read(buf, src << F2FS_BLKSIZE_BITS,
2860 seg_size << F2FS_BLKSIZE_BITS);
2861 ASSERT(ret >= 0);
2862
2863 ret = dev_write(buf, dst << F2FS_BLKSIZE_BITS,
2864 seg_size << F2FS_BLKSIZE_BITS);
2865 ASSERT(ret >= 0);
2866
2867 free(buf);
2868
2869 ret = f2fs_fsync_device();
2870 ASSERT(ret >= 0);
2871
2872 sbi->cp_backuped = 1;
2873
2874 MSG(0, "Info: Duplicate valid checkpoint to mirror position "
2875 "%llu -> %llu\n", src, dst);
2876 }
2877
2878 void write_checkpoint(struct f2fs_sb_info *sbi)
2879 {
2880 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
2881 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2882 block_t orphan_blks = 0;
2883 unsigned long long cp_blk_no;
2884 u32 flags = CP_UMOUNT_FLAG;
2885 int i, ret;
2886 u_int32_t crc = 0;
2887
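/*
 * A checkpoint pack only becomes valid once its tail block is on the
 * media: write the head block, skip over the payload and orphan
 * blocks already in place, rewrite the curseg summaries (and their
 * SSA copies), fsync, then commit by writing the tail block last.
 */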
2888 if (is_set_ckpt_flags(cp, CP_ORPHAN_PRESENT_FLAG)) {
2889 orphan_blks = __start_sum_addr(sbi) - 1;
2890 flags |= CP_ORPHAN_PRESENT_FLAG;
2891 }
2892 if (is_set_ckpt_flags(cp, CP_TRIMMED_FLAG))
2893 flags |= CP_TRIMMED_FLAG;
2894 if (is_set_ckpt_flags(cp, CP_DISABLED_FLAG))
2895 flags |= CP_DISABLED_FLAG;
2896 if (is_set_ckpt_flags(cp, CP_LARGE_NAT_BITMAP_FLAG)) {
2897 flags |= CP_LARGE_NAT_BITMAP_FLAG;
2898 set_cp(checksum_offset, CP_MIN_CHKSUM_OFFSET);
2899 } else {
2900 set_cp(checksum_offset, CP_CHKSUM_OFFSET);
2901 }
2902
2903 set_cp(free_segment_count, get_free_segments(sbi));
2904 if (c.func == FSCK) {
2905 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2906
2907 set_cp(valid_block_count, fsck->chk.valid_blk_cnt);
2908 set_cp(valid_node_count, fsck->chk.valid_node_cnt);
2909 set_cp(valid_inode_count, fsck->chk.valid_inode_cnt);
2910 } else {
2911 set_cp(valid_block_count, sbi->total_valid_block_count);
2912 set_cp(valid_node_count, sbi->total_valid_node_count);
2913 set_cp(valid_inode_count, sbi->total_valid_inode_count);
2914 }
2915 set_cp(cp_pack_total_block_count, 8 + orphan_blks + get_sb(cp_payload));
2916
2917 flags = update_nat_bits_flags(sb, cp, flags);
2918 set_cp(ckpt_flags, flags);
2919
2920 crc = f2fs_checkpoint_chksum(cp);
2921 *((__le32 *)((unsigned char *)cp + get_cp(checksum_offset))) =
2922 cpu_to_le32(crc);
2923
2924 cp_blk_no = get_sb(cp_blkaddr);
2925 if (sbi->cur_cp == 2)
2926 cp_blk_no += 1 << get_sb(log_blocks_per_seg);
2927
2928 /* write the first cp */
2929 ret = dev_write_block(cp, cp_blk_no++);
2930 ASSERT(ret >= 0);
2931
2932 /* skip payload */
2933 cp_blk_no += get_sb(cp_payload);
2934 /* skip orphan blocks */
2935 cp_blk_no += orphan_blks;
2936
2937 /* update summary blocks having nullified journal entries */
2938 for (i = 0; i < NO_CHECK_TYPE; i++) {
2939 struct curseg_info *curseg = CURSEG_I(sbi, i);
2940 u64 ssa_blk;
2941
2942 ret = dev_write_block(curseg->sum_blk, cp_blk_no++);
2943 ASSERT(ret >= 0);
2944
2945 /* update original SSA too */
2946 ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
2947 ret = dev_write_block(curseg->sum_blk, ssa_blk);
2948 ASSERT(ret >= 0);
2949 }
2950
2951 /* Write nat bits */
2952 if (flags & CP_NAT_BITS_FLAG)
2953 write_nat_bits(sbi, sb, cp, sbi->cur_cp);
2954
2955 /* in case of sudden power off */
2956 ret = f2fs_fsync_device();
2957 ASSERT(ret >= 0);
2958
2959 /* write the last cp */
2960 ret = dev_write_block(cp, cp_blk_no++);
2961 ASSERT(ret >= 0);
2962
2963 ret = f2fs_fsync_device();
2964 ASSERT(ret >= 0);
2965 }
2966
2967 void write_checkpoints(struct f2fs_sb_info *sbi)
2968 {
2969 /* copy valid checkpoint to its mirror position */
2970 duplicate_checkpoint(sbi);
2971
2972 /* repair checkpoint at CP #0 position */
2973 sbi->cur_cp = 1;
2974 write_checkpoint(sbi);
2975 }
2976
2977 void build_nat_area_bitmap(struct f2fs_sb_info *sbi)
2978 {
2979 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2980 struct f2fs_journal *journal = &curseg->sum_blk->journal;
2981 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2982 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2983 struct f2fs_nm_info *nm_i = NM_I(sbi);
2984 struct f2fs_nat_block *nat_block;
2985 struct node_info ni;
2986 u32 nid, nr_nat_blks;
2987 pgoff_t block_off;
2988 pgoff_t block_addr;
2989 int seg_off;
2990 int ret;
2991 unsigned int i;
2992
2993 nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
2994 ASSERT(nat_block);
2995
2996 /* Alloc & build nat entry bitmap */
2997 nr_nat_blks = (get_sb(segment_count_nat) / 2) <<
2998 sbi->log_blocks_per_seg;
2999
3000 fsck->nr_nat_entries = nr_nat_blks * NAT_ENTRY_PER_BLOCK;
3001 fsck->nat_area_bitmap_sz = (fsck->nr_nat_entries + 7) / 8;
3002 fsck->nat_area_bitmap = calloc(fsck->nat_area_bitmap_sz, 1);
3003 ASSERT(fsck->nat_area_bitmap);
3004
3005 fsck->entries = calloc(sizeof(struct f2fs_nat_entry),
3006 fsck->nr_nat_entries);
3007 ASSERT(fsck->entries);
3008
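/*
 * NAT segments come in ping-pong pairs: a NAT block's address is its
 * pair offset times two segments plus the block offset within the
 * segment, and nat_bitmap selects which copy of the pair is live.
 */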
3009 for (block_off = 0; block_off < nr_nat_blks; block_off++) {
3010
3011 seg_off = block_off >> sbi->log_blocks_per_seg;
3012 block_addr = (pgoff_t)(nm_i->nat_blkaddr +
3013 (seg_off << sbi->log_blocks_per_seg << 1) +
3014 (block_off & ((1 << sbi->log_blocks_per_seg) - 1)));
3015
3016 if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
3017 block_addr += sbi->blocks_per_seg;
3018
3019 ret = dev_read_block(nat_block, block_addr);
3020 ASSERT(ret >= 0);
3021
3022 nid = block_off * NAT_ENTRY_PER_BLOCK;
3023 for (i = 0; i < NAT_ENTRY_PER_BLOCK; i++) {
3024 ni.nid = nid + i;
3025
3026 if ((nid + i) == F2FS_NODE_INO(sbi) ||
3027 (nid + i) == F2FS_META_INO(sbi)) {
3028 /*
3029 * block_addr of node/meta inode should be 0x1.
3030 * Set this bit, and fsck_verify will fix it.
3031 */
3032 if (le32_to_cpu(nat_block->entries[i].block_addr) != 0x1) {
3033 ASSERT_MSG("\tError: ino[0x%x] block_addr[0x%x] is invalid\n",
3034 nid + i, le32_to_cpu(nat_block->entries[i].block_addr));
3035 f2fs_set_bit(nid + i, fsck->nat_area_bitmap);
3036 }
3037 continue;
3038 }
3039
3040 node_info_from_raw_nat(&ni, &nat_block->entries[i]);
3041 if (ni.blk_addr == 0x0)
3042 continue;
3043 if (ni.ino == 0x0) {
3044 ASSERT_MSG("\tError: ino[0x%8x] or blk_addr[0x%16x]"
3045 " is invalid\n", ni.ino, ni.blk_addr);
3046 }
3047 if (ni.ino == (nid + i)) {
3048 fsck->nat_valid_inode_cnt++;
3049 DBG(3, "ino[0x%8x] maybe is inode\n", ni.ino);
3050 }
3051 if (nid + i == 0) {
3052 /*
3053 * NAT entry [0] must be null. If it
3054 * is corrupted, set its bit in
3055 * nat_area_bitmap so that fsck_verify
3056 * will nullify it.
3057 */
3058 ASSERT_MSG("Invalid nat entry[0]: "
3059 "blk_addr[0x%x]\n", ni.blk_addr);
3060 fsck->chk.valid_nat_entry_cnt--;
3061 }
3062
3063 DBG(3, "nid[0x%8x] addr[0x%16x] ino[0x%8x]\n",
3064 nid + i, ni.blk_addr, ni.ino);
3065 f2fs_set_bit(nid + i, fsck->nat_area_bitmap);
3066 fsck->chk.valid_nat_entry_cnt++;
3067
3068 fsck->entries[nid + i] = nat_block->entries[i];
3069 }
3070 }
3071
3072 /* Traverse nat journal, update the corresponding entries */
3073 for (i = 0; i < nats_in_cursum(journal); i++) {
3074 struct f2fs_nat_entry raw_nat;
3075 nid = le32_to_cpu(nid_in_journal(journal, i));
3076 ni.nid = nid;
3077
3078 DBG(3, "==> Found nid [0x%x] in nat cache, update it\n", nid);
3079
3080 /* Clear the original bit and count */
3081 if (fsck->entries[nid].block_addr != 0x0) {
3082 fsck->chk.valid_nat_entry_cnt--;
3083 f2fs_clear_bit(nid, fsck->nat_area_bitmap);
3084 if (fsck->entries[nid].ino == nid)
3085 fsck->nat_valid_inode_cnt--;
3086 }
3087
3088 /* Use nat entries in journal */
3089 memcpy(&raw_nat, &nat_in_journal(journal, i),
3090 sizeof(struct f2fs_nat_entry));
3091 node_info_from_raw_nat(&ni, &raw_nat);
3092 if (ni.blk_addr != 0x0) {
3093 if (ni.ino == 0x0)
3094 ASSERT_MSG("\tError: ino[0x%8x] or blk_addr[0x%16x]"
3095 " is invalid\n", ni.ino, ni.blk_addr);
3096 if (ni.ino == nid) {
3097 fsck->nat_valid_inode_cnt++;
3098 DBG(3, "ino[0x%8x] maybe is inode\n", ni.ino);
3099 }
3100 f2fs_set_bit(nid, fsck->nat_area_bitmap);
3101 fsck->chk.valid_nat_entry_cnt++;
3102 DBG(3, "nid[0x%x] in nat cache\n", nid);
3103 }
3104 fsck->entries[nid] = raw_nat;
3105 }
3106 free(nat_block);
3107
3108 DBG(1, "valid nat entries (block_addr != 0x0) [0x%8x : %u]\n",
3109 fsck->chk.valid_nat_entry_cnt,
3110 fsck->chk.valid_nat_entry_cnt);
3111 }
3112
3113 static int check_sector_size(struct f2fs_super_block *sb)
3114 {
3115 u_int32_t log_sectorsize, log_sectors_per_block;
3116
3117 log_sectorsize = log_base_2(c.sector_size);
3118 log_sectors_per_block = log_base_2(c.sectors_per_blk);
3119
3120 if (log_sectorsize == get_sb(log_sectorsize) &&
3121 log_sectors_per_block == get_sb(log_sectors_per_block))
3122 return 0;
3123
3124 set_sb(log_sectorsize, log_sectorsize);
3125 set_sb(log_sectors_per_block, log_sectors_per_block);
3126
3127 update_superblock(sb, SB_MASK_ALL);
3128 return 0;
3129 }
3130
3131 static int tune_sb_features(struct f2fs_sb_info *sbi)
3132 {
3133 int sb_changed = 0;
3134 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
3135
3136 if (!(sb->feature & cpu_to_le32(F2FS_FEATURE_ENCRYPT)) &&
3137 c.feature & cpu_to_le32(F2FS_FEATURE_ENCRYPT)) {
3138 sb->feature |= cpu_to_le32(F2FS_FEATURE_ENCRYPT);
3139 MSG(0, "Info: Set Encryption feature\n");
3140 sb_changed = 1;
3141 }
3142 if (!(sb->feature & cpu_to_le32(F2FS_FEATURE_CASEFOLD)) &&
3143 c.feature & cpu_to_le32(F2FS_FEATURE_CASEFOLD)) {
3144 if (!c.s_encoding) {
3145 ERR_MSG("ERROR: Must specify encoding to enable casefolding.\n");
3146 return -1;
3147 }
3148 sb->feature |= cpu_to_le32(F2FS_FEATURE_CASEFOLD);
3149 MSG(0, "Info: Set Casefold feature\n");
3150 sb_changed = 1;
3151 }
3152 /* TODO: quota needs to allocate inode numbers */
3153
3154 c.feature = sb->feature;
3155 if (!sb_changed)
3156 return 0;
3157
3158 update_superblock(sb, SB_MASK_ALL);
3159 return 0;
3160 }
3161
3162 static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
3163 nid_t ino)
3164 {
3165 struct fsync_inode_entry *entry;
3166
3167 list_for_each_entry(entry, head, list)
3168 if (entry->ino == ino)
3169 return entry;
3170
3171 return NULL;
3172 }
3173
3174 static struct fsync_inode_entry *add_fsync_inode(struct list_head *head,
3175 nid_t ino)
3176 {
3177 struct fsync_inode_entry *entry;
3178
3179 entry = calloc(sizeof(struct fsync_inode_entry), 1);
3180 if (!entry)
3181 return NULL;
3182 entry->ino = ino;
3183 list_add_tail(&entry->list, head);
3184 return entry;
3185 }
3186
3187 static void del_fsync_inode(struct fsync_inode_entry *entry)
3188 {
3189 list_del(&entry->list);
3190 free(entry);
3191 }
3192
3193 static void destroy_fsync_dnodes(struct list_head *head)
3194 {
3195 struct fsync_inode_entry *entry, *tmp;
3196
3197 list_for_each_entry_safe(entry, tmp, head, list)
3198 del_fsync_inode(entry);
3199 }
3200
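/*
 * Roll-forward recovery, first pass: walk the warm node chain that
 * fsync appended after the last checkpoint, starting at the curseg's
 * next free block, and collect the inodes carrying fsync marks.  The
 * loop counter is bounded by the free block count so a corrupted,
 * looped node chain cannot hang the walk.
 */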
3201 static int find_fsync_inode(struct f2fs_sb_info *sbi, struct list_head *head)
3202 {
3203 struct curseg_info *curseg;
3204 struct f2fs_node *node_blk;
3205 block_t blkaddr;
3206 unsigned int loop_cnt = 0;
3207 unsigned int free_blocks = TOTAL_SEGS(sbi) * sbi->blocks_per_seg -
3208 sbi->total_valid_block_count;
3209 int err = 0;
3210
3211 /* get node pages in the current segment */
3212 curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
3213 blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3214
3215 node_blk = calloc(F2FS_BLKSIZE, 1);
3216 ASSERT(node_blk);
3217
3218 while (1) {
3219 struct fsync_inode_entry *entry;
3220
3221 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
3222 break;
3223
3224 err = dev_read_block(node_blk, blkaddr);
3225 if (err)
3226 break;
3227
3228 if (!is_recoverable_dnode(sbi, node_blk))
3229 break;
3230
3231 if (!is_fsync_dnode(node_blk))
3232 goto next;
3233
3234 entry = get_fsync_inode(head, ino_of_node(node_blk));
3235 if (!entry) {
3236 entry = add_fsync_inode(head, ino_of_node(node_blk));
3237 if (!entry) {
3238 err = -1;
3239 break;
3240 }
3241 }
3242 entry->blkaddr = blkaddr;
3243
3244 if (IS_INODE(node_blk) && is_dent_dnode(node_blk))
3245 entry->last_dentry = blkaddr;
3246 next:
3247 /* sanity check in order to detect looped node chain */
3248 if (++loop_cnt >= free_blocks ||
3249 blkaddr == next_blkaddr_of_node(node_blk)) {
3250 MSG(0, "\tdetect looped node chain, blkaddr:%u, next:%u\n",
3251 blkaddr,
3252 next_blkaddr_of_node(node_blk));
3253 err = -1;
3254 break;
3255 }
3256
3257 blkaddr = next_blkaddr_of_node(node_blk);
3258 }
3259
3260 free(node_blk);
3261 return err;
3262 }
3263
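/*
 * Account one recoverable dnode: mark the node block itself, then
 * every valid data block it references, in the ckpt_* shadow maps so
 * that fsck treats fsynced-but-not-checkpointed blocks as in use.
 * Inline-data inodes and xattr nodes carry no data pointers to record.
 */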
3264 static int do_record_fsync_data(struct f2fs_sb_info *sbi,
3265 struct f2fs_node *node_blk,
3266 block_t blkaddr)
3267 {
3268 unsigned int segno, offset;
3269 struct seg_entry *se;
3270 unsigned int ofs_in_node = 0;
3271 unsigned int start, end;
3272 int err = 0, recorded = 0;
3273
3274 segno = GET_SEGNO(sbi, blkaddr);
3275 se = get_seg_entry(sbi, segno);
3276 offset = OFFSET_IN_SEG(sbi, blkaddr);
3277
3278 if (f2fs_test_bit(offset, (char *)se->cur_valid_map)) {
3279 ASSERT(0);
3280 return -1;
3281 }
3282 if (f2fs_test_bit(offset, (char *)se->ckpt_valid_map)) {
3283 ASSERT(0);
3284 return -1;
3285 }
3286
3287 if (!se->ckpt_valid_blocks)
3288 se->ckpt_type = CURSEG_WARM_NODE;
3289
3290 se->ckpt_valid_blocks++;
3291 f2fs_set_bit(offset, (char *)se->ckpt_valid_map);
3292
3293 MSG(1, "do_record_fsync_data: [node] ino = %u, nid = %u, blkaddr = %u\n",
3294 ino_of_node(node_blk), ofs_of_node(node_blk), blkaddr);
3295
3296 /* inline data */
3297 if (IS_INODE(node_blk) && (node_blk->i.i_inline & F2FS_INLINE_DATA))
3298 return 0;
3299 /* xattr node */
3300 if (ofs_of_node(node_blk) == XATTR_NODE_OFFSET)
3301 return 0;
3302
3303 /* step 3: recover data indices */
3304 start = start_bidx_of_node(ofs_of_node(node_blk), node_blk);
3305 end = start + ADDRS_PER_PAGE(sbi, node_blk, NULL);
3306
3307 for (; start < end; start++, ofs_in_node++) {
3308 blkaddr = datablock_addr(node_blk, ofs_in_node);
3309
3310 if (!is_valid_data_blkaddr(blkaddr))
3311 continue;
3312
3313 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR)) {
3314 err = -1;
3315 goto out;
3316 }
3317
3318 segno = GET_SEGNO(sbi, blkaddr);
3319 se = get_seg_entry(sbi, segno);
3320 offset = OFFSET_IN_SEG(sbi, blkaddr);
3321
3322 if (f2fs_test_bit(offset, (char *)se->cur_valid_map))
3323 continue;
3324 if (f2fs_test_bit(offset, (char *)se->ckpt_valid_map))
3325 continue;
3326
3327 if (!se->ckpt_valid_blocks)
3328 se->ckpt_type = CURSEG_WARM_DATA;
3329
3330 se->ckpt_valid_blocks++;
3331 f2fs_set_bit(offset, (char *)se->ckpt_valid_map);
3332
3333 MSG(1, "do_record_fsync_data: [data] ino = %u, nid = %u, blkaddr = %u\n",
3334 ino_of_node(node_blk), ofs_of_node(node_blk), blkaddr);
3335
3336 recorded++;
3337 }
3338 out:
3339 MSG(1, "recover_data: ino = %u, nid = %u, recorded = %d, err = %d\n",
3340 ino_of_node(node_blk), ofs_of_node(node_blk),
3341 recorded, err);
3342 return err;
3343 }
3344
3345 static int traverse_dnodes(struct f2fs_sb_info *sbi,
3346 struct list_head *inode_list)
3347 {
3348 struct curseg_info *curseg;
3349 struct f2fs_node *node_blk;
3350 block_t blkaddr;
3351 int err = 0;
3352
3353 /* get node pages in the current segment */
3354 curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
3355 blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3356
3357 node_blk = calloc(F2FS_BLKSIZE, 1);
3358 ASSERT(node_blk);
3359
3360 while (1) {
3361 struct fsync_inode_entry *entry;
3362
3363 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
3364 break;
3365
3366 err = dev_read_block(node_blk, blkaddr);
3367 if (err)
3368 break;
3369
3370 if (!is_recoverable_dnode(sbi, node_blk))
3371 break;
3372
3373 entry = get_fsync_inode(inode_list,
3374 ino_of_node(node_blk));
3375 if (!entry)
3376 goto next;
3377
3378 err = do_record_fsync_data(sbi, node_blk, blkaddr);
3379 if (err)
3380 break;
3381
3382 if (entry->blkaddr == blkaddr)
3383 del_fsync_inode(entry);
3384 next:
3385 blkaddr = next_blkaddr_of_node(node_blk);
3386 }
3387
3388 free(node_blk);
3389 return err;
3390 }
3391
3392 static int record_fsync_data(struct f2fs_sb_info *sbi)
3393 {
3394 struct list_head inode_list = LIST_HEAD_INIT(inode_list);
3395 int ret;
3396
3397 if (!need_fsync_data_record(sbi))
3398 return 0;
3399
3400 ret = find_fsync_inode(sbi, &inode_list);
3401 if (ret)
3402 goto out;
3403
3404 ret = late_build_segment_manager(sbi);
3405 if (ret < 0) {
3406 ERR_MSG("late_build_segment_manager failed\n");
3407 goto out;
3408 }
3409
3410 ret = traverse_dnodes(sbi, &inode_list);
3411 out:
3412 destroy_fsync_dnodes(&inode_list);
3413 return ret;
3414 }
3415
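/*
 * Bring the image up far enough for fsck/dump/sload: pick a valid
 * superblock (falling back to the backup at SB1_ADDR), load and
 * sanity-check the checkpoint, build the segment and node managers,
 * and account any fsynced-but-not-checkpointed data.
 */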
3416 int f2fs_do_mount(struct f2fs_sb_info *sbi)
3417 {
3418 struct f2fs_checkpoint *cp = NULL;
3419 struct f2fs_super_block *sb = NULL;
3420 int ret;
3421
3422 sbi->active_logs = NR_CURSEG_TYPE;
3423 ret = validate_super_block(sbi, SB0_ADDR);
3424 if (ret) {
3425 ret = validate_super_block(sbi, SB1_ADDR);
3426 if (ret)
3427 return -1;
3428 }
3429 sb = F2FS_RAW_SUPER(sbi);
3430
3431 ret = check_sector_size(sb);
3432 if (ret)
3433 return -1;
3434
3435 print_raw_sb_info(sb);
3436
3437 init_sb_info(sbi);
3438
3439 ret = get_valid_checkpoint(sbi);
3440 if (ret) {
3441 ERR_MSG("Can't find valid checkpoint\n");
3442 return -1;
3443 }
3444
3445 c.bug_on = 0;
3446
3447 if (sanity_check_ckpt(sbi)) {
3448 ERR_MSG("Checkpoint is polluted\n");
3449 return -1;
3450 }
3451 cp = F2FS_CKPT(sbi);
3452
3453 if (c.func != FSCK && c.func != DUMP &&
3454 !is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) {
3455 ERR_MSG("Mount unclean image to replay log first\n");
3456 return -1;
3457 }
3458
3459 print_ckpt_info(sbi);
3460
3461 if (c.quota_fix) {
3462 if (get_cp(ckpt_flags) & CP_QUOTA_NEED_FSCK_FLAG)
3463 c.fix_on = 1;
3464 }
3465
3466 if (tune_sb_features(sbi))
3467 return -1;
3468
3469 /* precompute checksum seed for metadata */
3470 if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
3471 c.chksum_seed = f2fs_cal_crc32(~0, sb->uuid, sizeof(sb->uuid));
3472
3473 sbi->total_valid_node_count = get_cp(valid_node_count);
3474 sbi->total_valid_inode_count = get_cp(valid_inode_count);
3475 sbi->user_block_count = get_cp(user_block_count);
3476 sbi->total_valid_block_count = get_cp(valid_block_count);
3477 sbi->last_valid_block_count = sbi->total_valid_block_count;
3478 sbi->alloc_valid_block_count = 0;
3479
3480 if (early_build_segment_manager(sbi)) {
3481 ERR_MSG("early_build_segment_manager failed\n");
3482 return -1;
3483 }
3484
3485 if (build_node_manager(sbi)) {
3486 ERR_MSG("build_node_manager failed\n");
3487 return -1;
3488 }
3489
3490 if (record_fsync_data(sbi)) {
3491 ERR_MSG("record_fsync_data failed\n");
3492 return -1;
3493 }
3494
3495 if (!f2fs_should_proceed(sb, get_cp(ckpt_flags)))
3496 return 1;
3497
3498 if (late_build_segment_manager(sbi) < 0) {
3499 ERR_MSG("late_build_segment_manager failed\n");
3500 return -1;
3501 }
3502
3503 if (f2fs_late_init_nid_bitmap(sbi)) {
3504 ERR_MSG("f2fs_late_init_nid_bitmap failed\n");
3505 return -1;
3506 }
3507
3508 /* Check nat_bits */
3509 if (c.func == FSCK && is_set_ckpt_flags(cp, CP_NAT_BITS_FLAG)) {
3510 if (check_nat_bits(sbi, sb, cp) && c.fix_on)
3511 write_nat_bits(sbi, sb, cp, sbi->cur_cp);
3512 }
3513 return 0;
3514 }
3515
3516 void f2fs_do_umount(struct f2fs_sb_info *sbi)
3517 {
3518 struct sit_info *sit_i = SIT_I(sbi);
3519 struct f2fs_sm_info *sm_i = SM_I(sbi);
3520 struct f2fs_nm_info *nm_i = NM_I(sbi);
3521 unsigned int i;
3522
3523 /* free nm_info */
3524 if (c.func == SLOAD || c.func == FSCK)
3525 free(nm_i->nid_bitmap);
3526 free(nm_i->nat_bitmap);
3527 free(sbi->nm_info);
3528
3529 /* free sit_info */
3530 free(sit_i->bitmap);
3531 free(sit_i->sit_bitmap);
3532 free(sit_i->sentries);
3533 free(sm_i->sit_info);
3534
3535 /* free sm_info */
3536 for (i = 0; i < NR_CURSEG_TYPE; i++)
3537 free(sm_i->curseg_array[i].sum_blk);
3538
3539 free(sm_i->curseg_array);
3540 free(sbi->sm_info);
3541
3542 free(sbi->ckpt);
3543 free(sbi->raw_super);
3544 }
3545
3546 #ifdef WITH_ANDROID
3547 int f2fs_sparse_initialize_meta(struct f2fs_sb_info *sbi)
3548 {
3549 struct f2fs_super_block *sb = sbi->raw_super;
3550 u_int32_t sit_seg_count, sit_size;
3551 u_int32_t nat_seg_count, nat_size;
3552 u_int64_t sit_seg_addr, nat_seg_addr, payload_addr;
3553 u_int32_t seg_size = 1 << get_sb(log_blocks_per_seg);
3554 int ret;
3555
3556 if (!c.sparse_mode)
3557 return 0;
3558
3559 sit_seg_addr = get_sb(sit_blkaddr);
3560 sit_seg_count = get_sb(segment_count_sit);
3561 sit_size = sit_seg_count * seg_size;
3562
3563 DBG(1, "\tSparse: filling sit area at block offset: 0x%08"PRIx64" len: %u\n",
3564 sit_seg_addr, sit_size);
3565 ret = dev_fill(NULL, sit_seg_addr * F2FS_BLKSIZE,
3566 sit_size * F2FS_BLKSIZE);
3567 if (ret) {
3568 MSG(1, "\tError: While zeroing out the sit area "
3569 "on disk!!!\n");
3570 return -1;
3571 }
3572
3573 nat_seg_addr = get_sb(nat_blkaddr);
3574 nat_seg_count = get_sb(segment_count_nat);
3575 nat_size = nat_seg_count * seg_size;
3576
3577 DBG(1, "\tSparse: filling nat area at block offset 0x%08"PRIx64" len: %u\n",
3578 nat_seg_addr, nat_size);
3579 ret = dev_fill(NULL, nat_seg_addr * F2FS_BLKSIZE,
3580 nat_size * F2FS_BLKSIZE);
3581 if (ret) {
3582 MSG(1, "\tError: While zeroing out the nat area "
3583 "on disk!!!\n");
3584 return -1;
3585 }
3586
3587 payload_addr = get_sb(segment0_blkaddr) + 1;
3588
3589 DBG(1, "\tSparse: filling bitmap area at block offset 0x%08"PRIx64" len: %u\n",
3590 payload_addr, get_sb(cp_payload));
3591 ret = dev_fill(NULL, payload_addr * F2FS_BLKSIZE,
3592 get_sb(cp_payload) * F2FS_BLKSIZE);
3593 if (ret) {
3594 MSG(1, "\tError: While zeroing out the nat/sit bitmap area "
3595 "on disk!!!\n");
3596 return -1;
3597 }
3598
3599 payload_addr += seg_size;
3600
3601 DBG(1, "\tSparse: filling bitmap area at block offset 0x%08"PRIx64" len: %u\n",
3602 payload_addr, get_sb(cp_payload));
3603 ret = dev_fill(NULL, payload_addr * F2FS_BLKSIZE,
3604 get_sb(cp_payload) * F2FS_BLKSIZE);
3605 if (ret) {
3606 MSG(1, "\tError: While zeroing out the nat/sit bitmap area "
3607 "on disk!!!\n");
3608 return -1;
3609 }
3610 return 0;
3611 }
3612 #else
3613 int f2fs_sparse_initialize_meta(struct f2fs_sb_info *sbi) { return 0; }
3614 #endif
3615