/**
 * mount.c
 *
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include "fsck.h"
#include "node.h"
#include "xattr.h"
#include <locale.h>
#include <stdbool.h>
#include <time.h>
#ifdef HAVE_LINUX_POSIX_ACL_H
#include <linux/posix_acl.h>
#endif
#ifdef HAVE_SYS_ACL_H
#include <sys/acl.h>
#endif

#ifndef ACL_UNDEFINED_TAG
#define ACL_UNDEFINED_TAG	(0x00)
#define ACL_USER_OBJ		(0x01)
#define ACL_USER		(0x02)
#define ACL_GROUP_OBJ		(0x04)
#define ACL_GROUP		(0x08)
#define ACL_MASK		(0x10)
#define ACL_OTHER		(0x20)
#endif

#ifdef HAVE_LINUX_BLKZONED_H

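/*
 * Map a segment to the index of the device that holds its first block;
 * fall back to device 0 when no configured range matches.
 */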
static int get_device_idx(struct f2fs_sb_info *sbi, uint32_t segno)
{
	block_t seg_start_blkaddr;
	int i;

	seg_start_blkaddr = SM_I(sbi)->main_blkaddr +
				segno * DEFAULT_BLOCKS_PER_SEGMENT;
	for (i = 0; i < c.ndevs; i++)
		if (c.devices[i].start_blkaddr <= seg_start_blkaddr &&
			c.devices[i].end_blkaddr > seg_start_blkaddr)
			return i;
	return 0;
}

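/*
 * Zone index of @segno within its device, assuming the zone size equals
 * the section size (segs_per_sec * blocks_per_seg blocks).
 */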
static int get_zone_idx_from_dev(struct f2fs_sb_info *sbi,
					uint32_t segno, uint32_t dev_idx)
{
	block_t seg_start_blkaddr = START_BLOCK(sbi, segno);

	return (seg_start_blkaddr - c.devices[dev_idx].start_blkaddr) >>
			log_base_2(sbi->segs_per_sec * sbi->blocks_per_seg);
}

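/*
 * A segment is usable when it sits in a conventional (randomly writable)
 * zone, when the device is not host-managed zoned, or when it starts
 * below the zone capacity, which may be smaller than the zone size.
 */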
bool is_usable_seg(struct f2fs_sb_info *sbi, unsigned int segno)
{
	unsigned int secno = segno / sbi->segs_per_sec;
	block_t seg_start = START_BLOCK(sbi, segno);
	block_t blocks_per_sec = sbi->blocks_per_seg * sbi->segs_per_sec;
	unsigned int dev_idx = get_device_idx(sbi, segno);
	unsigned int zone_idx = get_zone_idx_from_dev(sbi, segno, dev_idx);
	unsigned int sec_off = SM_I(sbi)->main_blkaddr >>
						log_base_2(blocks_per_sec);

	if (zone_idx < c.devices[dev_idx].nr_rnd_zones)
		return true;

	if (c.devices[dev_idx].zoned_model != F2FS_ZONED_HM)
		return true;

	return seg_start < ((sec_off + secno) * blocks_per_sec) +
				c.devices[dev_idx].zone_cap_blocks[zone_idx];
}

unsigned int get_usable_seg_count(struct f2fs_sb_info *sbi)
{
	unsigned int i, usable_seg_count = 0;

	for (i = 0; i < MAIN_SEGS(sbi); i++)
		if (is_usable_seg(sbi, i))
			usable_seg_count++;

	return usable_seg_count;
}

#else

bool is_usable_seg(struct f2fs_sb_info *UNUSED(sbi), unsigned int UNUSED(segno))
{
	return true;
}

unsigned int get_usable_seg_count(struct f2fs_sb_info *sbi)
{
	return MAIN_SEGS(sbi);
}

#endif

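/* Count segments with no valid blocks that are neither open nor unusable. */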
u32 get_free_segments(struct f2fs_sb_info *sbi)
{
	u32 i, free_segs = 0;

	for (i = 0; i < MAIN_SEGS(sbi); i++) {
		struct seg_entry *se = get_seg_entry(sbi, i);

		if (se->valid_blocks == 0x0 && !IS_CUR_SEGNO(sbi, i) &&
							is_usable_seg(sbi, i))
			free_segs++;
	}
	return free_segs;
}

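/* Show a one-line progress spinner with the current free segment count. */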
void update_free_segments(struct f2fs_sb_info *sbi)
{
	char *progress = "-*|*-";
	static int i = 0;

	if (c.dbg_lv)
		return;

	MSG(0, "\r [ %c ] Free segments: 0x%x", progress[i % 5], get_free_segments(sbi));
	fflush(stdout);
	i++;
}

#if defined(HAVE_LINUX_POSIX_ACL_H) || defined(HAVE_SYS_ACL_H)
static void print_acl(const u8 *value, int size)
{
	const struct f2fs_acl_header *hdr = (struct f2fs_acl_header *)value;
	const struct f2fs_acl_entry *entry = (struct f2fs_acl_entry *)(hdr + 1);
	const u8 *end = value + size;
	int i, count;

	if (hdr->a_version != cpu_to_le32(F2FS_ACL_VERSION)) {
		MSG(0, "Invalid ACL version [0x%x : 0x%x]\n",
				le32_to_cpu(hdr->a_version), F2FS_ACL_VERSION);
		return;
	}

	count = f2fs_acl_count(size);
	if (count <= 0) {
		MSG(0, "Invalid ACL value size %d\n", size);
		return;
	}

	for (i = 0; i < count; i++) {
		if ((u8 *)entry > end) {
			MSG(0, "Invalid ACL entries count %d\n", count);
			return;
		}

		switch (le16_to_cpu(entry->e_tag)) {
		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			MSG(0, "tag:0x%x perm:0x%x\n",
					le16_to_cpu(entry->e_tag),
					le16_to_cpu(entry->e_perm));
			entry = (struct f2fs_acl_entry *)((char *)entry +
					sizeof(struct f2fs_acl_entry_short));
			break;
		case ACL_USER:
			MSG(0, "tag:0x%x perm:0x%x uid:%u\n",
					le16_to_cpu(entry->e_tag),
					le16_to_cpu(entry->e_perm),
					le32_to_cpu(entry->e_id));
			entry = (struct f2fs_acl_entry *)((char *)entry +
					sizeof(struct f2fs_acl_entry));
			break;
		case ACL_GROUP:
			MSG(0, "tag:0x%x perm:0x%x gid:%u\n",
					le16_to_cpu(entry->e_tag),
					le16_to_cpu(entry->e_perm),
					le32_to_cpu(entry->e_id));
			entry = (struct f2fs_acl_entry *)((char *)entry +
					sizeof(struct f2fs_acl_entry));
			break;
		default:
			MSG(0, "Unknown ACL tag 0x%x\n",
					le16_to_cpu(entry->e_tag));
			return;
		}
	}
}
#endif /* HAVE_LINUX_POSIX_ACL_H || HAVE_SYS_ACL_H */

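/* Dump a single xattr entry; ACL and encryption xattrs get pretty-printed. */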
static void print_xattr_entry(const struct f2fs_xattr_entry *ent)
{
	const u8 *value = (const u8 *)&ent->e_name[ent->e_name_len];
	const int size = le16_to_cpu(ent->e_value_size);
	const struct fscrypt_context *ctx;
	int i;

	MSG(0, "\nxattr: e_name_index:%d e_name:", ent->e_name_index);
	for (i = 0; i < ent->e_name_len; i++)
		MSG(0, "%c", ent->e_name[i]);
	MSG(0, " e_name_len:%d e_value_size:%d e_value:\n",
			ent->e_name_len, size);

	switch (ent->e_name_index) {
#if defined(HAVE_LINUX_POSIX_ACL_H) || defined(HAVE_SYS_ACL_H)
	case F2FS_XATTR_INDEX_POSIX_ACL_ACCESS:
	case F2FS_XATTR_INDEX_POSIX_ACL_DEFAULT:
		print_acl(value, size);
		return;
#endif
	case F2FS_XATTR_INDEX_ENCRYPTION:
		ctx = (const struct fscrypt_context *)value;
		if (size != sizeof(*ctx) ||
				ctx->format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
			break;
		MSG(0, "format: %d\n", ctx->format);
		MSG(0, "contents_encryption_mode: 0x%x\n", ctx->contents_encryption_mode);
		MSG(0, "filenames_encryption_mode: 0x%x\n", ctx->filenames_encryption_mode);
		MSG(0, "flags: 0x%x\n", ctx->flags);
		MSG(0, "master_key_descriptor: ");
		for (i = 0; i < FS_KEY_DESCRIPTOR_SIZE; i++)
			MSG(0, "%02X", ctx->master_key_descriptor[i]);
		MSG(0, "\nnonce: ");
		for (i = 0; i < FS_KEY_DERIVATION_NONCE_SIZE; i++)
			MSG(0, "%02X", ctx->nonce[i]);
		MSG(0, "\n");
		return;
	}
	for (i = 0; i < size; i++)
		MSG(0, "%02X", value[i]);
	MSG(0, "\n");
}

void print_inode_info(struct f2fs_sb_info *sbi,
			struct f2fs_node *node, int name)
{
	struct f2fs_inode *inode = &node->i;
	void *xattr_addr;
	void *last_base_addr;
	struct f2fs_xattr_entry *ent;
	char en[F2FS_PRINT_NAMELEN];
	unsigned int i = 0;
	u32 namelen = le32_to_cpu(inode->i_namelen);
	int enc_name = file_enc_name(inode);
	int ofs = get_extra_isize(node);

	pretty_print_filename(inode->i_name, namelen, en, enc_name);
	if (name && en[0]) {
		MSG(0, " - File name : %s%s\n", en,
				enc_name ? " <encrypted>" : "");
		setlocale(LC_ALL, "");
		MSG(0, " - File size : %'" PRIu64 " (bytes)\n",
				le64_to_cpu(inode->i_size));
		return;
	}

	DISP_u32(inode, i_mode);
	DISP_u32(inode, i_advise);
	DISP_u32(inode, i_uid);
	DISP_u32(inode, i_gid);
	DISP_u32(inode, i_links);
	DISP_u64(inode, i_size);
	DISP_u64(inode, i_blocks);

	DISP_u64(inode, i_atime);
	DISP_u32(inode, i_atime_nsec);
	DISP_u64(inode, i_ctime);
	DISP_u32(inode, i_ctime_nsec);
	DISP_u64(inode, i_mtime);
	DISP_u32(inode, i_mtime_nsec);

	DISP_u32(inode, i_generation);
	DISP_u32(inode, i_current_depth);
	DISP_u32(inode, i_xattr_nid);
	DISP_u32(inode, i_flags);
	DISP_u32(inode, i_inline);
	DISP_u32(inode, i_pino);
	DISP_u32(inode, i_dir_level);

	if (en[0]) {
		DISP_u32(inode, i_namelen);
		printf("%-30s\t\t[%s]\n", "i_name", en);
	}

	printf("i_ext: fofs:%x blkaddr:%x len:%x\n",
			le32_to_cpu(inode->i_ext.fofs),
			le32_to_cpu(inode->i_ext.blk_addr),
			le32_to_cpu(inode->i_ext.len));

	if (c.feature & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
		DISP_u16(inode, i_extra_isize);
		if (c.feature & cpu_to_le32(F2FS_FEATURE_FLEXIBLE_INLINE_XATTR))
			DISP_u16(inode, i_inline_xattr_size);
		if (c.feature & cpu_to_le32(F2FS_FEATURE_PRJQUOTA))
			DISP_u32(inode, i_projid);
		if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
			DISP_u32(inode, i_inode_checksum);
		if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME)) {
			DISP_u64(inode, i_crtime);
			DISP_u32(inode, i_crtime_nsec);
		}
		if (c.feature & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
			DISP_u64(inode, i_compr_blocks);
			DISP_u32(inode, i_compress_algrithm);
			DISP_u32(inode, i_log_cluster_size);
			DISP_u32(inode, i_padding);
		}
	}

	for (i = 0; i < ADDRS_PER_INODE(inode); i++) {
		block_t blkaddr;
		char *flag = "";

		if (i + ofs >= DEF_ADDRS_PER_INODE)
			break;

		blkaddr = le32_to_cpu(inode->i_addr[i + ofs]);

		if (blkaddr == 0x0)
			continue;
		if (blkaddr == COMPRESS_ADDR)
			flag = "cluster flag";
		else if (blkaddr == NEW_ADDR)
			flag = "reserved flag";
		printf("i_addr[0x%x] %-16s\t\t[0x%8x : %u]\n", i + ofs, flag,
				blkaddr, blkaddr);
	}

	DISP_u32(inode, i_nid[0]);	/* direct */
	DISP_u32(inode, i_nid[1]);	/* direct */
	DISP_u32(inode, i_nid[2]);	/* indirect */
	DISP_u32(inode, i_nid[3]);	/* indirect */
	DISP_u32(inode, i_nid[4]);	/* double indirect */

	xattr_addr = read_all_xattrs(sbi, node, true);
	if (!xattr_addr)
		goto out;

	last_base_addr = (void *)xattr_addr + XATTR_SIZE(&node->i);

	list_for_each_xattr(ent, xattr_addr) {
		if ((void *)(ent) + sizeof(__u32) > last_base_addr ||
			(void *)XATTR_NEXT_ENTRY(ent) > last_base_addr) {
			MSG(0, "xattr entry crosses the end of xattr space\n");
			break;
		}
		print_xattr_entry(ent);
	}
	free(xattr_addr);

out:
	printf("\n");
}

void print_node_info(struct f2fs_sb_info *sbi,
			struct f2fs_node *node_block, int verbose)
{
	nid_t ino = le32_to_cpu(node_block->footer.ino);
	nid_t nid = le32_to_cpu(node_block->footer.nid);
	/* Is this an inode? */
	if (ino == nid) {
		DBG(verbose, "Node ID [0x%x:%u] is inode\n", nid, nid);
		print_inode_info(sbi, node_block, verbose);
	} else {
		int i;
		u32 *dump_blk = (u32 *)node_block;
		DBG(verbose,
			"Node ID [0x%x:%u] is direct node or indirect node.\n",
								nid, nid);
		for (i = 0; i < DEF_ADDRS_PER_BLOCK; i++)
			MSG(verbose, "[%d]\t\t\t[0x%8x : %d]\n",
						i, dump_blk[i], dump_blk[i]);
	}
}

static void DISP_label(uint16_t *name)
{
	char buffer[MAX_VOLUME_NAME];

	utf16_to_utf8(buffer, name, MAX_VOLUME_NAME, MAX_VOLUME_NAME);
	if (c.layout)
		printf("%-30s %s\n", "Filesystem volume name:", buffer);
	else
386 printf("%-30s" "\t\t[%s]\n", "volum_name", buffer);
}

void print_raw_sb_info(struct f2fs_super_block *sb)
{
	if (c.layout)
		goto printout;
	if (!c.dbg_lv)
		return;

	printf("\n");
	printf("+--------------------------------------------------------+\n");
	printf("| Super block                                            |\n");
	printf("+--------------------------------------------------------+\n");
printout:
	DISP_u32(sb, magic);
	DISP_u32(sb, major_ver);

	DISP_label(sb->volume_name);

	DISP_u32(sb, minor_ver);
	DISP_u32(sb, log_sectorsize);
	DISP_u32(sb, log_sectors_per_block);

	DISP_u32(sb, log_blocksize);
	DISP_u32(sb, log_blocks_per_seg);
	DISP_u32(sb, segs_per_sec);
	DISP_u32(sb, secs_per_zone);
	DISP_u32(sb, checksum_offset);
	DISP_u64(sb, block_count);

	DISP_u32(sb, section_count);
	DISP_u32(sb, segment_count);
	DISP_u32(sb, segment_count_ckpt);
	DISP_u32(sb, segment_count_sit);
	DISP_u32(sb, segment_count_nat);

	DISP_u32(sb, segment_count_ssa);
	DISP_u32(sb, segment_count_main);
	DISP_u32(sb, segment0_blkaddr);

	DISP_u32(sb, cp_blkaddr);
	DISP_u32(sb, sit_blkaddr);
	DISP_u32(sb, nat_blkaddr);
	DISP_u32(sb, ssa_blkaddr);
	DISP_u32(sb, main_blkaddr);

	DISP_u32(sb, root_ino);
	DISP_u32(sb, node_ino);
	DISP_u32(sb, meta_ino);
	DISP_u32(sb, cp_payload);
	DISP_u32(sb, crc);
	DISP("%-.252s", sb, version);
	printf("\n");
}

void print_ckpt_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);

	if (c.layout)
		goto printout;
	if (!c.dbg_lv)
		return;

	printf("\n");
	printf("+--------------------------------------------------------+\n");
	printf("| Checkpoint                                             |\n");
	printf("+--------------------------------------------------------+\n");
printout:
	DISP_u64(cp, checkpoint_ver);
	DISP_u64(cp, user_block_count);
	DISP_u64(cp, valid_block_count);
	DISP_u32(cp, rsvd_segment_count);
	DISP_u32(cp, overprov_segment_count);
	DISP_u32(cp, free_segment_count);

	DISP_u32(cp, alloc_type[CURSEG_HOT_NODE]);
	DISP_u32(cp, alloc_type[CURSEG_WARM_NODE]);
	DISP_u32(cp, alloc_type[CURSEG_COLD_NODE]);
	DISP_u32(cp, cur_node_segno[0]);
	DISP_u32(cp, cur_node_segno[1]);
	DISP_u32(cp, cur_node_segno[2]);

	DISP_u32(cp, cur_node_blkoff[0]);
	DISP_u32(cp, cur_node_blkoff[1]);
	DISP_u32(cp, cur_node_blkoff[2]);

	DISP_u32(cp, alloc_type[CURSEG_HOT_DATA]);
	DISP_u32(cp, alloc_type[CURSEG_WARM_DATA]);
	DISP_u32(cp, alloc_type[CURSEG_COLD_DATA]);
	DISP_u32(cp, cur_data_segno[0]);
	DISP_u32(cp, cur_data_segno[1]);
	DISP_u32(cp, cur_data_segno[2]);

	DISP_u32(cp, cur_data_blkoff[0]);
	DISP_u32(cp, cur_data_blkoff[1]);
	DISP_u32(cp, cur_data_blkoff[2]);

	DISP_u32(cp, ckpt_flags);
	DISP_u32(cp, cp_pack_total_block_count);
	DISP_u32(cp, cp_pack_start_sum);
	DISP_u32(cp, valid_node_count);
	DISP_u32(cp, valid_inode_count);
	DISP_u32(cp, next_free_nid);
	DISP_u32(cp, sit_ver_bitmap_bytesize);
	DISP_u32(cp, nat_ver_bitmap_bytesize);
	DISP_u32(cp, checksum_offset);
	DISP_u64(cp, elapsed_time);

	DISP_u32(cp, sit_nat_version_bitmap[0]);
	printf("\n\n");
}

void print_cp_state(u32 flag)
{
	if (c.show_file_map)
		return;

	MSG(0, "Info: checkpoint state = %x : ", flag);
	if (flag & CP_QUOTA_NEED_FSCK_FLAG)
		MSG(0, "%s", " quota_need_fsck");
	if (flag & CP_LARGE_NAT_BITMAP_FLAG)
		MSG(0, "%s", " large_nat_bitmap");
	if (flag & CP_NOCRC_RECOVERY_FLAG)
		MSG(0, "%s", " allow_nocrc");
	if (flag & CP_TRIMMED_FLAG)
		MSG(0, "%s", " trimmed");
	if (flag & CP_NAT_BITS_FLAG)
		MSG(0, "%s", " nat_bits");
	if (flag & CP_CRC_RECOVERY_FLAG)
		MSG(0, "%s", " crc");
	if (flag & CP_FASTBOOT_FLAG)
		MSG(0, "%s", " fastboot");
	if (flag & CP_FSCK_FLAG)
		MSG(0, "%s", " fsck");
	if (flag & CP_ERROR_FLAG)
		MSG(0, "%s", " error");
	if (flag & CP_COMPACT_SUM_FLAG)
		MSG(0, "%s", " compacted_summary");
	if (flag & CP_ORPHAN_PRESENT_FLAG)
		MSG(0, "%s", " orphan_inodes");
	if (flag & CP_DISABLED_FLAG)
		MSG(0, "%s", " disabled");
	if (flag & CP_RESIZEFS_FLAG)
		MSG(0, "%s", " resizefs");
	if (flag & CP_UMOUNT_FLAG)
		MSG(0, "%s", " unmount");
	else
		MSG(0, "%s", " sudden-power-off");
	MSG(0, "\n");
}

void print_sb_state(struct f2fs_super_block *sb)
{
	__le32 f = sb->feature;
	int i;

	MSG(0, "Info: superblock features = %x : ", f);
	if (f & cpu_to_le32(F2FS_FEATURE_ENCRYPT)) {
		MSG(0, "%s", " encrypt");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_VERITY)) {
		MSG(0, "%s", " verity");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_BLKZONED)) {
		MSG(0, "%s", " blkzoned");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_EXTRA_ATTR)) {
		MSG(0, "%s", " extra_attr");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_PRJQUOTA)) {
		MSG(0, "%s", " project_quota");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM)) {
		MSG(0, "%s", " inode_checksum");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_FLEXIBLE_INLINE_XATTR)) {
		MSG(0, "%s", " flexible_inline_xattr");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_QUOTA_INO)) {
		MSG(0, "%s", " quota_ino");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_INODE_CRTIME)) {
		MSG(0, "%s", " inode_crtime");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_LOST_FOUND)) {
		MSG(0, "%s", " lost_found");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_SB_CHKSUM)) {
		MSG(0, "%s", " sb_checksum");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_CASEFOLD)) {
		MSG(0, "%s", " casefold");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_COMPRESSION)) {
		MSG(0, "%s", " compression");
	}
	if (f & cpu_to_le32(F2FS_FEATURE_RO)) {
		MSG(0, "%s", " ro");
	}
	MSG(0, "\n");
	MSG(0, "Info: superblock encrypt level = %d, salt = ",
					sb->encryption_level);
	for (i = 0; i < 16; i++)
		MSG(0, "%02x", sb->encrypt_pw_salt[i]);
	MSG(0, "\n");
}

static char *stop_reason_str[] = {
	[STOP_CP_REASON_SHUTDOWN]		= "shutdown",
	[STOP_CP_REASON_FAULT_INJECT]		= "fault_inject",
	[STOP_CP_REASON_META_PAGE]		= "meta_page",
	[STOP_CP_REASON_WRITE_FAIL]		= "write_fail",
	[STOP_CP_REASON_CORRUPTED_SUMMARY]	= "corrupted_summary",
	[STOP_CP_REASON_UPDATE_INODE]		= "update_inode",
	[STOP_CP_REASON_FLUSH_FAIL]		= "flush_fail",
};

void print_sb_stop_reason(struct f2fs_super_block *sb)
{
	u8 *reason = sb->s_stop_reason;
	int i;

	if (!c.force_stop)
		return;

	MSG(0, "Info: checkpoint stop reason: ");

	for (i = 0; i < STOP_CP_REASON_MAX; i++) {
		if (reason[i])
			MSG(0, "%s(%d) ", stop_reason_str[i], reason[i]);
	}

	MSG(0, "\n");
}

static char *errors_str[] = {
	[ERROR_CORRUPTED_CLUSTER]		= "corrupted_cluster",
	[ERROR_FAIL_DECOMPRESSION]		= "fail_decompression",
	[ERROR_INVALID_BLKADDR]			= "invalid_blkaddr",
	[ERROR_CORRUPTED_DIRENT]		= "corrupted_dirent",
	[ERROR_CORRUPTED_INODE]			= "corrupted_inode",
	[ERROR_INCONSISTENT_SUMMARY]		= "inconsistent_summary",
	[ERROR_INCONSISTENT_FOOTER]		= "inconsistent_footer",
	[ERROR_INCONSISTENT_SUM_TYPE]		= "inconsistent_sum_type",
	[ERROR_CORRUPTED_JOURNAL]		= "corrupted_journal",
	[ERROR_INCONSISTENT_NODE_COUNT]		= "inconsistent_node_count",
	[ERROR_INCONSISTENT_BLOCK_COUNT]	= "inconsistent_block_count",
	[ERROR_INVALID_CURSEG]			= "invalid_curseg",
	[ERROR_INCONSISTENT_SIT]		= "inconsistent_sit",
	[ERROR_CORRUPTED_VERITY_XATTR]		= "corrupted_verity_xattr",
	[ERROR_CORRUPTED_XATTR]			= "corrupted_xattr",
};

void print_sb_errors(struct f2fs_super_block *sb)
{
	u8 *errors = sb->s_errors;
	int i;

	if (!c.fs_errors)
		return;

	MSG(0, "Info: fs errors: ");

	for (i = 0; i < ERROR_MAX; i++) {
		if (test_bit_le(i, errors))
			MSG(0, "%s ", errors_str[i]);
	}

	MSG(0, "\n");
}

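/* Range-check a meta area block address against the region implied by @type. */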
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	switch (type) {
	case META_NAT:
		break;
	case META_SIT:
		if (blkaddr >= SIT_BLK_CNT(sbi))
			return 0;
		break;
	case META_SSA:
		if (blkaddr >= MAIN_BLKADDR(sbi) ||
			blkaddr < SM_I(sbi)->ssa_blkaddr)
			return 0;
		break;
	case META_CP:
		if (blkaddr >= SIT_I(sbi)->sit_base_addr ||
			blkaddr < __start_cp_addr(sbi))
			return 0;
		break;
	case META_POR:
		if (blkaddr >= MAX_BLKADDR(sbi) ||
			blkaddr < MAIN_BLKADDR(sbi))
			return 0;
		break;
	default:
		ASSERT(0);
	}

	return 1;
}

static inline block_t current_sit_addr(struct f2fs_sb_info *sbi,
					unsigned int start);

/*
 * Readahead CP/NAT/SIT/SSA pages
 */
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
							int type)
{
	block_t blkno = start;
	block_t blkaddr, start_blk = 0, len = 0;

	for (; nrpages-- > 0; blkno++) {

		if (!f2fs_is_valid_blkaddr(sbi, blkno, type))
			goto out;

		switch (type) {
		case META_NAT:
			if (blkno >= NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid))
				blkno = 0;
			/* get nat block addr */
			blkaddr = current_nat_addr(sbi,
					blkno * NAT_ENTRY_PER_BLOCK, NULL);
			break;
		case META_SIT:
			/* get sit block addr */
			blkaddr = current_sit_addr(sbi,
					blkno * SIT_ENTRY_PER_BLOCK);
			break;
		case META_SSA:
		case META_CP:
		case META_POR:
			blkaddr = blkno;
			break;
		default:
			ASSERT(0);
		}

		if (!len) {
			start_blk = blkaddr;
			len = 1;
		} else if (start_blk + len == blkaddr) {
			len++;
		} else {
			dev_readahead(start_blk << F2FS_BLKSIZE_BITS,
						len << F2FS_BLKSIZE_BITS);
		}
	}
out:
	if (len)
		dev_readahead(start_blk << F2FS_BLKSIZE_BITS,
					len << F2FS_BLKSIZE_BITS);
	return blkno - start;
}

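/*
 * Refresh the superblock checksum if the sb_checksum feature is enabled,
 * then write the in-memory superblock to each copy selected by @sb_mask.
 */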
void update_superblock(struct f2fs_super_block *sb, int sb_mask)
{
	int addr, ret;
	uint8_t *buf;
	u32 old_crc, new_crc;

	buf = calloc(BLOCK_SZ, 1);
	ASSERT(buf);

	if (get_sb(feature) & F2FS_FEATURE_SB_CHKSUM) {
		old_crc = get_sb(crc);
		new_crc = f2fs_cal_crc32(F2FS_SUPER_MAGIC, sb,
						SB_CHKSUM_OFFSET);
		set_sb(crc, new_crc);
		MSG(1, "Info: SB CRC is updated (0x%x -> 0x%x)\n",
							old_crc, new_crc);
	}

	memcpy(buf + F2FS_SUPER_OFFSET, sb, sizeof(*sb));
	for (addr = SB0_ADDR; addr < SB_MAX_ADDR; addr++) {
		if (SB_MASK(addr) & sb_mask) {
			ret = dev_write_block(buf, addr);
			ASSERT(ret >= 0);
		}
	}

	free(buf);
	DBG(0, "Info: Done to update superblock\n");
}

static inline int sanity_check_area_boundary(struct f2fs_super_block *sb,
							enum SB_ADDR sb_addr)
{
	u32 segment0_blkaddr = get_sb(segment0_blkaddr);
	u32 cp_blkaddr = get_sb(cp_blkaddr);
	u32 sit_blkaddr = get_sb(sit_blkaddr);
	u32 nat_blkaddr = get_sb(nat_blkaddr);
	u32 ssa_blkaddr = get_sb(ssa_blkaddr);
	u32 main_blkaddr = get_sb(main_blkaddr);
	u32 segment_count_ckpt = get_sb(segment_count_ckpt);
	u32 segment_count_sit = get_sb(segment_count_sit);
	u32 segment_count_nat = get_sb(segment_count_nat);
	u32 segment_count_ssa = get_sb(segment_count_ssa);
	u32 segment_count_main = get_sb(segment_count_main);
	u32 segment_count = get_sb(segment_count);
	u32 log_blocks_per_seg = get_sb(log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		MSG(0, "\tMismatch segment0(%u) cp_blkaddr(%u)\n",
				segment0_blkaddr, cp_blkaddr);
		return -1;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		MSG(0, "\tWrong CP boundary, start(%u) end(%u) blocks(%u)\n",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);
		return -1;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		MSG(0, "\tWrong SIT boundary, start(%u) end(%u) blocks(%u)\n",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);
		return -1;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		MSG(0, "\tWrong NAT boundary, start(%u) end(%u) blocks(%u)\n",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);
		return -1;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		MSG(0, "\tWrong SSA boundary, start(%u) end(%u) blocks(%u)\n",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);
		return -1;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		MSG(0, "\tWrong MAIN_AREA, start(%u) end(%u) block(%u)\n",
			main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		return -1;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		set_sb(segment_count, (main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		update_superblock(sb, SB_MASK(sb_addr));
		MSG(0, "Info: Fix alignment: start(%u) end(%u) block(%u)\n",
			main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
	}
	return 0;
}

static int verify_sb_chksum(struct f2fs_super_block *sb)
{
	if (SB_CHKSUM_OFFSET != get_sb(checksum_offset)) {
		MSG(0, "\tInvalid SB CRC offset: %u\n",
					get_sb(checksum_offset));
		return -1;
	}
	if (f2fs_crc_valid(get_sb(crc), sb,
			get_sb(checksum_offset))) {
		MSG(0, "\tInvalid SB CRC: 0x%x\n", get_sb(crc));
		return -1;
	}
	return 0;
}

int sanity_check_raw_super(struct f2fs_super_block *sb, enum SB_ADDR sb_addr)
{
	unsigned int blocksize;
	unsigned int segment_count, segs_per_sec, secs_per_zone, segs_per_zone;
	unsigned int total_sections, blocks_per_seg;

	if (F2FS_SUPER_MAGIC != get_sb(magic)) {
		MSG(0, "Magic Mismatch, valid(0x%x) - read(0x%x)\n",
					F2FS_SUPER_MAGIC, get_sb(magic));
		return -1;
	}

	if ((get_sb(feature) & F2FS_FEATURE_SB_CHKSUM) &&
					verify_sb_chksum(sb))
		return -1;

	blocksize = 1 << get_sb(log_blocksize);
	if (F2FS_BLKSIZE != blocksize) {
		MSG(0, "Invalid blocksize (%u), supports only 4KB\n",
							blocksize);
		return -1;
	}

	/* check log blocks per segment */
	if (get_sb(log_blocks_per_seg) != 9) {
		MSG(0, "Invalid log blocks per segment (%u)\n",
					get_sb(log_blocks_per_seg));
		return -1;
	}

	/* Currently, f2fs supports 512/1024/2048/4096-byte sector sizes */
	if (get_sb(log_sectorsize) > F2FS_MAX_LOG_SECTOR_SIZE ||
			get_sb(log_sectorsize) < F2FS_MIN_LOG_SECTOR_SIZE) {
		MSG(0, "Invalid log sectorsize (%u)\n", get_sb(log_sectorsize));
		return -1;
	}

	if (get_sb(log_sectors_per_block) + get_sb(log_sectorsize) !=
						F2FS_MAX_LOG_SECTOR_SIZE) {
		MSG(0, "Invalid log sectors per block(%u) log sectorsize(%u)\n",
					get_sb(log_sectors_per_block),
					get_sb(log_sectorsize));
		return -1;
	}

	segment_count = get_sb(segment_count);
	segs_per_sec = get_sb(segs_per_sec);
	secs_per_zone = get_sb(secs_per_zone);
	total_sections = get_sb(section_count);
	segs_per_zone = segs_per_sec * secs_per_zone;

	/* blocks_per_seg should be 512, given the above check */
	blocks_per_seg = 1 << get_sb(log_blocks_per_seg);

	if (segment_count > F2FS_MAX_SEGMENT ||
			segment_count < F2FS_MIN_SEGMENTS) {
		MSG(0, "\tInvalid segment count (%u)\n", segment_count);
		return -1;
	}

	if (!(get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO)) &&
			(total_sections > segment_count ||
			total_sections < F2FS_MIN_SEGMENTS ||
			segs_per_sec > segment_count || !segs_per_sec)) {
		MSG(0, "\tInvalid segment/section count (%u, %u x %u)\n",
				segment_count, total_sections, segs_per_sec);
		return 1;
	}

	if ((segment_count / segs_per_sec) < total_sections) {
		MSG(0, "Small segment_count (%u < %u * %u)\n",
			segment_count, segs_per_sec, total_sections);
		return 1;
	}

	if (segment_count > (get_sb(block_count) >> 9)) {
		MSG(0, "Wrong segment_count / block_count (%u > %llu)\n",
			segment_count, get_sb(block_count));
		return 1;
	}

	if (sb->devs[0].path[0]) {
		unsigned int dev_segs = le32_to_cpu(sb->devs[0].total_segments);
		int i = 1;

		while (i < MAX_DEVICES && sb->devs[i].path[0]) {
			dev_segs += le32_to_cpu(sb->devs[i].total_segments);
			i++;
		}
		if (segment_count != dev_segs / segs_per_zone * segs_per_zone) {
			MSG(0, "Segment count (%u) mismatch with total segments from devices (%u)",
				segment_count, dev_segs);
			return 1;
		}
	}

	if (secs_per_zone > total_sections || !secs_per_zone) {
		MSG(0, "Wrong secs_per_zone / total_sections (%u, %u)\n",
			secs_per_zone, total_sections);
		return 1;
	}
	if (get_sb(extension_count) > F2FS_MAX_EXTENSION ||
			sb->hot_ext_count > F2FS_MAX_EXTENSION ||
			get_sb(extension_count) +
			sb->hot_ext_count > F2FS_MAX_EXTENSION) {
		MSG(0, "Corrupted extension count (%u + %u > %u)\n",
			get_sb(extension_count),
			sb->hot_ext_count,
			F2FS_MAX_EXTENSION);
		return 1;
	}

	if (get_sb(cp_payload) > (blocks_per_seg - F2FS_CP_PACKS)) {
		MSG(0, "Insane cp_payload (%u > %u)\n",
			get_sb(cp_payload), blocks_per_seg - F2FS_CP_PACKS);
		return 1;
	}

	/* check reserved ino info */
	if (get_sb(node_ino) != 1 || get_sb(meta_ino) != 2 ||
					get_sb(root_ino) != 3) {
		MSG(0, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)\n",
			get_sb(node_ino), get_sb(meta_ino), get_sb(root_ino));
		return -1;
	}

	/* Check zoned block device feature */
	if (c.devices[0].zoned_model != F2FS_ZONED_NONE &&
			!(sb->feature & cpu_to_le32(F2FS_FEATURE_BLKZONED))) {
		MSG(0, "\tMissing zoned block device feature\n");
		return -1;
	}

	if (sanity_check_area_boundary(sb, sb_addr))
		return -1;
	return 0;
}

#define CHECK_PERIOD (3600 * 24 * 30)	/* one month by default */

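/*
 * Read the superblock copy at @sb_addr and keep it in sbi->raw_super if it
 * passes the sanity checks; otherwise release it and report the failure.
 */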
int validate_super_block(struct f2fs_sb_info *sbi, enum SB_ADDR sb_addr)
{
	char buf[F2FS_BLKSIZE];

	sbi->raw_super = malloc(sizeof(struct f2fs_super_block));
	if (!sbi->raw_super)
		return -ENOMEM;

	if (dev_read_block(buf, sb_addr))
		return -1;

	memcpy(sbi->raw_super, buf + F2FS_SUPER_OFFSET,
					sizeof(struct f2fs_super_block));

	if (!sanity_check_raw_super(sbi->raw_super, sb_addr)) {
		/* get kernel version */
		if (c.kd >= 0) {
			dev_read_version(c.version, 0, VERSION_NAME_LEN);
			get_kernel_version(c.version);
		} else {
			get_kernel_uname_version(c.version);
		}

		/* build sb version */
		memcpy(c.sb_version, sbi->raw_super->version, VERSION_NAME_LEN);
		get_kernel_version(c.sb_version);
		memcpy(c.init_version, sbi->raw_super->init_version,
					VERSION_NAME_LEN);
		get_kernel_version(c.init_version);

		c.force_stop = is_checkpoint_stop(sbi->raw_super, false);
		c.abnormal_stop = is_checkpoint_stop(sbi->raw_super, true);
		c.fs_errors = is_inconsistent_error(sbi->raw_super);

		MSG(0, "Info: MKFS version\n  \"%s\"\n", c.init_version);
		MSG(0, "Info: FSCK version\n  from \"%s\"\n    to \"%s\"\n",
					c.sb_version, c.version);
		print_sb_state(sbi->raw_super);
		print_sb_stop_reason(sbi->raw_super);
		print_sb_errors(sbi->raw_super);
		return 0;
	}

	free(sbi->raw_super);
	sbi->raw_super = NULL;
	MSG(0, "\tCan't find a valid F2FS superblock at 0x%x\n", sb_addr);

	return -EINVAL;
}

int init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	u64 total_sectors;
	int i;

	sbi->log_sectors_per_block = get_sb(log_sectors_per_block);
	sbi->log_blocksize = get_sb(log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = get_sb(log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = get_sb(segs_per_sec);
	sbi->secs_per_zone = get_sb(secs_per_zone);
	sbi->total_sections = get_sb(section_count);
	sbi->total_node_count = (get_sb(segment_count_nat) / 2) *
				sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = get_sb(root_ino);
	sbi->node_ino_num = get_sb(node_ino);
	sbi->meta_ino_num = get_sb(meta_ino);
	sbi->cur_victim_sec = NULL_SEGNO;

	for (i = 0; i < MAX_DEVICES; i++) {
		if (!sb->devs[i].path[0])
			break;

		if (i) {
			c.devices[i].path = strdup((char *)sb->devs[i].path);
			if (get_device_info(i))
				ASSERT(0);
		} else {
			ASSERT(!strcmp((char *)sb->devs[i].path,
						(char *)c.devices[i].path));
		}

		c.devices[i].total_segments =
			le32_to_cpu(sb->devs[i].total_segments);
		if (i)
			c.devices[i].start_blkaddr =
				c.devices[i - 1].end_blkaddr + 1;
		c.devices[i].end_blkaddr = c.devices[i].start_blkaddr +
			c.devices[i].total_segments *
			c.blks_per_seg - 1;
		if (i == 0)
			c.devices[i].end_blkaddr += get_sb(segment0_blkaddr);

		if (c.zoned_model == F2FS_ZONED_NONE) {
			if (c.devices[i].zoned_model == F2FS_ZONED_HM)
				c.zoned_model = F2FS_ZONED_HM;
			else if (c.devices[i].zoned_model == F2FS_ZONED_HA &&
					c.zoned_model != F2FS_ZONED_HM)
				c.zoned_model = F2FS_ZONED_HA;
		}

		c.ndevs = i + 1;
		MSG(0, "Info: Device[%d] : %s blkaddr = %"PRIx64"--%"PRIx64"\n",
				i, c.devices[i].path,
				c.devices[i].start_blkaddr,
				c.devices[i].end_blkaddr);
	}

	total_sectors = get_sb(block_count) << sbi->log_sectors_per_block;
	MSG(0, "Info: Segments per section = %d\n", sbi->segs_per_sec);
	MSG(0, "Info: Sections per zone = %d\n", sbi->secs_per_zone);
	MSG(0, "Info: total FS sectors = %"PRIu64" (%"PRIu64" MB)\n",
				total_sectors, total_sectors >>
						(20 - get_sb(log_sectorsize)));
	return 0;
}

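/* Validate the checkpoint checksum stored at cp->checksum_offset. */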
static int verify_checksum_chksum(struct f2fs_checkpoint *cp)
{
	unsigned int chksum_offset = get_cp(checksum_offset);
	unsigned int crc, cal_crc;

	if (chksum_offset < CP_MIN_CHKSUM_OFFSET ||
			chksum_offset > CP_CHKSUM_OFFSET) {
		MSG(0, "\tInvalid CP CRC offset: %u\n", chksum_offset);
		return -1;
	}

	crc = le32_to_cpu(*(__le32 *)((unsigned char *)cp + chksum_offset));
	cal_crc = f2fs_checkpoint_chksum(cp);
	if (cal_crc != crc) {
		MSG(0, "\tInvalid CP CRC: offset:%u, crc:0x%x, calc:0x%x\n",
			chksum_offset, crc, cal_crc);
		return -1;
	}
	return 0;
}

static void *get_checkpoint_version(block_t cp_addr)
{
	void *cp_page;

	cp_page = malloc(F2FS_BLKSIZE);
	ASSERT(cp_page);

	if (dev_read_block(cp_page, cp_addr) < 0)
		ASSERT(0);

	if (verify_checksum_chksum((struct f2fs_checkpoint *)cp_page))
		goto out;
	return cp_page;
out:
	free(cp_page);
	return NULL;
}

void *validate_checkpoint(struct f2fs_sb_info *sbi, block_t cp_addr,
				unsigned long long *version)
{
	void *cp_page_1, *cp_page_2;
	struct f2fs_checkpoint *cp;
	unsigned long long cur_version = 0, pre_version = 0;

	/* Read the 1st cp block in this CP pack */
	cp_page_1 = get_checkpoint_version(cp_addr);
	if (!cp_page_1)
		return NULL;

	cp = (struct f2fs_checkpoint *)cp_page_1;
	if (get_cp(cp_pack_total_block_count) > sbi->blocks_per_seg)
		goto invalid_cp1;

	pre_version = get_cp(checkpoint_ver);

	/* Read the 2nd cp block in this CP pack */
	cp_addr += get_cp(cp_pack_total_block_count) - 1;
	cp_page_2 = get_checkpoint_version(cp_addr);
	if (!cp_page_2)
		goto invalid_cp1;

	cp = (struct f2fs_checkpoint *)cp_page_2;
	cur_version = get_cp(checkpoint_ver);

	if (cur_version == pre_version) {
		*version = cur_version;
		free(cp_page_2);
		return cp_page_1;
	}

	free(cp_page_2);
invalid_cp1:
	free(cp_page_1);
	return NULL;
}

int get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	void *cp1, *cp2, *cur_page;
	unsigned long blk_size = sbi->blocksize;
	unsigned long long cp1_version = 0, cp2_version = 0, version;
	unsigned long long cp_start_blk_no;
	unsigned int cp_payload, cp_blks;
	int ret;

	cp_payload = get_sb(cp_payload);
	if (cp_payload > F2FS_BLK_ALIGN(MAX_CP_PAYLOAD))
		return -EINVAL;

	cp_blks = 1 + cp_payload;
	sbi->ckpt = malloc(cp_blks * blk_size);
	if (!sbi->ckpt)
		return -ENOMEM;
	/*
	 * Finding a valid cp block involves reading both
	 * sets (cp pack 1 and cp pack 2).
	 */
	cp_start_blk_no = get_sb(cp_blkaddr);
	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

	/* The second checkpoint pack should start at the next segment */
	cp_start_blk_no += 1 << get_sb(log_blocks_per_seg);
	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

	if (cp1 && cp2) {
		if (ver_after(cp2_version, cp1_version)) {
			cur_page = cp2;
			sbi->cur_cp = 2;
			version = cp2_version;
		} else {
			cur_page = cp1;
			sbi->cur_cp = 1;
			version = cp1_version;
		}
	} else if (cp1) {
		cur_page = cp1;
		sbi->cur_cp = 1;
		version = cp1_version;
	} else if (cp2) {
		cur_page = cp2;
		sbi->cur_cp = 2;
		version = cp2_version;
	} else
		goto fail_no_cp;

	MSG(0, "Info: CKPT version = %llx\n", version);

	memcpy(sbi->ckpt, cur_page, blk_size);

	if (cp_blks > 1) {
		unsigned int i;
		unsigned long long cp_blk_no;

		cp_blk_no = get_sb(cp_blkaddr);
		if (cur_page == cp2)
			cp_blk_no += 1 << get_sb(log_blocks_per_seg);

		/* copy sit bitmap */
		for (i = 1; i < cp_blks; i++) {
			unsigned char *ckpt = (unsigned char *)sbi->ckpt;
			ret = dev_read_block(cur_page, cp_blk_no + i);
			ASSERT(ret >= 0);
			memcpy(ckpt + i * blk_size, cur_page, blk_size);
		}
	}
	if (cp1)
		free(cp1);
	if (cp2)
		free(cp2);
	return 0;

fail_no_cp:
	free(sbi->ckpt);
	sbi->ckpt = NULL;
	return -EINVAL;
}

bool is_checkpoint_stop(struct f2fs_super_block *sb, bool abnormal)
{
	int i;

	for (i = 0; i < STOP_CP_REASON_MAX; i++) {
		if (abnormal && i == STOP_CP_REASON_SHUTDOWN)
			continue;
		if (sb->s_stop_reason[i])
			return true;
	}

	return false;
}

bool is_inconsistent_error(struct f2fs_super_block *sb)
{
	int i;

	for (i = 0; i < MAX_F2FS_ERRORS; i++) {
		if (sb->s_errors[i])
			return true;
	}

	return false;
}

/*
 * For a return value of 1, caller should further check for c.fix_on state
 * and take appropriate action.
 */
static int f2fs_should_proceed(struct f2fs_super_block *sb, u32 flag)
{
	if (!c.fix_on && (c.auto_fix || c.preen_mode)) {
		if (flag & CP_FSCK_FLAG ||
			flag & CP_QUOTA_NEED_FSCK_FLAG ||
			c.abnormal_stop || c.fs_errors ||
			(exist_qf_ino(sb) && (flag & CP_ERROR_FLAG))) {
			c.fix_on = 1;
		} else if (!c.preen_mode) {
			print_cp_state(flag);
			return 0;
		}
	}
	return 1;
}

int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
	unsigned int flag = get_cp(ckpt_flags);
	unsigned int ovp_segments, reserved_segments;
	unsigned int main_segs, blocks_per_seg;
	unsigned int sit_segs, nat_segs;
	unsigned int sit_bitmap_size, nat_bitmap_size;
	unsigned int log_blocks_per_seg;
	unsigned int segment_count_main;
	unsigned int cp_pack_start_sum, cp_payload;
	block_t user_block_count;
	int i;

	total = get_sb(segment_count);
	fsmeta = get_sb(segment_count_ckpt);
	sit_segs = get_sb(segment_count_sit);
	fsmeta += sit_segs;
	nat_segs = get_sb(segment_count_nat);
	fsmeta += nat_segs;
	fsmeta += get_cp(rsvd_segment_count);
	fsmeta += get_sb(segment_count_ssa);

	if (fsmeta >= total)
		return 1;

	ovp_segments = get_cp(overprov_segment_count);
	reserved_segments = get_cp(rsvd_segment_count);

	if (!(get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO)) &&
		(fsmeta < F2FS_MIN_SEGMENT || ovp_segments == 0 ||
					reserved_segments == 0)) {
		MSG(0, "\tWrong layout: check mkfs.f2fs version\n");
		return 1;
	}

	user_block_count = get_cp(user_block_count);
	segment_count_main = get_sb(segment_count_main) +
		((get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO)) ? 1 : 0);
	log_blocks_per_seg = get_sb(log_blocks_per_seg);
	if (!user_block_count || user_block_count >=
			segment_count_main << log_blocks_per_seg) {
		ASSERT_MSG("\tWrong user_block_count(%u)\n", user_block_count);

		if (!f2fs_should_proceed(sb, flag))
			return 1;
		if (!c.fix_on)
			return 1;

		if (flag & (CP_FSCK_FLAG | CP_RESIZEFS_FLAG)) {
			u32 valid_user_block_cnt;
			u32 seg_cnt_main = get_sb(segment_count) -
					(get_sb(segment_count_ckpt) +
					get_sb(segment_count_sit) +
					get_sb(segment_count_nat) +
					get_sb(segment_count_ssa));

			/* validate segment_count_main in sb first */
			if (seg_cnt_main != get_sb(segment_count_main)) {
1399 MSG(0, "Inconsistent segment_cnt_main %u in sb\n",
1400 segment_count_main << log_blocks_per_seg);
				return 1;
			}
			valid_user_block_cnt = ((get_sb(segment_count_main) -
					get_cp(overprov_segment_count)) * c.blks_per_seg);
			MSG(0, "Info: Fix wrong user_block_count in CP: (%u) -> (%u)\n",
					user_block_count, valid_user_block_cnt);
			set_cp(user_block_count, valid_user_block_cnt);
			c.bug_on = 1;
		}
	}

	main_segs = get_sb(segment_count_main);
	blocks_per_seg = sbi->blocks_per_seg;

	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		if (get_cp(cur_node_segno[i]) >= main_segs ||
			get_cp(cur_node_blkoff[i]) >= blocks_per_seg)
			return 1;
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		if (get_cp(cur_data_segno[i]) >= main_segs ||
			get_cp(cur_data_blkoff[i]) >= blocks_per_seg)
			return 1;
	}

	sit_bitmap_size = get_cp(sit_ver_bitmap_bytesize);
	nat_bitmap_size = get_cp(nat_ver_bitmap_bytesize);

	if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
		nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
		MSG(0, "\tWrong bitmap size: sit(%u), nat(%u)\n",
				sit_bitmap_size, nat_bitmap_size);
		return 1;
	}

	cp_pack_start_sum = __start_sum_addr(sbi);
	cp_payload = __cp_payload(sbi);
	if (cp_pack_start_sum < cp_payload + 1 ||
		cp_pack_start_sum > blocks_per_seg - 1 -
			NR_CURSEG_TYPE) {
		MSG(0, "\tWrong cp_pack_start_sum(%u) or cp_payload(%u)\n",
			cp_pack_start_sum, cp_payload);
		if ((get_sb(feature) & F2FS_FEATURE_SB_CHKSUM))
			return 1;
		set_sb(cp_payload, cp_pack_start_sum - 1);
		update_superblock(sb, SB_MASK_ALL);
	}

	return 0;
}

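/*
 * NAT blocks live in segment pairs: set #1 in the even segment and set #2
 * in the odd one. The NAT version bitmap selects which copy of each block
 * is currently live.
 */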
pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start, int *pack)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	pgoff_t block_off;
	pgoff_t block_addr;
	int seg_off;

	block_off = NAT_BLOCK_OFFSET(start);
	seg_off = block_off >> sbi->log_blocks_per_seg;

	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
			(seg_off << sbi->log_blocks_per_seg << 1) +
			(block_off & ((1 << sbi->log_blocks_per_seg) - 1)));
	if (pack)
		*pack = 1;

	if (f2fs_test_bit(block_off, nm_i->nat_bitmap)) {
		block_addr += sbi->blocks_per_seg;
		if (pack)
			*pack = 2;
	}

	return block_addr;
}

/* will not init nid_bitmap from nat */
static int f2fs_early_init_nid_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nid_bitmap_size = (nm_i->max_nid + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct f2fs_journal *journal = &sum->journal;
	nid_t nid;
	int i;

	if (!(c.func == SLOAD || c.func == FSCK))
		return 0;

	nm_i->nid_bitmap = (char *)calloc(nid_bitmap_size, 1);
	if (!nm_i->nid_bitmap)
		return -ENOMEM;

	/* arbitrarily set 0 bit */
	f2fs_set_bit(0, nm_i->nid_bitmap);

	if (nats_in_cursum(journal) > NAT_JOURNAL_ENTRIES) {
		MSG(0, "\tError: f2fs_init_nid_bitmap truncate n_nats(%u) to "
			"NAT_JOURNAL_ENTRIES(%zu)\n",
			nats_in_cursum(journal), NAT_JOURNAL_ENTRIES);
		journal->n_nats = cpu_to_le16(NAT_JOURNAL_ENTRIES);
		c.fix_on = 1;
	}

	for (i = 0; i < nats_in_cursum(journal); i++) {
		block_t addr;

		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
		if (!IS_VALID_BLK_ADDR(sbi, addr)) {
			MSG(0, "\tError: f2fs_init_nid_bitmap: addr(%u) is invalid!!!\n", addr);
			journal->n_nats = cpu_to_le16(i);
			c.fix_on = 1;
			continue;
		}

		nid = le32_to_cpu(nid_in_journal(journal, i));
		if (!IS_VALID_NID(sbi, nid)) {
			MSG(0, "\tError: f2fs_init_nid_bitmap: nid(%u) is invalid!!!\n", nid);
			journal->n_nats = cpu_to_le16(i);
			c.fix_on = 1;
			continue;
		}
		if (addr != NULL_ADDR)
			f2fs_set_bit(nid, nm_i->nid_bitmap);
	}
	return 0;
}

/* will init nid_bitmap from nat */
static int f2fs_late_init_nid_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_block;
	block_t start_blk;
	nid_t nid;

	if (!(c.func == SLOAD || c.func == FSCK))
		return 0;

	nat_block = malloc(F2FS_BLKSIZE);
	if (!nat_block) {
		free(nm_i->nid_bitmap);
		return -ENOMEM;
	}

	f2fs_ra_meta_pages(sbi, 0, NAT_BLOCK_OFFSET(nm_i->max_nid),
							META_NAT);
	for (nid = 0; nid < nm_i->max_nid; nid++) {
		if (!(nid % NAT_ENTRY_PER_BLOCK)) {
			int ret;

			start_blk = current_nat_addr(sbi, nid, NULL);
			ret = dev_read_block(nat_block, start_blk);
			ASSERT(ret >= 0);
		}

		if (nat_block->entries[nid % NAT_ENTRY_PER_BLOCK].block_addr)
			f2fs_set_bit(nid, nm_i->nid_bitmap);
	}

	free(nat_block);
	return 0;
}

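/*
 * nat_bits layout: an 8-byte checkpoint CRC followed by two bitmaps with
 * one bit per NAT block (full and empty). They sit in the tail blocks of
 * a CP segment, so the flag can be set only if the CP pack leaves room.
 */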
u32 update_nat_bits_flags(struct f2fs_super_block *sb,
				struct f2fs_checkpoint *cp, u32 flags)
{
	uint32_t nat_bits_bytes, nat_bits_blocks;

	nat_bits_bytes = get_sb(segment_count_nat) << 5;
	nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 +
						F2FS_BLKSIZE - 1);
	if (get_cp(cp_pack_total_block_count) <=
			(1 << get_sb(log_blocks_per_seg)) - nat_bits_blocks)
		flags |= CP_NAT_BITS_FLAG;
	else
		flags &= (~CP_NAT_BITS_FLAG);

	return flags;
}
/* should call flush_journal_entries() before this */
void write_nat_bits(struct f2fs_sb_info *sbi,
	struct f2fs_super_block *sb, struct f2fs_checkpoint *cp, int set)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	uint32_t nat_blocks = get_sb(segment_count_nat) <<
				(get_sb(log_blocks_per_seg) - 1);
	uint32_t nat_bits_bytes = nat_blocks >> 3;
	uint32_t nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) +
						8 + F2FS_BLKSIZE - 1);
	unsigned char *nat_bits, *full_nat_bits, *empty_nat_bits;
	struct f2fs_nat_block *nat_block;
	uint32_t i, j;
	block_t blkaddr;
	int ret;

	nat_bits = calloc(F2FS_BLKSIZE, nat_bits_blocks);
	ASSERT(nat_bits);

	nat_block = malloc(F2FS_BLKSIZE);
	ASSERT(nat_block);

	full_nat_bits = nat_bits + 8;
	empty_nat_bits = full_nat_bits + nat_bits_bytes;

	memset(full_nat_bits, 0, nat_bits_bytes);
	memset(empty_nat_bits, 0, nat_bits_bytes);

	for (i = 0; i < nat_blocks; i++) {
		int seg_off = i >> get_sb(log_blocks_per_seg);
		int valid = 0;

		blkaddr = (pgoff_t)(get_sb(nat_blkaddr) +
				(seg_off << get_sb(log_blocks_per_seg) << 1) +
				(i & ((1 << get_sb(log_blocks_per_seg)) - 1)));

		/*
		 * Note that the new nat_blocks count may be larger than the
		 * old nm_i->nat_blocks, since nm_i->nat_bitmap is based on
		 * the old one.
		 */
		if (i < nm_i->nat_blocks && f2fs_test_bit(i, nm_i->nat_bitmap))
			blkaddr += (1 << get_sb(log_blocks_per_seg));

		ret = dev_read_block(nat_block, blkaddr);
		ASSERT(ret >= 0);

		for (j = 0; j < NAT_ENTRY_PER_BLOCK; j++) {
			if ((i == 0 && j == 0) ||
				nat_block->entries[j].block_addr != NULL_ADDR)
				valid++;
		}
		if (valid == 0)
			test_and_set_bit_le(i, empty_nat_bits);
		else if (valid == NAT_ENTRY_PER_BLOCK)
			test_and_set_bit_le(i, full_nat_bits);
	}
	*(__le64 *)nat_bits = get_cp_crc(cp);
	free(nat_block);

	blkaddr = get_sb(segment0_blkaddr) + (set <<
			get_sb(log_blocks_per_seg)) - nat_bits_blocks;

	DBG(1, "\tWriting NAT bits pages, at offset 0x%08x\n", blkaddr);

	for (i = 0; i < nat_bits_blocks; i++) {
		if (dev_write_block(nat_bits + i * F2FS_BLKSIZE, blkaddr + i))
			ASSERT_MSG("\tError: write NAT bits to disk!!!\n");
	}
	MSG(0, "Info: Write valid nat_bits in checkpoint\n");

	free(nat_bits);
}

static int check_nat_bits(struct f2fs_sb_info *sbi,
	struct f2fs_super_block *sb, struct f2fs_checkpoint *cp)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	uint32_t nat_blocks = get_sb(segment_count_nat) <<
				(get_sb(log_blocks_per_seg) - 1);
	uint32_t nat_bits_bytes = nat_blocks >> 3;
	uint32_t nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) +
						8 + F2FS_BLKSIZE - 1);
	unsigned char *nat_bits, *full_nat_bits, *empty_nat_bits;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = &curseg->sum_blk->journal;
	uint32_t i, j;
	block_t blkaddr;
	int err = 0;

	nat_bits = calloc(F2FS_BLKSIZE, nat_bits_blocks);
	ASSERT(nat_bits);

	full_nat_bits = nat_bits + 8;
	empty_nat_bits = full_nat_bits + nat_bits_bytes;

	blkaddr = get_sb(segment0_blkaddr) + (sbi->cur_cp <<
			get_sb(log_blocks_per_seg)) - nat_bits_blocks;

	for (i = 0; i < nat_bits_blocks; i++) {
		if (dev_read_block(nat_bits + i * F2FS_BLKSIZE, blkaddr + i))
1684 ASSERT_MSG("\tError: read NAT bits to disk!!!\n");
	}

	if (*(__le64 *)nat_bits != get_cp_crc(cp) || nats_in_cursum(journal)) {
		/*
		 * If there is a journal, f2fs was not shut down cleanly.
		 * Let's flush the journal entries together with nat_bits.
		 */
		if (c.fix_on)
			err = -1;
		/* Otherwise, kernel will disable nat_bits */
		goto out;
	}

	for (i = 0; i < nat_blocks; i++) {
		uint32_t start_nid = i * NAT_ENTRY_PER_BLOCK;
		uint32_t valid = 0;
		int empty = test_bit_le(i, empty_nat_bits);
		int full = test_bit_le(i, full_nat_bits);

		for (j = 0; j < NAT_ENTRY_PER_BLOCK; j++) {
			if (f2fs_test_bit(start_nid + j, nm_i->nid_bitmap))
				valid++;
		}
		if (valid == 0) {
			if (!empty || full) {
				err = -1;
				goto out;
			}
		} else if (valid == NAT_ENTRY_PER_BLOCK) {
			if (empty || !full) {
				err = -1;
				goto out;
			}
		} else {
			if (empty || full) {
				err = -1;
				goto out;
			}
		}
	}
out:
	free(nat_bits);
	if (!err) {
		MSG(0, "Info: Checked valid nat_bits in checkpoint\n");
	} else {
		c.bug_nat_bits = 1;
		MSG(0, "Info: Corrupted valid nat_bits in checkpoint\n");
	}
	return err;
}

int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs;

	nm_i->nat_blkaddr = get_sb(nat_blkaddr);

	/* segment_count_nat includes pair segment so divide by 2. */
	nat_segs = get_sb(segment_count_nat) >> 1;
	nm_i->nat_blocks = nat_segs << get_sb(log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;
	nm_i->init_scan_nid = get_cp(next_free_nid);
	nm_i->next_scan_nid = get_cp(next_free_nid);

	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);

	nm_i->nat_bitmap = malloc(nm_i->bitmap_size);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	/* copy version bitmap */
	memcpy(nm_i->nat_bitmap, version_bitmap, nm_i->bitmap_size);
	return f2fs_early_init_nid_bitmap(sbi);
}

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = malloc(sizeof(struct f2fs_nm_info));
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	return 0;
}

int build_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
	struct sit_info *sit_i;
	unsigned int sit_segs;
	int start;
	char *src_bitmap, *dst_bitmap;
	unsigned char *bitmap;
	unsigned int bitmap_size;

	sit_i = malloc(sizeof(struct sit_info));
	if (!sit_i) {
		MSG(1, "\tError: Malloc failed for build_sit_info!\n");
		return -ENOMEM;
	}

	SM_I(sbi)->sit_info = sit_i;

	sit_i->sentries = calloc(MAIN_SEGS(sbi) * sizeof(struct seg_entry), 1);
	if (!sit_i->sentries) {
		MSG(1, "\tError: Calloc failed for build_sit_info!\n");
		goto free_sit_info;
	}

	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE;

	if (need_fsync_data_record(sbi))
		bitmap_size += bitmap_size;

	sit_i->bitmap = calloc(bitmap_size, 1);
	if (!sit_i->bitmap) {
		MSG(1, "\tError: Calloc failed for build_sit_info!!\n");
		goto free_sentries;
	}

	bitmap = sit_i->bitmap;

	for (start = 0; start < MAIN_SEGS(sbi); start++) {
		sit_i->sentries[start].cur_valid_map = bitmap;
		bitmap += SIT_VBLOCK_MAP_SIZE;

		if (need_fsync_data_record(sbi)) {
			sit_i->sentries[start].ckpt_valid_map = bitmap;
			bitmap += SIT_VBLOCK_MAP_SIZE;
		}
	}

	sit_segs = get_sb(segment_count_sit) >> 1;
	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	dst_bitmap = malloc(bitmap_size);
	if (!dst_bitmap) {
		MSG(1, "\tError: Malloc failed for build_sit_info!!\n");
		goto free_validity_maps;
	}

	memcpy(dst_bitmap, src_bitmap, bitmap_size);

	sit_i->sit_base_addr = get_sb(sit_blkaddr);
	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
	sit_i->written_valid_blocks = get_cp(valid_block_count);
	sit_i->sit_bitmap = dst_bitmap;
	sit_i->bitmap_size = bitmap_size;
	sit_i->dirty_sentries = 0;
	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
	sit_i->elapsed_time = get_cp(elapsed_time);
	return 0;

free_validity_maps:
	free(sit_i->bitmap);
free_sentries:
	free(sit_i->sentries);
free_sit_info:
	free(sit_i);

	return -ENOMEM;
}

1863 void reset_curseg(struct f2fs_sb_info *sbi, int type)
1864 {
1865 struct curseg_info *curseg = CURSEG_I(sbi, type);
1866 struct summary_footer *sum_footer;
1867 struct seg_entry *se;
1868
1869 sum_footer = &(curseg->sum_blk->footer);
1870 memset(sum_footer, 0, sizeof(struct summary_footer));
1871 if (IS_DATASEG(type))
1872 SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
1873 if (IS_NODESEG(type))
1874 SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
1875 se = get_seg_entry(sbi, curseg->segno);
1876 se->type = type;
1877 se->dirty = 1;
1878 }
1879
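/*
 * Compacted summaries pack the NAT journal, the SIT journal and the data
 * summary entries back-to-back into as few blocks as possible, leaving
 * only the per-block footer space unused. This reader walks that layout
 * and refills the three data-type current segments.
 */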
1880 static void read_compacted_summaries(struct f2fs_sb_info *sbi)
1881 {
1882 struct curseg_info *curseg;
1883 unsigned int i, j, offset;
1884 block_t start;
1885 char *kaddr;
1886 int ret;
1887
1888 start = start_sum_block(sbi);
1889
1890 kaddr = malloc(F2FS_BLKSIZE);
1891 ASSERT(kaddr);
1892
1893 ret = dev_read_block(kaddr, start++);
1894 ASSERT(ret >= 0);
1895
1896 curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1897 memcpy(&curseg->sum_blk->journal.n_nats, kaddr, SUM_JOURNAL_SIZE);
1898
1899 curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1900 memcpy(&curseg->sum_blk->journal.n_sits, kaddr + SUM_JOURNAL_SIZE,
1901 SUM_JOURNAL_SIZE);
1902
1903 offset = 2 * SUM_JOURNAL_SIZE;
1904 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
1905 unsigned short blk_off;
1906 struct curseg_info *curseg = CURSEG_I(sbi, i);
1907
1908 reset_curseg(sbi, i);
1909
1910 if (curseg->alloc_type == SSR)
1911 blk_off = sbi->blocks_per_seg;
1912 else
1913 blk_off = curseg->next_blkoff;
1914
1915 ASSERT(blk_off <= ENTRIES_IN_SUM);
1916
1917 for (j = 0; j < blk_off; j++) {
1918 struct f2fs_summary *s;
1919 s = (struct f2fs_summary *)(kaddr + offset);
1920 curseg->sum_blk->entries[j] = *s;
1921 offset += SUMMARY_SIZE;
1922 if (offset + SUMMARY_SIZE <=
1923 F2FS_BLKSIZE - SUM_FOOTER_SIZE)
1924 continue;
1925 memset(kaddr, 0, F2FS_BLKSIZE);
1926 ret = dev_read_block(kaddr, start++);
1927 ASSERT(ret >= 0);
1928 offset = 0;
1929 }
1930 }
1931 free(kaddr);
1932 }
1933
1934 static void restore_node_summary(struct f2fs_sb_info *sbi,
1935 unsigned int segno, struct f2fs_summary_block *sum_blk)
1936 {
1937 struct f2fs_node *node_blk;
1938 struct f2fs_summary *sum_entry;
1939 block_t addr;
1940 unsigned int i;
1941 int ret;
1942
1943 node_blk = malloc(F2FS_BLKSIZE);
1944 ASSERT(node_blk);
1945
1946 /* scan the node segment */
1947 addr = START_BLOCK(sbi, segno);
1948 sum_entry = &sum_blk->entries[0];
1949
1950 for (i = 0; i < sbi->blocks_per_seg; i++, sum_entry++) {
1951 ret = dev_read_block(node_blk, addr);
1952 ASSERT(ret >= 0);
1953 sum_entry->nid = node_blk->footer.nid;
1954 addr++;
1955 }
1956 free(node_blk);
1957 }
1958
1959 static void read_normal_summaries(struct f2fs_sb_info *sbi, int type)
1960 {
1961 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
1962 struct f2fs_summary_block *sum_blk;
1963 struct curseg_info *curseg;
1964 unsigned int segno = 0;
1965 block_t blk_addr = 0;
1966 int ret;
1967
1968 if (IS_DATASEG(type)) {
1969 segno = get_cp(cur_data_segno[type]);
1970 if (is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
1971 blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
1972 else
1973 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
1974 } else {
1975 segno = get_cp(cur_node_segno[type - CURSEG_HOT_NODE]);
1976 if (is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
1977 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
1978 type - CURSEG_HOT_NODE);
1979 else
1980 blk_addr = GET_SUM_BLKADDR(sbi, segno);
1981 }
1982
1983 sum_blk = malloc(sizeof(*sum_blk));
1984 ASSERT(sum_blk);
1985
1986 ret = dev_read_block(sum_blk, blk_addr);
1987 ASSERT(ret >= 0);
1988
1989 if (IS_NODESEG(type) && !is_set_ckpt_flags(cp, CP_UMOUNT_FLAG))
1990 restore_node_summary(sbi, segno, sum_blk);
1991
1992 curseg = CURSEG_I(sbi, type);
1993 memcpy(curseg->sum_blk, sum_blk, sizeof(*sum_blk));
1994 reset_curseg(sbi, type);
1995 free(sum_blk);
1996 }
1997
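/*
 * Patch one summary entry and persist the owning SSA block immediately.
 * Images formatted with the RO feature skip the update entirely.
 * get_sum_block() may hand back a curseg's in-memory block; only the
 * freshly allocated cases (plain node/data/unknown types) are freed here.
 */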
1998 void update_sum_entry(struct f2fs_sb_info *sbi, block_t blk_addr,
1999 struct f2fs_summary *sum)
2000 {
2001 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2002 struct f2fs_summary_block *sum_blk;
2003 u32 segno, offset;
2004 int type, ret;
2005 struct seg_entry *se;
2006
2007 if (get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO))
2008 return;
2009
2010 segno = GET_SEGNO(sbi, blk_addr);
2011 offset = OFFSET_IN_SEG(sbi, blk_addr);
2012
2013 se = get_seg_entry(sbi, segno);
2014
2015 sum_blk = get_sum_block(sbi, segno, &type);
2016 memcpy(&sum_blk->entries[offset], sum, sizeof(*sum));
2017 sum_blk->footer.entry_type = IS_NODESEG(se->type) ? SUM_TYPE_NODE :
2018 SUM_TYPE_DATA;
2019
2020 /* write SSA all the time */
2021 ret = dev_write_block(sum_blk, GET_SUM_BLKADDR(sbi, segno));
2022 ASSERT(ret >= 0);
2023
2024 if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
2025 type == SEG_TYPE_MAX)
2026 free(sum_blk);
2027 }
2028
2029 static void restore_curseg_summaries(struct f2fs_sb_info *sbi)
2030 {
2031 int type = CURSEG_HOT_DATA;
2032
2033 if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
2034 read_compacted_summaries(sbi);
2035 type = CURSEG_HOT_NODE;
2036 }
2037
2038 for (; type <= CURSEG_COLD_NODE; type++)
2039 read_normal_summaries(sbi, type);
2040 }
2041
2042 static int build_curseg(struct f2fs_sb_info *sbi)
2043 {
2044 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
2045 struct curseg_info *array;
2046 unsigned short blk_off;
2047 unsigned int segno;
2048 int i;
2049
2050 array = malloc(sizeof(*array) * NR_CURSEG_TYPE);
2051 if (!array) {
2052 MSG(1, "\tError: Malloc failed for build_curseg!\n");
2053 return -ENOMEM;
2054 }
2055
2056 SM_I(sbi)->curseg_array = array;
2057
2058 for (i = 0; i < NR_CURSEG_TYPE; i++) {
2059 array[i].sum_blk = calloc(sizeof(*(array[i].sum_blk)), 1);
2060 if (!array[i].sum_blk) {
2061 MSG(1, "\tError: Calloc failed for build_curseg!!\n");
2062 goto seg_cleanup;
2063 }
2064
2065 if (i <= CURSEG_COLD_DATA) {
2066 blk_off = get_cp(cur_data_blkoff[i]);
2067 segno = get_cp(cur_data_segno[i]);
2068 }
2069 if (i > CURSEG_COLD_DATA) {
2070 blk_off = get_cp(cur_node_blkoff[i - CURSEG_HOT_NODE]);
2071 segno = get_cp(cur_node_segno[i - CURSEG_HOT_NODE]);
2072 }
2073 ASSERT(segno < MAIN_SEGS(sbi));
2074 ASSERT(blk_off < DEFAULT_BLOCKS_PER_SEGMENT);
2075
2076 array[i].segno = segno;
2077 array[i].zone = GET_ZONENO_FROM_SEGNO(sbi, segno);
2078 array[i].next_segno = NULL_SEGNO;
2079 array[i].next_blkoff = blk_off;
2080 array[i].alloc_type = cp->alloc_type[i];
2081 }
2082 restore_curseg_summaries(sbi);
2083 return 0;
2084
2085 seg_cleanup:
2086 for (--i; i >= 0; --i)
2087 free(array[i].sum_blk);
2088 free(array);
2089
2090 return -ENOMEM;
2091 }
2092
2093 static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
2094 {
2095 unsigned int end_segno = SM_I(sbi)->segment_count - 1;
2096 ASSERT(segno <= end_segno);
2097 }
2098
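/*
 * The SIT is double-buffered on disk: each SIT block has a twin located
 * sit_blocks further on, and the per-block bit in sit_bitmap selects
 * which copy is current. For example, if the bit for offset 3 is set,
 * the live block is sit_base_addr + 3 + sit_blocks.
 */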
2099 static inline block_t current_sit_addr(struct f2fs_sb_info *sbi,
2100 unsigned int segno)
2101 {
2102 struct sit_info *sit_i = SIT_I(sbi);
2103 unsigned int offset = SIT_BLOCK_OFFSET(sit_i, segno);
2104 block_t blk_addr = sit_i->sit_base_addr + offset;
2105
2106 check_seg_range(sbi, segno);
2107
2108 /* calculate sit block address */
2109 if (f2fs_test_bit(offset, sit_i->sit_bitmap))
2110 blk_addr += sit_i->sit_blocks;
2111
2112 return blk_addr;
2113 }
2114
2115 void get_current_sit_page(struct f2fs_sb_info *sbi,
2116 unsigned int segno, struct f2fs_sit_block *sit_blk)
2117 {
2118 block_t blk_addr = current_sit_addr(sbi, segno);
2119
2120 ASSERT(dev_read_block(sit_blk, blk_addr) >= 0);
2121 }
2122
2123 void rewrite_current_sit_page(struct f2fs_sb_info *sbi,
2124 unsigned int segno, struct f2fs_sit_block *sit_blk)
2125 {
2126 block_t blk_addr = current_sit_addr(sbi, segno);
2127
2128 ASSERT(dev_write_block(sit_blk, blk_addr) >= 0);
2129 }
2130
2131 void check_block_count(struct f2fs_sb_info *sbi,
2132 unsigned int segno, struct f2fs_sit_entry *raw_sit)
2133 {
2134 struct f2fs_sm_info *sm_info = SM_I(sbi);
2135 unsigned int end_segno = sm_info->segment_count - 1;
2136 int valid_blocks = 0;
2137 unsigned int i;
2138
2139 /* check segment usage */
2140 if (GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg)
2141 ASSERT_MSG("Invalid SIT vblocks: segno=0x%x, %u",
2142 segno, GET_SIT_VBLOCKS(raw_sit));
2143
2144 /* check boundary of a given segment number */
2145 if (segno > end_segno)
2146 ASSERT_MSG("Invalid SEGNO: 0x%x", segno);
2147
2148 /* check bitmap with valid block count */
2149 for (i = 0; i < SIT_VBLOCK_MAP_SIZE; i++)
2150 valid_blocks += get_bits_in_byte(raw_sit->valid_map[i]);
2151
2152 if (GET_SIT_VBLOCKS(raw_sit) != valid_blocks)
2153 ASSERT_MSG("Wrong SIT valid blocks: segno=0x%x, %u vs. %u",
2154 segno, GET_SIT_VBLOCKS(raw_sit), valid_blocks);
2155
2156 if (GET_SIT_TYPE(raw_sit) >= NO_CHECK_TYPE)
2157 ASSERT_MSG("Wrong SIT type: segno=0x%x, %u",
2158 segno, GET_SIT_TYPE(raw_sit));
2159 }
2160
2161 void __seg_info_from_raw_sit(struct seg_entry *se,
2162 struct f2fs_sit_entry *raw_sit)
2163 {
2164 se->valid_blocks = GET_SIT_VBLOCKS(raw_sit);
2165 memcpy(se->cur_valid_map, raw_sit->valid_map, SIT_VBLOCK_MAP_SIZE);
2166 se->type = GET_SIT_TYPE(raw_sit);
2167 se->orig_type = GET_SIT_TYPE(raw_sit);
2168 se->mtime = le64_to_cpu(raw_sit->mtime);
2169 }
2170
2171 void seg_info_from_raw_sit(struct f2fs_sb_info *sbi, struct seg_entry *se,
2172 struct f2fs_sit_entry *raw_sit)
2173 {
2174 __seg_info_from_raw_sit(se, raw_sit);
2175
2176 if (!need_fsync_data_record(sbi))
2177 return;
2178 se->ckpt_valid_blocks = se->valid_blocks;
2179 memcpy(se->ckpt_valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
2180 se->ckpt_type = se->type;
2181 }
2182
2183 struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
2184 unsigned int segno)
2185 {
2186 struct sit_info *sit_i = SIT_I(sbi);
2187 return &sit_i->sentries[segno];
2188 }
2189
2190 unsigned short get_seg_vblocks(struct f2fs_sb_info *sbi, struct seg_entry *se)
2191 {
2192 if (!need_fsync_data_record(sbi))
2193 return se->valid_blocks;
2194 else
2195 return se->ckpt_valid_blocks;
2196 }
2197
2198 unsigned char *get_seg_bitmap(struct f2fs_sb_info *sbi, struct seg_entry *se)
2199 {
2200 if (!need_fsync_data_record(sbi))
2201 return se->cur_valid_map;
2202 else
2203 return se->ckpt_valid_map;
2204 }
2205
2206 unsigned char get_seg_type(struct f2fs_sb_info *sbi, struct seg_entry *se)
2207 {
2208 if (!need_fsync_data_record(sbi))
2209 return se->type;
2210 else
2211 return se->ckpt_type;
2212 }
2213
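/*
 * Return the summary block covering @segno. If the segment is one of the
 * six current segments, the curseg's in-memory copy is returned, and a
 * mismatch between the checkpoint's idea of the segment type and the
 * summary footer is flagged with a negative *ret_type. Otherwise a block
 * is read from the SSA and the caller owns the allocation; SEG_TYPE_MAX
 * means the footer matched neither node nor data.
 */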
2214 struct f2fs_summary_block *get_sum_block(struct f2fs_sb_info *sbi,
2215 unsigned int segno, int *ret_type)
2216 {
2217 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
2218 struct f2fs_summary_block *sum_blk;
2219 struct curseg_info *curseg;
2220 int type, ret;
2221 u64 ssa_blk;
2222
2223 *ret_type = SEG_TYPE_MAX;
2224
2225 ssa_blk = GET_SUM_BLKADDR(sbi, segno);
2226 for (type = 0; type < NR_CURSEG_NODE_TYPE; type++) {
2227 if (segno == get_cp(cur_node_segno[type])) {
2228 curseg = CURSEG_I(sbi, CURSEG_HOT_NODE + type);
2229 if (!IS_SUM_NODE_SEG(curseg->sum_blk->footer)) {
2230 ASSERT_MSG("segno [0x%x] indicates a data "
2231 "segment, but should be node",
2232 segno);
2233 *ret_type = -SEG_TYPE_CUR_NODE;
2234 } else {
2235 *ret_type = SEG_TYPE_CUR_NODE;
2236 }
2237 return curseg->sum_blk;
2238 }
2239 }
2240
2241 for (type = 0; type < NR_CURSEG_DATA_TYPE; type++) {
2242 if (segno == get_cp(cur_data_segno[type])) {
2243 curseg = CURSEG_I(sbi, type);
2244 if (IS_SUM_NODE_SEG(curseg->sum_blk->footer)) {
2245 ASSERT_MSG("segno [0x%x] indicates a node "
2246 "segment, but should be data",
2247 segno);
2248 *ret_type = -SEG_TYPE_CUR_DATA;
2249 } else {
2250 *ret_type = SEG_TYPE_CUR_DATA;
2251 }
2252 return curseg->sum_blk;
2253 }
2254 }
2255
2256 sum_blk = calloc(BLOCK_SZ, 1);
2257 ASSERT(sum_blk);
2258
2259 ret = dev_read_block(sum_blk, ssa_blk);
2260 ASSERT(ret >= 0);
2261
2262 if (IS_SUM_NODE_SEG(sum_blk->footer))
2263 *ret_type = SEG_TYPE_NODE;
2264 else if (IS_SUM_DATA_SEG(sum_blk->footer))
2265 *ret_type = SEG_TYPE_DATA;
2266
2267 return sum_blk;
2268 }
2269
2270 int get_sum_entry(struct f2fs_sb_info *sbi, u32 blk_addr,
2271 struct f2fs_summary *sum_entry)
2272 {
2273 struct f2fs_summary_block *sum_blk;
2274 u32 segno, offset;
2275 int type;
2276
2277 segno = GET_SEGNO(sbi, blk_addr);
2278 offset = OFFSET_IN_SEG(sbi, blk_addr);
2279
2280 sum_blk = get_sum_block(sbi, segno, &type);
2281 memcpy(sum_entry, &(sum_blk->entries[offset]),
2282 sizeof(struct f2fs_summary));
2283 if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
2284 type == SEG_TYPE_MAX)
2285 free(sum_blk);
2286 return type;
2287 }
2288
2289 static void get_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
2290 struct f2fs_nat_entry *raw_nat)
2291 {
2292 struct f2fs_nat_block *nat_block;
2293 pgoff_t block_addr;
2294 int entry_off;
2295 int ret;
2296
2297 if (lookup_nat_in_journal(sbi, nid, raw_nat) >= 0)
2298 return;
2299
2300 nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
2301 ASSERT(nat_block);
2302
2303 entry_off = nid % NAT_ENTRY_PER_BLOCK;
2304 block_addr = current_nat_addr(sbi, nid, NULL);
2305
2306 ret = dev_read_block(nat_block, block_addr);
2307 ASSERT(ret >= 0);
2308
2309 memcpy(raw_nat, &nat_block->entries[entry_off],
2310 sizeof(struct f2fs_nat_entry));
2311 free(nat_block);
2312 }
2313
2314 void update_data_blkaddr(struct f2fs_sb_info *sbi, nid_t nid,
2315 u16 ofs_in_node, block_t newaddr)
2316 {
2317 struct f2fs_node *node_blk = NULL;
2318 struct node_info ni;
2319 block_t oldaddr, startaddr, endaddr;
2320 int ret;
2321
2322 node_blk = (struct f2fs_node *)calloc(BLOCK_SZ, 1);
2323 ASSERT(node_blk);
2324
2325 get_node_info(sbi, nid, &ni);
2326
2327 /* read node_block */
2328 ret = dev_read_block(node_blk, ni.blk_addr);
2329 ASSERT(ret >= 0);
2330
2331 /* check its block address */
2332 if (node_blk->footer.nid == node_blk->footer.ino) {
2333 int ofs = get_extra_isize(node_blk);
2334
2335 oldaddr = le32_to_cpu(node_blk->i.i_addr[ofs + ofs_in_node]);
2336 node_blk->i.i_addr[ofs + ofs_in_node] = cpu_to_le32(newaddr);
2337 ret = write_inode(node_blk, ni.blk_addr);
2338 ASSERT(ret >= 0);
2339 } else {
2340 oldaddr = le32_to_cpu(node_blk->dn.addr[ofs_in_node]);
2341 node_blk->dn.addr[ofs_in_node] = cpu_to_le32(newaddr);
2342 ret = dev_write_block(node_blk, ni.blk_addr);
2343 ASSERT(ret >= 0);
2344 }
2345
2346 /* check extent cache entry */
2347 if (node_blk->footer.nid != node_blk->footer.ino) {
2348 get_node_info(sbi, le32_to_cpu(node_blk->footer.ino), &ni);
2349
2350 /* read inode block */
2351 ret = dev_read_block(node_blk, ni.blk_addr);
2352 ASSERT(ret >= 0);
2353 }
2354
2355 startaddr = le32_to_cpu(node_blk->i.i_ext.blk_addr);
2356 endaddr = startaddr + le32_to_cpu(node_blk->i.i_ext.len);
2357 if (oldaddr >= startaddr && oldaddr < endaddr) {
2358 node_blk->i.i_ext.len = 0;
2359
2360 /* update inode block */
2361 ASSERT(write_inode(node_blk, ni.blk_addr) >= 0);
2362 }
2363 free(node_blk);
2364 }
2365
2366 void update_nat_blkaddr(struct f2fs_sb_info *sbi, nid_t ino,
2367 nid_t nid, block_t newaddr)
2368 {
2369 struct f2fs_nat_block *nat_block;
2370 pgoff_t block_addr;
2371 int entry_off;
2372 int ret;
2373
2374 nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
2375 ASSERT(nat_block);
2376
2377 entry_off = nid % NAT_ENTRY_PER_BLOCK;
2378 block_addr = current_nat_addr(sbi, nid, NULL);
2379
2380 ret = dev_read_block(nat_block, block_addr);
2381 ASSERT(ret >= 0);
2382
2383 if (ino)
2384 nat_block->entries[entry_off].ino = cpu_to_le32(ino);
2385 nat_block->entries[entry_off].block_addr = cpu_to_le32(newaddr);
2386 if (c.func == FSCK)
2387 F2FS_FSCK(sbi)->entries[nid] = nat_block->entries[entry_off];
2388
2389 ret = dev_write_block(nat_block, block_addr);
2390 ASSERT(ret >= 0);
2391 free(nat_block);
2392 }
2393
2394 void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
2395 {
2396 struct f2fs_nat_entry raw_nat;
2397
2398 ni->nid = nid;
2399 if (c.func == FSCK && F2FS_FSCK(sbi)->nr_nat_entries) {
2400 node_info_from_raw_nat(ni, &(F2FS_FSCK(sbi)->entries[nid]));
2401 if (ni->blk_addr)
2402 return;
2403 /* nat entry is not cached, read it */
2404 }
2405
2406 get_nat_entry(sbi, nid, &raw_nat);
2407 node_info_from_raw_nat(ni, &raw_nat);
2408 }
2409
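/*
 * Populate every in-memory seg_entry from the on-disk SIT, reading in
 * readahead batches of META_SIT blocks, then overlay the newer entries
 * still sitting in the SIT journal of the cold data curseg. Oversized or
 * out-of-range journal entries are truncated and flagged for fixing.
 */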
2410 static int build_sit_entries(struct f2fs_sb_info *sbi)
2411 {
2412 struct sit_info *sit_i = SIT_I(sbi);
2413 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2414 struct f2fs_journal *journal = &curseg->sum_blk->journal;
2415 struct f2fs_sit_block *sit_blk;
2416 struct seg_entry *se;
2417 struct f2fs_sit_entry sit;
2418 int sit_blk_cnt = SIT_BLK_CNT(sbi);
2419 unsigned int i, segno, end;
2420 unsigned int nr_read, start_blk = 0;
2421
2422 sit_blk = calloc(BLOCK_SZ, 1);
2423 if (!sit_blk) {
2424 MSG(1, "\tError: Calloc failed for build_sit_entries!\n");
2425 return -ENOMEM;
2426 }
2427
2428 do {
2429 nr_read = f2fs_ra_meta_pages(sbi, start_blk, MAX_RA_BLOCKS,
2430 META_SIT);
2431
2432 segno = start_blk * sit_i->sents_per_block;
2433 end = (start_blk + nr_read) * sit_i->sents_per_block;
2434
2435 for (; segno < end && segno < MAIN_SEGS(sbi); segno++) {
2436 se = &sit_i->sentries[segno];
2437
2438 get_current_sit_page(sbi, segno, sit_blk);
2439 sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
2440
2441 check_block_count(sbi, segno, &sit);
2442 seg_info_from_raw_sit(sbi, se, &sit);
2443 }
2444 start_blk += nr_read;
2445 } while (start_blk < sit_blk_cnt);
2446
2448 free(sit_blk);
2449
2450 if (sits_in_cursum(journal) > SIT_JOURNAL_ENTRIES) {
2451 MSG(0, "\tError: build_sit_entries truncate n_sits(%u) to "
2452 "SIT_JOURNAL_ENTRIES(%zu)\n",
2453 sits_in_cursum(journal), SIT_JOURNAL_ENTRIES);
2454 journal->n_sits = cpu_to_le16(SIT_JOURNAL_ENTRIES);
2455 c.fix_on = 1;
2456 }
2457
2458 for (i = 0; i < sits_in_cursum(journal); i++) {
2459 segno = le32_to_cpu(segno_in_journal(journal, i));
2460
2461 if (segno >= MAIN_SEGS(sbi)) {
2462 MSG(0, "\tError: build_sit_entries: segno(%u) is invalid!!!\n", segno);
2463 journal->n_sits = cpu_to_le16(i);
2464 c.fix_on = 1;
2465 continue;
2466 }
2467
2468 se = &sit_i->sentries[segno];
2469 sit = sit_in_journal(journal, i);
2470
2471 check_block_count(sbi, segno, &sit);
2472 seg_info_from_raw_sit(sbi, se, &sit);
2473 }
2474 return 0;
2475 }
2476
2477 static int early_build_segment_manager(struct f2fs_sb_info *sbi)
2478 {
2479 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2480 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
2481 struct f2fs_sm_info *sm_info;
2482
2483 sm_info = malloc(sizeof(struct f2fs_sm_info));
2484 if (!sm_info) {
2485 MSG(1, "\tError: Malloc failed for build_segment_manager!\n");
2486 return -ENOMEM;
2487 }
2488
2489 /* init sm info */
2490 sbi->sm_info = sm_info;
2491 sm_info->seg0_blkaddr = get_sb(segment0_blkaddr);
2492 sm_info->main_blkaddr = get_sb(main_blkaddr);
2493 sm_info->segment_count = get_sb(segment_count);
2494 sm_info->reserved_segments = get_cp(rsvd_segment_count);
2495 sm_info->ovp_segments = get_cp(overprov_segment_count);
2496 sm_info->main_segments = get_sb(segment_count_main);
2497 sm_info->ssa_blkaddr = get_sb(ssa_blkaddr);
2498
2499 if (build_sit_info(sbi) || build_curseg(sbi)) {
2500 free(sm_info);
2501 return -ENOMEM;
2502 }
2503
2504 return 0;
2505 }
2506
2507 static int late_build_segment_manager(struct f2fs_sb_info *sbi)
2508 {
2509 if (sbi->seg_manager_done)
2510 return 1; /* this function was already called */
2511
2512 sbi->seg_manager_done = true;
2513 if (build_sit_entries(sbi)) {
2514 free(sbi->sm_info);
2515 return -ENOMEM;
2516 }
2517
2518 return 0;
2519 }
2520
2521 void build_sit_area_bitmap(struct f2fs_sb_info *sbi)
2522 {
2523 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2524 struct f2fs_sm_info *sm_i = SM_I(sbi);
2525 unsigned int segno = 0;
2526 char *ptr = NULL;
2527 u32 sum_vblocks = 0;
2528 u32 free_segs = 0;
2529 struct seg_entry *se;
2530
2531 fsck->sit_area_bitmap_sz = sm_i->main_segments * SIT_VBLOCK_MAP_SIZE;
2532 fsck->sit_area_bitmap = calloc(1, fsck->sit_area_bitmap_sz);
2533 ASSERT(fsck->sit_area_bitmap);
2534 ptr = fsck->sit_area_bitmap;
2535
2536 ASSERT(fsck->sit_area_bitmap_sz == fsck->main_area_bitmap_sz);
2537
2538 for (segno = 0; segno < MAIN_SEGS(sbi); segno++) {
2539 se = get_seg_entry(sbi, segno);
2540
2541 memcpy(ptr, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
2542 ptr += SIT_VBLOCK_MAP_SIZE;
2543
2544 if (se->valid_blocks == 0x0 && is_usable_seg(sbi, segno)) {
2545 if (le32_to_cpu(sbi->ckpt->cur_node_segno[0]) == segno ||
2546 le32_to_cpu(sbi->ckpt->cur_data_segno[0]) == segno ||
2547 le32_to_cpu(sbi->ckpt->cur_node_segno[1]) == segno ||
2548 le32_to_cpu(sbi->ckpt->cur_data_segno[1]) == segno ||
2549 le32_to_cpu(sbi->ckpt->cur_node_segno[2]) == segno ||
2550 le32_to_cpu(sbi->ckpt->cur_data_segno[2]) == segno) {
2551 continue;
2552 } else {
2553 free_segs++;
2554 }
2555 } else {
2556 sum_vblocks += se->valid_blocks;
2557 }
2558 }
2559 fsck->chk.sit_valid_blocks = sum_vblocks;
2560 fsck->chk.sit_free_segs = free_segs;
2561
2562 DBG(1, "Blocks [0x%x : %d] Free Segs [0x%x : %d]\n\n",
2563 sum_vblocks, sum_vblocks,
2564 free_segs, free_segs);
2565 }
2566
2567 void rewrite_sit_area_bitmap(struct f2fs_sb_info *sbi)
2568 {
2569 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2570 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2571 struct sit_info *sit_i = SIT_I(sbi);
2572 struct f2fs_sit_block *sit_blk;
2573 unsigned int segno = 0;
2574 struct f2fs_summary_block *sum = curseg->sum_blk;
2575 char *ptr = NULL;
2576
2577 sit_blk = calloc(BLOCK_SZ, 1);
2578 ASSERT(sit_blk);
2579 /* remove sit journal */
2580 sum->journal.n_sits = 0;
2581
2582 ptr = fsck->main_area_bitmap;
2583
2584 for (segno = 0; segno < MAIN_SEGS(sbi); segno++) {
2585 struct f2fs_sit_entry *sit;
2586 struct seg_entry *se;
2587 u16 valid_blocks = 0;
2588 u16 type;
2589 int i;
2590
2591 get_current_sit_page(sbi, segno, sit_blk);
2592 sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
2593 memcpy(sit->valid_map, ptr, SIT_VBLOCK_MAP_SIZE);
2594
2595 /* update valid block count */
2596 for (i = 0; i < SIT_VBLOCK_MAP_SIZE; i++)
2597 valid_blocks += get_bits_in_byte(sit->valid_map[i]);
2598
2599 se = get_seg_entry(sbi, segno);
2600 memcpy(se->cur_valid_map, ptr, SIT_VBLOCK_MAP_SIZE);
2601 se->valid_blocks = valid_blocks;
2602 type = se->type;
2603 if (type >= NO_CHECK_TYPE) {
2604 ASSERT_MSG("Invalid type and valid blocks=%x,%x",
2605 segno, valid_blocks);
2606 type = 0;
2607 }
2608 sit->vblocks = cpu_to_le16((type << SIT_VBLOCKS_SHIFT) |
2609 valid_blocks);
2610 rewrite_current_sit_page(sbi, segno, sit_blk);
2611
2612 ptr += SIT_VBLOCK_MAP_SIZE;
2613 }
2614
2615 free(sit_blk);
2616 }
2617
2618 static int flush_sit_journal_entries(struct f2fs_sb_info *sbi)
2619 {
2620 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
2621 struct f2fs_journal *journal = &curseg->sum_blk->journal;
2622 struct sit_info *sit_i = SIT_I(sbi);
2623 struct f2fs_sit_block *sit_blk;
2624 unsigned int segno;
2625 int i;
2626
2627 sit_blk = calloc(BLOCK_SZ, 1);
2628 ASSERT(sit_blk);
2629 for (i = 0; i < sits_in_cursum(journal); i++) {
2630 struct f2fs_sit_entry *sit;
2631 struct seg_entry *se;
2632
2633 segno = segno_in_journal(journal, i);
2634 se = get_seg_entry(sbi, segno);
2635
2636 get_current_sit_page(sbi, segno, sit_blk);
2637 sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
2638
2639 memcpy(sit->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
2640 sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
2641 se->valid_blocks);
2642 sit->mtime = cpu_to_le64(se->mtime);
2643
2644 rewrite_current_sit_page(sbi, segno, sit_blk);
2645 }
2646
2647 free(sit_blk);
2648 journal->n_sits = 0;
2649 return i;
2650 }
2651
2652 static int flush_nat_journal_entries(struct f2fs_sb_info *sbi)
2653 {
2654 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2655 struct f2fs_journal *journal = &curseg->sum_blk->journal;
2656 struct f2fs_nat_block *nat_block;
2657 pgoff_t block_addr;
2658 int entry_off;
2659 nid_t nid;
2660 int ret;
2661 int i = 0;
2662
2663 nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
2664 ASSERT(nat_block);
2665 next:
2666 if (i >= nats_in_cursum(journal)) {
2667 free(nat_block);
2668 journal->n_nats = 0;
2669 return i;
2670 }
2671
2672 nid = le32_to_cpu(nid_in_journal(journal, i));
2673
2674 entry_off = nid % NAT_ENTRY_PER_BLOCK;
2675 block_addr = current_nat_addr(sbi, nid, NULL);
2676
2677 ret = dev_read_block(nat_block, block_addr);
2678 ASSERT(ret >= 0);
2679
2680 memcpy(&nat_block->entries[entry_off], &nat_in_journal(journal, i),
2681 sizeof(struct f2fs_nat_entry));
2682
2683 ret = dev_write_block(nat_block, block_addr);
2684 ASSERT(ret >= 0);
2685 i++;
2686 goto next;
2687 }
2688
2689 void flush_journal_entries(struct f2fs_sb_info *sbi)
2690 {
2691 int n_nats = flush_nat_journal_entries(sbi);
2692 int n_sits = flush_sit_journal_entries(sbi);
2693
2694 if (n_nats || n_sits)
2695 write_checkpoints(sbi);
2696 }
2697
2698 void flush_sit_entries(struct f2fs_sb_info *sbi)
2699 {
2700 struct sit_info *sit_i = SIT_I(sbi);
2701 struct f2fs_sit_block *sit_blk;
2702 unsigned int segno = 0;
2703
2704 sit_blk = calloc(BLOCK_SZ, 1);
2705 ASSERT(sit_blk);
2706 /* update free segments */
2707 for (segno = 0; segno < MAIN_SEGS(sbi); segno++) {
2708 struct f2fs_sit_entry *sit;
2709 struct seg_entry *se;
2710
2711 se = get_seg_entry(sbi, segno);
2712
2713 if (!se->dirty)
2714 continue;
2715
2716 get_current_sit_page(sbi, segno, sit_blk);
2717 sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, segno)];
2718 memcpy(sit->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
2719 sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
2720 se->valid_blocks);
2721 rewrite_current_sit_page(sbi, segno, sit_blk);
2722 }
2723
2724 free(sit_blk);
2725 }
2726
2727 int relocate_curseg_offset(struct f2fs_sb_info *sbi, int type)
2728 {
2729 struct curseg_info *curseg = CURSEG_I(sbi, type);
2730 struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
2731 unsigned int i;
2732
2733 if (c.zoned_model == F2FS_ZONED_HM)
2734 return -EINVAL;
2735
2736 for (i = 0; i < sbi->blocks_per_seg; i++) {
2737 if (!f2fs_test_bit(i, (const char *)se->cur_valid_map))
2738 break;
2739 }
2740
2741 if (i == sbi->blocks_per_seg)
2742 return -EINVAL;
2743
2744 DBG(1, "Update curseg[%d].next_blkoff %u -> %u, alloc_type %s -> SSR\n",
2745 type, curseg->next_blkoff, i,
2746 curseg->alloc_type == LFS ? "LFS" : "SSR");
2747
2748 curseg->next_blkoff = i;
2749 curseg->alloc_type = SSR;
2750
2751 return 0;
2752 }
2753
2754 void set_section_type(struct f2fs_sb_info *sbi, unsigned int segno, int type)
2755 {
2756 unsigned int i;
2757
2758 if (sbi->segs_per_sec == 1)
2759 return;
2760
2761 for (i = 0; i < sbi->segs_per_sec; i++) {
2762 struct seg_entry *se = get_seg_entry(sbi, segno + i);
2763
2764 se->type = type;
2765 }
2766 }
2767
2768 #ifdef HAVE_LINUX_BLKZONED_H
2769
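/*
 * On host-managed zoned devices a section may only be reused for fresh
 * sequential writes if the backing zone's write pointer still sits at
 * the zone start; conventional zones always qualify.
 */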
2770 static bool write_pointer_at_zone_start(struct f2fs_sb_info *sbi,
2771 unsigned int zone_segno)
2772 {
2773 uint64_t sector;
2774 struct blk_zone blkz;
2775 block_t block = START_BLOCK(sbi, zone_segno);
2776 int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
2777 int ret, j;
2778
2779 if (c.zoned_model != F2FS_ZONED_HM)
2780 return true;
2781
2782 for (j = 0; j < MAX_DEVICES; j++) {
2783 if (!c.devices[j].path)
2784 break;
2785 if (c.devices[j].start_blkaddr <= block &&
2786 block <= c.devices[j].end_blkaddr)
2787 break;
2788 }
2789
2790 if (j >= MAX_DEVICES)
2791 return false;
2792
2793 sector = (block - c.devices[j].start_blkaddr) << log_sectors_per_block;
2794 ret = f2fs_report_zone(j, sector, &blkz);
2795 if (ret)
2796 return false;
2797
2798 if (blk_zone_type(&blkz) != BLK_ZONE_TYPE_SEQWRITE_REQ)
2799 return true;
2800
2801 return blk_zone_sector(&blkz) == blk_zone_wp_sector(&blkz);
2802 }
2803
2804 #else
2805
2806 static bool write_pointer_at_zone_start(struct f2fs_sb_info *UNUSED(sbi),
2807 unsigned int UNUSED(zone_segno))
2808 {
2809 return true;
2810 }
2811
2812 #endif
2813
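/*
 * Scan the main area from *to (moving left or right) for the next block
 * that can absorb a relocated write: either a free block inside a
 * partially valid segment of @want_type, or, when a whole free section
 * is found and its zone write pointer permits, a brand-new section
 * retyped via set_section_type(). Current segments and, when free space
 * is tight, empty segments are skipped.
 */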
2814 int find_next_free_block(struct f2fs_sb_info *sbi, u64 *to, int left,
2815 int want_type, bool new_sec)
2816 {
2817 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2818 struct seg_entry *se;
2819 u32 segno;
2820 u32 offset;
2821 int not_enough = 0;
2822 u64 end_blkaddr = (get_sb(segment_count_main) <<
2823 get_sb(log_blocks_per_seg)) + get_sb(main_blkaddr);
2824
2825 if (*to > 0)
2826 *to -= left;
2827 if (get_free_segments(sbi) <= SM_I(sbi)->reserved_segments + 1)
2828 not_enough = 1;
2829
2830 while (*to >= SM_I(sbi)->main_blkaddr && *to < end_blkaddr) {
2831 unsigned short vblocks;
2832 unsigned char *bitmap;
2833 unsigned char type;
2834
2835 segno = GET_SEGNO(sbi, *to);
2836 offset = OFFSET_IN_SEG(sbi, *to);
2837
2838 se = get_seg_entry(sbi, segno);
2839
2840 vblocks = get_seg_vblocks(sbi, se);
2841 bitmap = get_seg_bitmap(sbi, se);
2842 type = get_seg_type(sbi, se);
2843
2844 if (vblocks == sbi->blocks_per_seg) {
2845 next_segment:
2846 *to = left ? START_BLOCK(sbi, segno) - 1 :
2847 START_BLOCK(sbi, segno + 1);
2848 continue;
2849 }
2850 if (!(get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO)) &&
2851 IS_CUR_SEGNO(sbi, segno))
2852 goto next_segment;
2853 if (vblocks == 0 && not_enough)
2854 goto next_segment;
2855
2856 if (vblocks == 0 && !(segno % sbi->segs_per_sec)) {
2857 struct seg_entry *se2;
2858 unsigned int i;
2859
2860 for (i = 1; i < sbi->segs_per_sec; i++) {
2861 se2 = get_seg_entry(sbi, segno + i);
2862 if (get_seg_vblocks(sbi, se2))
2863 break;
2864 }
2865
2866 if (i == sbi->segs_per_sec &&
2867 write_pointer_at_zone_start(sbi, segno)) {
2868 set_section_type(sbi, segno, want_type);
2869 return 0;
2870 }
2871 }
2872
2873 if (type == want_type && !new_sec &&
2874 !f2fs_test_bit(offset, (const char *)bitmap))
2875 return 0;
2876
2877 *to = left ? *to - 1 : *to + 1;
2878 }
2879 return -1;
2880 }
2881
2882 static void move_one_curseg_info(struct f2fs_sb_info *sbi, u64 from, int left,
2883 int i)
2884 {
2885 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2886 struct curseg_info *curseg = CURSEG_I(sbi, i);
2887 struct f2fs_summary_block buf;
2888 u32 old_segno;
2889 u64 ssa_blk, to;
2890 int ret;
2891
2892 if ((get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO))) {
2893 if (i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE)
2894 return;
2895
2896 if (i == CURSEG_HOT_DATA) {
2897 left = 0;
2898 from = SM_I(sbi)->main_blkaddr;
2899 } else {
2900 left = 1;
2901 from = __end_block_addr(sbi);
2902 }
2903 goto bypass_ssa;
2904 }
2905
2906 /* update original SSA too */
2907 ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
2908 ret = dev_write_block(curseg->sum_blk, ssa_blk);
2909 ASSERT(ret >= 0);
2910 bypass_ssa:
2911 to = from;
2912 ret = find_next_free_block(sbi, &to, left, i,
2913 c.zoned_model == F2FS_ZONED_HM);
2914 ASSERT(ret == 0);
2915
2916 old_segno = curseg->segno;
2917 curseg->segno = GET_SEGNO(sbi, to);
2918 curseg->next_blkoff = OFFSET_IN_SEG(sbi, to);
2919 curseg->alloc_type = c.zoned_model == F2FS_ZONED_HM ? LFS : SSR;
2920
2921 /* update new segno */
2922 ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
2923 ret = dev_read_block(&buf, ssa_blk);
2924 ASSERT(ret >= 0);
2925
2926 memcpy(curseg->sum_blk, &buf, SUM_ENTRIES_SIZE);
2927
2928 /* update se->types */
2929 reset_curseg(sbi, i);
2930
2931 FIX_MSG("Move curseg[%d] %x -> %x after %"PRIx64"\n",
2932 i, old_segno, curseg->segno, from);
2933 }
2934
2935 void move_curseg_info(struct f2fs_sb_info *sbi, u64 from, int left)
2936 {
2937 int i;
2938
2939 /* move every current segment to a fresh target and reload its summary */
2940 for (i = 0; i < NO_CHECK_TYPE; i++)
2941 move_one_curseg_info(sbi, from, left, i);
2942 }
2943
2944 void update_curseg_info(struct f2fs_sb_info *sbi, int type)
2945 {
2946 if (!relocate_curseg_offset(sbi, type))
2947 return;
2948 move_one_curseg_info(sbi, SM_I(sbi)->main_blkaddr, 0, type);
2949 }
2950
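/*
 * Drop every cached journal entry. n_nats and n_sits overlay each other
 * in the journal union, so clearing n_nats on each curseg empties both
 * journals (assumption based on the f2fs_journal on-disk layout).
 */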
2951 void zero_journal_entries(struct f2fs_sb_info *sbi)
2952 {
2953 int i;
2954
2955 for (i = 0; i < NO_CHECK_TYPE; i++)
2956 CURSEG_I(sbi, i)->sum_blk->journal.n_nats = 0;
2957 }
2958
2959 void write_curseg_info(struct f2fs_sb_info *sbi)
2960 {
2961 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
2962 int i;
2963
2964 for (i = 0; i < NO_CHECK_TYPE; i++) {
2965 cp->alloc_type[i] = CURSEG_I(sbi, i)->alloc_type;
2966 if (i < CURSEG_HOT_NODE) {
2967 set_cp(cur_data_segno[i], CURSEG_I(sbi, i)->segno);
2968 set_cp(cur_data_blkoff[i],
2969 CURSEG_I(sbi, i)->next_blkoff);
2970 } else {
2971 int n = i - CURSEG_HOT_NODE;
2972
2973 set_cp(cur_node_segno[n], CURSEG_I(sbi, i)->segno);
2974 set_cp(cur_node_blkoff[n],
2975 CURSEG_I(sbi, i)->next_blkoff);
2976 }
2977 }
2978 }
2979
2980 int lookup_nat_in_journal(struct f2fs_sb_info *sbi, u32 nid,
2981 struct f2fs_nat_entry *raw_nat)
2982 {
2983 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2984 struct f2fs_journal *journal = &curseg->sum_blk->journal;
2985 int i = 0;
2986
2987 for (i = 0; i < nats_in_cursum(journal); i++) {
2988 if (le32_to_cpu(nid_in_journal(journal, i)) == nid) {
2989 memcpy(raw_nat, &nat_in_journal(journal, i),
2990 sizeof(struct f2fs_nat_entry));
2991 DBG(3, "==> Found nid [0x%x] in nat cache\n", nid);
2992 return i;
2993 }
2994 }
2995 return -1;
2996 }
2997
2998 void nullify_nat_entry(struct f2fs_sb_info *sbi, u32 nid)
2999 {
3000 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
3001 struct f2fs_journal *journal = &curseg->sum_blk->journal;
3002 struct f2fs_nat_block *nat_block;
3003 pgoff_t block_addr;
3004 int entry_off;
3005 int ret;
3006 int i = 0;
3007
3008 /* check in journal */
3009 for (i = 0; i < nats_in_cursum(journal); i++) {
3010 if (le32_to_cpu(nid_in_journal(journal, i)) == nid) {
3011 memset(&nat_in_journal(journal, i), 0,
3012 sizeof(struct f2fs_nat_entry));
3013 FIX_MSG("Remove nid [0x%x] in nat journal", nid);
3014 return;
3015 }
3016 }
3017 nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
3018 ASSERT(nat_block);
3019
3020 entry_off = nid % NAT_ENTRY_PER_BLOCK;
3021 block_addr = current_nat_addr(sbi, nid, NULL);
3022
3023 ret = dev_read_block(nat_block, block_addr);
3024 ASSERT(ret >= 0);
3025
3026 if (nid == F2FS_NODE_INO(sbi) || nid == F2FS_META_INO(sbi)) {
3027 FIX_MSG("nid [0x%x] block_addr= 0x%x -> 0x1", nid,
3028 le32_to_cpu(nat_block->entries[entry_off].block_addr));
3029 nat_block->entries[entry_off].block_addr = cpu_to_le32(0x1);
3030 } else {
3031 memset(&nat_block->entries[entry_off], 0,
3032 sizeof(struct f2fs_nat_entry));
3033 FIX_MSG("Remove nid [0x%x] in NAT", nid);
3034 }
3035
3036 ret = dev_write_block(nat_block, block_addr);
3037 ASSERT(ret >= 0);
3038 free(nat_block);
3039 }
3040
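/*
 * Mirror the currently valid checkpoint pack over its sibling slot so
 * that both CP #0 and CP #1 agree before we start rewriting. cp_blkaddr
 * holds pack #1 and pack #2 starts one segment later, so (e.g., with the
 * default 512 blocks per segment) the copy spans one full segment in
 * either direction.
 */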
3041 void duplicate_checkpoint(struct f2fs_sb_info *sbi)
3042 {
3043 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
3044 unsigned long long dst, src;
3045 void *buf;
3046 unsigned int seg_size = 1 << get_sb(log_blocks_per_seg);
3047 int ret;
3048
3049 if (sbi->cp_backuped)
3050 return;
3051
3052 buf = malloc(F2FS_BLKSIZE * seg_size);
3053 ASSERT(buf);
3054
3055 if (sbi->cur_cp == 1) {
3056 src = get_sb(cp_blkaddr);
3057 dst = src + seg_size;
3058 } else {
3059 dst = get_sb(cp_blkaddr);
3060 src = dst + seg_size;
3061 }
3062
3063 ret = dev_read(buf, src << F2FS_BLKSIZE_BITS,
3064 seg_size << F2FS_BLKSIZE_BITS);
3065 ASSERT(ret >= 0);
3066
3067 ret = dev_write(buf, dst << F2FS_BLKSIZE_BITS,
3068 seg_size << F2FS_BLKSIZE_BITS);
3069 ASSERT(ret >= 0);
3070
3071 free(buf);
3072
3073 ret = f2fs_fsync_device();
3074 ASSERT(ret >= 0);
3075
3076 sbi->cp_backuped = 1;
3077
3078 MSG(0, "Info: Duplicate valid checkpoint to mirror position "
3079 "%llu -> %llu\n", src, dst);
3080 }
3081
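/*
 * Rewrite one checkpoint pack in place: header block, cp_payload blocks,
 * any orphan blocks, one summary block per active log, and finally the
 * trailing copy of the header, with a device flush before that last
 * write so a power cut cannot leave a torn-but-valid pack. The fixed "8"
 * in cp_pack_total_block_count covers the two header copies plus the six
 * current-segment summary blocks.
 */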
3082 void write_checkpoint(struct f2fs_sb_info *sbi)
3083 {
3084 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
3085 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
3086 block_t orphan_blks = 0;
3087 unsigned long long cp_blk_no;
3088 u32 flags = CP_UMOUNT_FLAG;
3089 int i, ret;
3090 uint32_t crc = 0;
3091
3092 if (is_set_ckpt_flags(cp, CP_ORPHAN_PRESENT_FLAG)) {
3093 orphan_blks = __start_sum_addr(sbi) - 1;
3094 flags |= CP_ORPHAN_PRESENT_FLAG;
3095 }
3096 if (is_set_ckpt_flags(cp, CP_TRIMMED_FLAG))
3097 flags |= CP_TRIMMED_FLAG;
3098 if (is_set_ckpt_flags(cp, CP_DISABLED_FLAG))
3099 flags |= CP_DISABLED_FLAG;
3100 if (is_set_ckpt_flags(cp, CP_LARGE_NAT_BITMAP_FLAG)) {
3101 flags |= CP_LARGE_NAT_BITMAP_FLAG;
3102 set_cp(checksum_offset, CP_MIN_CHKSUM_OFFSET);
3103 } else {
3104 set_cp(checksum_offset, CP_CHKSUM_OFFSET);
3105 }
3106
3107 set_cp(free_segment_count, get_free_segments(sbi));
3108 if (c.func == FSCK) {
3109 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
3110
3111 set_cp(valid_block_count, fsck->chk.valid_blk_cnt);
3112 set_cp(valid_node_count, fsck->chk.valid_node_cnt);
3113 set_cp(valid_inode_count, fsck->chk.valid_inode_cnt);
3114 } else {
3115 set_cp(valid_block_count, sbi->total_valid_block_count);
3116 set_cp(valid_node_count, sbi->total_valid_node_count);
3117 set_cp(valid_inode_count, sbi->total_valid_inode_count);
3118 }
3119 set_cp(cp_pack_total_block_count, 8 + orphan_blks + get_sb(cp_payload));
3120
3121 flags = update_nat_bits_flags(sb, cp, flags);
3122 set_cp(ckpt_flags, flags);
3123
3124 crc = f2fs_checkpoint_chksum(cp);
3125 *((__le32 *)((unsigned char *)cp + get_cp(checksum_offset))) =
3126 cpu_to_le32(crc);
3127
3128 cp_blk_no = get_sb(cp_blkaddr);
3129 if (sbi->cur_cp == 2)
3130 cp_blk_no += 1 << get_sb(log_blocks_per_seg);
3131
3132 /* write the first cp */
3133 ret = dev_write_block(cp, cp_blk_no++);
3134 ASSERT(ret >= 0);
3135
3136 /* skip payload */
3137 cp_blk_no += get_sb(cp_payload);
3138 /* skip orphan blocks */
3139 cp_blk_no += orphan_blks;
3140
3141 /* update summary blocks having nullified journal entries */
3142 for (i = 0; i < NO_CHECK_TYPE; i++) {
3143 struct curseg_info *curseg = CURSEG_I(sbi, i);
3144 u64 ssa_blk;
3145
3146 ret = dev_write_block(curseg->sum_blk, cp_blk_no++);
3147 ASSERT(ret >= 0);
3148
3149 if (!(get_sb(feature) & cpu_to_le32(F2FS_FEATURE_RO))) {
3150 /* update original SSA too */
3151 ssa_blk = GET_SUM_BLKADDR(sbi, curseg->segno);
3152 ret = dev_write_block(curseg->sum_blk, ssa_blk);
3153 ASSERT(ret >= 0);
3154 }
3155 }
3156
3157 /* Write nat bits */
3158 if (flags & CP_NAT_BITS_FLAG)
3159 write_nat_bits(sbi, sb, cp, sbi->cur_cp);
3160
3161 /* in case of sudden power off */
3162 ret = f2fs_fsync_device();
3163 ASSERT(ret >= 0);
3164
3165 /* write the last cp */
3166 ret = dev_write_block(cp, cp_blk_no++);
3167 ASSERT(ret >= 0);
3168
3169 ret = f2fs_fsync_device();
3170 ASSERT(ret >= 0);
3171 }
3172
3173 void write_checkpoints(struct f2fs_sb_info *sbi)
3174 {
3175 /* copy valid checkpoint to its mirror position */
3176 duplicate_checkpoint(sbi);
3177
3178 /* repair checkpoint at CP #0 position */
3179 sbi->cur_cp = 1;
3180 write_checkpoint(sbi);
3181 }
3182
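/*
 * Walk the whole primary/mirror NAT area and record, per nid, whether a
 * valid entry exists (nat_area_bitmap) plus a cached copy of the raw
 * entry, then fold in the newer NAT journal entries. Block addresses
 * interleave the two NAT copies segment by segment: e.g., with 512-block
 * segments, block_off 513 lands in seg_off 1, i.e. nat_blkaddr + 1024 + 1,
 * plus one more segment if the bitmap says the mirror copy is live.
 */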
3183 void build_nat_area_bitmap(struct f2fs_sb_info *sbi)
3184 {
3185 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
3186 struct f2fs_journal *journal = &curseg->sum_blk->journal;
3187 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
3188 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
3189 struct f2fs_nm_info *nm_i = NM_I(sbi);
3190 struct f2fs_nat_block *nat_block;
3191 struct node_info ni;
3192 u32 nid, nr_nat_blks;
3193 pgoff_t block_off;
3194 pgoff_t block_addr;
3195 int seg_off;
3196 int ret;
3197 unsigned int i;
3198
3199 nat_block = (struct f2fs_nat_block *)calloc(BLOCK_SZ, 1);
3200 ASSERT(nat_block);
3201
3202 /* Alloc & build nat entry bitmap */
3203 nr_nat_blks = (get_sb(segment_count_nat) / 2) <<
3204 sbi->log_blocks_per_seg;
3205
3206 fsck->nr_nat_entries = nr_nat_blks * NAT_ENTRY_PER_BLOCK;
3207 fsck->nat_area_bitmap_sz = (fsck->nr_nat_entries + 7) / 8;
3208 fsck->nat_area_bitmap = calloc(fsck->nat_area_bitmap_sz, 1);
3209 ASSERT(fsck->nat_area_bitmap);
3210
3211 fsck->entries = calloc(sizeof(struct f2fs_nat_entry),
3212 fsck->nr_nat_entries);
3213 ASSERT(fsck->entries);
3214
3215 for (block_off = 0; block_off < nr_nat_blks; block_off++) {
3217 seg_off = block_off >> sbi->log_blocks_per_seg;
3218 block_addr = (pgoff_t)(nm_i->nat_blkaddr +
3219 (seg_off << sbi->log_blocks_per_seg << 1) +
3220 (block_off & ((1 << sbi->log_blocks_per_seg) - 1)));
3221
3222 if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
3223 block_addr += sbi->blocks_per_seg;
3224
3225 ret = dev_read_block(nat_block, block_addr);
3226 ASSERT(ret >= 0);
3227
3228 nid = block_off * NAT_ENTRY_PER_BLOCK;
3229 for (i = 0; i < NAT_ENTRY_PER_BLOCK; i++) {
3230 ni.nid = nid + i;
3231
3232 if ((nid + i) == F2FS_NODE_INO(sbi) ||
3233 (nid + i) == F2FS_META_INO(sbi)) {
3234 /*
3235 * block_addr of node/meta inode should be 0x1.
3236 * Set this bit, and fsck_verify will fix it.
3237 */
3238 if (le32_to_cpu(nat_block->entries[i].block_addr) != 0x1) {
3239 ASSERT_MSG("\tError: ino[0x%x] block_addr[0x%x] is invalid\n",
3240 nid + i, le32_to_cpu(nat_block->entries[i].block_addr));
3241 f2fs_set_bit(nid + i, fsck->nat_area_bitmap);
3242 }
3243 continue;
3244 }
3245
3246 node_info_from_raw_nat(&ni, &nat_block->entries[i]);
3247 if (ni.blk_addr == 0x0)
3248 continue;
3249 if (ni.ino == 0x0) {
3250 ASSERT_MSG("\tError: ino[0x%8x] or blk_addr[0x%16x]"
3251 " is invalid\n", ni.ino, ni.blk_addr);
3252 }
3253 if (ni.ino == (nid + i)) {
3254 fsck->nat_valid_inode_cnt++;
3255 DBG(3, "ino[0x%8x] maybe is inode\n", ni.ino);
3256 }
3257 if (nid + i == 0) {
3258 /*
3259 * nat entry [0] must be null. If
3260 * it is corrupted, set its bit in
3261 * nat_area_bitmap, fsck_verify will
3262 * nullify it
3263 */
3264 ASSERT_MSG("Invalid nat entry[0]: "
3265 "blk_addr[0x%x]\n", ni.blk_addr);
3266 fsck->chk.valid_nat_entry_cnt--;
3267 }
3268
3269 DBG(3, "nid[0x%8x] addr[0x%16x] ino[0x%8x]\n",
3270 nid + i, ni.blk_addr, ni.ino);
3271 f2fs_set_bit(nid + i, fsck->nat_area_bitmap);
3272 fsck->chk.valid_nat_entry_cnt++;
3273
3274 fsck->entries[nid + i] = nat_block->entries[i];
3275 }
3276 }
3277
3278 /* Traverse nat journal, update the corresponding entries */
3279 for (i = 0; i < nats_in_cursum(journal); i++) {
3280 struct f2fs_nat_entry raw_nat;
3281 nid = le32_to_cpu(nid_in_journal(journal, i));
3282 ni.nid = nid;
3283
3284 DBG(3, "==> Found nid [0x%x] in nat cache, update it\n", nid);
3285
3286 /* Clear the original bit and count */
3287 if (fsck->entries[nid].block_addr != 0x0) {
3288 fsck->chk.valid_nat_entry_cnt--;
3289 f2fs_clear_bit(nid, fsck->nat_area_bitmap);
3290 if (fsck->entries[nid].ino == nid)
3291 fsck->nat_valid_inode_cnt--;
3292 }
3293
3294 /* Use nat entries in journal */
3295 memcpy(&raw_nat, &nat_in_journal(journal, i),
3296 sizeof(struct f2fs_nat_entry));
3297 node_info_from_raw_nat(&ni, &raw_nat);
3298 if (ni.blk_addr != 0x0) {
3299 if (ni.ino == 0x0)
3300 ASSERT_MSG("\tError: ino[0x%8x] or blk_addr[0x%16x]"
3301 " is invalid\n", ni.ino, ni.blk_addr);
3302 if (ni.ino == nid) {
3303 fsck->nat_valid_inode_cnt++;
3304 DBG(3, "ino[0x%8x] maybe is inode\n", ni.ino);
3305 }
3306 f2fs_set_bit(nid, fsck->nat_area_bitmap);
3307 fsck->chk.valid_nat_entry_cnt++;
3308 DBG(3, "nid[0x%x] in nat cache\n", nid);
3309 }
3310 fsck->entries[nid] = raw_nat;
3311 }
3312 free(nat_block);
3313
3314 DBG(1, "valid nat entries (block_addr != 0x0) [0x%8x : %u]\n",
3315 fsck->chk.valid_nat_entry_cnt,
3316 fsck->chk.valid_nat_entry_cnt);
3317 }
3318
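/*
 * Sync the superblock's recorded sector geometry with what the device
 * actually reported; a mismatch is repaired in place and flushed via
 * update_superblock().
 */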
3319 static int check_sector_size(struct f2fs_super_block *sb)
3320 {
3321 uint32_t log_sectorsize, log_sectors_per_block;
3322
3323 log_sectorsize = log_base_2(c.sector_size);
3324 log_sectors_per_block = log_base_2(c.sectors_per_blk);
3325
3326 if (log_sectorsize == get_sb(log_sectorsize) &&
3327 log_sectors_per_block == get_sb(log_sectors_per_block))
3328 return 0;
3329
3330 set_sb(log_sectorsize, log_sectorsize);
3331 set_sb(log_sectors_per_block, log_sectors_per_block);
3332
3333 update_superblock(sb, SB_MASK_ALL);
3334 return 0;
3335 }
3336
3337 static int tune_sb_features(struct f2fs_sb_info *sbi)
3338 {
3339 int sb_changed = 0;
3340 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
3341
3342 if (!(sb->feature & cpu_to_le32(F2FS_FEATURE_ENCRYPT)) &&
3343 c.feature & cpu_to_le32(F2FS_FEATURE_ENCRYPT)) {
3344 sb->feature |= cpu_to_le32(F2FS_FEATURE_ENCRYPT);
3345 MSG(0, "Info: Set Encryption feature\n");
3346 sb_changed = 1;
3347 }
3348 if (!(sb->feature & cpu_to_le32(F2FS_FEATURE_CASEFOLD)) &&
3349 c.feature & cpu_to_le32(F2FS_FEATURE_CASEFOLD)) {
3350 if (!c.s_encoding) {
3351 ERR_MSG("ERROR: Must specify encoding to enable casefolding.\n");
3352 return -1;
3353 }
3354 sb->feature |= cpu_to_le32(F2FS_FEATURE_CASEFOLD);
3355 MSG(0, "Info: Set Casefold feature\n");
3356 sb_changed = 1;
3357 }
3358 /* TODO: quota needs to allocate inode numbers */
3359
3360 c.feature = sb->feature;
3361 if (!sb_changed)
3362 return 0;
3363
3364 update_superblock(sb, SB_MASK_ALL);
3365 return 0;
3366 }
3367
3368 static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
3369 nid_t ino)
3370 {
3371 struct fsync_inode_entry *entry;
3372
3373 list_for_each_entry(entry, head, list)
3374 if (entry->ino == ino)
3375 return entry;
3376
3377 return NULL;
3378 }
3379
3380 static struct fsync_inode_entry *add_fsync_inode(struct list_head *head,
3381 nid_t ino)
3382 {
3383 struct fsync_inode_entry *entry;
3384
3385 entry = calloc(sizeof(struct fsync_inode_entry), 1);
3386 if (!entry)
3387 return NULL;
3388 entry->ino = ino;
3389 list_add_tail(&entry->list, head);
3390 return entry;
3391 }
3392
3393 static void del_fsync_inode(struct fsync_inode_entry *entry)
3394 {
3395 list_del(&entry->list);
3396 free(entry);
3397 }
3398
3399 static void destroy_fsync_dnodes(struct list_head *head)
3400 {
3401 struct fsync_inode_entry *entry, *tmp;
3402
3403 list_for_each_entry_safe(entry, tmp, head, list)
3404 del_fsync_inode(entry);
3405 }
3406
3407 static int find_fsync_inode(struct f2fs_sb_info *sbi, struct list_head *head)
3408 {
3409 struct curseg_info *curseg;
3410 struct f2fs_node *node_blk;
3411 block_t blkaddr;
3412 unsigned int loop_cnt = 0;
3413 unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
3414 sbi->total_valid_block_count;
3415 int err = 0;
3416
3417 /* get node pages in the current segment */
3418 curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
3419 blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3420
3421 node_blk = calloc(F2FS_BLKSIZE, 1);
3422 ASSERT(node_blk);
3423
3424 while (1) {
3425 struct fsync_inode_entry *entry;
3426
3427 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
3428 break;
3429
3430 err = dev_read_block(node_blk, blkaddr);
3431 if (err)
3432 break;
3433
3434 if (!is_recoverable_dnode(sbi, node_blk))
3435 break;
3436
3437 if (!is_fsync_dnode(node_blk))
3438 goto next;
3439
3440 entry = get_fsync_inode(head, ino_of_node(node_blk));
3441 if (!entry) {
3442 entry = add_fsync_inode(head, ino_of_node(node_blk));
3443 if (!entry) {
3444 err = -1;
3445 break;
3446 }
3447 }
3448 entry->blkaddr = blkaddr;
3449
3450 if (IS_INODE(node_blk) && is_dent_dnode(node_blk))
3451 entry->last_dentry = blkaddr;
3452 next:
3453 /* sanity check in order to detect looped node chain */
3454 if (++loop_cnt >= free_blocks ||
3455 blkaddr == next_blkaddr_of_node(node_blk)) {
3456 MSG(0, "\tdetect looped node chain, blkaddr:%u, next:%u\n",
3457 blkaddr,
3458 next_blkaddr_of_node(node_blk));
3459 err = -1;
3460 break;
3461 }
3462
3463 blkaddr = next_blkaddr_of_node(node_blk);
3464 }
3465
3466 free(node_blk);
3467 return err;
3468 }
3469
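/*
 * Replay one fsynced dnode into the checkpoint-time view: mark its own
 * node block, and every valid data block it references, in the segments'
 * ckpt_valid_map so later space accounting treats them as in use.
 * Inline-data inodes and xattr nodes carry no data indices to recover.
 */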
3470 static int do_record_fsync_data(struct f2fs_sb_info *sbi,
3471 struct f2fs_node *node_blk,
3472 block_t blkaddr)
3473 {
3474 unsigned int segno, offset;
3475 struct seg_entry *se;
3476 unsigned int ofs_in_node = 0;
3477 unsigned int start, end;
3478 int err = 0, recorded = 0;
3479
3480 segno = GET_SEGNO(sbi, blkaddr);
3481 se = get_seg_entry(sbi, segno);
3482 offset = OFFSET_IN_SEG(sbi, blkaddr);
3483
3484 if (f2fs_test_bit(offset, (char *)se->cur_valid_map)) {
3485 ASSERT(0);
3486 return -1;
3487 }
3488 if (f2fs_test_bit(offset, (char *)se->ckpt_valid_map)) {
3489 ASSERT(0);
3490 return -1;
3491 }
3492
3493 if (!se->ckpt_valid_blocks)
3494 se->ckpt_type = CURSEG_WARM_NODE;
3495
3496 se->ckpt_valid_blocks++;
3497 f2fs_set_bit(offset, (char *)se->ckpt_valid_map);
3498
3499 MSG(1, "do_record_fsync_data: [node] ino = %u, nid = %u, blkaddr = %u\n",
3500 ino_of_node(node_blk), ofs_of_node(node_blk), blkaddr);
3501
3502 /* inline data */
3503 if (IS_INODE(node_blk) && (node_blk->i.i_inline & F2FS_INLINE_DATA))
3504 return 0;
3505 /* xattr node */
3506 if (ofs_of_node(node_blk) == XATTR_NODE_OFFSET)
3507 return 0;
3508
3509 /* step 3: recover data indices */
3510 start = start_bidx_of_node(ofs_of_node(node_blk), node_blk);
3511 end = start + ADDRS_PER_PAGE(sbi, node_blk, NULL);
3512
3513 for (; start < end; start++, ofs_in_node++) {
3514 blkaddr = datablock_addr(node_blk, ofs_in_node);
3515
3516 if (!is_valid_data_blkaddr(blkaddr))
3517 continue;
3518
3519 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR)) {
3520 err = -1;
3521 goto out;
3522 }
3523
3524 segno = GET_SEGNO(sbi, blkaddr);
3525 se = get_seg_entry(sbi, segno);
3526 offset = OFFSET_IN_SEG(sbi, blkaddr);
3527
3528 if (f2fs_test_bit(offset, (char *)se->cur_valid_map))
3529 continue;
3530 if (f2fs_test_bit(offset, (char *)se->ckpt_valid_map))
3531 continue;
3532
3533 if (!se->ckpt_valid_blocks)
3534 se->ckpt_type = CURSEG_WARM_DATA;
3535
3536 se->ckpt_valid_blocks++;
3537 f2fs_set_bit(offset, (char *)se->ckpt_valid_map);
3538
3539 MSG(1, "do_record_fsync_data: [data] ino = %u, nid = %u, blkaddr = %u\n",
3540 ino_of_node(node_blk), ofs_of_node(node_blk), blkaddr);
3541
3542 recorded++;
3543 }
3544 out:
3545 MSG(1, "recover_data: ino = %u, nid = %u, recorded = %d, err = %d\n",
3546 ino_of_node(node_blk), ofs_of_node(node_blk),
3547 recorded, err);
3548 return err;
3549 }
3550
3551 static int traverse_dnodes(struct f2fs_sb_info *sbi,
3552 struct list_head *inode_list)
3553 {
3554 struct curseg_info *curseg;
3555 struct f2fs_node *node_blk;
3556 block_t blkaddr;
3557 int err = 0;
3558
3559 /* get node pages in the current segment */
3560 curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
3561 blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
3562
3563 node_blk = calloc(F2FS_BLKSIZE, 1);
3564 ASSERT(node_blk);
3565
3566 while (1) {
3567 struct fsync_inode_entry *entry;
3568
3569 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
3570 break;
3571
3572 err = dev_read_block(node_blk, blkaddr);
3573 if (err)
3574 break;
3575
3576 if (!is_recoverable_dnode(sbi, node_blk))
3577 break;
3578
3579 entry = get_fsync_inode(inode_list,
3580 ino_of_node(node_blk));
3581 if (!entry)
3582 goto next;
3583
3584 err = do_record_fsync_data(sbi, node_blk, blkaddr);
3585 if (err)
3586 break;
3587
3588 if (entry->blkaddr == blkaddr)
3589 del_fsync_inode(entry);
3590 next:
3591 blkaddr = next_blkaddr_of_node(node_blk);
3592 }
3593
3594 free(node_blk);
3595 return err;
3596 }
3597
3598 static int record_fsync_data(struct f2fs_sb_info *sbi)
3599 {
3600 struct list_head inode_list = LIST_HEAD_INIT(inode_list);
3601 int ret;
3602
3603 if (!need_fsync_data_record(sbi))
3604 return 0;
3605
3606 ret = find_fsync_inode(sbi, &inode_list);
3607 if (ret)
3608 goto out;
3609
3610 ret = late_build_segment_manager(sbi);
3611 if (ret < 0) {
3612 ERR_MSG("late_build_segment_manager failed\n");
3613 goto out;
3614 }
3615
3616 ret = traverse_dnodes(sbi, &inode_list);
3617 out:
3618 destroy_fsync_dnodes(&inode_list);
3619 return ret;
3620 }
3621
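/*
 * Top-level mount sequence for the tools: validate a superblock (either
 * copy), pick a valid checkpoint and sanity-check it, optionally verify
 * or refresh the kernel-version stamp when running fsck, then build the
 * segment and node managers and replay fsynced data records.
 */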
3622 int f2fs_do_mount(struct f2fs_sb_info *sbi)
3623 {
3624 struct f2fs_checkpoint *cp = NULL;
3625 struct f2fs_super_block *sb = NULL;
3626 int ret;
3627
3628 sbi->active_logs = NR_CURSEG_TYPE;
3629 ret = validate_super_block(sbi, SB0_ADDR);
3630 if (ret) {
3631 ret = validate_super_block(sbi, SB1_ADDR);
3632 if (ret)
3633 return -1;
3634 }
3635 sb = F2FS_RAW_SUPER(sbi);
3636
3637 ret = check_sector_size(sb);
3638 if (ret)
3639 return -1;
3640
3641 print_raw_sb_info(sb);
3642
3643 init_sb_info(sbi);
3644
3645 ret = get_valid_checkpoint(sbi);
3646 if (ret) {
3647 ERR_MSG("Can't find valid checkpoint\n");
3648 return -1;
3649 }
3650
3651 c.bug_on = 0;
3652
3653 if (sanity_check_ckpt(sbi)) {
3654 ERR_MSG("Checkpoint is polluted\n");
3655 return -1;
3656 }
3657 cp = F2FS_CKPT(sbi);
3658
3659 if (c.func != FSCK && c.func != DUMP &&
3660 !is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) {
3661 ERR_MSG("Mount unclean image to replay log first\n");
3662 return -1;
3663 }
3664
3665 if (c.func == FSCK) {
3666 #if defined(__APPLE__)
3667 if (!c.no_kernel_check &&
3668 memcmp(c.sb_version, c.version, VERSION_NAME_LEN)) {
3669 c.auto_fix = 0;
3670 c.fix_on = 1;
3671 memcpy(sbi->raw_super->version,
3672 c.version, VERSION_NAME_LEN);
3673 update_superblock(sbi->raw_super, SB_MASK_ALL);
3674 }
3675 #else
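		/*
		 * Stamp the running fsck version and a timestamp into the
		 * superblock.  If the recorded version matches and the last
		 * run was less than CHECK_PERIOD ago, skip both the forced
		 * fix and the superblock update; if it matches but the
		 * period has elapsed, turn fix mode on; if it differs, just
		 * record the new version.
		 */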
		if (!c.no_kernel_check) {
			u32 prev_time, cur_time, time_diff;
			__le32 *ver_ts_ptr = (__le32 *)(sbi->raw_super->version
					+ VERSION_NAME_LEN);

			cur_time = (u32)get_cp(elapsed_time);
			prev_time = le32_to_cpu(*ver_ts_ptr);

			MSG(0, "Info: version timestamp cur: %u, prev: %u\n",
					cur_time, prev_time);
			if (!memcmp(c.sb_version, c.version,
						VERSION_NAME_LEN)) {
				/* valid prev_time */
				if (prev_time != 0 && cur_time > prev_time) {
					time_diff = cur_time - prev_time;
					if (time_diff < CHECK_PERIOD)
						goto out;
					c.auto_fix = 0;
					c.fix_on = 1;
				}
			} else {
				memcpy(sbi->raw_super->version,
						c.version, VERSION_NAME_LEN);
			}

			*ver_ts_ptr = cpu_to_le32(cur_time);
			update_superblock(sbi->raw_super, SB_MASK_ALL);
		}
#endif
	}
out:
	print_ckpt_info(sbi);

	if (c.quota_fix) {
		if (get_cp(ckpt_flags) & CP_QUOTA_NEED_FSCK_FLAG)
			c.fix_on = 1;
	}
	if (c.layout)
		return 1;

	if (tune_sb_features(sbi))
		return -1;

	/* precompute checksum seed for metadata */
	if (c.feature & cpu_to_le32(F2FS_FEATURE_INODE_CHKSUM))
		c.chksum_seed = f2fs_cal_crc32(~0, sb->uuid, sizeof(sb->uuid));

	sbi->total_valid_node_count = get_cp(valid_node_count);
	sbi->total_valid_inode_count = get_cp(valid_inode_count);
	sbi->user_block_count = get_cp(user_block_count);
	sbi->total_valid_block_count = get_cp(valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;

	if (early_build_segment_manager(sbi)) {
		ERR_MSG("early_build_segment_manager failed\n");
		return -1;
	}

	if (build_node_manager(sbi)) {
		ERR_MSG("build_node_manager failed\n");
		return -1;
	}

	if (record_fsync_data(sbi)) {
		ERR_MSG("record_fsync_data failed\n");
		return -1;
	}

	if (!f2fs_should_proceed(sb, get_cp(ckpt_flags)))
		return 1;

	if (late_build_segment_manager(sbi) < 0) {
		ERR_MSG("late_build_segment_manager failed\n");
		return -1;
	}

	if (f2fs_late_init_nid_bitmap(sbi)) {
		ERR_MSG("f2fs_late_init_nid_bitmap failed\n");
		return -1;
	}

	/* verify nat_bits and rewrite them if fsck is allowed to fix */
	if (c.func == FSCK && is_set_ckpt_flags(cp, CP_NAT_BITS_FLAG)) {
		if (check_nat_bits(sbi, sb, cp) && c.fix_on)
			write_nat_bits(sbi, sb, cp, sbi->cur_cp);
	}
	return 0;
}

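/*
 * Release everything f2fs_do_mount() allocated: node manager, SIT,
 * current segment summaries, and the checkpoint and superblock buffers.
 */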
void f2fs_do_umount(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct f2fs_sm_info *sm_i = SM_I(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i;

	/* free nm_info */
	if (c.func == SLOAD || c.func == FSCK)
		free(nm_i->nid_bitmap);
	free(nm_i->nat_bitmap);
	free(sbi->nm_info);

	/* free sit_info */
	free(sit_i->bitmap);
	free(sit_i->sit_bitmap);
	free(sit_i->sentries);
	free(sm_i->sit_info);

	/* free sm_info */
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		free(sm_i->curseg_array[i].sum_blk);

	free(sm_i->curseg_array);
	free(sbi->sm_info);

	free(sbi->ckpt);
	free(sbi->raw_super);
}

#ifdef WITH_ANDROID
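/*
 * In sparse mode, explicitly zero-fill the SIT and NAT areas and the
 * checkpoint payload so those metadata regions are materialized in the
 * sparse output image.
 */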
int f2fs_sparse_initialize_meta(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb = sbi->raw_super;
	uint32_t sit_seg_count, sit_size;
	uint32_t nat_seg_count, nat_size;
	uint64_t sit_seg_addr, nat_seg_addr, payload_addr;
	uint32_t seg_size = 1 << get_sb(log_blocks_per_seg);
	int ret;

	if (!c.sparse_mode)
		return 0;

	sit_seg_addr = get_sb(sit_blkaddr);
	sit_seg_count = get_sb(segment_count_sit);
	sit_size = sit_seg_count * seg_size;

	DBG(1, "\tSparse: filling sit area at block offset: 0x%08"PRIx64" len: %u\n",
			sit_seg_addr, sit_size);
	ret = dev_fill(NULL, sit_seg_addr * F2FS_BLKSIZE,
			sit_size * F2FS_BLKSIZE);
	if (ret) {
		MSG(1, "\tError: While zeroing out the sit area "
				"on disk!!!\n");
		return -1;
	}

	nat_seg_addr = get_sb(nat_blkaddr);
	nat_seg_count = get_sb(segment_count_nat);
	nat_size = nat_seg_count * seg_size;

	DBG(1, "\tSparse: filling nat area at block offset: 0x%08"PRIx64" len: %u\n",
			nat_seg_addr, nat_size);
	ret = dev_fill(NULL, nat_seg_addr * F2FS_BLKSIZE,
			nat_size * F2FS_BLKSIZE);
	if (ret) {
		MSG(1, "\tError: While zeroing out the nat area "
				"on disk!!!\n");
		return -1;
	}

	payload_addr = get_sb(segment0_blkaddr) + 1;

	DBG(1, "\tSparse: filling bitmap area at block offset: 0x%08"PRIx64" len: %u\n",
			payload_addr, get_sb(cp_payload));
	ret = dev_fill(NULL, payload_addr * F2FS_BLKSIZE,
			get_sb(cp_payload) * F2FS_BLKSIZE);
	if (ret) {
		MSG(1, "\tError: While zeroing out the nat/sit bitmap area "
				"on disk!!!\n");
		return -1;
	}

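	/*
	 * The second checkpoint pack sits one segment after segment #0,
	 * so zero its copy of the payload area as well.
	 */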
	payload_addr += seg_size;

	DBG(1, "\tSparse: filling bitmap area at block offset: 0x%08"PRIx64" len: %u\n",
			payload_addr, get_sb(cp_payload));
	ret = dev_fill(NULL, payload_addr * F2FS_BLKSIZE,
			get_sb(cp_payload) * F2FS_BLKSIZE);
	if (ret) {
		MSG(1, "\tError: While zeroing out the nat/sit bitmap area "
				"on disk!!!\n");
		return -1;
	}
	return 0;
}
#else
int f2fs_sparse_initialize_meta(struct f2fs_sb_info *UNUSED(sbi)) { return 0; }
#endif
