1 /**
2 * fsck.c
3 *
4 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11 #include "fsck.h"
12 #include "xattr.h"
13 #include "quotaio.h"
14 #include <time.h>
15
16 char *tree_mark;
17 uint32_t tree_mark_size = 256;
18
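/*
 * fsck keeps its own copy of main-area usage in main_area_bitmap.
 * Besides marking blk as seen, f2fs_set_main_bitmap() cross-checks the
 * segment type cached in the SIT entry against the type expected by the
 * caller and corrects the in-memory seg_entry when they disagree.
 */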
int f2fs_set_main_bitmap(struct f2fs_sb_info *sbi, u32 blk, int type)
20 {
21 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
22 struct seg_entry *se;
23 int fix = 0;
24
25 se = get_seg_entry(sbi, GET_SEGNO(sbi, blk));
26 if (se->type >= NO_CHECK_TYPE)
27 fix = 1;
28 else if (IS_DATASEG(se->type) != IS_DATASEG(type))
29 fix = 1;
30
31 /* just check data and node types */
32 if (fix) {
33 DBG(1, "Wrong segment type [0x%x] %x -> %x",
34 GET_SEGNO(sbi, blk), se->type, type);
35 se->type = type;
36 }
37 return f2fs_set_bit(BLKOFF_FROM_MAIN(sbi, blk), fsck->main_area_bitmap);
38 }
39
static inline int f2fs_test_main_bitmap(struct f2fs_sb_info *sbi, u32 blk)
41 {
42 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
43
44 return f2fs_test_bit(BLKOFF_FROM_MAIN(sbi, blk),
45 fsck->main_area_bitmap);
46 }
47
int f2fs_clear_main_bitmap(struct f2fs_sb_info *sbi, u32 blk)
49 {
50 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
51
52 return f2fs_clear_bit(BLKOFF_FROM_MAIN(sbi, blk),
53 fsck->main_area_bitmap);
54 }
55
static inline int f2fs_test_sit_bitmap(struct f2fs_sb_info *sbi, u32 blk)
57 {
58 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
59
60 return f2fs_test_bit(BLKOFF_FROM_MAIN(sbi, blk), fsck->sit_area_bitmap);
61 }
62
int f2fs_set_sit_bitmap(struct f2fs_sb_info *sbi, u32 blk)
64 {
65 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
66
67 return f2fs_set_bit(BLKOFF_FROM_MAIN(sbi, blk), fsck->sit_area_bitmap);
68 }
69
int f2fs_clear_sit_bitmap(struct f2fs_sb_info *sbi, u32 blk)
71 {
72 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
73
74 return f2fs_clear_bit(BLKOFF_FROM_MAIN(sbi, blk),
75 fsck->sit_area_bitmap);
76 }
77
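/*
 * Inodes with i_links > 1 are tracked in a singly linked list kept in
 * descending nid order. Each node remembers the link count read from the
 * inode (links) and how many directory entries have been seen so far
 * (actual_links), which starts at 1 for the dentry that created the node.
 */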
static int add_into_hard_link_list(struct f2fs_sb_info *sbi,
						u32 nid, u32 link_cnt)
80 {
81 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
82 struct hard_link_node *node = NULL, *tmp = NULL, *prev = NULL;
83
84 node = calloc(sizeof(struct hard_link_node), 1);
85 ASSERT(node != NULL);
86
87 node->nid = nid;
88 node->links = link_cnt;
89 node->actual_links = 1;
90 node->next = NULL;
91
92 if (fsck->hard_link_list_head == NULL) {
93 fsck->hard_link_list_head = node;
94 goto out;
95 }
96
97 tmp = fsck->hard_link_list_head;
98
99 /* Find insertion position */
100 while (tmp && (nid < tmp->nid)) {
101 ASSERT(tmp->nid != nid);
102 prev = tmp;
103 tmp = tmp->next;
104 }
105
106 if (tmp == fsck->hard_link_list_head) {
107 node->next = tmp;
108 fsck->hard_link_list_head = node;
109 } else {
110 prev->next = node;
111 node->next = tmp;
112 }
113
114 out:
115 DBG(2, "ino[0x%x] has hard links [0x%x]\n", nid, link_cnt);
116 return 0;
117 }
118
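/*
 * Called for each additional dentry that references a hard-linked inode:
 * the expected link count goes down and the observed count goes up. When
 * links reaches 1, every expected reference has been found and the
 * tracking node is freed.
 */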
static int find_and_dec_hard_link_list(struct f2fs_sb_info *sbi, u32 nid)
120 {
121 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
122 struct hard_link_node *node = NULL, *prev = NULL;
123
124 if (fsck->hard_link_list_head == NULL)
125 return -EINVAL;
126
127 node = fsck->hard_link_list_head;
128
129 while (node && (nid < node->nid)) {
130 prev = node;
131 node = node->next;
132 }
133
134 if (node == NULL || (nid != node->nid))
135 return -EINVAL;
136
137 /* Decrease link count */
138 node->links = node->links - 1;
139 node->actual_links++;
140
141 /* if link count becomes one, remove the node */
142 if (node->links == 1) {
143 if (fsck->hard_link_list_head == node)
144 fsck->hard_link_list_head = node->next;
145 else
146 prev->next = node->next;
147 free(node);
148 }
149 return 0;
150 }
151
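/*
 * Check that the SSA summary entry covering blk_addr really describes a
 * node block owned by nid. With fix_on, a wrong footer type or nid is
 * rewritten and the repaired summary block is written back to disk.
 */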
static int is_valid_ssa_node_blk(struct f2fs_sb_info *sbi, u32 nid,
							u32 blk_addr)
154 {
155 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
156 struct f2fs_summary_block *sum_blk;
157 struct f2fs_summary *sum_entry;
158 struct seg_entry * se;
159 u32 segno, offset;
160 int need_fix = 0, ret = 0;
161 int type;
162
163 if (get_sb(feature) & F2FS_FEATURE_RO)
164 return 0;
165
166 segno = GET_SEGNO(sbi, blk_addr);
167 offset = OFFSET_IN_SEG(sbi, blk_addr);
168
169 sum_blk = get_sum_block(sbi, segno, &type);
170
171 if (type != SEG_TYPE_NODE && type != SEG_TYPE_CUR_NODE) {
172 /* can't fix current summary, then drop the block */
173 if (!c.fix_on || type < 0) {
174 ASSERT_MSG("Summary footer is not for node segment");
175 ret = -EINVAL;
176 goto out;
177 }
178
179 need_fix = 1;
180 se = get_seg_entry(sbi, segno);
181 if(IS_NODESEG(se->type)) {
182 ASSERT_MSG("Summary footer indicates a node segment: 0x%x", segno);
183 F2FS_SUMMARY_BLOCK_FOOTER(sum_blk)->entry_type = SUM_TYPE_NODE;
184 } else {
185 ret = -EINVAL;
186 goto out;
187 }
188 }
189
190 sum_entry = &(sum_blk->entries[offset]);
191
192 if (le32_to_cpu(sum_entry->nid) != nid) {
193 if (!c.fix_on || type < 0) {
194 DBG(0, "nid [0x%x]\n", nid);
195 DBG(0, "target blk_addr [0x%x]\n", blk_addr);
196 DBG(0, "summary blk_addr [0x%x]\n",
197 GET_SUM_BLKADDR(sbi,
198 GET_SEGNO(sbi, blk_addr)));
199 DBG(0, "seg no / offset [0x%x / 0x%x]\n",
200 GET_SEGNO(sbi, blk_addr),
201 OFFSET_IN_SEG(sbi, blk_addr));
202 DBG(0, "summary_entry.nid [0x%x]\n",
203 le32_to_cpu(sum_entry->nid));
204 DBG(0, "--> node block's nid [0x%x]\n", nid);
205 ASSERT_MSG("Invalid node seg summary\n");
206 ret = -EINVAL;
207 } else {
208 ASSERT_MSG("Set node summary 0x%x -> [0x%x] [0x%x]",
209 segno, nid, blk_addr);
210 sum_entry->nid = cpu_to_le32(nid);
211 need_fix = 1;
212 }
213 }
214 if (need_fix && f2fs_dev_is_writable()) {
215 u64 ssa_blk;
216 int ret2;
217
218 ssa_blk = GET_SUM_BLKADDR(sbi, segno);
219 ret2 = dev_write_block(sum_blk, ssa_blk);
220 ASSERT(ret2 >= 0);
221 }
222 out:
223 if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
224 type == SEG_TYPE_MAX)
225 free(sum_blk);
226 return ret;
227 }
228
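/*
 * Cross-check a data summary against the node it names: read the node at
 * sum->nid and verify that slot ofs_in_node really points to blk_addr.
 * Returns 1 when the existing summary is the one that matches, meaning
 * the caller's reference is the stale one.
 */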
static int is_valid_summary(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
							u32 blk_addr)
231 {
232 u16 ofs_in_node = le16_to_cpu(sum->ofs_in_node);
233 u32 nid = le32_to_cpu(sum->nid);
234 struct f2fs_node *node_blk = NULL;
235 __le32 target_blk_addr;
236 struct node_info ni;
237 int ret = 0;
238
239 node_blk = (struct f2fs_node *)calloc(F2FS_BLKSIZE, 1);
240 ASSERT(node_blk != NULL);
241
242 if (!IS_VALID_NID(sbi, nid))
243 goto out;
244
245 get_node_info(sbi, nid, &ni);
246
247 if (!f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC))
248 goto out;
249
250 /* read node_block */
251 ret = dev_read_block(node_blk, ni.blk_addr);
252 ASSERT(ret >= 0);
253
254 if (le32_to_cpu(F2FS_NODE_FOOTER(node_blk)->nid) != nid)
255 goto out;
256
257 /* check its block address */
258 if (IS_INODE(node_blk)) {
259 int ofs = get_extra_isize(node_blk);
260
261 if (ofs + ofs_in_node >= DEF_ADDRS_PER_INODE)
262 goto out;
263 target_blk_addr = node_blk->i.i_addr[ofs + ofs_in_node];
264 } else {
265 if (ofs_in_node >= DEF_ADDRS_PER_BLOCK)
266 goto out;
267 target_blk_addr = node_blk->dn.addr[ofs_in_node];
268 }
269
270 if (blk_addr == le32_to_cpu(target_blk_addr))
271 ret = 1;
272 out:
273 free(node_blk);
274 return ret;
275 }
276
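/*
 * Data-block counterpart of is_valid_ssa_node_blk(): the summary entry
 * must record the parent nid, the node version and the offset within the
 * parent. Before overwriting a mismatching entry, is_valid_summary() is
 * consulted so a stale pointer in the parent does not clobber a summary
 * that is actually correct.
 */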
static int is_valid_ssa_data_blk(struct f2fs_sb_info *sbi, u32 blk_addr,
		u32 parent_nid, u16 idx_in_node, u8 version)
279 {
280 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
281 struct f2fs_summary_block *sum_blk;
282 struct f2fs_summary *sum_entry;
283 struct seg_entry * se;
284 u32 segno, offset;
285 int need_fix = 0, ret = 0;
286 int type;
287
288 if (get_sb(feature) & F2FS_FEATURE_RO)
289 return 0;
290
291 segno = GET_SEGNO(sbi, blk_addr);
292 offset = OFFSET_IN_SEG(sbi, blk_addr);
293
294 sum_blk = get_sum_block(sbi, segno, &type);
295
296 if (type != SEG_TYPE_DATA && type != SEG_TYPE_CUR_DATA) {
297 /* can't fix current summary, then drop the block */
298 if (!c.fix_on || type < 0) {
299 ASSERT_MSG("Summary footer is not for data segment");
300 ret = -EINVAL;
301 goto out;
302 }
303
304 need_fix = 1;
305 se = get_seg_entry(sbi, segno);
306 if (IS_DATASEG(se->type)) {
307 ASSERT_MSG("Summary footer indicates a data segment: 0x%x", segno);
308 F2FS_SUMMARY_BLOCK_FOOTER(sum_blk)->entry_type = SUM_TYPE_DATA;
309 } else {
310 ret = -EINVAL;
311 goto out;
312 }
313 }
314
315 sum_entry = &(sum_blk->entries[offset]);
316
317 if (le32_to_cpu(sum_entry->nid) != parent_nid ||
318 sum_entry->version != version ||
319 le16_to_cpu(sum_entry->ofs_in_node) != idx_in_node) {
320 if (!c.fix_on || type < 0) {
321 DBG(0, "summary_entry.nid [0x%x]\n",
322 le32_to_cpu(sum_entry->nid));
323 DBG(0, "summary_entry.version [0x%x]\n",
324 sum_entry->version);
325 DBG(0, "summary_entry.ofs_in_node [0x%x]\n",
326 le16_to_cpu(sum_entry->ofs_in_node));
327 DBG(0, "parent nid [0x%x]\n",
328 parent_nid);
329 DBG(0, "version from nat [0x%x]\n", version);
330 DBG(0, "idx in parent node [0x%x]\n",
331 idx_in_node);
332
333 DBG(0, "Target data block addr [0x%x]\n", blk_addr);
334 ASSERT_MSG("Invalid data seg summary\n");
335 ret = -EINVAL;
336 } else if (is_valid_summary(sbi, sum_entry, blk_addr)) {
337 /* delete wrong index */
338 ret = -EINVAL;
339 } else {
340 ASSERT_MSG("Set data summary 0x%x -> [0x%x] [0x%x] [0x%x]",
341 segno, parent_nid, version, idx_in_node);
342 sum_entry->nid = cpu_to_le32(parent_nid);
343 sum_entry->version = version;
344 sum_entry->ofs_in_node = cpu_to_le16(idx_in_node);
345 need_fix = 1;
346 }
347 }
348 if (need_fix && f2fs_dev_is_writable()) {
349 u64 ssa_blk;
350 int ret2;
351
352 ssa_blk = GET_SUM_BLKADDR(sbi, segno);
353 ret2 = dev_write_block(sum_blk, ssa_blk);
354 ASSERT(ret2 >= 0);
355 }
356 out:
357 if (type == SEG_TYPE_NODE || type == SEG_TYPE_DATA ||
358 type == SEG_TYPE_MAX)
359 free(sum_blk);
360 return ret;
361 }
362
static int __check_inode_mode(u32 nid, enum FILE_TYPE ftype, u16 mode)
364 {
365 if (ftype >= F2FS_FT_MAX)
366 return 0;
367 /* f2fs_iget will return -EIO if mode is not valid file type */
368 if (!S_ISLNK(mode) && !S_ISREG(mode) && !S_ISDIR(mode) &&
369 !S_ISCHR(mode) && !S_ISBLK(mode) && !S_ISFIFO(mode) &&
370 !S_ISSOCK(mode)) {
371 ASSERT_MSG("inode [0x%x] unknown file type i_mode [0x%x]",
372 nid, mode);
373 return -1;
374 }
375
376 if (S_ISLNK(mode) && ftype != F2FS_FT_SYMLINK)
377 goto err;
378 if (S_ISREG(mode) && ftype != F2FS_FT_REG_FILE)
379 goto err;
380 if (S_ISDIR(mode) && ftype != F2FS_FT_DIR)
381 goto err;
382 if (S_ISCHR(mode) && ftype != F2FS_FT_CHRDEV)
383 goto err;
384 if (S_ISBLK(mode) && ftype != F2FS_FT_BLKDEV)
385 goto err;
386 if (S_ISFIFO(mode) && ftype != F2FS_FT_FIFO)
387 goto err;
388 if (S_ISSOCK(mode) && ftype != F2FS_FT_SOCK)
389 goto err;
390 return 0;
391 err:
392 ASSERT_MSG("inode [0x%x] mismatch i_mode [0x%x vs. 0x%x]",
393 nid, ftype, mode);
394 return -1;
395 }
396
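/*
 * A nid passes the NAT sanity check when it lies inside the valid nid
 * range, its NAT entry carries a non-zero ino, and its block address is
 * a valid address inside the main area.
 */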
static int sanity_check_nat(struct f2fs_sb_info *sbi, u32 nid,
						struct node_info *ni)
399 {
400 if (!IS_VALID_NID(sbi, nid)) {
401 ASSERT_MSG("nid is not valid. [0x%x]", nid);
402 return -EINVAL;
403 }
404
405 get_node_info(sbi, nid, ni);
406 if (ni->ino == 0) {
407 ASSERT_MSG("nid[0x%x] ino is 0", nid);
408 return -EINVAL;
409 }
410
411 if (!is_valid_data_blkaddr(ni->blk_addr)) {
412 ASSERT_MSG("nid->blk_addr is 0x%x. [0x%x]", ni->blk_addr, nid);
413 return -EINVAL;
414 }
415
416 if (!f2fs_is_valid_blkaddr(sbi, ni->blk_addr, DATA_GENERIC)) {
417 ASSERT_MSG("blkaddress is not valid. [0x%x]", ni->blk_addr);
418 return -EINVAL;
419 }
420
421 return 0;
422 }
423
int fsck_sanity_check_nat(struct f2fs_sb_info *sbi, u32 nid)
425 {
426 struct node_info ni;
427
428 return sanity_check_nat(sbi, nid, &ni);
429 }
430
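/*
 * sanity_check_nid() goes beyond the NAT check: it reads the node block
 * and verifies that the footer (nid, ino, flag) matches the NAT entry and
 * the node type the caller expects, validates the SSA entry, and updates
 * the fsck bitmaps and per-check counters.
 */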
static int sanity_check_nid(struct f2fs_sb_info *sbi, u32 nid,
			struct f2fs_node *node_blk,
			enum FILE_TYPE ftype, enum NODE_TYPE ntype,
			struct node_info *ni)
435 {
436 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
437 int ret;
438
439 ret = sanity_check_nat(sbi, nid, ni);
440 if (ret)
441 return ret;
442
443 ret = dev_read_block(node_blk, ni->blk_addr);
444 ASSERT(ret >= 0);
445
446 if (ntype == TYPE_INODE &&
447 F2FS_NODE_FOOTER(node_blk)->nid != F2FS_NODE_FOOTER(node_blk)->ino) {
448 ASSERT_MSG("nid[0x%x] footer.nid[0x%x] footer.ino[0x%x]",
449 nid, le32_to_cpu(F2FS_NODE_FOOTER(node_blk)->nid),
450 le32_to_cpu(F2FS_NODE_FOOTER(node_blk)->ino));
451 return -EINVAL;
452 }
453 if (ni->ino != le32_to_cpu(F2FS_NODE_FOOTER(node_blk)->ino)) {
454 ASSERT_MSG("nid[0x%x] nat_entry->ino[0x%x] footer.ino[0x%x]",
455 nid, ni->ino, le32_to_cpu(F2FS_NODE_FOOTER(node_blk)->ino));
456 return -EINVAL;
457 }
458 if (ntype != TYPE_INODE && IS_INODE(node_blk)) {
459 ASSERT_MSG("nid[0x%x] footer.nid[0x%x] footer.ino[0x%x]",
460 nid, le32_to_cpu(F2FS_NODE_FOOTER(node_blk)->nid),
461 le32_to_cpu(F2FS_NODE_FOOTER(node_blk)->ino));
462 return -EINVAL;
463 }
464
465 if (le32_to_cpu(F2FS_NODE_FOOTER(node_blk)->nid) != nid) {
466 ASSERT_MSG("nid[0x%x] blk_addr[0x%x] footer.nid[0x%x]",
467 nid, ni->blk_addr,
468 le32_to_cpu(F2FS_NODE_FOOTER(node_blk)->nid));
469 return -EINVAL;
470 }
471
472 if (ntype == TYPE_XATTR) {
473 u32 flag = le32_to_cpu(F2FS_NODE_FOOTER(node_blk)->flag);
474
475 if ((flag >> OFFSET_BIT_SHIFT) != XATTR_NODE_OFFSET) {
476 ASSERT_MSG("xnid[0x%x] has wrong ofs:[0x%x]",
477 nid, flag);
478 return -EINVAL;
479 }
480 }
481
482 if ((ntype == TYPE_INODE && ftype == F2FS_FT_DIR) ||
483 (ntype == TYPE_XATTR && ftype == F2FS_FT_XATTR)) {
484 /* not included '.' & '..' */
485 if (f2fs_test_main_bitmap(sbi, ni->blk_addr) != 0) {
486 ASSERT_MSG("Duplicated node blk. nid[0x%x][0x%x]\n",
487 nid, ni->blk_addr);
488 return -EINVAL;
489 }
490 }
491
492 /* this if only from fix_hard_links */
493 if (ftype == F2FS_FT_MAX)
494 return 0;
495
496 if (ntype == TYPE_INODE &&
497 __check_inode_mode(nid, ftype, le16_to_cpu(node_blk->i.i_mode)))
498 return -EINVAL;
499
500 /* workaround to fix later */
501 if (ftype != F2FS_FT_ORPHAN ||
502 f2fs_test_bit(nid, fsck->nat_area_bitmap) != 0) {
503 f2fs_clear_bit(nid, fsck->nat_area_bitmap);
504 /* avoid reusing nid when reconnecting files */
505 f2fs_set_bit(nid, NM_I(sbi)->nid_bitmap);
506 } else
507 ASSERT_MSG("orphan or xattr nid is duplicated [0x%x]\n",
508 nid);
509
510 if (is_valid_ssa_node_blk(sbi, nid, ni->blk_addr)) {
511 ASSERT_MSG("summary node block is not valid. [0x%x]", nid);
512 return -EINVAL;
513 }
514
515 if (f2fs_test_sit_bitmap(sbi, ni->blk_addr) == 0)
516 ASSERT_MSG("SIT bitmap is 0x0. blk_addr[0x%x]",
517 ni->blk_addr);
518
519 if (f2fs_test_main_bitmap(sbi, ni->blk_addr) == 0) {
520
521 fsck->chk.valid_blk_cnt++;
522 fsck->chk.valid_node_cnt++;
523
524 /* Progress report */
525 if (!c.show_file_map && sbi->total_valid_node_count > 1000) {
526 unsigned int p10 = sbi->total_valid_node_count / 10;
527
528 if (++sbi->fsck->chk.checked_node_cnt % p10)
529 return 0;
530
531 printf("[FSCK] Check node %"PRIu64" / %u (%.2f%%)\n",
532 sbi->fsck->chk.checked_node_cnt,
533 sbi->total_valid_node_count,
534 10 * (float)sbi->fsck->chk.checked_node_cnt /
535 p10);
536 }
537 }
538 return 0;
539 }
540
int fsck_sanity_check_nid(struct f2fs_sb_info *sbi, u32 nid,
			enum FILE_TYPE ftype, enum NODE_TYPE ntype)
543 {
544 struct f2fs_node *node_blk = NULL;
545 struct node_info ni;
546 int ret;
547
548 node_blk = (struct f2fs_node *)calloc(F2FS_BLKSIZE, 1);
549 ASSERT(node_blk != NULL);
550
551 ret = sanity_check_nid(sbi, nid, node_blk, ftype, ntype, &ni);
552
553 free(node_blk);
554 return ret;
555 }
556
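/*
 * An xattr node is validated like any other node; when it checks out it
 * is counted into blk_cnt and marked as a cold node block in the main
 * bitmap.
 */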
static int fsck_chk_xattr_blk(struct f2fs_sb_info *sbi, u32 ino,
					u32 x_nid, u32 *blk_cnt)
559 {
560 struct f2fs_node *node_blk = NULL;
561 struct node_info ni;
562 int ret = 0;
563
564 if (x_nid == 0x0)
565 return 0;
566
567 node_blk = (struct f2fs_node *)calloc(F2FS_BLKSIZE, 1);
568 ASSERT(node_blk != NULL);
569
570 /* Sanity check */
571 if (sanity_check_nid(sbi, x_nid, node_blk,
572 F2FS_FT_XATTR, TYPE_XATTR, &ni)) {
573 ret = -EINVAL;
574 goto out;
575 }
576
577 *blk_cnt = *blk_cnt + 1;
578 f2fs_set_main_bitmap(sbi, ni.blk_addr, CURSEG_COLD_NODE);
579 DBG(2, "ino[0x%x] x_nid[0x%x]\n", ino, x_nid);
580 out:
581 free(node_blk);
582 return ret;
583 }
584
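/*
 * Dispatch on the node type: inodes are handed to fsck_chk_inode_blk()
 * and charged to quota, while direct, indirect and double-indirect nodes
 * are marked in the main bitmap and descended into by their dedicated
 * helpers.
 */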
int fsck_chk_node_blk(struct f2fs_sb_info *sbi, struct f2fs_inode *inode,
		u32 nid, enum FILE_TYPE ftype, enum NODE_TYPE ntype,
		u32 *blk_cnt, struct f2fs_compr_blk_cnt *cbc,
		struct child_info *child)
589 {
590 struct node_info ni;
591 struct f2fs_node *node_blk = NULL;
592
593 node_blk = (struct f2fs_node *)calloc(F2FS_BLKSIZE, 1);
594 ASSERT(node_blk != NULL);
595
596 if (sanity_check_nid(sbi, nid, node_blk, ftype, ntype, &ni))
597 goto err;
598
599 if (ntype == TYPE_INODE) {
600 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
601
602 fsck_chk_inode_blk(sbi, nid, ftype, node_blk, blk_cnt, cbc,
603 &ni, child);
604 quota_add_inode_usage(fsck->qctx, nid, &node_blk->i);
605 } else {
606 switch (ntype) {
607 case TYPE_DIRECT_NODE:
608 f2fs_set_main_bitmap(sbi, ni.blk_addr,
609 CURSEG_WARM_NODE);
610 fsck_chk_dnode_blk(sbi, inode, nid, ftype, node_blk,
611 blk_cnt, cbc, child, &ni);
612 break;
613 case TYPE_INDIRECT_NODE:
614 f2fs_set_main_bitmap(sbi, ni.blk_addr,
615 CURSEG_COLD_NODE);
616 fsck_chk_idnode_blk(sbi, inode, ftype, node_blk,
617 blk_cnt, cbc, child);
618 break;
619 case TYPE_DOUBLE_INDIRECT_NODE:
620 f2fs_set_main_bitmap(sbi, ni.blk_addr,
621 CURSEG_COLD_NODE);
622 fsck_chk_didnode_blk(sbi, inode, ftype, node_blk,
623 blk_cnt, cbc, child);
624 break;
625 default:
626 ASSERT(0);
627 }
628 }
629 free(node_blk);
630 return 0;
631 err:
632 free(node_blk);
633 return -EINVAL;
634 }
635
static bool is_sit_bitmap_set(struct f2fs_sb_info *sbi, u32 blk_addr)
637 {
638 struct seg_entry *se;
639 u32 offset;
640
641 se = get_seg_entry(sbi, GET_SEGNO(sbi, blk_addr));
642 offset = OFFSET_IN_SEG(sbi, blk_addr);
643
644 return f2fs_test_bit(offset,
645 (const char *)se->cur_valid_map) != 0;
646 }
647
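/*
 * When the root inode is broken, scan the node segments for a block whose
 * footer says nid == ino == root_ino. Blocks marked valid in the SIT are
 * tried first; if none qualifies, the scan repeats over invalid blocks
 * and the candidate with the newest ctime wins. With fix_on, the root
 * inode's NAT entry is re-pointed to that block.
 */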
int fsck_chk_root_inode(struct f2fs_sb_info *sbi)
649 {
650 struct f2fs_node *node_blk;
651 int segment_count = SM_I(sbi)->main_segments;
652 int segno;
653 bool valid_bitmap = true;
654 block_t last_blkaddr = NULL_ADDR;
655 nid_t root_ino = sbi->root_ino_num;
656 u64 last_ctime = 0;
657 u32 last_ctime_nsec = 0;
658 int ret = -EINVAL;
659
660 node_blk = calloc(F2FS_BLKSIZE, 1);
661 ASSERT(node_blk);
662
663 MSG(0, "Info: root inode is corrupted, search and relink it\n");
664
665 retry:
666 for (segno = 0; segno < segment_count; segno++) {
667 struct seg_entry *se = get_seg_entry(sbi, segno);
668 block_t blkaddr = START_BLOCK(sbi, segno);
669 int ret;
670 int i;
671
672 if (IS_DATASEG(se->type))
673 continue;
674
675 dev_readahead(blkaddr << F2FS_BLKSIZE_BITS,
676 sbi->blocks_per_seg << F2FS_BLKSIZE_BITS);
677
678 for (i = 0; i < sbi->blocks_per_seg; i++, blkaddr++) {
679 if (valid_bitmap ^ is_sit_bitmap_set(sbi, blkaddr))
680 continue;
681
682 ret = dev_read_block(node_blk, blkaddr);
683 ASSERT(ret >= 0);
684
685 if (le32_to_cpu(F2FS_NODE_FOOTER(node_blk)->ino) !=
686 root_ino ||
687 le32_to_cpu(F2FS_NODE_FOOTER(node_blk)->nid) !=
688 root_ino)
689 continue;
690
691 if (!IS_INODE(node_blk))
692 continue;
693
694 if (le32_to_cpu(node_blk->i.i_generation) ||
695 le32_to_cpu(node_blk->i.i_namelen))
696 continue;
697 break;
698 }
699
700 if (i == sbi->blocks_per_seg)
701 continue;
702
703 if (valid_bitmap) {
704 last_blkaddr = blkaddr;
705 MSG(0, "Info: possible root inode blkaddr: 0x%x\n",
706 last_blkaddr);
707 goto fix;
708 }
709
710 if (last_blkaddr == NULL_ADDR)
711 goto init;
712 if (le64_to_cpu(node_blk->i.i_ctime) < last_ctime)
713 continue;
714 if (le64_to_cpu(node_blk->i.i_ctime) == last_ctime &&
715 le32_to_cpu(node_blk->i.i_ctime_nsec) <=
716 last_ctime_nsec)
717 continue;
718 init:
719 last_blkaddr = blkaddr;
720 last_ctime = le64_to_cpu(node_blk->i.i_ctime);
721 last_ctime_nsec = le32_to_cpu(node_blk->i.i_ctime_nsec);
722
723 MSG(0, "Info: possible root inode blkaddr: %u\n",
724 last_blkaddr);
725 }
726
727 if (valid_bitmap) {
728 valid_bitmap = false;
729 goto retry;
730 }
731 fix:
732 if (!last_blkaddr) {
733 MSG(0, "Info: there is no valid root inode\n");
734 } else if (c.fix_on) {
735 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
736
737 FIX_MSG("Relink root inode, blkaddr: 0x%x", last_blkaddr);
738 update_nat_blkaddr(sbi, root_ino, root_ino, last_blkaddr);
739
740 if (f2fs_test_bit(root_ino, fsck->nat_area_bitmap))
741 f2fs_clear_bit(root_ino, fsck->nat_area_bitmap);
742 fsck->chk.valid_nat_entry_cnt++;
743
744 if (!f2fs_test_sit_bitmap(sbi, last_blkaddr))
745 f2fs_set_sit_bitmap(sbi, last_blkaddr);
746 ret = 0;
747 }
748 free(node_blk);
749 return ret;
750 }
751
static inline void get_extent_info(struct extent_info *ext,
					struct f2fs_extent *i_ext)
754 {
755 ext->fofs = le32_to_cpu(i_ext->fofs);
756 ext->blk = le32_to_cpu(i_ext->blk_addr);
757 ext->len = le32_to_cpu(i_ext->len);
758 }
759
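/*
 * Walk the file's block addresses against the cached read extent
 * [fofs, fofs + len) and set FSCK_UNMATCHED_EXTENT as soon as a hole or
 * a block address contradicts the extent; the final call with last == 1
 * also catches a hole at the tail of the extent.
 */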
static void check_extent_info(struct child_info *child,
						block_t blkaddr, int last)
762 {
763 struct extent_info *ei = &child->ei;
764 u32 pgofs = child->pgofs;
765 int is_hole = 0;
766
767 if (!ei->len)
768 return;
769
770 if (child->state & FSCK_UNMATCHED_EXTENT)
771 return;
772
773 if ((child->state & FSCK_INLINE_INODE) && ei->len)
774 goto unmatched;
775
776 if (last) {
777 /* hole exist in the back of extent */
778 if (child->last_blk != ei->blk + ei->len - 1)
779 child->state |= FSCK_UNMATCHED_EXTENT;
780 return;
781 }
782
783 if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR)
784 is_hole = 1;
785
786 if (pgofs >= ei->fofs && pgofs < ei->fofs + ei->len) {
787 /* unmatched blkaddr */
788 if (is_hole || (blkaddr != pgofs - ei->fofs + ei->blk))
789 goto unmatched;
790
791 if (!child->last_blk) {
792 /* hole exists in the front of extent */
793 if (pgofs != ei->fofs)
794 goto unmatched;
795 } else if (child->last_blk + 1 != blkaddr) {
796 /* hole exists in the middle of extent */
797 goto unmatched;
798 }
799 child->last_blk = blkaddr;
800 return;
801 }
802
803 if (is_hole)
804 return;
805
806 if (blkaddr < ei->blk || blkaddr >= ei->blk + ei->len)
807 return;
808 /* unmatched file offset */
809 unmatched:
810 child->state |= FSCK_UNMATCHED_EXTENT;
811 }
812
void fsck_reada_node_block(struct f2fs_sb_info *sbi, u32 nid)
814 {
815 struct node_info ni;
816
817 if (nid != 0 && IS_VALID_NID(sbi, nid)) {
818 get_node_info(sbi, nid, &ni);
819 if (f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC))
820 dev_reada_block(ni.blk_addr);
821 }
822 }
823
void fsck_reada_all_direct_node_blocks(struct f2fs_sb_info *sbi,
						struct f2fs_node *node_blk)
826 {
827 int i;
828
829 for (i = 0; i < NIDS_PER_BLOCK; i++) {
830 u32 nid = le32_to_cpu(node_blk->in.nid[i]);
831
832 fsck_reada_node_block(sbi, nid);
833 }
834 }
835
static bool is_zeroed(const u8 *p, size_t size)
837 {
838 size_t i;
839
840 for (i = 0; i < size; i++) {
841 if (p[i])
842 return false;
843 }
844 return true;
845 }
846
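/*
 * Walk the inode's xattr entries and make sure the list terminates
 * cleanly: no entry may cross the end of the xattr space and everything
 * after the last entry must be zero. With fix_on, the offending tail is
 * zeroed and written back; the function returns 1 in that case.
 */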
int chk_extended_attributes(struct f2fs_sb_info *sbi, u32 nid,
		struct f2fs_node *inode)
849 {
850 void *xattr;
851 void *last_base_addr;
852 struct f2fs_xattr_entry *ent;
853 __u32 xattr_size = XATTR_SIZE(&inode->i);
854 bool need_fix = false;
855
856 if (xattr_size == 0)
857 return 0;
858
859 xattr = read_all_xattrs(sbi, inode, false);
860 ASSERT(xattr);
861
862 last_base_addr = (void *)xattr + xattr_size;
863
864 list_for_each_xattr(ent, xattr) {
865 if ((void *)(ent) + sizeof(__u32) > last_base_addr ||
866 (void *)XATTR_NEXT_ENTRY(ent) > last_base_addr) {
867 ASSERT_MSG("[0x%x] last xattr entry (offset: %lx) "
868 "crosses the boundary",
869 nid, (long int)((void *)ent - xattr));
870 need_fix = true;
871 break;
872 }
873 }
874 if (!need_fix &&
875 !is_zeroed((u8 *)ent, (u8 *)last_base_addr - (u8 *)ent)) {
876 ASSERT_MSG("[0x%x] nonzero bytes in xattr space after "
877 "end of list", nid);
878 need_fix = true;
879 }
880 if (need_fix && c.fix_on) {
881 memset(ent, 0, (u8 *)last_base_addr - (u8 *)ent);
882 write_all_xattrs(sbi, inode, xattr_size, xattr);
883 FIX_MSG("[0x%x] nullify wrong xattr entries", nid);
884 free(xattr);
885 return 1;
886 }
887 free(xattr);
888 return 0;
889 }
890
891 /* start with valid nid and blkaddr */
void fsck_chk_inode_blk(struct f2fs_sb_info *sbi, u32 nid,
		enum FILE_TYPE ftype, struct f2fs_node *node_blk,
		u32 *blk_cnt, struct f2fs_compr_blk_cnt *cbc,
		struct node_info *ni, struct child_info *child_d)
896 {
897 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
898 struct child_info child;
899 enum NODE_TYPE ntype;
900 u32 i_links = le32_to_cpu(node_blk->i.i_links);
901 u64 i_size = le64_to_cpu(node_blk->i.i_size);
902 u64 i_blocks = le64_to_cpu(node_blk->i.i_blocks);
903 bool compr_supported = c.feature & F2FS_FEATURE_COMPRESSION;
904 u32 i_flags = le32_to_cpu(node_blk->i.i_flags);
905 bool compressed = i_flags & F2FS_COMPR_FL;
906 bool compr_rel = node_blk->i.i_inline & F2FS_COMPRESS_RELEASED;
907 u64 i_compr_blocks = le64_to_cpu(node_blk->i.i_compr_blocks);
908 nid_t i_xattr_nid = le32_to_cpu(node_blk->i.i_xattr_nid);
909 int ofs;
910 char *en;
911 u32 namelen;
912 unsigned int addrs, idx = 0;
913 unsigned short i_gc_failures;
914 int need_fix = 0;
915 int ret;
916 u32 cluster_size = 1 << node_blk->i.i_log_cluster_size;
917
918 if (!compressed)
919 goto check_next;
920
921 if (!compr_supported || (node_blk->i.i_inline & F2FS_INLINE_DATA)) {
922 /*
923 * The 'compression' flag in i_flags affects the traverse of
924 * the node tree. Thus, it must be fixed unconditionally
925 * in the memory (node_blk).
926 */
927 node_blk->i.i_flags &= ~cpu_to_le32(F2FS_COMPR_FL);
928 compressed = false;
929 if (c.fix_on) {
930 need_fix = 1;
931 FIX_MSG("[0x%x] i_flags=0x%x -> 0x%x",
932 nid, i_flags, node_blk->i.i_flags);
933 }
934 i_flags &= ~F2FS_COMPR_FL;
935 }
936 check_next:
937 memset(&child, 0, sizeof(child));
938 child.links = 2;
939 child.p_ino = nid;
940 child.pp_ino = le32_to_cpu(node_blk->i.i_pino);
941 child.dir_level = node_blk->i.i_dir_level;
942
943 if (f2fs_test_main_bitmap(sbi, ni->blk_addr) == 0)
944 fsck->chk.valid_inode_cnt++;
945
946 if (ftype == F2FS_FT_DIR) {
947 f2fs_set_main_bitmap(sbi, ni->blk_addr, CURSEG_HOT_NODE);
948 namelen = le32_to_cpu(node_blk->i.i_namelen);
949 if (namelen > F2FS_NAME_LEN)
950 namelen = F2FS_NAME_LEN;
951 memcpy(child.p_name, node_blk->i.i_name, namelen);
952 } else {
953 if (f2fs_test_main_bitmap(sbi, ni->blk_addr) == 0) {
954 f2fs_set_main_bitmap(sbi, ni->blk_addr,
955 CURSEG_WARM_NODE);
956 if (i_links > 1 && ftype != F2FS_FT_ORPHAN &&
957 !is_qf_ino(F2FS_RAW_SUPER(sbi), nid)) {
958 /* First time. Create new hard link node */
959 add_into_hard_link_list(sbi, nid, i_links);
960 fsck->chk.multi_hard_link_files++;
961 }
962 } else {
963 DBG(3, "[0x%x] has hard links [0x%x]\n", nid, i_links);
964 if (find_and_dec_hard_link_list(sbi, nid)) {
965 ASSERT_MSG("[0x%x] needs more i_links=0x%x",
966 nid, i_links);
967 if (c.fix_on) {
968 node_blk->i.i_links =
969 cpu_to_le32(i_links + 1);
970 need_fix = 1;
971 FIX_MSG("File: 0x%x "
972 "i_links= 0x%x -> 0x%x",
973 nid, i_links, i_links + 1);
974 }
975 goto skip_blkcnt_fix;
976 }
977 /* No need to go deep into the node */
978 return;
979 }
980 }
981
982 /* readahead xattr node block */
983 fsck_reada_node_block(sbi, i_xattr_nid);
984
985 if (fsck_chk_xattr_blk(sbi, nid, i_xattr_nid, blk_cnt)) {
986 if (c.fix_on) {
987 node_blk->i.i_xattr_nid = 0;
988 need_fix = 1;
989 FIX_MSG("Remove xattr block: 0x%x, x_nid = 0x%x",
990 nid, i_xattr_nid);
991 }
992 }
993
994 if (ftype == F2FS_FT_CHRDEV || ftype == F2FS_FT_BLKDEV ||
995 ftype == F2FS_FT_FIFO || ftype == F2FS_FT_SOCK)
996 goto check;
997
998 /* init extent info */
999 get_extent_info(&child.ei, &node_blk->i.i_ext);
1000 child.last_blk = 0;
1001
1002 if (f2fs_has_extra_isize(&node_blk->i)) {
1003 if (c.feature & F2FS_FEATURE_EXTRA_ATTR) {
1004 unsigned int isize =
1005 le16_to_cpu(node_blk->i.i_extra_isize);
1006 if (isize > 4 * DEF_ADDRS_PER_INODE) {
1007 ASSERT_MSG("[0x%x] wrong i_extra_isize=0x%x",
1008 nid, isize);
1009 if (c.fix_on) {
1010 FIX_MSG("ino[0x%x] recover i_extra_isize "
1011 "from %u to %u",
1012 nid, isize,
1013 calc_extra_isize());
1014 node_blk->i.i_extra_isize =
1015 cpu_to_le16(calc_extra_isize());
1016 need_fix = 1;
1017 }
1018 }
1019 } else {
1020 ASSERT_MSG("[0x%x] wrong extra_attr flag", nid);
1021 if (c.fix_on) {
1022 FIX_MSG("ino[0x%x] remove F2FS_EXTRA_ATTR "
1023 "flag in i_inline:%u",
1024 nid, node_blk->i.i_inline);
1025 /* we don't support tuning F2FS_FEATURE_EXTRA_ATTR now */
1026 node_blk->i.i_inline &= ~F2FS_EXTRA_ATTR;
1027 need_fix = 1;
1028 }
1029 }
1030
1031 if ((c.feature & F2FS_FEATURE_FLEXIBLE_INLINE_XATTR) &&
1032 (node_blk->i.i_inline & F2FS_INLINE_XATTR)) {
1033 unsigned int inline_size =
1034 le16_to_cpu(node_blk->i.i_inline_xattr_size);
1035
1036 if (!inline_size ||
1037 inline_size > MAX_INLINE_XATTR_SIZE) {
1038 ASSERT_MSG("[0x%x] wrong inline_xattr_size:%u",
1039 nid, inline_size);
1040 if (c.fix_on) {
1041 FIX_MSG("ino[0x%x] recover inline xattr size "
1042 "from %u to %u",
1043 nid, inline_size,
1044 DEFAULT_INLINE_XATTR_ADDRS);
1045 node_blk->i.i_inline_xattr_size =
1046 cpu_to_le16(DEFAULT_INLINE_XATTR_ADDRS);
1047 need_fix = 1;
1048 }
1049 }
1050 }
1051 }
1052 ofs = get_extra_isize(node_blk);
1053
1054 if ((node_blk->i.i_flags & cpu_to_le32(F2FS_CASEFOLD_FL)) &&
1055 (!S_ISDIR(le16_to_cpu(node_blk->i.i_mode)) ||
1056 !(c.feature & F2FS_FEATURE_CASEFOLD))) {
1057 ASSERT_MSG("[0x%x] unexpected casefold flag", nid);
1058 if (c.fix_on) {
1059 FIX_MSG("ino[0x%x] clear casefold flag", nid);
1060 node_blk->i.i_flags &= ~cpu_to_le32(F2FS_CASEFOLD_FL);
1061 need_fix = 1;
1062 }
1063 }
1064
1065 if (chk_extended_attributes(sbi, nid, node_blk))
1066 need_fix = 1;
1067
1068 if ((node_blk->i.i_inline & F2FS_INLINE_DATA)) {
1069 unsigned int inline_size = MAX_INLINE_DATA(node_blk);
1070 if (cur_qtype != -1)
1071 qf_szchk_type[cur_qtype] = QF_SZCHK_INLINE;
1072 block_t blkaddr = le32_to_cpu(node_blk->i.i_addr[ofs]);
1073
1074 if (blkaddr != NULL_ADDR) {
1075 ASSERT_MSG("[0x%x] wrong inline reserve blkaddr:%u",
1076 nid, blkaddr);
1077 if (c.fix_on) {
1078 FIX_MSG("inline_data has wrong 0'th block = %x",
1079 blkaddr);
1080 node_blk->i.i_addr[ofs] = NULL_ADDR;
1081 node_blk->i.i_blocks = cpu_to_le64(*blk_cnt);
1082 need_fix = 1;
1083 }
1084 }
1085 if (i_size > inline_size) {
1086 ASSERT_MSG("[0x%x] wrong inline size:%lu",
1087 nid, (unsigned long)i_size);
1088 if (c.fix_on) {
1089 node_blk->i.i_size = cpu_to_le64(inline_size);
1090 FIX_MSG("inline_data has wrong i_size %lu",
1091 (unsigned long)i_size);
1092 need_fix = 1;
1093 }
1094 }
1095 if (!(node_blk->i.i_inline & F2FS_DATA_EXIST)) {
1096 char buf[MAX_INLINE_DATA(node_blk)];
1097 memset(buf, 0, MAX_INLINE_DATA(node_blk));
1098
1099 if (memcmp(buf, inline_data_addr(node_blk),
1100 MAX_INLINE_DATA(node_blk))) {
1101 ASSERT_MSG("[0x%x] junk inline data", nid);
1102 if (c.fix_on) {
1103 FIX_MSG("inline_data has DATA_EXIST");
1104 node_blk->i.i_inline |= F2FS_DATA_EXIST;
1105 need_fix = 1;
1106 }
1107 }
1108 }
1109 DBG(3, "ino[0x%x] has inline data!\n", nid);
1110 child.state |= FSCK_INLINE_INODE;
1111 goto check;
1112 }
1113
1114 if ((node_blk->i.i_inline & F2FS_INLINE_DENTRY)) {
1115 block_t blkaddr = le32_to_cpu(node_blk->i.i_addr[ofs]);
1116
1117 DBG(3, "ino[0x%x] has inline dentry!\n", nid);
1118 if (blkaddr != 0) {
1119 ASSERT_MSG("[0x%x] wrong inline reserve blkaddr:%u",
1120 nid, blkaddr);
1121 if (c.fix_on) {
1122 FIX_MSG("inline_dentry has wrong 0'th block = %x",
1123 blkaddr);
1124 node_blk->i.i_addr[ofs] = NULL_ADDR;
1125 node_blk->i.i_blocks = cpu_to_le64(*blk_cnt);
1126 need_fix = 1;
1127 }
1128 }
1129
1130 ret = fsck_chk_inline_dentries(sbi, node_blk, &child);
1131 if (ret < 0) {
1132 if (c.fix_on)
1133 need_fix = 1;
1134 }
1135 child.state |= FSCK_INLINE_INODE;
1136 goto check;
1137 }
1138
1139 /* check data blocks in inode */
1140 addrs = ADDRS_PER_INODE(&node_blk->i);
1141 if (cur_qtype != -1) {
1142 u64 addrs_per_blk = (u64)ADDRS_PER_BLOCK(&node_blk->i);
1143 qf_szchk_type[cur_qtype] = QF_SZCHK_REGFILE;
1144 qf_maxsize[cur_qtype] = (u64)(addrs + 2 * addrs_per_blk +
1145 2 * addrs_per_blk * NIDS_PER_BLOCK +
1146 addrs_per_blk * NIDS_PER_BLOCK *
1147 NIDS_PER_BLOCK) * F2FS_BLKSIZE;
1148 }
1149 for (idx = 0; idx < addrs; idx++, child.pgofs++) {
1150 block_t blkaddr = le32_to_cpu(node_blk->i.i_addr[ofs + idx]);
1151
1152 /* check extent info */
1153 check_extent_info(&child, blkaddr, 0);
1154
1155 if (blkaddr == NULL_ADDR)
1156 continue;
1157 if (blkaddr == COMPRESS_ADDR) {
1158 if (!compressed || (child.pgofs &
1159 (cluster_size - 1)) != 0) {
1160 if (c.fix_on) {
1161 node_blk->i.i_addr[ofs + idx] =
1162 NULL_ADDR;
1163 need_fix = 1;
1164 FIX_MSG("[0x%x] i_addr[%d] = NULL_ADDR",
1165 nid, ofs + idx);
1166 }
1167 continue;
1168 }
1169 if (!compr_rel) {
1170 fsck->chk.valid_blk_cnt++;
1171 *blk_cnt = *blk_cnt + 1;
1172 cbc->cheader_pgofs = child.pgofs;
1173 cbc->cnt++;
1174 }
1175 continue;
1176 }
1177 if (!compr_rel && blkaddr == NEW_ADDR &&
1178 child.pgofs - cbc->cheader_pgofs < cluster_size)
1179 cbc->cnt++;
1180 ret = fsck_chk_data_blk(sbi,
1181 IS_CASEFOLDED(&node_blk->i),
1182 blkaddr,
1183 &child, (i_blocks == *blk_cnt),
1184 ftype, nid, idx, ni->version,
1185 file_is_encrypt(&node_blk->i), node_blk);
1186 if (blkaddr != le32_to_cpu(node_blk->i.i_addr[ofs + idx]))
1187 need_fix = 1;
1188 if (!ret) {
1189 *blk_cnt = *blk_cnt + 1;
1190 if (cur_qtype != -1 && blkaddr != NEW_ADDR)
1191 qf_last_blkofs[cur_qtype] = child.pgofs;
1192 } else if (c.fix_on) {
1193 node_blk->i.i_addr[ofs + idx] = NULL_ADDR;
1194 need_fix = 1;
1195 FIX_MSG("[0x%x] i_addr[%d] = NULL_ADDR", nid, ofs + idx);
1196 }
1197 }
1198
1199 /* readahead node blocks */
1200 for (idx = 0; idx < 5; idx++) {
1201 u32 nid = le32_to_cpu(F2FS_INODE_I_NID(&node_blk->i, idx));
1202 fsck_reada_node_block(sbi, nid);
1203 }
1204
1205 /* check node blocks in inode */
1206 for (idx = 0; idx < 5; idx++) {
1207 nid_t i_nid = le32_to_cpu(F2FS_INODE_I_NID(&node_blk->i, idx));
1208
1209 if (idx == 0 || idx == 1)
1210 ntype = TYPE_DIRECT_NODE;
1211 else if (idx == 2 || idx == 3)
1212 ntype = TYPE_INDIRECT_NODE;
1213 else if (idx == 4)
1214 ntype = TYPE_DOUBLE_INDIRECT_NODE;
1215 else
1216 ASSERT(0);
1217
1218 if (i_nid == 0x0)
1219 goto skip;
1220
1221 ret = fsck_chk_node_blk(sbi, &node_blk->i, i_nid,
1222 ftype, ntype, blk_cnt, cbc, &child);
1223 if (!ret) {
1224 *blk_cnt = *blk_cnt + 1;
1225 } else if (ret == -EINVAL) {
1226 if (c.fix_on) {
1227 F2FS_INODE_I_NID(&node_blk->i, idx) = 0;
1228 need_fix = 1;
1229 FIX_MSG("[0x%x] i_nid[%d] = 0", nid, idx);
1230 }
1231 skip:
1232 if (ntype == TYPE_DIRECT_NODE)
1233 child.pgofs += ADDRS_PER_BLOCK(&node_blk->i);
1234 else if (ntype == TYPE_INDIRECT_NODE)
1235 child.pgofs += ADDRS_PER_BLOCK(&node_blk->i) *
1236 NIDS_PER_BLOCK;
1237 else
1238 child.pgofs += ADDRS_PER_BLOCK(&node_blk->i) *
1239 NIDS_PER_BLOCK * NIDS_PER_BLOCK;
1240 }
1241
1242 }
1243
1244 check:
1245 /* check uncovered range in the back of extent */
1246 check_extent_info(&child, 0, 1);
1247
1248 if (child.state & FSCK_UNMATCHED_EXTENT) {
1249 ASSERT_MSG("ino: 0x%x has wrong ext: [pgofs:%u, blk:%u, len:%u]",
1250 nid, child.ei.fofs, child.ei.blk, child.ei.len);
1251 if (c.fix_on)
1252 need_fix = 1;
1253 }
1254
1255 if (i_blocks != *blk_cnt) {
1256 ASSERT_MSG("ino: 0x%x has i_blocks: 0x%08"PRIx64", "
1257 "but has 0x%x blocks",
1258 nid, i_blocks, *blk_cnt);
1259 if (c.fix_on) {
1260 node_blk->i.i_blocks = cpu_to_le64(*blk_cnt);
1261 need_fix = 1;
1262 FIX_MSG("[0x%x] i_blocks=0x%08"PRIx64" -> 0x%x",
1263 nid, i_blocks, *blk_cnt);
1264 }
1265 }
1266
1267 if (compressed && i_compr_blocks != cbc->cnt) {
1268 if (c.fix_on) {
1269 node_blk->i.i_compr_blocks = cpu_to_le64(cbc->cnt);
1270 need_fix = 1;
1271 FIX_MSG("[0x%x] i_compr_blocks=0x%08"PRIx64" -> 0x%x",
1272 nid, i_compr_blocks, cbc->cnt);
1273 }
1274 }
1275
1276 skip_blkcnt_fix:
1277 en = malloc(F2FS_PRINT_NAMELEN);
1278 ASSERT(en);
1279
1280 namelen = le32_to_cpu(node_blk->i.i_namelen);
1281 if (namelen > F2FS_NAME_LEN) {
1282 if (child_d && child_d->i_namelen <= F2FS_NAME_LEN) {
1283 ASSERT_MSG("ino: 0x%x has i_namelen: 0x%x, "
1284 "but has %d characters for name",
1285 nid, namelen, child_d->i_namelen);
1286 if (c.fix_on) {
1287 FIX_MSG("[0x%x] i_namelen=0x%x -> 0x%x", nid, namelen,
1288 child_d->i_namelen);
1289 node_blk->i.i_namelen = cpu_to_le32(child_d->i_namelen);
1290 need_fix = 1;
1291 }
1292 namelen = child_d->i_namelen;
1293 } else
1294 namelen = F2FS_NAME_LEN;
1295 }
1296 pretty_print_filename(node_blk->i.i_name, namelen, en,
1297 file_enc_name(&node_blk->i));
1298 if (ftype == F2FS_FT_ORPHAN)
1299 DBG(1, "Orphan Inode: 0x%x [%s] i_blocks: %u\n\n",
1300 le32_to_cpu(F2FS_NODE_FOOTER(node_blk)->ino),
1301 en, (u32)i_blocks);
1302
1303 if (is_qf_ino(F2FS_RAW_SUPER(sbi), nid))
1304 DBG(1, "Quota Inode: 0x%x [%s] i_blocks: %u\n\n",
1305 le32_to_cpu(F2FS_NODE_FOOTER(node_blk)->ino),
1306 en, (u32)i_blocks);
1307
1308 if (ftype == F2FS_FT_DIR) {
1309 DBG(1, "Directory Inode: 0x%x [%s] depth: %d has %d files\n\n",
1310 le32_to_cpu(F2FS_NODE_FOOTER(node_blk)->ino), en,
1311 le32_to_cpu(node_blk->i.i_current_depth),
1312 child.files);
1313
1314 if (i_links != child.links) {
1315 ASSERT_MSG("ino: 0x%x i_links: %u, real links: %u",
1316 nid, i_links, child.links);
1317 if (c.fix_on) {
1318 node_blk->i.i_links = cpu_to_le32(child.links);
1319 need_fix = 1;
1320 FIX_MSG("Dir: 0x%x i_links= 0x%x -> 0x%x",
1321 nid, i_links, child.links);
1322 }
1323 }
1324 if (child.dots < 2 &&
1325 !(node_blk->i.i_inline & F2FS_INLINE_DOTS)) {
1326 ASSERT_MSG("ino: 0x%x dots: %u",
1327 nid, child.dots);
1328 if (c.fix_on) {
1329 node_blk->i.i_inline |= F2FS_INLINE_DOTS;
1330 need_fix = 1;
1331 FIX_MSG("Dir: 0x%x set inline_dots", nid);
1332 }
1333 }
1334 }
1335
1336 i_gc_failures = le16_to_cpu(node_blk->i.i_gc_failures);
1337
1338 /*
1339 * old kernel initialized i_gc_failures as 0x01, in preen mode 2,
1340 * let's skip repairing.
1341 */
1342 if (ftype == F2FS_FT_REG_FILE && i_gc_failures &&
1343 (c.preen_mode != PREEN_MODE_2 || i_gc_failures != 0x01)) {
1344
1345 DBG(1, "Regular Inode: 0x%x [%s] depth: %d\n\n",
1346 le32_to_cpu(F2FS_NODE_FOOTER(node_blk)->ino), en,
1347 i_gc_failures);
1348
1349 if (c.fix_on) {
1350 node_blk->i.i_gc_failures = cpu_to_le16(0);
1351 need_fix = 1;
1352 INFO_MSG("Regular: 0x%x reset i_gc_failures from 0x%x to 0x00",
1353 nid, i_gc_failures);
1354 }
1355 }
1356
1357 free(en);
1358
1359 if (ftype == F2FS_FT_SYMLINK && i_size == 0 &&
1360 i_blocks == (i_xattr_nid ? 3 : 2)) {
1361 node_blk->i.i_size = cpu_to_le64(F2FS_BLKSIZE);
1362 need_fix = 1;
1363 FIX_MSG("Symlink: recover 0x%x with i_size=%lu",
1364 nid, (unsigned long)F2FS_BLKSIZE);
1365 }
1366
1367 if (ftype == F2FS_FT_ORPHAN && i_links) {
1368 ASSERT_MSG("ino: 0x%x is orphan inode, but has i_links: %u",
1369 nid, i_links);
1370 if (c.fix_on) {
1371 node_blk->i.i_links = 0;
1372 need_fix = 1;
1373 FIX_MSG("ino: 0x%x orphan_inode, i_links= 0x%x -> 0",
1374 nid, i_links);
1375 }
1376 }
1377
1378 /* drop extent information to avoid potential wrong access */
1379 if (need_fix && f2fs_dev_is_writable())
1380 node_blk->i.i_ext.len = 0;
1381
1382 if ((c.feature & F2FS_FEATURE_INODE_CHKSUM) &&
1383 f2fs_has_extra_isize(&node_blk->i)) {
1384 __u32 provided, calculated;
1385
1386 provided = le32_to_cpu(node_blk->i.i_inode_checksum);
1387 calculated = f2fs_inode_chksum(node_blk);
1388
1389 if (provided != calculated) {
1390 ASSERT_MSG("ino: 0x%x chksum:0x%x, but calculated one is: 0x%x",
1391 nid, provided, calculated);
1392 if (c.fix_on) {
1393 node_blk->i.i_inode_checksum =
1394 cpu_to_le32(calculated);
1395 need_fix = 1;
1396 FIX_MSG("ino: 0x%x recover, i_inode_checksum= 0x%x -> 0x%x",
1397 nid, provided, calculated);
1398 }
1399 }
1400 }
1401
1402 if (need_fix && f2fs_dev_is_writable()) {
1403 if (c.zoned_model == F2FS_ZONED_HM)
1404 node_blk->i.i_ext.len = 0;
1405 ret = update_block(sbi, node_blk, &ni->blk_addr, NULL);
1406 ASSERT(ret >= 0);
1407 }
1408 }
1409
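/*
 * A direct node is a flat array of block addresses. Every slot goes
 * through fsck_chk_data_blk(), and COMPRESS_ADDR markers are only
 * accepted on cluster-aligned offsets of compressed inodes.
 */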
int fsck_chk_dnode_blk(struct f2fs_sb_info *sbi, struct f2fs_inode *inode,
		u32 nid, enum FILE_TYPE ftype, struct f2fs_node *node_blk,
		u32 *blk_cnt, struct f2fs_compr_blk_cnt *cbc,
		struct child_info *child, struct node_info *ni)
1414 {
1415 int idx, ret;
1416 int need_fix = 0;
1417 child->p_ino = nid;
1418 child->pp_ino = le32_to_cpu(inode->i_pino);
1419 u32 i_flags = le32_to_cpu(inode->i_flags);
1420 bool compressed = i_flags & F2FS_COMPR_FL;
1421 bool compr_rel = inode->i_inline & F2FS_COMPRESS_RELEASED;
1422 u32 cluster_size = 1 << inode->i_log_cluster_size;
1423
1424 for (idx = 0; idx < ADDRS_PER_BLOCK(inode); idx++, child->pgofs++) {
1425 block_t blkaddr = le32_to_cpu(node_blk->dn.addr[idx]);
1426
1427 check_extent_info(child, blkaddr, 0);
1428
1429 if (blkaddr == NULL_ADDR)
1430 continue;
1431 if (blkaddr == COMPRESS_ADDR) {
1432 if (!compressed || (child->pgofs &
1433 (cluster_size - 1)) != 0) {
1434 if (c.fix_on) {
1435 node_blk->dn.addr[idx] = NULL_ADDR;
1436 need_fix = 1;
1437 FIX_MSG("[0x%x] dn.addr[%d] = 0", nid,
1438 idx);
1439 }
1440 continue;
1441 }
1442 if (!compr_rel) {
1443 F2FS_FSCK(sbi)->chk.valid_blk_cnt++;
1444 *blk_cnt = *blk_cnt + 1;
1445 cbc->cheader_pgofs = child->pgofs;
1446 cbc->cnt++;
1447 }
1448 continue;
1449 }
1450 if (!compr_rel && blkaddr == NEW_ADDR && child->pgofs -
1451 cbc->cheader_pgofs < cluster_size)
1452 cbc->cnt++;
1453 ret = fsck_chk_data_blk(sbi, IS_CASEFOLDED(inode),
1454 blkaddr, child,
1455 le64_to_cpu(inode->i_blocks) == *blk_cnt, ftype,
1456 nid, idx, ni->version,
1457 file_is_encrypt(inode), node_blk);
1458 if (blkaddr != le32_to_cpu(node_blk->dn.addr[idx]))
1459 need_fix = 1;
1460 if (!ret) {
1461 *blk_cnt = *blk_cnt + 1;
1462 if (cur_qtype != -1 && blkaddr != NEW_ADDR)
1463 qf_last_blkofs[cur_qtype] = child->pgofs;
1464 } else if (c.fix_on) {
1465 node_blk->dn.addr[idx] = NULL_ADDR;
1466 need_fix = 1;
1467 FIX_MSG("[0x%x] dn.addr[%d] = 0", nid, idx);
1468 }
1469 }
1470 if (need_fix && f2fs_dev_is_writable()) {
1471 ret = update_block(sbi, node_blk, &ni->blk_addr, NULL);
1472 ASSERT(ret >= 0);
1473 }
1474 return 0;
1475 }
1476
int fsck_chk_idnode_blk(struct f2fs_sb_info *sbi, struct f2fs_inode *inode,
		enum FILE_TYPE ftype, struct f2fs_node *node_blk, u32 *blk_cnt,
		struct f2fs_compr_blk_cnt *cbc, struct child_info *child)
1480 {
1481 int need_fix = 0, ret;
1482 int i = 0;
1483
1484 fsck_reada_all_direct_node_blocks(sbi, node_blk);
1485
1486 for (i = 0; i < NIDS_PER_BLOCK; i++) {
1487 if (le32_to_cpu(node_blk->in.nid[i]) == 0x0)
1488 goto skip;
1489 ret = fsck_chk_node_blk(sbi, inode,
1490 le32_to_cpu(node_blk->in.nid[i]),
1491 ftype, TYPE_DIRECT_NODE, blk_cnt,
1492 cbc, child);
1493 if (!ret)
1494 *blk_cnt = *blk_cnt + 1;
1495 else if (ret == -EINVAL) {
1496 if (!c.fix_on)
1497 printf("should delete in.nid[i] = 0;\n");
1498 else {
1499 node_blk->in.nid[i] = 0;
1500 need_fix = 1;
1501 FIX_MSG("Set indirect node 0x%x -> 0", i);
1502 }
1503 skip:
1504 child->pgofs += ADDRS_PER_BLOCK(inode);
1505 }
1506 }
1507
1508 if (need_fix && f2fs_dev_is_writable()) {
1509 struct node_info ni;
1510 nid_t nid = le32_to_cpu(F2FS_NODE_FOOTER(node_blk)->nid);
1511
1512 get_node_info(sbi, nid, &ni);
1513 ret = update_block(sbi, node_blk, &ni.blk_addr, NULL);
1514 ASSERT(ret >= 0);
1515 }
1516
1517 return 0;
1518 }
1519
int fsck_chk_didnode_blk(struct f2fs_sb_info *sbi, struct f2fs_inode *inode,
		enum FILE_TYPE ftype, struct f2fs_node *node_blk, u32 *blk_cnt,
		struct f2fs_compr_blk_cnt *cbc, struct child_info *child)
1523 {
1524 int i = 0;
1525 int need_fix = 0, ret = 0;
1526
1527 fsck_reada_all_direct_node_blocks(sbi, node_blk);
1528
1529 for (i = 0; i < NIDS_PER_BLOCK; i++) {
1530 if (le32_to_cpu(node_blk->in.nid[i]) == 0x0)
1531 goto skip;
1532 ret = fsck_chk_node_blk(sbi, inode,
1533 le32_to_cpu(node_blk->in.nid[i]),
1534 ftype, TYPE_INDIRECT_NODE, blk_cnt, cbc, child);
1535 if (!ret)
1536 *blk_cnt = *blk_cnt + 1;
1537 else if (ret == -EINVAL) {
1538 if (!c.fix_on)
1539 printf("should delete in.nid[i] = 0;\n");
1540 else {
1541 node_blk->in.nid[i] = 0;
1542 need_fix = 1;
1543 FIX_MSG("Set double indirect node 0x%x -> 0", i);
1544 }
1545 skip:
1546 child->pgofs += ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
1547 }
1548 }
1549
1550 if (need_fix && f2fs_dev_is_writable()) {
1551 struct node_info ni;
1552 nid_t nid = le32_to_cpu(F2FS_NODE_FOOTER(node_blk)->nid);
1553
1554 get_node_info(sbi, nid, &ni);
1555 ret = update_block(sbi, node_blk, &ni.blk_addr, NULL);
1556 ASSERT(ret >= 0);
1557 }
1558
1559 return 0;
1560 }
1561
1562 static const char *lookup_table =
1563 "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";
1564
1565 /**
1566 * base64_encode() -
1567 *
1568 * Encodes the input string using characters from the set [A-Za-z0-9+,].
1569 * The encoded string is roughly 4/3 times the size of the input string.
1570 */
static int base64_encode(const u8 *src, int len, char *dst)
1572 {
1573 int i, bits = 0, ac = 0;
1574 char *cp = dst;
1575
1576 for (i = 0; i < len; i++) {
1577 ac += src[i] << bits;
1578 bits += 8;
1579 do {
1580 *cp++ = lookup_table[ac & 0x3f];
1581 ac >>= 6;
1582 bits -= 6;
1583 } while (bits >= 6);
1584 }
1585 if (bits)
1586 *cp++ = lookup_table[ac & 0x3f];
1587 return cp - dst;
1588 }
1589
void pretty_print_filename(const u8 *raw_name, u32 len,
			   char out[F2FS_PRINT_NAMELEN], int enc_name)
1592 {
1593 len = min(len, (u32)F2FS_NAME_LEN);
1594
1595 if (enc_name)
1596 len = base64_encode(raw_name, len, out);
1597 else
1598 memcpy(out, raw_name, len);
1599 out[len] = 0;
1600 }
1601
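/*
 * Print one directory entry, either as a tree line (using tree_mark to
 * keep track of which branches are still open) or, in show_file_map mode,
 * as the full path of a regular file followed by a dump of its blocks.
 */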
static void print_dentry(struct f2fs_sb_info *sbi, __u8 *name,
		u8 *bitmap, struct f2fs_dir_entry *dentry,
		int max, int idx, int last_blk, int enc_name)
1605 {
1606 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
1607 u32 depth = fsck->dentry_depth;
1608 int last_de = 0;
1609 int next_idx = 0;
1610 u32 name_len;
1611 unsigned int i;
1612 int bit_offset;
1613 char new[F2FS_PRINT_NAMELEN];
1614
1615 if (!c.show_dentry && !c.show_file_map)
1616 return;
1617
1618 name_len = le16_to_cpu(dentry[idx].name_len);
1619 next_idx = idx + (name_len + F2FS_SLOT_LEN - 1) / F2FS_SLOT_LEN;
1620
1621 bit_offset = find_next_bit_le(bitmap, max, next_idx);
1622 if (bit_offset >= max && last_blk)
1623 last_de = 1;
1624
1625 if (tree_mark_size <= depth) {
1626 tree_mark_size *= 2;
1627 ASSERT(tree_mark_size != 0);
1628 tree_mark = realloc(tree_mark, tree_mark_size);
1629 ASSERT(tree_mark != NULL);
1630 }
1631
1632 if (last_de)
1633 tree_mark[depth] = '`';
1634 else
1635 tree_mark[depth] = '|';
1636
1637 if (tree_mark[depth - 1] == '`')
1638 tree_mark[depth - 1] = ' ';
1639
1640 pretty_print_filename(name, name_len, new, enc_name);
1641
1642 if (c.show_file_map) {
1643 struct f2fs_dentry *d = fsck->dentry;
1644
1645 if (dentry[idx].file_type != F2FS_FT_REG_FILE)
1646 return;
1647
1648 while (d) {
1649 if (d->depth > 1)
1650 printf("/%s", d->name);
1651 d = d->next;
1652 }
1653 printf("/%s", new);
1654 if (dump_node(sbi, le32_to_cpu(dentry[idx].ino), 0, NULL, 0, 0))
1655 printf("\33[2K\r");
1656 } else {
1657 for (i = 1; i < depth; i++)
1658 printf("%c ", tree_mark[i]);
1659
1660 printf("%c-- %s <ino = 0x%x>, <encrypted (%d)>\n",
1661 last_de ? '`' : '|',
1662 new, le32_to_cpu(dentry[idx].ino),
1663 enc_name);
1664 }
1665 }
1666
static int f2fs_check_hash_code(int encoding, int casefolded,
			struct f2fs_dir_entry *dentry,
			const unsigned char *name, u32 len, int enc_name)
1670 {
1671 /* Casefolded Encrypted names require a key to compute siphash */
1672 if (enc_name && casefolded)
1673 return 0;
1674
1675 f2fs_hash_t hash_code = f2fs_dentry_hash(encoding, casefolded, name, len);
1676 /* fix hash_code made by old buggy code */
1677 if (dentry->hash_code != hash_code) {
1678 char new[F2FS_PRINT_NAMELEN];
1679
1680 pretty_print_filename(name, len, new, enc_name);
1681 FIX_MSG("Mismatch hash_code for \"%s\" [%x:%x]",
1682 new, le32_to_cpu(dentry->hash_code),
1683 hash_code);
1684 dentry->hash_code = cpu_to_le32(hash_code);
1685 return 1;
1686 }
1687 return 0;
1688 }
1689
1690
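/*
 * Directory blocks are organized as a multi-level hash table;
 * __get_current_level() maps a page offset inside the directory to the
 * hash level it belongs to by accumulating bucket counts level by level.
 */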
static int __get_current_level(int dir_level, u32 pgofs)
1692 {
1693 unsigned int bidx = 0;
1694 int i;
1695
1696 for (i = 0; i < MAX_DIR_HASH_DEPTH; i++) {
1697 bidx += dir_buckets(i, dir_level) * bucket_blocks(i);
1698 if (bidx > pgofs)
1699 break;
1700 }
1701 return i;
1702 }
1703
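/*
 * Recompute, from the dentry's hash and the directory's dir_level, the
 * block range the entry is allowed to live in, and report a misplaced
 * dentry when pgofs falls outside [bidx, end_block).
 */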
static int f2fs_check_dirent_position(const struct f2fs_dir_entry *dentry,
				      const char *printable_name,
				      u32 pgofs, u8 dir_level, u32 pino)
1707 {
1708 unsigned int nbucket, nblock;
1709 unsigned int bidx, end_block;
1710 int level;
1711
1712 level = __get_current_level(dir_level, pgofs);
1713
1714 nbucket = dir_buckets(level, dir_level);
1715 nblock = bucket_blocks(level);
1716
1717 bidx = dir_block_index(level, dir_level,
1718 le32_to_cpu(dentry->hash_code) % nbucket);
1719 end_block = bidx + nblock;
1720
1721 if (pgofs >= bidx && pgofs < end_block)
1722 return 0;
1723
1724 ASSERT_MSG("Wrong position of dirent pino:%u, name:%s, level:%d, "
1725 "dir_level:%d, pgofs:%u, correct range:[%u, %u]\n",
1726 pino, printable_name, level, dir_level, pgofs, bidx,
1727 end_block - 1);
1728 return 1;
1729 }
1730
static int __chk_dots_dentries(struct f2fs_sb_info *sbi,
			       int casefolded,
			       struct f2fs_dir_entry *dentry,
			       struct child_info *child,
			       u8 *name, int len,
			       __u8 (*filename)[F2FS_SLOT_LEN],
			       int enc_name)
1738 {
1739 int fixed = 0;
1740
1741 if ((name[0] == '.' && len == 1)) {
1742 if (le32_to_cpu(dentry->ino) != child->p_ino) {
1743 ASSERT_MSG("Bad inode number[0x%x] for '.', parent_ino is [0x%x]\n",
1744 le32_to_cpu(dentry->ino), child->p_ino);
1745 dentry->ino = cpu_to_le32(child->p_ino);
1746 fixed = 1;
1747 }
1748 }
1749
1750 if (name[0] == '.' && name[1] == '.' && len == 2) {
1751 if (child->p_ino == F2FS_ROOT_INO(sbi)) {
1752 if (le32_to_cpu(dentry->ino) != F2FS_ROOT_INO(sbi)) {
1753 ASSERT_MSG("Bad inode number[0x%x] for '..'\n",
1754 le32_to_cpu(dentry->ino));
1755 dentry->ino = cpu_to_le32(F2FS_ROOT_INO(sbi));
1756 fixed = 1;
1757 }
1758 } else if (le32_to_cpu(dentry->ino) != child->pp_ino) {
1759 ASSERT_MSG("Bad inode number[0x%x] for '..', parent parent ino is [0x%x]\n",
1760 le32_to_cpu(dentry->ino), child->pp_ino);
1761 dentry->ino = cpu_to_le32(child->pp_ino);
1762 fixed = 1;
1763 }
1764 }
1765
1766 if (f2fs_check_hash_code(get_encoding(sbi), casefolded, dentry, name, len, enc_name))
1767 fixed = 1;
1768
1769 if (name[len] != '\0') {
1770 ASSERT_MSG("'.' is not NULL terminated\n");
1771 name[len] = '\0';
1772 memcpy(*filename, name, len);
1773 fixed = 1;
1774 }
1775 return fixed;
1776 }
1777
static void nullify_dentry(struct f2fs_dir_entry *dentry, int offs,
			   __u8 (*filename)[F2FS_SLOT_LEN], u8 **bitmap)
1780 {
1781 memset(dentry, 0, sizeof(struct f2fs_dir_entry));
1782 test_and_clear_bit_le(offs, *bitmap);
1783 memset(*filename, 0, F2FS_SLOT_LEN);
1784 }
1785
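/*
 * Core dentry walker shared by block and inline directories: validate
 * each used slot (ino, file type, name length, hash, position), recurse
 * into the child inode via fsck_chk_node_blk(), and clear slots that
 * cannot be salvaged. Returns the number of good dentries, or -1 when
 * something had to be fixed.
 */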
static int __chk_dentries(struct f2fs_sb_info *sbi, int casefolded,
			struct child_info *child,
			u8 *bitmap, struct f2fs_dir_entry *dentry,
			__u8 (*filenames)[F2FS_SLOT_LEN],
			int max, int last_blk, int enc_name)
1791 {
1792 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
1793 enum FILE_TYPE ftype;
1794 int dentries = 0;
1795 u32 blk_cnt;
1796 struct f2fs_compr_blk_cnt cbc;
1797 u8 *name;
1798 char en[F2FS_PRINT_NAMELEN];
1799 u16 name_len;
1800 int ret = 0;
1801 int fixed = 0;
1802 int i, slots;
1803
1804 /* readahead inode blocks */
1805 for (i = 0; i < max; i++) {
1806 u32 ino;
1807
1808 if (test_bit_le(i, bitmap) == 0)
1809 continue;
1810
1811 ino = le32_to_cpu(dentry[i].ino);
1812
1813 if (IS_VALID_NID(sbi, ino)) {
1814 struct node_info ni;
1815
1816 get_node_info(sbi, ino, &ni);
1817 if (f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
1818 DATA_GENERIC)) {
1819 dev_reada_block(ni.blk_addr);
1820 name_len = le16_to_cpu(dentry[i].name_len);
1821 if (name_len > 0)
1822 i += (name_len + F2FS_SLOT_LEN - 1) / F2FS_SLOT_LEN - 1;
1823 }
1824 }
1825 }
1826
1827 for (i = 0; i < max;) {
1828 if (test_bit_le(i, bitmap) == 0) {
1829 i++;
1830 continue;
1831 }
1832 if (!IS_VALID_NID(sbi, le32_to_cpu(dentry[i].ino))) {
1833 ASSERT_MSG("Bad dentry 0x%x with invalid NID/ino 0x%x",
1834 i, le32_to_cpu(dentry[i].ino));
1835 if (c.fix_on) {
1836 FIX_MSG("Clear bad dentry 0x%x with bad ino 0x%x",
1837 i, le32_to_cpu(dentry[i].ino));
1838 test_and_clear_bit_le(i, bitmap);
1839 fixed = 1;
1840 }
1841 i++;
1842 continue;
1843 }
1844
1845 ftype = dentry[i].file_type;
1846 if ((ftype <= F2FS_FT_UNKNOWN || ftype > F2FS_FT_LAST_FILE_TYPE)) {
1847 ASSERT_MSG("Bad dentry 0x%x with unexpected ftype 0x%x",
1848 le32_to_cpu(dentry[i].ino), ftype);
1849 if (c.fix_on) {
1850 FIX_MSG("Clear bad dentry 0x%x with bad ftype 0x%x",
1851 i, ftype);
1852 test_and_clear_bit_le(i, bitmap);
1853 fixed = 1;
1854 }
1855 i++;
1856 continue;
1857 }
1858
1859 name_len = le16_to_cpu(dentry[i].name_len);
1860
1861 if (name_len == 0 || name_len > F2FS_NAME_LEN) {
1862 ASSERT_MSG("Bad dentry 0x%x with invalid name_len", i);
1863 if (c.fix_on) {
1864 FIX_MSG("Clear bad dentry 0x%x", i);
1865 test_and_clear_bit_le(i, bitmap);
1866 fixed = 1;
1867 }
1868 i++;
1869 continue;
1870 }
1871 name = calloc(name_len + 1, 1);
1872 ASSERT(name);
1873
1874 memcpy(name, filenames[i], name_len);
1875 slots = (name_len + F2FS_SLOT_LEN - 1) / F2FS_SLOT_LEN;
1876
1877 /* Be careful: 'dentry.file_type' is not i_mode. */
1878 if (ftype == F2FS_FT_DIR) {
1879 if ((name[0] == '.' && name_len == 1) ||
1880 (name[0] == '.' && name[1] == '.' &&
1881 name_len == 2)) {
1882 ret = __chk_dots_dentries(sbi, casefolded, &dentry[i],
1883 child, name, name_len, &filenames[i],
1884 enc_name);
1885 switch (ret) {
1886 case 1:
1887 fixed = 1;
1888 fallthrough;
1889 case 0:
1890 child->dots++;
1891 break;
1892 }
1893
1894 if (child->dots > 2) {
1895 ASSERT_MSG("More than one '.' or '..', should delete the extra one\n");
1896 nullify_dentry(&dentry[i], i,
1897 &filenames[i], &bitmap);
1898 child->dots--;
1899 fixed = 1;
1900 }
1901
1902 i++;
1903 free(name);
1904 continue;
1905 }
1906 }
1907
1908 if (f2fs_check_hash_code(get_encoding(sbi), casefolded, dentry + i, name, name_len, enc_name))
1909 fixed = 1;
1910
1911 pretty_print_filename(name, name_len, en, enc_name);
1912
1913 if (max == NR_DENTRY_IN_BLOCK) {
1914 ret = f2fs_check_dirent_position(dentry + i, en,
1915 child->pgofs, child->dir_level,
1916 child->p_ino);
1917 if (ret) {
1918 if (c.fix_on) {
1919 FIX_MSG("Clear bad dentry 0x%x", i);
1920 test_and_clear_bit_le(i, bitmap);
1921 fixed = 1;
1922 }
1923 i++;
1924 free(name);
1925 continue;
1926 }
1927 }
1928
1929 DBG(1, "[%3u]-[0x%x] name[%s] len[0x%x] ino[0x%x] type[0x%x]\n",
1930 fsck->dentry_depth, i, en, name_len,
1931 le32_to_cpu(dentry[i].ino),
1932 dentry[i].file_type);
1933
1934 print_dentry(sbi, name, bitmap,
1935 dentry, max, i, last_blk, enc_name);
1936
1937 blk_cnt = 1;
1938 cbc.cnt = 0;
1939 cbc.cheader_pgofs = CHEADER_PGOFS_NONE;
1940 child->i_namelen = name_len;
1941 ret = fsck_chk_node_blk(sbi,
1942 NULL, le32_to_cpu(dentry[i].ino),
1943 ftype, TYPE_INODE, &blk_cnt, &cbc, child);
1944
1945 if (ret && c.fix_on) {
1946 int j;
1947
1948 for (j = 0; j < slots; j++)
1949 test_and_clear_bit_le(i + j, bitmap);
1950 FIX_MSG("Unlink [0x%x] - %s len[0x%x], type[0x%x]",
1951 le32_to_cpu(dentry[i].ino),
1952 en, name_len,
1953 dentry[i].file_type);
1954 fixed = 1;
1955 } else if (ret == 0) {
1956 if (ftype == F2FS_FT_DIR)
1957 child->links++;
1958 dentries++;
1959 child->files++;
1960 }
1961
1962 i += slots;
1963 free(name);
1964 }
1965 return fixed ? -1 : dentries;
1966 }
1967
1968 int fsck_chk_inline_dentries(struct f2fs_sb_info *sbi,
1969 struct f2fs_node *node_blk, struct child_info *child)
1970 {
1971 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
1972 struct f2fs_dentry *cur_dentry = fsck->dentry_end;
1973 struct f2fs_dentry *new_dentry;
1974 struct f2fs_dentry_ptr d;
1975 void *inline_dentry;
1976 int dentries;
1977
1978 inline_dentry = inline_data_addr(node_blk);
1979 ASSERT(inline_dentry != NULL);
1980
1981 make_dentry_ptr(&d, node_blk, inline_dentry, 2);
1982
1983 fsck->dentry_depth++;
1984 new_dentry = calloc(sizeof(struct f2fs_dentry), 1);
1985 ASSERT(new_dentry != NULL);
1986
1987 new_dentry->depth = fsck->dentry_depth;
1988 memcpy(new_dentry->name, child->p_name, F2FS_NAME_LEN);
1989 cur_dentry->next = new_dentry;
1990 fsck->dentry_end = new_dentry;
1991
1992 dentries = __chk_dentries(sbi, IS_CASEFOLDED(&node_blk->i), child,
1993 d.bitmap, d.dentry, d.filename, d.max, 1,
1994 file_is_encrypt(&node_blk->i));// pass through
1995 if (dentries < 0) {
1996 DBG(1, "[%3d] Inline Dentry Block Fixed hash_codes\n\n",
1997 fsck->dentry_depth);
1998 } else {
1999 DBG(1, "[%3d] Inline Dentry Block Done : "
2000 "dentries:%d in %d slots (len:%d)\n\n",
2001 fsck->dentry_depth, dentries,
2002 d.max, F2FS_NAME_LEN);
2003 }
2004 fsck->dentry = cur_dentry;
2005 fsck->dentry_end = cur_dentry;
2006 cur_dentry->next = NULL;
2007 free(new_dentry);
2008 fsck->dentry_depth--;
2009 return dentries;
2010 }
2011
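/*
 * Check one on-disk dentry block: read it, verify its entries with
 * __chk_dentries(), and write the fixed block back if anything changed
 * and the device is writable.
 */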
2012 int fsck_chk_dentry_blk(struct f2fs_sb_info *sbi, int casefolded, u32 blk_addr,
2013 struct child_info *child, int last_blk, int enc_name,
2014 struct f2fs_node *node_blk)
2015 {
2016 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2017 struct f2fs_dentry_block *de_blk;
2018 struct f2fs_dentry *cur_dentry = fsck->dentry_end;
2019 struct f2fs_dentry *new_dentry;
2020 int dentries, ret;
2021
2022 de_blk = (struct f2fs_dentry_block *)calloc(F2FS_BLKSIZE, 1);
2023 ASSERT(de_blk != NULL);
2024
2025 ret = dev_read_block(de_blk, blk_addr);
2026 ASSERT(ret >= 0);
2027
2028 fsck->dentry_depth++;
2029 new_dentry = calloc(sizeof(struct f2fs_dentry), 1);
2030 ASSERT(new_dentry != NULL);
2031 new_dentry->depth = fsck->dentry_depth;
2032 memcpy(new_dentry->name, child->p_name, F2FS_NAME_LEN);
2033 cur_dentry->next = new_dentry;
2034 fsck->dentry_end = new_dentry;
2035
2036 dentries = __chk_dentries(sbi, casefolded, child,
2037 de_blk->dentry_bitmap,
2038 F2FS_DENTRY_BLOCK_DENTRIES(de_blk), F2FS_DENTRY_BLOCK_FILENAMES(de_blk),
2039 NR_DENTRY_IN_BLOCK, last_blk, enc_name);
2040
2041 if (dentries < 0 && f2fs_dev_is_writable()) {
2042 ret = update_block(sbi, de_blk, &blk_addr, node_blk);
2043 ASSERT(ret >= 0);
2044 DBG(1, "[%3d] Dentry Block [0x%x] Fixed hash_codes\n\n",
2045 fsck->dentry_depth, blk_addr);
2046 } else {
2047 DBG(1, "[%3d] Dentry Block [0x%x] Done : "
2048 "dentries:%d in %d slots (len:%d)\n\n",
2049 fsck->dentry_depth, blk_addr, dentries,
2050 NR_DENTRY_IN_BLOCK, F2FS_NAME_LEN);
2051 }
2052 fsck->dentry = cur_dentry;
2053 fsck->dentry_end = cur_dentry;
2054 cur_dentry->next = NULL;
2055 free(new_dentry);
2056 fsck->dentry_depth--;
2057 free(de_blk);
2058 return 0;
2059 }
2060
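/*
 * Check one data block address: verify it against the SSA, SIT and main
 * bitmaps, bump the valid block count, and descend into dentry blocks
 * when the owner is a directory.
 */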
2061 int fsck_chk_data_blk(struct f2fs_sb_info *sbi, int casefolded,
2062 u32 blk_addr, struct child_info *child, int last_blk,
2063 enum FILE_TYPE ftype, u32 parent_nid, u16 idx_in_node, u8 ver,
2064 int enc_name, struct f2fs_node *node_blk)
2065 {
2066 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2067
2068 /* Is it reserved block? */
2069 if (blk_addr == NEW_ADDR) {
2070 fsck->chk.valid_blk_cnt++;
2071 return 0;
2072 }
2073
2074 if (!f2fs_is_valid_blkaddr(sbi, blk_addr, DATA_GENERIC)) {
2075 ASSERT_MSG("blkaddress is not valid. [0x%x]", blk_addr);
2076 return -EINVAL;
2077 }
2078
2079 if (is_valid_ssa_data_blk(sbi, blk_addr, parent_nid,
2080 idx_in_node, ver)) {
2081 ASSERT_MSG("summary data block is not valid. [0x%x]",
2082 parent_nid);
2083 return -EINVAL;
2084 }
2085
2086 if (f2fs_test_sit_bitmap(sbi, blk_addr) == 0)
2087 ASSERT_MSG("SIT bitmap is 0x0. blk_addr[0x%x]", blk_addr);
2088
2089 if (f2fs_test_main_bitmap(sbi, blk_addr) != 0)
2090 ASSERT_MSG("Duplicated data [0x%x]. pnid[0x%x] idx[0x%x]",
2091 blk_addr, parent_nid, idx_in_node);
2092
2093 fsck->chk.valid_blk_cnt++;
2094
2095 if (ftype == F2FS_FT_DIR) {
2096 f2fs_set_main_bitmap(sbi, blk_addr, CURSEG_HOT_DATA);
2097 return fsck_chk_dentry_blk(sbi, casefolded, blk_addr, child,
2098 last_blk, enc_name, node_blk);
2099 } else {
2100 f2fs_set_main_bitmap(sbi, blk_addr, CURSEG_WARM_DATA);
2101 }
2102 return 0;
2103 }
2104
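/*
 * Check the orphan inode list recorded in the checkpoint pack. Each orphan
 * ino is verified with fsck_chk_node_blk(); when fixing, the orphan blocks
 * are rewritten with only the entries that passed the check.
 */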
2105 int fsck_chk_orphan_node(struct f2fs_sb_info *sbi)
2106 {
2107 u32 blk_cnt = 0;
2108 struct f2fs_compr_blk_cnt cbc = {0, CHEADER_PGOFS_NONE};
2109 block_t start_blk, orphan_blkaddr, i, j;
2110 struct f2fs_orphan_block *orphan_blk, *new_blk;
2111 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2112 u32 entry_count;
2113
2114 if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ORPHAN_PRESENT_FLAG))
2115 return 0;
2116
2117 start_blk = __start_cp_addr(sbi) + 1 + get_sb(cp_payload);
2118 orphan_blkaddr = __start_sum_addr(sbi) - 1 - get_sb(cp_payload);
2119
2120 f2fs_ra_meta_pages(sbi, start_blk, orphan_blkaddr, META_CP);
2121
2122 orphan_blk = calloc(F2FS_BLKSIZE, 1);
2123 ASSERT(orphan_blk);
2124
2125 new_blk = calloc(F2FS_BLKSIZE, 1);
2126 ASSERT(new_blk);
2127
2128 for (i = 0; i < orphan_blkaddr; i++) {
2129 int ret = dev_read_block(orphan_blk, start_blk + i);
2130 u32 new_entry_count = 0;
2131
2132 ASSERT(ret >= 0);
2133 entry_count = le32_to_cpu(F2FS_ORPHAN_BLOCK_FOOTER(orphan_blk)->entry_count);
2134
2135 for (j = 0; j < entry_count; j++) {
2136 nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
2137 DBG(1, "[%3d] ino [0x%x]\n", i, ino);
2138 struct node_info ni;
2139 blk_cnt = 1;
2140 cbc.cnt = 0;
2141 cbc.cheader_pgofs = CHEADER_PGOFS_NONE;
2142
2143 if (c.preen_mode == PREEN_MODE_1 && !c.fix_on) {
2144 get_node_info(sbi, ino, &ni);
2145 if (!IS_VALID_NID(sbi, ino) ||
2146 !f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
2147 DATA_GENERIC)) {
2148 free(orphan_blk);
2149 free(new_blk);
2150 return -EINVAL;
2151 }
2152
2153 continue;
2154 }
2155
2156 ret = fsck_chk_node_blk(sbi, NULL, ino,
2157 F2FS_FT_ORPHAN, TYPE_INODE, &blk_cnt,
2158 &cbc, NULL);
2159 if (!ret)
2160 new_blk->ino[new_entry_count++] =
2161 orphan_blk->ino[j];
2162 else if (ret && c.fix_on)
2163 FIX_MSG("[0x%x] remove from orphan list", ino);
2164 else if (ret)
2165 ASSERT_MSG("[0x%x] wrong orphan inode", ino);
2166 }
2167 if (f2fs_dev_is_writable() && c.fix_on &&
2168 entry_count != new_entry_count) {
2169 F2FS_ORPHAN_BLOCK_FOOTER(new_blk)->entry_count = cpu_to_le32(new_entry_count);
2170 ret = dev_write_block(new_blk, start_blk + i);
2171 ASSERT(ret >= 0);
2172 }
2173 memset(orphan_blk, 0, F2FS_BLKSIZE);
2174 memset(new_blk, 0, F2FS_BLKSIZE);
2175 }
2176 free(orphan_blk);
2177 free(new_blk);
2178
2179 return 0;
2180 }
2181
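/*
 * Check the quota file inodes recorded in the superblock and, when fixing
 * is enabled, rebuild any quota inode that fails the node check.
 */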
2182 int fsck_chk_quota_node(struct f2fs_sb_info *sbi)
2183 {
2184 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2185 enum quota_type qtype;
2186 int ret = 0;
2187 u32 blk_cnt = 0;
2188 struct f2fs_compr_blk_cnt cbc = {0, CHEADER_PGOFS_NONE};
2189
2190 for (qtype = 0; qtype < F2FS_MAX_QUOTAS; qtype++) {
2191 cur_qtype = qtype;
2192 if (sb->qf_ino[qtype] == 0)
2193 continue;
2194 nid_t ino = QUOTA_INO(sb, qtype);
2195 struct node_info ni;
2196
2197 DBG(1, "qtype [%d] ino [0x%x]\n", qtype, ino);
2198 blk_cnt = 1;
2199 cbc.cnt = 0;
2200 cbc.cheader_pgofs = CHEADER_PGOFS_NONE;
2201
2202 if (c.preen_mode == PREEN_MODE_1 && !c.fix_on) {
2203 get_node_info(sbi, ino, &ni);
2204 if (!IS_VALID_NID(sbi, ino) ||
2205 !f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
2206 DATA_GENERIC))
2207 return -EINVAL;
2208 continue;
2209 }
2210 ret = fsck_chk_node_blk(sbi, NULL, ino,
2211 F2FS_FT_REG_FILE, TYPE_INODE, &blk_cnt,
2212 &cbc, NULL);
2213 if (ret) {
2214 ASSERT_MSG("wrong quota inode, qtype [%d] ino [0x%x]",
2215 qtype, ino);
2216 qf_szchk_type[qtype] = QF_SZCHK_ERR;
2217 if (c.fix_on)
2218 f2fs_rebuild_qf_inode(sbi, qtype);
2219 }
2220 }
2221 cur_qtype = -1;
2222 return ret;
2223 }
2224
2225 static void fsck_disconnect_file(struct f2fs_sb_info *sbi, nid_t ino,
2226 bool dealloc);
2227
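/*
 * Compare the quota usage tracked during the scan against the on-disk
 * quota files and, when fixing, rebuild and rewrite any quota file whose
 * content does not match.
 */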
2228 int fsck_chk_quota_files(struct f2fs_sb_info *sbi)
2229 {
2230 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2231 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2232 enum quota_type qtype;
2233 f2fs_ino_t ino;
2234 int ret = 0;
2235 int needs_writeout;
2236
2237 /* Return if quota feature is disabled */
2238 if (!fsck->qctx)
2239 return 0;
2240
2241 for (qtype = 0; qtype < F2FS_MAX_QUOTAS; qtype++) {
2242 ino = sb->qf_ino[qtype];
2243 if (!ino)
2244 continue;
2245
2246 DBG(1, "Checking Quota file ([%3d] ino [0x%x])\n", qtype, ino);
2247 needs_writeout = 0;
2248 ret = quota_compare_and_update(sbi, qtype, &needs_writeout,
2249 c.preserve_limits);
2250 if (ret == 0 && needs_writeout == 0) {
2251 DBG(1, "OK\n");
2252 continue;
2253 }
2254
2255 /* Something is wrong */
2256 if (c.fix_on) {
2257 DBG(0, "Fixing Quota file ([%3d] ino [0x%x])\n",
2258 qtype, ino);
2259 fsck_disconnect_file(sbi, ino, true);
2260 f2fs_rebuild_qf_inode(sbi, qtype);
2261 f2fs_filesize_update(sbi, ino, 0);
2262 ret = quota_write_inode(sbi, qtype);
2263 if (!ret) {
2264 c.quota_fixed = true;
2265 DBG(1, "OK\n");
2266 } else {
2267 ASSERT_MSG("Unable to write quota file");
2268 }
2269 } else {
2270 ASSERT_MSG("Quota file is missing or invalid"
2271 " quota file content found.");
2272 }
2273 }
2274 return ret;
2275 }
2276
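/*
 * Cross-check the metadata gathered during the scan: SIT usage and node
 * counts against the checkpoint, orphan and quota inodes, and every NAT
 * entry against the SIT and NAT bitmaps.
 */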
2277 int fsck_chk_meta(struct f2fs_sb_info *sbi)
2278 {
2279 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2280 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
2281 struct seg_entry *se;
2282 unsigned int sit_valid_segs = 0, sit_node_blks = 0;
2283 unsigned int i;
2284
2285 /* 1. check sit usage with CP: curseg is lost? */
2286 for (i = 0; i < MAIN_SEGS(sbi); i++) {
2287 se = get_seg_entry(sbi, i);
2288 if (se->valid_blocks != 0)
2289 sit_valid_segs++;
2290 else if (IS_CUR_SEGNO(sbi, i)) {
2291 /* curseg has not been written back to device */
2292 MSG(1, "\tInfo: curseg %u is counted in valid segs\n", i);
2293 sit_valid_segs++;
2294 }
2295 if (IS_NODESEG(se->type))
2296 sit_node_blks += se->valid_blocks;
2297 }
2298 if (fsck->chk.sit_free_segs + sit_valid_segs !=
2299 get_usable_seg_count(sbi)) {
2300 ASSERT_MSG("SIT usage does not match: sit_free_segs %u, "
2301 "sit_valid_segs %u, total_segs %u",
2302 fsck->chk.sit_free_segs, sit_valid_segs,
2303 get_usable_seg_count(sbi));
2304 return -EINVAL;
2305 }
2306
2307 /* 2. check node count */
2308 if (fsck->chk.valid_nat_entry_cnt != sit_node_blks) {
2309 ASSERT_MSG("node count does not match: valid_nat_entry_cnt %u,"
2310 " sit_node_blks %u",
2311 fsck->chk.valid_nat_entry_cnt, sit_node_blks);
2312 return -EINVAL;
2313 }
2314
2315 /* 3. check SIT with CP */
2316 if (fsck->chk.sit_free_segs != le32_to_cpu(cp->free_segment_count)) {
2317 ASSERT_MSG("free segs does not match: sit_free_segs %u, "
2318 "free_segment_count %u",
2319 fsck->chk.sit_free_segs,
2320 le32_to_cpu(cp->free_segment_count));
2321 return -EINVAL;
2322 }
2323
2324 /* 4. check NAT with CP */
2325 if (fsck->chk.valid_nat_entry_cnt !=
2326 le32_to_cpu(cp->valid_node_count)) {
2327 ASSERT_MSG("valid node does not match: valid_nat_entry_cnt %u,"
2328 " valid_node_count %u",
2329 fsck->chk.valid_nat_entry_cnt,
2330 le32_to_cpu(cp->valid_node_count));
2331 return -EINVAL;
2332 }
2333
2334 /* 5. check orphan inodes simply */
2335 if (fsck_chk_orphan_node(sbi))
2336 return -EINVAL;
2337
2338 /* 6. check nat entries -- must be done before quota check */
2339 for (i = 0; i < fsck->nr_nat_entries; i++) {
2340 u32 blk = le32_to_cpu(fsck->entries[i].block_addr);
2341 nid_t ino = le32_to_cpu(fsck->entries[i].ino);
2342
2343 if (!blk)
2344 /*
2345 * skip entries whose block_addr is 0; otherwise
2346 * BLKOFF_FROM_MAIN(sbi, blk) would yield a negative number
2347 */
2348 continue;
2349
2350 if (!f2fs_is_valid_blkaddr(sbi, blk, DATA_GENERIC)) {
2351 MSG(0, "\tError: nat entry[ino %u block_addr 0x%x]"
2352 " is in valid\n",
2353 ino, blk);
2354 return -EINVAL;
2355 }
2356
2357 if (!f2fs_test_sit_bitmap(sbi, blk)) {
2358 MSG(0, "\tError: nat entry[ino %u block_addr 0x%x]"
2359 " not find it in sit_area_bitmap\n",
2360 ino, blk);
2361 return -EINVAL;
2362 }
2363
2364 if (!IS_VALID_NID(sbi, ino)) {
2365 MSG(0, "\tError: nat_entry->ino %u exceeds the range"
2366 " of nat entries %u\n",
2367 ino, fsck->nr_nat_entries);
2368 return -EINVAL;
2369 }
2370
2371 if (!f2fs_test_bit(ino, fsck->nat_area_bitmap)) {
2372 MSG(0, "\tError: nat_entry->ino %u is not set in"
2373 " nat_area_bitmap\n", ino);
2374 return -EINVAL;
2375 }
2376 }
2377
2378 /* 7. check quota inodes simply */
2379 if (fsck_chk_quota_node(sbi))
2380 return -EINVAL;
2381
2382 if (fsck->nat_valid_inode_cnt != le32_to_cpu(cp->valid_inode_count)) {
2383 ASSERT_MSG("valid inode does not match: nat_valid_inode_cnt %u,"
2384 " valid_inode_count %u",
2385 fsck->nat_valid_inode_cnt,
2386 le32_to_cpu(cp->valid_inode_count));
2387 return -EINVAL;
2388 }
2389
2390 return 0;
2391 }
2392
2393 void fsck_chk_checkpoint(struct f2fs_sb_info *sbi)
2394 {
2395 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
2396
2397 if (get_cp(ckpt_flags) & CP_LARGE_NAT_BITMAP_FLAG) {
2398 if (get_cp(checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
2399 ASSERT_MSG("Deprecated layout of large_nat_bitmap, "
2400 "chksum_offset:%u", get_cp(checksum_offset));
2401 c.fix_chksum = 1;
2402 }
2403 }
2404 }
2405
2406 void fsck_init(struct f2fs_sb_info *sbi)
2407 {
2408 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2409 struct f2fs_sm_info *sm_i = SM_I(sbi);
2410
2411 /*
2412 * We build three bitmaps for the main/sit/nat areas so that we can check
2413 * the consistency of the filesystem.
2414 * 1. main_area_bitmap records whether each block in the main area
2415 * is used or not.
2416 * 2. nat_area_bitmap records which nids are used in the NAT.
2417 * 3. sit_area_bitmap records which main area blocks the SIT marks valid.
2418 * In the last step, we compare main_area_bitmap with sit_area_bitmap.
2419 */
2420 fsck->nr_main_blks = sm_i->main_segments << sbi->log_blocks_per_seg;
2421 fsck->main_area_bitmap_sz = (fsck->nr_main_blks + 7) / 8;
2422 fsck->main_area_bitmap = calloc(fsck->main_area_bitmap_sz, 1);
2423 ASSERT(fsck->main_area_bitmap != NULL);
2424
2425 build_nat_area_bitmap(sbi);
2426
2427 build_sit_area_bitmap(sbi);
2428
2429 ASSERT(tree_mark_size != 0);
2430 tree_mark = calloc(tree_mark_size, 1);
2431 ASSERT(tree_mark != NULL);
2432 fsck->dentry = calloc(sizeof(struct f2fs_dentry), 1);
2433 ASSERT(fsck->dentry != NULL);
2434 memcpy(fsck->dentry->name, "/", 1);
2435 fsck->dentry_end = fsck->dentry;
2436
2437 c.quota_fixed = false;
2438 }
2439
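/*
 * Rewrite i_links of every inode on the hard link list with the number of
 * links actually found during the scan.
 */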
2440 static void fix_hard_links(struct f2fs_sb_info *sbi)
2441 {
2442 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2443 struct hard_link_node *tmp, *node;
2444 struct f2fs_node *node_blk = NULL;
2445 struct node_info ni;
2446 int ret;
2447
2448 if (fsck->hard_link_list_head == NULL)
2449 return;
2450
2451 node_blk = (struct f2fs_node *)calloc(F2FS_BLKSIZE, 1);
2452 ASSERT(node_blk != NULL);
2453
2454 node = fsck->hard_link_list_head;
2455 while (node) {
2456 /* Sanity check */
2457 if (sanity_check_nid(sbi, node->nid, node_blk,
2458 F2FS_FT_MAX, TYPE_INODE, &ni))
2459 FIX_MSG("Failed to fix, rerun fsck.f2fs");
2460
2461 node_blk->i.i_links = cpu_to_le32(node->actual_links);
2462
2463 FIX_MSG("File: 0x%x i_links= 0x%x -> 0x%x",
2464 node->nid, node->links, node->actual_links);
2465
2466 ret = update_block(sbi, node_blk, &ni.blk_addr, NULL);
2467 ASSERT(ret >= 0);
2468 tmp = node;
2469 node = node->next;
2470 free(tmp);
2471 }
2472 free(node_blk);
2473 }
2474
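/* Nullify every NAT entry whose node was never reached from the directory tree. */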
2475 static void fix_nat_entries(struct f2fs_sb_info *sbi)
2476 {
2477 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2478 u32 i;
2479
2480 for (i = 0; i < fsck->nr_nat_entries; i++)
2481 if (f2fs_test_bit(i, fsck->nat_area_bitmap) != 0)
2482 nullify_nat_entry(sbi, i);
2483 }
2484
2485 static void flush_curseg_sit_entries(struct f2fs_sb_info *sbi)
2486 {
2487 struct sit_info *sit_i = SIT_I(sbi);
2488 struct f2fs_sit_block *sit_blk;
2489 int i;
2490
2491 sit_blk = calloc(F2FS_BLKSIZE, 1);
2492 ASSERT(sit_blk);
2493 /* update curseg sit entries, since we may change
2494 * a segment type in move_curseg_info
2495 */
2496 for (i = 0; i < NO_CHECK_TYPE; i++) {
2497 struct curseg_info *curseg = CURSEG_I(sbi, i);
2498 struct f2fs_sit_entry *sit;
2499 struct seg_entry *se;
2500
2501 se = get_seg_entry(sbi, curseg->segno);
2502 get_current_sit_page(sbi, curseg->segno, sit_blk);
2503 sit = &sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, curseg->segno)];
2504 sit->vblocks = cpu_to_le16((se->type << SIT_VBLOCKS_SHIFT) |
2505 se->valid_blocks);
2506 rewrite_current_sit_page(sbi, curseg->segno, sit_blk);
2507 }
2508
2509 free(sit_blk);
2510 }
2511
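/*
 * When the deprecated large_nat_bitmap checksum layout is being fixed
 * (c.fix_chksum), copy the in-memory NAT and SIT version bitmaps back
 * into the checkpoint at the new offset.
 */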
2512 static void fix_checksum(struct f2fs_sb_info *sbi)
2513 {
2514 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
2515 struct f2fs_nm_info *nm_i = NM_I(sbi);
2516 struct sit_info *sit_i = SIT_I(sbi);
2517 void *bitmap_offset;
2518
2519 if (!c.fix_chksum)
2520 return;
2521
2522 bitmap_offset = cp->sit_nat_version_bitmap + sizeof(__le32);
2523
2524 memcpy(bitmap_offset, nm_i->nat_bitmap, nm_i->bitmap_size);
2525 memcpy(bitmap_offset + nm_i->bitmap_size,
2526 sit_i->sit_bitmap, sit_i->bitmap_size);
2527 }
2528
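/*
 * Rebuild the current checkpoint pack from the corrected counters and
 * curseg summaries, recompute its checksum and write it to the device.
 */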
2529 static void fix_checkpoint(struct f2fs_sb_info *sbi)
2530 {
2531 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2532 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2533 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
2534 unsigned long long cp_blk_no;
2535 u32 flags = c.alloc_failed ? CP_FSCK_FLAG :
2536 (c.roll_forward ? 0 : CP_UMOUNT_FLAG);
2537 block_t orphan_blks = 0;
2538 block_t cp_blocks;
2539 u32 i;
2540 int ret;
2541 uint32_t crc = 0;
2542
2543 /* should be called from fsck */
2544 ASSERT(c.func == FSCK);
2545
2546 if (is_set_ckpt_flags(cp, CP_ORPHAN_PRESENT_FLAG)) {
2547 orphan_blks = __start_sum_addr(sbi) - 1;
2548 flags |= CP_ORPHAN_PRESENT_FLAG;
2549 }
2550 if (is_set_ckpt_flags(cp, CP_TRIMMED_FLAG))
2551 flags |= CP_TRIMMED_FLAG;
2552 if (is_set_ckpt_flags(cp, CP_DISABLED_FLAG))
2553 flags |= CP_DISABLED_FLAG;
2554 if (is_set_ckpt_flags(cp, CP_LARGE_NAT_BITMAP_FLAG)) {
2555 flags |= CP_LARGE_NAT_BITMAP_FLAG;
2556 set_cp(checksum_offset, CP_MIN_CHKSUM_OFFSET);
2557 } else {
2558 set_cp(checksum_offset, CP_CHKSUM_OFFSET);
2559 }
2560
2561 if (flags & CP_UMOUNT_FLAG)
2562 cp_blocks = 8;
2563 else
2564 cp_blocks = 5;
2565
2566 set_cp(cp_pack_total_block_count, cp_blocks +
2567 orphan_blks + get_sb(cp_payload));
2568
2569 flags = update_nat_bits_flags(sb, cp, flags);
2570 flags |= CP_NOCRC_RECOVERY_FLAG;
2571 set_cp(ckpt_flags, flags);
2572
2573 set_cp(free_segment_count, get_free_segments(sbi));
2574 set_cp(valid_block_count, fsck->chk.valid_blk_cnt);
2575 set_cp(valid_node_count, fsck->chk.valid_node_cnt);
2576 set_cp(valid_inode_count, fsck->chk.valid_inode_cnt);
2577
2578 crc = f2fs_checkpoint_chksum(cp);
2579 *((__le32 *)((unsigned char *)cp + get_cp(checksum_offset))) =
2580 cpu_to_le32(crc);
2581
2582 cp_blk_no = get_sb(cp_blkaddr);
2583 if (sbi->cur_cp == 2)
2584 cp_blk_no += 1 << get_sb(log_blocks_per_seg);
2585
2586 ret = dev_write_block(cp, cp_blk_no++);
2587 ASSERT(ret >= 0);
2588
2589 for (i = 0; i < get_sb(cp_payload); i++) {
2590 ret = dev_write_block(((unsigned char *)cp) +
2591 (i + 1) * F2FS_BLKSIZE, cp_blk_no++);
2592 ASSERT(ret >= 0);
2593 }
2594
2595 cp_blk_no += orphan_blks;
2596
2597 for (i = 0; i < NO_CHECK_TYPE; i++) {
2598 struct curseg_info *curseg = CURSEG_I(sbi, i);
2599
2600 if (!(flags & CP_UMOUNT_FLAG) && IS_NODESEG(i))
2601 continue;
2602
2603 ret = dev_write_block(curseg->sum_blk, cp_blk_no++);
2604 ASSERT(ret >= 0);
2605 }
2606
2607 /* Write nat bits */
2608 if (flags & CP_NAT_BITS_FLAG)
2609 write_nat_bits(sbi, sb, cp, sbi->cur_cp);
2610
2611 ret = f2fs_fsync_device();
2612 ASSERT(ret >= 0);
2613
2614 ret = dev_write_block(cp, cp_blk_no++);
2615 ASSERT(ret >= 0);
2616
2617 ret = f2fs_fsync_device();
2618 ASSERT(ret >= 0);
2619
2620 MSG(0, "Info: fix_checkpoint() cur_cp:%d\n", sbi->cur_cp);
2621 }
2622
2623 static void fix_checkpoints(struct f2fs_sb_info *sbi)
2624 {
2625 /* copy valid checkpoint to its mirror position */
2626 duplicate_checkpoint(sbi);
2627
2628 /* repair checkpoint at CP #0 position */
2629 sbi->cur_cp = 1;
2630 fix_checkpoint(sbi);
2631 }
2632
2633 #ifdef HAVE_LINUX_BLKZONED_H
2634
2635 /*
2636 * Refer to the valid block map (built from SIT and fsync data) and return
2637 * the offset of the last valid block in the zone.
2638 * If there is no valid block in the zone, return -1.
2639 */
2640 static int last_vblk_off_in_zone(struct f2fs_sb_info *sbi,
2641 unsigned int zone_segno)
2642 {
2643 int s, b;
2644 unsigned int segs_per_zone = sbi->segs_per_sec * sbi->secs_per_zone;
2645 struct seg_entry *se;
2646
2647 for (s = segs_per_zone - 1; s >= 0; s--) {
2648 se = get_seg_entry(sbi, zone_segno + s);
2649
2650 for (b = sbi->blocks_per_seg - 1; b >= 0; b--)
2651 if (f2fs_test_bit(b, (const char *)se->cur_valid_map))
2652 return b + (s << sbi->log_blocks_per_seg);
2653 }
2654
2655 return -1;
2656 }
2657
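/*
 * On zoned devices, verify that the curseg's next write position matches
 * the write pointer reported by the device for its zone.
 */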
2658 static int check_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
2659 {
2660 struct curseg_info *curseg = CURSEG_I(sbi, type);
2661 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
2662 struct blk_zone blkz;
2663 block_t cs_block, wp_block;
2664 uint64_t cs_sector, wp_sector;
2665 int i, ret;
2666 int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
2667
2668 if (!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
2669 return -EINVAL;
2670
2671 /* get the device the curseg points to */
2672 cs_block = START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff;
2673 for (i = 0; i < MAX_DEVICES; i++) {
2674 if (!c.devices[i].path)
2675 break;
2676 if (c.devices[i].start_blkaddr <= cs_block &&
2677 cs_block <= c.devices[i].end_blkaddr)
2678 break;
2679 }
2680
2681 if (i >= MAX_DEVICES)
2682 return -EINVAL;
2683
2684 if (c.devices[i].zoned_model != F2FS_ZONED_HM)
2685 return 0;
2686
2687 /* get write pointer position of the zone the curseg points to */
2688 cs_sector = (cs_block - c.devices[i].start_blkaddr)
2689 << log_sectors_per_block;
2690 ret = f2fs_report_zone(i, cs_sector, &blkz);
2691 if (ret)
2692 return ret;
2693
2694 if (blk_zone_type(&blkz) != BLK_ZONE_TYPE_SEQWRITE_REQ)
2695 return 0;
2696
2697 /* check consistency between the curseg and the write pointer */
2698 wp_block = c.devices[i].start_blkaddr +
2699 (blk_zone_wp_sector(&blkz) >> log_sectors_per_block);
2700 wp_sector = blk_zone_wp_sector(&blkz);
2701
2702 if (cs_sector == wp_sector) {
2703 return 0;
2704 } else if (cs_sector > wp_sector) {
2705 MSG(0, "Inconsistent write pointer with curseg %d: "
2706 "curseg %d[0x%x,0x%x] > wp[0x%x,0x%x]\n",
2707 type, type, curseg->segno, curseg->next_blkoff,
2708 GET_SEGNO(sbi, wp_block),
2709 OFFSET_IN_SEG(sbi, wp_block));
2710 if (!c.fix_on)
2711 fsck->chk.wp_inconsistent_zones++;
2712 } else {
2713 MSG(0, "Write pointer goes advance from curseg %d: "
2714 "curseg %d[0x%x,0x%x] wp[0x%x,0x%x]\n",
2715 type, type, curseg->segno, curseg->next_blkoff,
2716 GET_SEGNO(sbi, wp_block), OFFSET_IN_SEG(sbi, wp_block));
2717 }
2718
2719 return -EINVAL;
2720 }
2721
2722 #else
2723
2724 static int check_curseg_write_pointer(struct f2fs_sb_info *UNUSED(sbi),
2725 int UNUSED(type))
2726 {
2727 return 0;
2728 }
2729
2730 #endif
2731
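/*
 * Verify that a curseg's next_blkoff is sane: within the segment, not
 * already valid in SIT and, for LFS allocation, followed only by free
 * blocks; optionally also checked against the zone write pointer.
 */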
2732 int check_curseg_offset(struct f2fs_sb_info *sbi, int type, bool check_wp)
2733 {
2734 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
2735 struct curseg_info *curseg = CURSEG_I(sbi, type);
2736 struct seg_entry *se;
2737 int j, nblocks;
2738
2739 if ((get_sb(feature) & F2FS_FEATURE_RO) &&
2740 type != CURSEG_HOT_DATA && type != CURSEG_HOT_NODE)
2741 return 0;
2742
2743 if ((curseg->next_blkoff >> 3) >= SIT_VBLOCK_MAP_SIZE) {
2744 ASSERT_MSG("Next block offset:%u is invalid, type:%d",
2745 curseg->next_blkoff, type);
2746 return -EINVAL;
2747 }
2748 se = get_seg_entry(sbi, curseg->segno);
2749 if (f2fs_test_bit(curseg->next_blkoff,
2750 (const char *)se->cur_valid_map)) {
2751 ASSERT_MSG("Next block offset is not free, type:%d", type);
2752 return -EINVAL;
2753 }
2754 if (curseg->alloc_type == SSR)
2755 return 0;
2756
2757 nblocks = sbi->blocks_per_seg;
2758 for (j = curseg->next_blkoff + 1; j < nblocks; j++) {
2759 if (f2fs_test_bit(j, (const char *)se->cur_valid_map)) {
2760 ASSERT_MSG("For LFS curseg, space after .next_blkoff "
2761 "should be unused, type:%d", type);
2762 return -EINVAL;
2763 }
2764 }
2765
2766 if (check_wp && c.zoned_model == F2FS_ZONED_HM)
2767 return check_curseg_write_pointer(sbi, type);
2768
2769 return 0;
2770 }
2771
2772 int check_curseg_offsets(struct f2fs_sb_info *sbi, bool check_wp)
2773 {
2774 int i, ret;
2775
2776 for (i = 0; i < NO_CHECK_TYPE; i++) {
2777 ret = check_curseg_offset(sbi, i, check_wp);
2778 if (ret)
2779 return ret;
2780 }
2781 return 0;
2782 }
2783
2784 static void fix_curseg_info(struct f2fs_sb_info *sbi, bool check_wp)
2785 {
2786 int i, need_update = 0;
2787
2788 for (i = 0; i < NO_CHECK_TYPE; i++) {
2789 if (check_curseg_offset(sbi, i, check_wp)) {
2790 update_curseg_info(sbi, i);
2791 need_update = 1;
2792 }
2793 }
2794
2795 if (need_update) {
2796 write_curseg_info(sbi);
2797 flush_curseg_sit_entries(sbi);
2798 }
2799 }
2800
2801 int check_sit_types(struct f2fs_sb_info *sbi)
2802 {
2803 unsigned int i;
2804 int err = 0;
2805
2806 for (i = 0; i < MAIN_SEGS(sbi); i++) {
2807 struct seg_entry *se;
2808
2809 se = get_seg_entry(sbi, i);
2810 if (se->orig_type != se->type) {
2811 if (se->orig_type == CURSEG_COLD_DATA &&
2812 se->type <= CURSEG_COLD_DATA) {
2813 se->type = se->orig_type;
2814 } else {
2815 FIX_MSG("Wrong segment type [0x%x] %x -> %x",
2816 i, se->orig_type, se->type);
2817 err = -EINVAL;
2818 }
2819 }
2820 }
2821 return err;
2822 }
2823
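/*
 * Look up lost+found under the root directory, creating it if missing, and
 * return its inode block for use when reconnecting files.
 */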
2824 static struct f2fs_node *fsck_get_lpf(struct f2fs_sb_info *sbi)
2825 {
2826 struct f2fs_node *node;
2827 struct node_info ni;
2828 nid_t lpf_ino;
2829 int err;
2830
2831 /* read root inode first */
2832 node = calloc(F2FS_BLKSIZE, 1);
2833 ASSERT(node);
2834 get_node_info(sbi, F2FS_ROOT_INO(sbi), &ni);
2835 err = dev_read_block(node, ni.blk_addr);
2836 ASSERT(err >= 0);
2837
2838 /* lookup lost+found in root directory */
2839 lpf_ino = f2fs_lookup(sbi, node, (u8 *)LPF, strlen(LPF));
2840 if (lpf_ino) { /* found */
2841 get_node_info(sbi, lpf_ino, &ni);
2842 err = dev_read_block(node, ni.blk_addr);
2843 ASSERT(err >= 0);
2844 DBG(1, "Found lost+found 0x%x at blkaddr [0x%x]\n",
2845 lpf_ino, ni.blk_addr);
2846 if (!S_ISDIR(le16_to_cpu(node->i.i_mode))) {
2847 ASSERT_MSG("lost+found is not directory [0%o]\n",
2848 le16_to_cpu(node->i.i_mode));
2849 /* FIXME: give up? */
2850 goto out;
2851 }
2852
2853 /* Must convert inline dentry before adding inodes */
2854 err = convert_inline_dentry(sbi, node, &ni.blk_addr);
2855 if (err) {
2856 MSG(0, "Convert inline dentry for ino=%x failed.\n",
2857 lpf_ino);
2858 goto out;
2859 }
2860 } else { /* not found, create it */
2861 struct dentry de;
2862
2863 memset(&de, 0, sizeof(de));
2864 de.name = (u8 *) LPF;
2865 de.len = strlen(LPF);
2866 de.mode = 0x41c0;
2867 de.pino = F2FS_ROOT_INO(sbi);
2868 de.file_type = F2FS_FT_DIR;
2869 de.uid = getuid();
2870 de.gid = getgid();
2871 de.mtime = time(NULL);
2872
2873 err = f2fs_mkdir(sbi, &de);
2874 if (err) {
2875 ASSERT_MSG("Failed create lost+found");
2876 goto out;
2877 }
2878
2879 get_node_info(sbi, de.ino, &ni);
2880 err = dev_read_block(node, ni.blk_addr);
2881 ASSERT(err >= 0);
2882 DBG(1, "Create lost+found 0x%x at blkaddr [0x%x]\n",
2883 de.ino, ni.blk_addr);
2884 }
2885
2886 c.lpf_ino = le32_to_cpu(F2FS_NODE_FOOTER(node)->ino);
2887 return node;
2888 out:
2889 free(node);
2890 return NULL;
2891 }
2892
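/*
 * Add one recovered inode to lost+found under a name derived from its ino,
 * then update the inode's own name and parent ino to match.
 */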
2893 static int fsck_do_reconnect_file(struct f2fs_sb_info *sbi,
2894 struct f2fs_node *lpf,
2895 struct f2fs_node *fnode)
2896 {
2897 char name[80];
2898 size_t namelen;
2899 nid_t ino = le32_to_cpu(F2FS_NODE_FOOTER(fnode)->ino);
2900 struct node_info ni;
2901 int ftype, ret;
2902
2903 namelen = snprintf(name, 80, "%u", ino);
2904 if (namelen >= 80)
2905 /* ignore terminating '\0', should never happen */
2906 namelen = 79;
2907
2908 if (f2fs_lookup(sbi, lpf, (u8 *)name, namelen)) {
2909 ASSERT_MSG("Name %s already exist in lost+found", name);
2910 return -EEXIST;
2911 }
2912
2913 get_node_info(sbi, le32_to_cpu(F2FS_NODE_FOOTER(lpf)->ino), &ni);
2914 ftype = map_de_type(le16_to_cpu(fnode->i.i_mode));
2915 ret = f2fs_add_link(sbi, lpf, (unsigned char *)name, namelen,
2916 ino, ftype, &ni.blk_addr, 0);
2917 if (ret) {
2918 ASSERT_MSG("Failed to add inode [0x%x] to lost+found", ino);
2919 return -EINVAL;
2920 }
2921
2922 /* update fnode */
2923 memcpy(fnode->i.i_name, name, namelen);
2924 fnode->i.i_namelen = cpu_to_le32(namelen);
2925 fnode->i.i_pino = c.lpf_ino;
2926 get_node_info(sbi, le32_to_cpu(F2FS_NODE_FOOTER(fnode)->ino), &ni);
2927 ret = update_block(sbi, fnode, &ni.blk_addr, NULL);
2928 ASSERT(ret >= 0);
2929
2930 DBG(1, "Reconnect inode [0x%x] to lost+found\n", ino);
2931 return 0;
2932 }
2933
2934 static inline void release_inode_cnt(struct f2fs_sb_info *sbi, bool dealloc)
2935 {
2936 F2FS_FSCK(sbi)->chk.valid_inode_cnt--;
2937 if (dealloc)
2938 sbi->total_valid_inode_count--;
2939 }
2940
2941 static inline void release_node_cnt(struct f2fs_sb_info *sbi, bool dealloc)
2942 {
2943 F2FS_FSCK(sbi)->chk.valid_node_cnt--;
2944 if (dealloc)
2945 sbi->total_valid_node_count--;
2946 }
2947
2948 static inline void release_block_cnt(struct f2fs_sb_info *sbi, bool dealloc)
2949 {
2950 F2FS_FSCK(sbi)->chk.valid_blk_cnt--;
2951 if (dealloc)
2952 sbi->total_valid_block_count--;
2953 }
2954
2955 static inline void release_block(struct f2fs_sb_info *sbi, u64 blkaddr,
2956 bool dealloc)
2957 {
2958 f2fs_clear_main_bitmap(sbi, blkaddr);
2959 if (dealloc) {
2960 struct seg_entry *se;
2961 u64 offset;
2962
2963 se = get_seg_entry(sbi, GET_SEGNO(sbi, blkaddr));
2964 offset = OFFSET_IN_SEG(sbi, blkaddr);
2965 se->valid_blocks--;
2966 f2fs_clear_bit(offset, (char *)se->cur_valid_map);
2967 if (need_fsync_data_record(sbi))
2968 f2fs_clear_bit(offset, (char *)se->ckpt_valid_map);
2969 se->dirty = 1;
2970 f2fs_clear_sit_bitmap(sbi, blkaddr);
2971 }
2972 }
2973
2974 static inline void release_nat_entry(struct f2fs_sb_info *sbi, u32 nid)
2975 {
2976 nullify_nat_entry(sbi, nid);
2977 F2FS_FSCK(sbi)->chk.valid_nat_entry_cnt--;
2978 }
2979
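/*
 * Release a direct node and its data blocks from the fsck counters and
 * bitmaps; with dealloc, also clear their SIT bits and NAT entry.
 */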
2980 static void fsck_disconnect_file_dnode(struct f2fs_sb_info *sbi,
2981 struct f2fs_inode *inode, nid_t nid, bool dealloc)
2982 {
2983 struct f2fs_node *node;
2984 struct node_info ni;
2985 u32 addr;
2986 int i, err;
2987
2988 node = calloc(F2FS_BLKSIZE, 1);
2989 ASSERT(node);
2990
2991 get_node_info(sbi, nid, &ni);
2992 err = dev_read_block(node, ni.blk_addr);
2993 ASSERT(err >= 0);
2994
2995 release_node_cnt(sbi, dealloc);
2996 release_block_cnt(sbi, dealloc);
2997 release_block(sbi, ni.blk_addr, dealloc);
2998
2999 for (i = 0; i < ADDRS_PER_BLOCK(inode); i++) {
3000 addr = le32_to_cpu(node->dn.addr[i]);
3001 if (!addr)
3002 continue;
3003 release_block_cnt(sbi, dealloc);
3004 if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
3005 continue;
3006 release_block(sbi, addr, dealloc);
3007 }
3008
3009 if (dealloc)
3010 release_nat_entry(sbi, nid);
3011
3012 free(node);
3013 }
3014
3015 static void fsck_disconnect_file_idnode(struct f2fs_sb_info *sbi,
3016 struct f2fs_inode *inode, nid_t nid, bool dealloc)
3017 {
3018 struct f2fs_node *node;
3019 struct node_info ni;
3020 nid_t tmp;
3021 int i, err;
3022
3023 node = calloc(F2FS_BLKSIZE, 1);
3024 ASSERT(node);
3025
3026 get_node_info(sbi, nid, &ni);
3027 err = dev_read_block(node, ni.blk_addr);
3028 ASSERT(err >= 0);
3029
3030 release_node_cnt(sbi, dealloc);
3031 release_block_cnt(sbi, dealloc);
3032 release_block(sbi, ni.blk_addr, dealloc);
3033
3034 for (i = 0; i < NIDS_PER_BLOCK; i++) {
3035 tmp = le32_to_cpu(node->in.nid[i]);
3036 if (!tmp)
3037 continue;
3038 fsck_disconnect_file_dnode(sbi, inode, tmp, dealloc);
3039 }
3040
3041 if (dealloc)
3042 release_nat_entry(sbi, nid);
3043
3044 free(node);
3045 }
3046
3047 static void fsck_disconnect_file_didnode(struct f2fs_sb_info *sbi,
3048 struct f2fs_inode *inode, nid_t nid, bool dealloc)
3049 {
3050 struct f2fs_node *node;
3051 struct node_info ni;
3052 nid_t tmp;
3053 int i, err;
3054
3055 node = calloc(F2FS_BLKSIZE, 1);
3056 ASSERT(node);
3057
3058 get_node_info(sbi, nid, &ni);
3059 err = dev_read_block(node, ni.blk_addr);
3060 ASSERT(err >= 0);
3061
3062 release_node_cnt(sbi, dealloc);
3063 release_block_cnt(sbi, dealloc);
3064 release_block(sbi, ni.blk_addr, dealloc);
3065
3066 for (i = 0; i < NIDS_PER_BLOCK; i++) {
3067 tmp = le32_to_cpu(node->in.nid[i]);
3068 if (!tmp)
3069 continue;
3070 fsck_disconnect_file_idnode(sbi, inode, tmp, dealloc);
3071 }
3072
3073 if (dealloc)
3074 release_nat_entry(sbi, nid);
3075
3076 free(node);
3077 }
3078
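/*
 * Walk every block referenced by an inode (data, xattr, direct and indirect
 * nodes) and drop it from the fsck accounting; with dealloc, free the blocks
 * and NAT entries as well.
 */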
3079 static void fsck_disconnect_file(struct f2fs_sb_info *sbi, nid_t ino,
3080 bool dealloc)
3081 {
3082 struct f2fs_node *node;
3083 struct node_info ni;
3084 nid_t nid;
3085 int ofs, i, err;
3086
3087 node = calloc(F2FS_BLKSIZE, 1);
3088 ASSERT(node);
3089
3090 get_node_info(sbi, ino, &ni);
3091 err = dev_read_block(node, ni.blk_addr);
3092 ASSERT(err >= 0);
3093
3094 /* clear inode counters */
3095 release_inode_cnt(sbi, dealloc);
3096 release_node_cnt(sbi, dealloc);
3097 release_block_cnt(sbi, dealloc);
3098 release_block(sbi, ni.blk_addr, dealloc);
3099
3100 /* clear xnid counters */
3101 if (node->i.i_xattr_nid) {
3102 nid = le32_to_cpu(node->i.i_xattr_nid);
3103 release_node_cnt(sbi, dealloc);
3104 release_block_cnt(sbi, dealloc);
3105 get_node_info(sbi, nid, &ni);
3106 release_block(sbi, ni.blk_addr, dealloc);
3107
3108 if (dealloc)
3109 release_nat_entry(sbi, nid);
3110 }
3111
3112 /* clear data counters */
3113 if (!(node->i.i_inline & (F2FS_INLINE_DATA | F2FS_INLINE_DENTRY))) {
3114 ofs = get_extra_isize(node);
3115 for (i = 0; i < ADDRS_PER_INODE(&node->i); i++) {
3116 block_t addr = le32_to_cpu(node->i.i_addr[ofs + i]);
3117 if (!addr)
3118 continue;
3119 release_block_cnt(sbi, dealloc);
3120 if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
3121 continue;
3122 release_block(sbi, addr, dealloc);
3123 }
3124 }
3125
3126 for (i = 0; i < 5; i++) {
3127 nid = le32_to_cpu(F2FS_INODE_I_NID(&node->i, i));
3128 if (!nid)
3129 continue;
3130
3131 switch (i) {
3132 case 0: /* direct node */
3133 case 1:
3134 fsck_disconnect_file_dnode(sbi, &node->i, nid,
3135 dealloc);
3136 break;
3137 case 2: /* indirect node */
3138 case 3:
3139 fsck_disconnect_file_idnode(sbi, &node->i, nid,
3140 dealloc);
3141 break;
3142 case 4: /* double indirect node */
3143 fsck_disconnect_file_didnode(sbi, &node->i, nid,
3144 dealloc);
3145 break;
3146 }
3147 }
3148
3149 if (dealloc)
3150 release_nat_entry(sbi, ino);
3151
3152 free(node);
3153 }
3154
3155 /*
3156 * Scan unreachable nids and pick out regular file inodes. If these files
3157 * are not corrupted, reconnect them to lost+found.
3158 *
3159 * Since all unreachable nodes are already checked, we can allocate new
3160 * blocks safely.
3161 *
3162 * This function returns the number of files that were reconnected.
3163 */
3164 static int fsck_reconnect_file(struct f2fs_sb_info *sbi)
3165 {
3166 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
3167 struct f2fs_node *lpf_node, *node;
3168 struct node_info ni;
3169 char *reconnect_bitmap;
3170 u32 blk_cnt;
3171 struct f2fs_compr_blk_cnt cbc;
3172 nid_t nid;
3173 int err, cnt = 0, ftype;
3174
3175 node = calloc(F2FS_BLKSIZE, 1);
3176 ASSERT(node);
3177
3178 reconnect_bitmap = calloc(fsck->nat_area_bitmap_sz, 1);
3179 ASSERT(reconnect_bitmap);
3180
3181 for (nid = 0; nid < fsck->nr_nat_entries; nid++) {
3182 if (f2fs_test_bit(nid, fsck->nat_area_bitmap)) {
3183 if (is_qf_ino(F2FS_RAW_SUPER(sbi), nid)) {
3184 DBG(1, "Not support quota inode [0x%x]\n",
3185 nid);
3186 continue;
3187 }
3188
3189 get_node_info(sbi, nid, &ni);
3190 err = dev_read_block(node, ni.blk_addr);
3191 ASSERT(err >= 0);
3192
3193 /* reconnection will restore these nodes if needed */
3194 if (!IS_INODE(node)) {
3195 DBG(1, "Not support non-inode node [0x%x]\n",
3196 nid);
3197 continue;
3198 }
3199
3200 if (S_ISDIR(le16_to_cpu(node->i.i_mode))) {
3201 DBG(1, "Not support directory inode [0x%x]\n",
3202 nid);
3203 continue;
3204 }
3205
3206 ftype = map_de_type(le16_to_cpu(node->i.i_mode));
3207 if (sanity_check_nid(sbi, nid, node, ftype,
3208 TYPE_INODE, &ni)) {
3209 ASSERT_MSG("Invalid nid [0x%x]\n", nid);
3210 continue;
3211 }
3212
3213 DBG(1, "Check inode 0x%x\n", nid);
3214 blk_cnt = 1;
3215 cbc.cnt = 0;
3216 cbc.cheader_pgofs = CHEADER_PGOFS_NONE;
3217 fsck_chk_inode_blk(sbi, nid, ftype, node,
3218 &blk_cnt, &cbc, &ni, NULL);
3219
3220 f2fs_set_bit(nid, reconnect_bitmap);
3221 }
3222 }
3223
3224 lpf_node = fsck_get_lpf(sbi);
3225 if (!lpf_node)
3226 goto out;
3227
3228 for (nid = 0; nid < fsck->nr_nat_entries; nid++) {
3229 if (f2fs_test_bit(nid, reconnect_bitmap)) {
3230 get_node_info(sbi, nid, &ni);
3231 err = dev_read_block(node, ni.blk_addr);
3232 ASSERT(err >= 0);
3233
3234 if (fsck_do_reconnect_file(sbi, lpf_node, node)) {
3235 DBG(1, "Failed to reconnect inode [0x%x]\n",
3236 nid);
3237 fsck_disconnect_file(sbi, nid, false);
3238 continue;
3239 }
3240
3241 quota_add_inode_usage(fsck->qctx, nid, &node->i);
3242
3243 DBG(1, "Reconnected inode [0x%x] to lost+found\n", nid);
3244 cnt++;
3245 }
3246 }
3247
3248 out:
3249 free(node);
3250 free(lpf_node);
3251 free(reconnect_bitmap);
3252 return cnt;
3253 }
3254
3255 #ifdef HAVE_LINUX_BLKZONED_H
3256
3257 struct write_pointer_check_data {
3258 struct f2fs_sb_info *sbi;
3259 int dev_index;
3260 };
3261
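/*
 * Per-zone callback: compare the zone's write pointer with the last valid
 * block recorded in SIT and, when fixing, reset or finish the zone so that
 * the two are consistent.
 */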
3262 static int chk_and_fix_wp_with_sit(int UNUSED(i), void *blkzone, void *opaque)
3263 {
3264 struct blk_zone *blkz = (struct blk_zone *)blkzone;
3265 struct write_pointer_check_data *wpd = opaque;
3266 struct f2fs_sb_info *sbi = wpd->sbi;
3267 struct device_info *dev = c.devices + wpd->dev_index;
3268 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
3269 block_t zone_block, wp_block, wp_blkoff;
3270 unsigned int zone_segno, wp_segno;
3271 int i, ret, last_valid_blkoff;
3272 int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
3273 unsigned int segs_per_zone = sbi->segs_per_sec * sbi->secs_per_zone;
3274
3275 if (blk_zone_conv(blkz))
3276 return 0;
3277
3278 zone_block = dev->start_blkaddr
3279 + (blk_zone_sector(blkz) >> log_sectors_per_block);
3280 zone_segno = GET_SEGNO(sbi, zone_block);
3281 if (zone_segno >= MAIN_SEGS(sbi))
3282 return 0;
3283
3284 wp_block = dev->start_blkaddr
3285 + (blk_zone_wp_sector(blkz) >> log_sectors_per_block);
3286 wp_segno = GET_SEGNO(sbi, wp_block);
3287 wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
3288
3289 last_valid_blkoff = last_vblk_off_in_zone(sbi, zone_segno);
3290
3291 /* if a curseg points to the zone, do not finish the zone */
3292 for (i = 0; i < NO_CHECK_TYPE; i++) {
3293 struct curseg_info *cs = CURSEG_I(sbi, i);
3294
3295 if (zone_segno <= cs->segno &&
3296 cs->segno < zone_segno + segs_per_zone) {
3297 /*
3298 * When there is no valid block in the zone, check that the
3299 * write pointer is at the zone start. If not, reset
3300 * the write pointer.
3301 */
3302 if (last_valid_blkoff < 0 &&
3303 blk_zone_wp_sector(blkz) != blk_zone_sector(blkz)) {
3304 if (!c.fix_on) {
3305 MSG(0, "Inconsistent write pointer: "
3306 "wp[0x%x,0x%x]\n",
3307 wp_segno, wp_blkoff);
3308 fsck->chk.wp_inconsistent_zones++;
3309 return 0;
3310 }
3311
3312 FIX_MSG("Reset write pointer of zone at "
3313 "segment 0x%x", zone_segno);
3314 ret = f2fs_reset_zone(wpd->dev_index, blkz);
3315 if (ret) {
3316 printf("[FSCK] Write pointer reset "
3317 "failed: %s\n", dev->path);
3318 return ret;
3319 }
3320 fsck->chk.wp_fixed = 1;
3321 }
3322 return 0;
3323 }
3324 }
3325
3326 /*
3327 * If valid blocks exist in the zone beyond the write pointer, it
3328 * is a bug. There is no need to fix it because the zone is not
3329 * selected for writes; just report it.
3330 */
3331 if (last_valid_blkoff + zone_block > wp_block) {
3332 MSG(0, "Unexpected invalid write pointer: wp[0x%x,0x%x]\n",
3333 wp_segno, wp_blkoff);
3334 if (!c.fix_on)
3335 fsck->chk.wp_inconsistent_zones++;
3336 }
3337
3338 if (!c.fix_on)
3339 return 0;
3340
3341 ret = f2fs_finish_zone(wpd->dev_index, blkz);
3342 if (ret) {
3343 u64 fill_sects = blk_zone_length(blkz) -
3344 (blk_zone_wp_sector(blkz) - blk_zone_sector(blkz));
3345 printf("[FSCK] Finishing zone failed: %s\n", dev->path);
3346 ret = dev_fill(NULL, wp_block * F2FS_BLKSIZE,
3347 (fill_sects >> log_sectors_per_block) * F2FS_BLKSIZE);
3348 if (ret)
3349 printf("[FSCK] Fill up zone failed: %s\n", dev->path);
3350 }
3351
3352 if (!ret)
3353 fsck->chk.wp_fixed = 1;
3354 return ret;
3355 }
3356
3357 static void fix_wp_sit_alignment(struct f2fs_sb_info *sbi)
3358 {
3359 unsigned int i;
3360 struct write_pointer_check_data wpd = { sbi, 0 };
3361
3362 if (c.zoned_model != F2FS_ZONED_HM)
3363 return;
3364
3365 for (i = 0; i < MAX_DEVICES; i++) {
3366 if (!c.devices[i].path)
3367 break;
3368 if (c.devices[i].zoned_model != F2FS_ZONED_HM)
3369 break;
3370
3371 wpd.dev_index = i;
3372 if (f2fs_report_zones(i, chk_and_fix_wp_with_sit, &wpd)) {
3373 printf("[FSCK] Write pointer check failed: %s\n",
3374 c.devices[i].path);
3375 return;
3376 }
3377 }
3378 }
3379
3380 #else
3381
3382 static void fix_wp_sit_alignment(struct f2fs_sb_info *UNUSED(sbi))
3383 {
3384 return;
3385 }
3386
3387 #endif
3388
3389 /*
3390 * Check and fix write pointer consistency at the beginning of
3391 * fsck so that subsequent writes by fsck do not fail.
3392 */
3393 void fsck_chk_and_fix_write_pointers(struct f2fs_sb_info *sbi)
3394 {
3395 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
3396
3397 if (c.zoned_model != F2FS_ZONED_HM)
3398 return;
3399
3400 if (c.fix_on) {
3401 flush_nat_journal_entries(sbi);
3402 flush_sit_journal_entries(sbi);
3403
3404 if (check_curseg_offsets(sbi, true))
3405 fix_curseg_info(sbi, true);
3406
3407 fix_wp_sit_alignment(sbi);
3408 fsck->chk.wp_fixed = 1;
3409 }
3410 }
3411
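/*
 * Verify that each curseg's segment type matches both its SIT entry and its
 * summary block footer, correcting them when fixing or preening is enabled.
 */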
3412 int fsck_chk_curseg_info(struct f2fs_sb_info *sbi)
3413 {
3414 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
3415 struct curseg_info *curseg;
3416 struct seg_entry *se;
3417 struct f2fs_summary_block *sum_blk;
3418 int i, ret = 0;
3419
3420 for (i = 0; i < NO_CHECK_TYPE; i++) {
3421 curseg = CURSEG_I(sbi, i);
3422 se = get_seg_entry(sbi, curseg->segno);
3423 sum_blk = curseg->sum_blk;
3424
3425 if ((get_sb(feature) & F2FS_FEATURE_RO) &&
3426 (i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE))
3427 continue;
3428
3429 if (se->type != i) {
3430 ASSERT_MSG("Incorrect curseg [%d]: segno [0x%x] "
3431 "type(SIT) [%d]", i, curseg->segno,
3432 se->type);
3433 if (c.fix_on || c.preen_mode)
3434 se->type = i;
3435 ret = -1;
3436 }
3437 if (i <= CURSEG_COLD_DATA && IS_SUM_DATA_SEG(sum_blk)) {
3438 continue;
3439 } else if (i > CURSEG_COLD_DATA && IS_SUM_NODE_SEG(sum_blk)) {
3440 continue;
3441 } else {
3442 ASSERT_MSG("Incorrect curseg [%d]: segno [0x%x] "
3443 "type(SSA) [%d]", i, curseg->segno,
3444 F2FS_SUMMARY_BLOCK_FOOTER(sum_blk)->entry_type);
3445 if (c.fix_on || c.preen_mode)
3446 F2FS_SUMMARY_BLOCK_FOOTER(sum_blk)->entry_type =
3447 i <= CURSEG_COLD_DATA ?
3448 SUM_TYPE_DATA : SUM_TYPE_NODE;
3449 ret = -1;
3450 }
3451 }
3452
3453 return ret;
3454 }
3455
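/*
 * Final verification pass: report unreachable nids and hard link mismatches,
 * compare the rebuilt bitmaps and counters against the checkpoint, and
 * rewrite global metadata when fixes are needed.
 */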
3456 int fsck_verify(struct f2fs_sb_info *sbi)
3457 {
3458 unsigned int i = 0;
3459 int ret = 0;
3460 int force = 0;
3461 u32 nr_unref_nid = 0;
3462 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
3463 struct hard_link_node *node = NULL;
3464 bool verify_failed = false;
3465 uint64_t max_blks, data_secs, node_secs, free_blks;
3466
3467 if (c.show_file_map)
3468 return 0;
3469
3470 printf("\n");
3471
3472 if (c.zoned_model == F2FS_ZONED_HM) {
3473 printf("[FSCK] Write pointers consistency ");
3474 if (fsck->chk.wp_inconsistent_zones == 0x0) {
3475 printf(" [Ok..]\n");
3476 } else {
3477 printf(" [Fail] [0x%x]\n",
3478 fsck->chk.wp_inconsistent_zones);
3479 verify_failed = true;
3480 }
3481
3482 if (fsck->chk.wp_fixed && c.fix_on)
3483 force = 1;
3484 }
3485
3486 if (c.feature & F2FS_FEATURE_LOST_FOUND) {
3487 for (i = 0; i < fsck->nr_nat_entries; i++)
3488 if (f2fs_test_bit(i, fsck->nat_area_bitmap) != 0)
3489 break;
3490 if (i < fsck->nr_nat_entries) {
3491 i = fsck_reconnect_file(sbi);
3492 printf("[FSCK] Reconnect %u files to lost+found\n", i);
3493 }
3494 }
3495
3496 for (i = 0; i < fsck->nr_nat_entries; i++) {
3497 if (f2fs_test_bit(i, fsck->nat_area_bitmap) != 0) {
3498 struct node_info ni;
3499
3500 get_node_info(sbi, i, &ni);
3501 printf("NID[0x%x] is unreachable, blkaddr:0x%x\n",
3502 i, ni.blk_addr);
3503 nr_unref_nid++;
3504 }
3505 }
3506
3507 if (fsck->hard_link_list_head != NULL) {
3508 node = fsck->hard_link_list_head;
3509 while (node) {
3510 printf("NID[0x%x] has [0x%x] more unreachable links\n",
3511 node->nid, node->links);
3512 node = node->next;
3513 }
3514 c.bug_on = 1;
3515 }
3516
3517 node_secs = round_up(sbi->total_valid_node_count, BLKS_PER_SEC(sbi));
3518 data_secs = round_up(sbi->total_valid_block_count -
3519 sbi->total_valid_node_count, BLKS_PER_SEC(sbi));
3520 free_blks = (sbi->total_sections - data_secs - node_secs) *
3521 BLKS_PER_SEC(sbi);
3522 max_blks = SM_I(sbi)->main_blkaddr + (data_secs + node_secs) *
3523 BLKS_PER_SEC(sbi);
3524 printf("[FSCK] Max image size: %"PRIu64" MB, Free space: %"PRIu64" MB\n",
3525 max_blks >> (20 - F2FS_BLKSIZE_BITS),
3526 free_blks >> (20 - F2FS_BLKSIZE_BITS));
3527 printf("[FSCK] Unreachable nat entries ");
3528 if (nr_unref_nid == 0x0) {
3529 printf(" [Ok..] [0x%x]\n", nr_unref_nid);
3530 } else {
3531 printf(" [Fail] [0x%x]\n", nr_unref_nid);
3532 verify_failed = true;
3533 }
3534
3535 printf("[FSCK] SIT valid block bitmap checking ");
3536 if (memcmp(fsck->sit_area_bitmap, fsck->main_area_bitmap,
3537 fsck->sit_area_bitmap_sz) == 0x0) {
3538 printf("[Ok..]\n");
3539 } else {
3540 printf("[Fail]\n");
3541 verify_failed = true;
3542 }
3543
3544 printf("[FSCK] Hard link checking for regular file ");
3545 if (fsck->hard_link_list_head == NULL) {
3546 printf(" [Ok..] [0x%x]\n", fsck->chk.multi_hard_link_files);
3547 } else {
3548 printf(" [Fail] [0x%x]\n", fsck->chk.multi_hard_link_files);
3549 verify_failed = true;
3550 }
3551
3552 printf("[FSCK] valid_block_count matching with CP ");
3553 if (sbi->total_valid_block_count == fsck->chk.valid_blk_cnt) {
3554 printf(" [Ok..] [0x%x]\n", (u32)fsck->chk.valid_blk_cnt);
3555 } else {
3556 printf(" [Fail] [0x%x, 0x%x]\n", sbi->total_valid_block_count,
3557 (u32)fsck->chk.valid_blk_cnt);
3558 verify_failed = true;
3559 }
3560
3561 printf("[FSCK] valid_node_count matching with CP (de lookup) ");
3562 if (sbi->total_valid_node_count == fsck->chk.valid_node_cnt) {
3563 printf(" [Ok..] [0x%x]\n", fsck->chk.valid_node_cnt);
3564 } else {
3565 printf(" [Fail] [0x%x, 0x%x]\n", sbi->total_valid_node_count,
3566 fsck->chk.valid_node_cnt);
3567 verify_failed = true;
3568 }
3569
3570 printf("[FSCK] valid_node_count matching with CP (nat lookup)");
3571 if (sbi->total_valid_node_count == fsck->chk.valid_nat_entry_cnt) {
3572 printf(" [Ok..] [0x%x]\n", fsck->chk.valid_nat_entry_cnt);
3573 } else {
3574 printf(" [Fail] [0x%x, 0x%x]\n", sbi->total_valid_node_count,
3575 fsck->chk.valid_nat_entry_cnt);
3576 verify_failed = true;
3577 }
3578
3579 printf("[FSCK] valid_inode_count matched with CP ");
3580 if (sbi->total_valid_inode_count == fsck->chk.valid_inode_cnt) {
3581 printf(" [Ok..] [0x%x]\n", fsck->chk.valid_inode_cnt);
3582 } else {
3583 printf(" [Fail] [0x%x, 0x%x]\n", sbi->total_valid_inode_count,
3584 fsck->chk.valid_inode_cnt);
3585 verify_failed = true;
3586 }
3587
3588 printf("[FSCK] free segment_count matched with CP ");
3589 if (le32_to_cpu(F2FS_CKPT(sbi)->free_segment_count) ==
3590 fsck->chk.sit_free_segs) {
3591 printf(" [Ok..] [0x%x]\n", fsck->chk.sit_free_segs);
3592 } else {
3593 printf(" [Fail] [0x%x, 0x%x]\n",
3594 le32_to_cpu(F2FS_CKPT(sbi)->free_segment_count),
3595 fsck->chk.sit_free_segs);
3596 verify_failed = true;
3597 }
3598
3599 printf("[FSCK] next block offset is free ");
3600 if (check_curseg_offsets(sbi, false) == 0) {
3601 printf(" [Ok..]\n");
3602 } else {
3603 printf(" [Fail]\n");
3604 verify_failed = true;
3605 }
3606
3607 printf("[FSCK] fixing SIT types\n");
3608 if (check_sit_types(sbi) != 0)
3609 force = 1;
3610
3611 printf("[FSCK] other corrupted bugs ");
3612 if (c.bug_on == 0) {
3613 printf(" [Ok..]\n");
3614 } else {
3615 printf(" [Fail]\n");
3616 ret = EXIT_ERR_CODE;
3617 }
3618
3619 if (verify_failed) {
3620 ret = EXIT_ERR_CODE;
3621 c.bug_on = 1;
3622 }
3623
3624 #ifndef WITH_ANDROID
3625 if (nr_unref_nid && !c.ro) {
3626 char ans[255] = {0};
3627 int res;
3628
3629 printf("\nDo you want to restore lost files into ./lost_found/? [Y/N] ");
3630 res = scanf("%s", ans);
3631 ASSERT(res >= 0);
3632 if (!strcasecmp(ans, "y")) {
3633 for (i = 0; i < fsck->nr_nat_entries; i++) {
3634 if (f2fs_test_bit(i, fsck->nat_area_bitmap))
3635 dump_node(sbi, i, 1, NULL, 1, 0);
3636 }
3637 }
3638 }
3639 #endif
3640
3641 /* fix global metadata */
3642 if (force || (c.fix_on && f2fs_dev_is_writable())) {
3643 struct f2fs_checkpoint *cp = F2FS_CKPT(sbi);
3644 struct f2fs_super_block *sb = F2FS_RAW_SUPER(sbi);
3645
3646 if (force || c.bug_on || c.bug_nat_bits || c.quota_fixed) {
3647 if (c.zoned_model != F2FS_ZONED_HM) {
3648 /* flush nats for write_nat_bits below */
3649 flush_journal_entries(sbi);
3650 }
3651 fix_hard_links(sbi);
3652 fix_nat_entries(sbi);
3653 rewrite_sit_area_bitmap(sbi);
3654 if (c.zoned_model == F2FS_ZONED_HM) {
3655 struct curseg_info *curseg;
3656 u64 ssa_blk;
3657
3658 for (i = 0; i < NO_CHECK_TYPE; i++) {
3659 curseg = CURSEG_I(sbi, i);
3660 ssa_blk = GET_SUM_BLKADDR(sbi,
3661 curseg->segno);
3662 ret = dev_write_block(curseg->sum_blk,
3663 ssa_blk);
3664 ASSERT(ret >= 0);
3665 }
3666 if (c.roll_forward)
3667 restore_curseg_warm_node_info(sbi);
3668 write_curseg_info(sbi);
3669 } else {
3670 fix_curseg_info(sbi, false);
3671 }
3672 fix_checksum(sbi);
3673 fix_checkpoints(sbi);
3674 } else if (is_set_ckpt_flags(cp, CP_FSCK_FLAG) ||
3675 is_set_ckpt_flags(cp, CP_QUOTA_NEED_FSCK_FLAG)) {
3676 write_checkpoints(sbi);
3677 }
3678
3679 if (c.abnormal_stop)
3680 memset(sb->s_stop_reason, 0, MAX_STOP_REASON);
3681
3682 if (c.fs_errors)
3683 memset(sb->s_errors, 0, MAX_F2FS_ERRORS);
3684
3685 if (c.abnormal_stop || c.fs_errors)
3686 update_superblock(sb, SB_MASK_ALL);
3687
3688 /* to return FSCK_ERROR_CORRECTED */
3689 ret = 0;
3690 }
3691 return ret;
3692 }
3693
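/* Release all memory allocated by fsck_init() and during the check. */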
3694 void fsck_free(struct f2fs_sb_info *sbi)
3695 {
3696 struct f2fs_fsck *fsck = F2FS_FSCK(sbi);
3697
3698 if (fsck->qctx)
3699 quota_release_context(&fsck->qctx);
3700
3701 if (fsck->main_area_bitmap)
3702 free(fsck->main_area_bitmap);
3703
3704 if (fsck->nat_area_bitmap)
3705 free(fsck->nat_area_bitmap);
3706
3707 if (fsck->sit_area_bitmap)
3708 free(fsck->sit_area_bitmap);
3709
3710 if (fsck->entries)
3711 free(fsck->entries);
3712
3713 if (tree_mark)
3714 free(tree_mark);
3715
3716 while (fsck->dentry) {
3717 struct f2fs_dentry *dentry = fsck->dentry;
3718
3719 fsck->dentry = fsck->dentry->next;
3720 free(dentry);
3721 }
3722 }
3723