1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) Qu Wenruo 2017. All rights reserved.
4 */
5
6 /*
7 * The module is used to catch unexpected/corrupted tree block data.
8 * Such behavior can be caused either by a fuzzed image or bugs.
9 *
10 * The objective is to do leaf/node validation checks when a tree block is read
11 * from disk, and to check *every* possible member, so other code won't
12 * need to check them again.
13 *
14 * Due to the potential for unwanted damage, every checker needs to be
15 * carefully reviewed; otherwise it could prevent valid images from being mounted.
16 */
17
18 #include "ctree.h"
19 #include "tree-checker.h"
20 #include "disk-io.h"
21 #include "compression.h"
22 #include "volumes.h"
23
24 /*
25 * Error messages should follow this format:
26 * corrupt <type>: <identifier>, <reason>[, <bad_value>]
27 *
28 * @type: leaf or node
29 * @identifier: the necessary info to locate the leaf/node.
30 * It's recommended to decode key.objectid/offset if it's
31 * meaningful.
32 * @reason: describes the error
33 * @bad_value: optional, it's recommended to output the bad value and its
34 * expected value (range).
35 *
36 * Since comma is used to separate the components, only space is allowed
37 * inside each component.
38 */
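/*
 * As a purely illustrative example (hypothetical values, not taken from a
 * real image), a leaf failing the key ordering check in check_leaf() below
 * would be reported roughly as:
 *
 *   corrupt leaf: root=5 block=29360128 slot=1, bad key order, prev (257 1 0) current (256 1 0)
 *
 * i.e. <type> is "leaf", the identifier is root/block/slot, the reason is
 * "bad key order" and the bad values are the two offending keys.
 */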
39
40 /*
41 * Append generic "corrupt leaf/node root=%llu block=%llu slot=%d: " to @fmt.
42 * Allows callers to customize the output.
43 */
44 __printf(4, 5)
45 __cold
46 static void generic_err(const struct btrfs_fs_info *fs_info,
47 const struct extent_buffer *eb, int slot,
48 const char *fmt, ...)
49 {
50 struct va_format vaf;
51 va_list args;
52
53 va_start(args, fmt);
54
55 vaf.fmt = fmt;
56 vaf.va = &args;
57
58 btrfs_crit(fs_info,
59 "corrupt %s: root=%llu block=%llu slot=%d, %pV",
60 btrfs_header_level(eb) == 0 ? "leaf" : "node",
61 btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, &vaf);
62 va_end(args);
63 }
64
65 /*
66 * Customized reporter for extent data items, since the key objectid and
67 * offset have their own meaning.
68 */
69 __printf(4, 5)
70 __cold
71 static void file_extent_err(const struct btrfs_fs_info *fs_info,
72 const struct extent_buffer *eb, int slot,
73 const char *fmt, ...)
74 {
75 struct btrfs_key key;
76 struct va_format vaf;
77 va_list args;
78
79 btrfs_item_key_to_cpu(eb, &key, slot);
80 va_start(args, fmt);
81
82 vaf.fmt = fmt;
83 vaf.va = &args;
84
85 btrfs_crit(fs_info,
86 "corrupt %s: root=%llu block=%llu slot=%d ino=%llu file_offset=%llu, %pV",
87 btrfs_header_level(eb) == 0 ? "leaf" : "node",
88 btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
89 key.objectid, key.offset, &vaf);
90 va_end(args);
91 }
92
93 /*
94 * Return 0 if the btrfs_file_extent_##name is aligned to @alignment,
95 * else return 1.
96 */
97 #define CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, name, alignment) \
98 ({ \
99 if (!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment))) \
100 file_extent_err((fs_info), (leaf), (slot), \
101 "invalid %s for file extent, have %llu, should be aligned to %u", \
102 (#name), btrfs_file_extent_##name((leaf), (fi)), \
103 (alignment)); \
104 (!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment))); \
105 })
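/*
 * Illustrative sketch only: a single use such as
 *
 *   CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, disk_bytenr, sectorsize)
 *
 * expands (roughly) to
 *
 *   ({
 *       if (!IS_ALIGNED(btrfs_file_extent_disk_bytenr(leaf, fi), sectorsize))
 *           file_extent_err(fs_info, leaf, slot,
 *               "invalid %s for file extent, have %llu, should be aligned to %u",
 *               "disk_bytenr", btrfs_file_extent_disk_bytenr(leaf, fi), sectorsize);
 *       (!IS_ALIGNED(btrfs_file_extent_disk_bytenr(leaf, fi), sectorsize));
 *   })
 *
 * so the statement expression evaluates to 1 on misalignment (after logging
 * the error) and 0 otherwise.
 */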
106
107 static int check_extent_data_item(struct btrfs_fs_info *fs_info,
108 struct extent_buffer *leaf,
109 struct btrfs_key *key, int slot)
110 {
111 struct btrfs_file_extent_item *fi;
112 u32 sectorsize = fs_info->sectorsize;
113 u32 item_size = btrfs_item_size_nr(leaf, slot);
114
115 if (!IS_ALIGNED(key->offset, sectorsize)) {
116 file_extent_err(fs_info, leaf, slot,
117 "unaligned file_offset for file extent, have %llu should be aligned to %u",
118 key->offset, sectorsize);
119 return -EUCLEAN;
120 }
121
122 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
123
124 if (btrfs_file_extent_type(leaf, fi) > BTRFS_FILE_EXTENT_TYPES) {
125 file_extent_err(fs_info, leaf, slot,
126 "invalid type for file extent, have %u expect range [0, %u]",
127 btrfs_file_extent_type(leaf, fi),
128 BTRFS_FILE_EXTENT_TYPES);
129 return -EUCLEAN;
130 }
131
132 /*
133 * Support for new compression/encryption must introduce an incompat flag,
134 * and must be caught in open_ctree().
135 */
136 if (btrfs_file_extent_compression(leaf, fi) > BTRFS_COMPRESS_TYPES) {
137 file_extent_err(fs_info, leaf, slot,
138 "invalid compression for file extent, have %u expect range [0, %u]",
139 btrfs_file_extent_compression(leaf, fi),
140 BTRFS_COMPRESS_TYPES);
141 return -EUCLEAN;
142 }
143 if (btrfs_file_extent_encryption(leaf, fi)) {
144 file_extent_err(fs_info, leaf, slot,
145 "invalid encryption for file extent, have %u expect 0",
146 btrfs_file_extent_encryption(leaf, fi));
147 return -EUCLEAN;
148 }
149 if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) {
150 /* Inline extent must have 0 as key offset */
151 if (key->offset) {
152 file_extent_err(fs_info, leaf, slot,
153 "invalid file_offset for inline file extent, have %llu expect 0",
154 key->offset);
155 return -EUCLEAN;
156 }
157
158 /* Compressed inline extent has no on-disk size, skip it */
159 if (btrfs_file_extent_compression(leaf, fi) !=
160 BTRFS_COMPRESS_NONE)
161 return 0;
162
163 /* Uncompressed inline extent size must match item size */
164 if (item_size != BTRFS_FILE_EXTENT_INLINE_DATA_START +
165 btrfs_file_extent_ram_bytes(leaf, fi)) {
166 file_extent_err(fs_info, leaf, slot,
167 "invalid ram_bytes for uncompressed inline extent, have %u expect %llu",
168 item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START +
169 btrfs_file_extent_ram_bytes(leaf, fi));
170 return -EUCLEAN;
171 }
172 return 0;
173 }
174
175 /* Regular or preallocated extent has fixed item size */
176 if (item_size != sizeof(*fi)) {
177 file_extent_err(fs_info, leaf, slot,
178 "invalid item size for reg/prealloc file extent, have %u expect %zu",
179 item_size, sizeof(*fi));
180 return -EUCLEAN;
181 }
182 if (CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, ram_bytes, sectorsize) ||
183 CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, disk_bytenr, sectorsize) ||
184 CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, disk_num_bytes, sectorsize) ||
185 CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, offset, sectorsize) ||
186 CHECK_FE_ALIGNED(fs_info, leaf, slot, fi, num_bytes, sectorsize))
187 return -EUCLEAN;
188 return 0;
189 }
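/*
 * Illustrative sketch of the inline extent size check above, assuming the
 * usual layout where BTRFS_FILE_EXTENT_INLINE_DATA_START is the offset of
 * the disk_bytenr member (21 bytes on current formats): an uncompressed
 * inline extent carrying 100 bytes of file data must have
 * ram_bytes == 100 and item_size == 21 + 100 == 121, otherwise
 * check_extent_data_item() rejects it with -EUCLEAN.
 */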
190
191 static int check_csum_item(struct btrfs_fs_info *fs_info,
192 struct extent_buffer *leaf, struct btrfs_key *key,
193 int slot)
194 {
195 u32 sectorsize = fs_info->sectorsize;
196 u32 csumsize = btrfs_super_csum_size(fs_info->super_copy);
197
198 if (key->objectid != BTRFS_EXTENT_CSUM_OBJECTID) {
199 generic_err(fs_info, leaf, slot,
200 "invalid key objectid for csum item, have %llu expect %llu",
201 key->objectid, BTRFS_EXTENT_CSUM_OBJECTID);
202 return -EUCLEAN;
203 }
204 if (!IS_ALIGNED(key->offset, sectorsize)) {
205 generic_err(fs_info, leaf, slot,
206 "unaligned key offset for csum item, have %llu should be aligned to %u",
207 key->offset, sectorsize);
208 return -EUCLEAN;
209 }
210 if (!IS_ALIGNED(btrfs_item_size_nr(leaf, slot), csumsize)) {
211 generic_err(fs_info, leaf, slot,
212 "unaligned item size for csum item, have %u should be aligned to %u",
213 btrfs_item_size_nr(leaf, slot), csumsize);
214 return -EUCLEAN;
215 }
216 return 0;
217 }
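/*
 * Worked example for the csum item checks above (hypothetical numbers):
 * with a 4K sectorsize and crc32c checksums (csumsize == 4), a csum item
 * covering 16 data blocks has a key offset that is 4K aligned and an item
 * size of 16 * 4 == 64 bytes, which satisfies both alignment checks.
 */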
218
219 /*
220 * Customized reporter for dir_item; the only important new info is
221 * key->objectid, which represents the inode number.
222 */
223 __printf(4, 5)
224 __cold
225 static void dir_item_err(const struct btrfs_fs_info *fs_info,
226 const struct extent_buffer *eb, int slot,
227 const char *fmt, ...)
228 {
229 struct btrfs_key key;
230 struct va_format vaf;
231 va_list args;
232
233 btrfs_item_key_to_cpu(eb, &key, slot);
234 va_start(args, fmt);
235
236 vaf.fmt = fmt;
237 vaf.va = &args;
238
239 btrfs_crit(fs_info,
240 "corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV",
241 btrfs_header_level(eb) == 0 ? "leaf" : "node",
242 btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
243 key.objectid, &vaf);
244 va_end(args);
245 }
246
247 static int check_dir_item(struct btrfs_fs_info *fs_info,
248 struct extent_buffer *leaf,
249 struct btrfs_key *key, int slot)
250 {
251 struct btrfs_dir_item *di;
252 u32 item_size = btrfs_item_size_nr(leaf, slot);
253 u32 cur = 0;
254
255 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
256 while (cur < item_size) {
257 u32 name_len;
258 u32 data_len;
259 u32 max_name_len;
260 u32 total_size;
261 u32 name_hash;
262 u8 dir_type;
263
264 /* header itself should not cross item boundary */
265 if (cur + sizeof(*di) > item_size) {
266 dir_item_err(fs_info, leaf, slot,
267 "dir item header crosses item boundary, have %zu boundary %u",
268 cur + sizeof(*di), item_size);
269 return -EUCLEAN;
270 }
271
272 /* dir type check */
273 dir_type = btrfs_dir_type(leaf, di);
274 if (dir_type >= BTRFS_FT_MAX) {
275 dir_item_err(fs_info, leaf, slot,
276 "invalid dir item type, have %u expect [0, %u)",
277 dir_type, BTRFS_FT_MAX);
278 return -EUCLEAN;
279 }
280
281 if (key->type == BTRFS_XATTR_ITEM_KEY &&
282 dir_type != BTRFS_FT_XATTR) {
283 dir_item_err(fs_info, leaf, slot,
284 "invalid dir item type for XATTR key, have %u expect %u",
285 dir_type, BTRFS_FT_XATTR);
286 return -EUCLEAN;
287 }
288 if (dir_type == BTRFS_FT_XATTR &&
289 key->type != BTRFS_XATTR_ITEM_KEY) {
290 dir_item_err(fs_info, leaf, slot,
291 "xattr dir type found for non-XATTR key");
292 return -EUCLEAN;
293 }
294 if (dir_type == BTRFS_FT_XATTR)
295 max_name_len = XATTR_NAME_MAX;
296 else
297 max_name_len = BTRFS_NAME_LEN;
298
299 /* Name/data length check */
300 name_len = btrfs_dir_name_len(leaf, di);
301 data_len = btrfs_dir_data_len(leaf, di);
302 if (name_len > max_name_len) {
303 dir_item_err(fs_info, leaf, slot,
304 "dir item name len too long, have %u max %u",
305 name_len, max_name_len);
306 return -EUCLEAN;
307 }
308 if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(fs_info)) {
309 dir_item_err(fs_info, leaf, slot,
310 "dir item name and data len too long, have %u max %u",
311 name_len + data_len,
312 BTRFS_MAX_XATTR_SIZE(fs_info));
313 return -EUCLEAN;
314 }
315
316 if (data_len && dir_type != BTRFS_FT_XATTR) {
317 dir_item_err(fs_info, leaf, slot,
318 "dir item with invalid data len, have %u expect 0",
319 data_len);
320 return -EUCLEAN;
321 }
322
323 total_size = sizeof(*di) + name_len + data_len;
324
325 /* header and name/data should not cross item boundary */
326 if (cur + total_size > item_size) {
327 dir_item_err(fs_info, leaf, slot,
328 "dir item data crosses item boundary, have %u boundary %u",
329 cur + total_size, item_size);
330 return -EUCLEAN;
331 }
332
333 /*
334 * Special check for XATTR/DIR_ITEM: key->offset is the name
335 * hash, so it must match the stored name.
336 */
337 if (key->type == BTRFS_DIR_ITEM_KEY ||
338 key->type == BTRFS_XATTR_ITEM_KEY) {
339 char namebuf[max(BTRFS_NAME_LEN, XATTR_NAME_MAX)];
340
341 read_extent_buffer(leaf, namebuf,
342 (unsigned long)(di + 1), name_len);
343 name_hash = btrfs_name_hash(namebuf, name_len);
344 if (key->offset != name_hash) {
345 dir_item_err(fs_info, leaf, slot,
346 "name hash mismatch with key, have 0x%016x expect 0x%016llx",
347 name_hash, key->offset);
348 return -EUCLEAN;
349 }
350 }
351 cur += total_size;
352 di = (struct btrfs_dir_item *)((void *)di + total_size);
353 }
354 return 0;
355 }
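/*
 * Illustrative walk of the loop above (made-up values): an xattr item that
 * stores "user.foo" with an 8 byte value contains one btrfs_dir_item header
 * followed by name_len == 8 name bytes and data_len == 8 value bytes, so
 * total_size == sizeof(struct btrfs_dir_item) + 8 + 8 and cur advances by
 * exactly that amount; when several xattrs share one key (a hash collision),
 * the loop simply repeats until cur reaches item_size.
 */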
356
357 __printf(4, 5)
358 __cold
359 static void block_group_err(const struct btrfs_fs_info *fs_info,
360 const struct extent_buffer *eb, int slot,
361 const char *fmt, ...)
362 {
363 struct btrfs_key key;
364 struct va_format vaf;
365 va_list args;
366
367 btrfs_item_key_to_cpu(eb, &key, slot);
368 va_start(args, fmt);
369
370 vaf.fmt = fmt;
371 vaf.va = &args;
372
373 btrfs_crit(fs_info,
374 "corrupt %s: root=%llu block=%llu slot=%d bg_start=%llu bg_len=%llu, %pV",
375 btrfs_header_level(eb) == 0 ? "leaf" : "node",
376 btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
377 key.objectid, key.offset, &vaf);
378 va_end(args);
379 }
380
381 static int check_block_group_item(struct btrfs_fs_info *fs_info,
382 struct extent_buffer *leaf,
383 struct btrfs_key *key, int slot)
384 {
385 struct btrfs_block_group_item bgi;
386 u32 item_size = btrfs_item_size_nr(leaf, slot);
387 u64 flags;
388 u64 type;
389
390 /*
391 * Here we don't really care about alignment since the extent allocator
392 * can handle it. We care more about the size.
393 */
394 if (key->offset == 0) {
395 block_group_err(fs_info, leaf, slot,
396 "invalid block group size 0");
397 return -EUCLEAN;
398 }
399
400 if (item_size != sizeof(bgi)) {
401 block_group_err(fs_info, leaf, slot,
402 "invalid item size, have %u expect %zu",
403 item_size, sizeof(bgi));
404 return -EUCLEAN;
405 }
406
407 read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
408 sizeof(bgi));
409 if (btrfs_block_group_chunk_objectid(&bgi) !=
410 BTRFS_FIRST_CHUNK_TREE_OBJECTID) {
411 block_group_err(fs_info, leaf, slot,
412 "invalid block group chunk objectid, have %llu expect %llu",
413 btrfs_block_group_chunk_objectid(&bgi),
414 BTRFS_FIRST_CHUNK_TREE_OBJECTID);
415 return -EUCLEAN;
416 }
417
418 if (btrfs_block_group_used(&bgi) > key->offset) {
419 block_group_err(fs_info, leaf, slot,
420 "invalid block group used, have %llu expect [0, %llu)",
421 btrfs_block_group_used(&bgi), key->offset);
422 return -EUCLEAN;
423 }
424
425 flags = btrfs_block_group_flags(&bgi);
426 if (hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1) {
427 block_group_err(fs_info, leaf, slot,
428 "invalid profile flags, have 0x%llx (%lu bits set) expect no more than 1 bit set",
429 flags & BTRFS_BLOCK_GROUP_PROFILE_MASK,
430 hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK));
431 return -EUCLEAN;
432 }
433
434 type = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
435 if (type != BTRFS_BLOCK_GROUP_DATA &&
436 type != BTRFS_BLOCK_GROUP_METADATA &&
437 type != BTRFS_BLOCK_GROUP_SYSTEM &&
438 type != (BTRFS_BLOCK_GROUP_METADATA |
439 BTRFS_BLOCK_GROUP_DATA)) {
440 block_group_err(fs_info, leaf, slot,
441 "invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llx or 0x%llx",
442 type, hweight64(type),
443 BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA,
444 BTRFS_BLOCK_GROUP_SYSTEM,
445 BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA);
446 return -EUCLEAN;
447 }
448 return 0;
449 }
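/*
 * Illustrative example of the flag checks above (hypothetical values): a
 * block group with flags == BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1
 * passes, because exactly one profile bit is set and the type is DATA; a
 * block group with both RAID1 and DUP profile bits set fails the hweight64()
 * check, and a type of 0 or SYSTEM|DATA fails the type check.
 */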
450
451 __printf(3,4)
452 __cold
453 static void extent_err(const struct extent_buffer *eb, int slot,
454 const char *fmt, ...)
455 {
456 struct btrfs_key key;
457 struct va_format vaf;
458 va_list args;
459 u64 bytenr;
460 u64 len;
461
462 btrfs_item_key_to_cpu(eb, &key, slot);
463 bytenr = key.objectid;
464 if (key.type == BTRFS_METADATA_ITEM_KEY)
465 len = eb->fs_info->nodesize;
466 else
467 len = key.offset;
468 va_start(args, fmt);
469
470 vaf.fmt = fmt;
471 vaf.va = &args;
472
473 btrfs_crit(eb->fs_info,
474 "corrupt %s: block=%llu slot=%d extent bytenr=%llu len=%llu %pV",
475 btrfs_header_level(eb) == 0 ? "leaf" : "node",
476 eb->start, slot, bytenr, len, &vaf);
477 va_end(args);
478 }
479
480 static int check_extent_item(struct extent_buffer *leaf,
481 struct btrfs_key *key, int slot)
482 {
483 struct btrfs_fs_info *fs_info = leaf->fs_info;
484 struct btrfs_extent_item *ei;
485 bool is_tree_block = false;
486 unsigned long ptr; /* Current pointer inside inline refs */
487 unsigned long end; /* Extent item end */
488 const u32 item_size = btrfs_item_size_nr(leaf, slot);
489 u64 flags;
490 u64 generation;
491 u64 total_refs; /* Total refs in btrfs_extent_item */
492 u64 inline_refs = 0; /* found total inline refs */
493
494 if (key->type == BTRFS_METADATA_ITEM_KEY &&
495 !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
496 generic_err(fs_info, leaf, slot,
497 "invalid key type, METADATA_ITEM type invalid when SKINNY_METADATA feature disabled");
498 return -EUCLEAN;
499 }
500 /* key->objectid is the bytenr for both key types */
501 if (!IS_ALIGNED(key->objectid, fs_info->sectorsize)) {
502 generic_err(fs_info, leaf, slot,
503 "invalid key objectid, have %llu expect to be aligned to %u",
504 key->objectid, fs_info->sectorsize);
505 return -EUCLEAN;
506 }
507
508 /* key->offset is tree level for METADATA_ITEM_KEY */
509 if (key->type == BTRFS_METADATA_ITEM_KEY &&
510 key->offset >= BTRFS_MAX_LEVEL) {
511 extent_err(leaf, slot,
512 "invalid tree level, have %llu expect [0, %u]",
513 key->offset, BTRFS_MAX_LEVEL - 1);
514 return -EUCLEAN;
515 }
516
517 /*
518 * EXTENT/METADATA_ITEM consists of:
519 * 1) One btrfs_extent_item
520 * Records the total refs, type and generation of the extent.
521 *
522 * 2) One btrfs_tree_block_info (for EXTENT_ITEM and tree backref only)
523 * Records the first key and level of the tree block.
524 *
525 * 3) Zero or more btrfs_extent_inline_ref(s)
526 * Each inline ref consists of one btrfs_extent_inline_ref recording:
527 * 3.1) The ref type, one of the 4:
528 * TREE_BLOCK_REF Tree block only
529 * SHARED_BLOCK_REF Tree block only
530 * EXTENT_DATA_REF Data only
531 * SHARED_DATA_REF Data only
532 * 3.2) Ref type specific data
533 * Either the btrfs_extent_inline_ref::offset, or a type
534 * specific data structure.
535 */
536 if (item_size < sizeof(*ei)) {
537 extent_err(leaf, slot,
538 "invalid item size, have %u expect [%zu, %u)",
539 item_size, sizeof(*ei),
540 BTRFS_LEAF_DATA_SIZE(fs_info));
541 return -EUCLEAN;
542 }
543 end = item_size + btrfs_item_ptr_offset(leaf, slot);
544
545 /* Checks against extent_item */
546 ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
547 flags = btrfs_extent_flags(leaf, ei);
548 total_refs = btrfs_extent_refs(leaf, ei);
549 generation = btrfs_extent_generation(leaf, ei);
550 if (generation > btrfs_super_generation(fs_info->super_copy) + 1) {
551 extent_err(leaf, slot,
552 "invalid generation, have %llu expect (0, %llu]",
553 generation,
554 btrfs_super_generation(fs_info->super_copy) + 1);
555 return -EUCLEAN;
556 }
557 if (!is_power_of_2(flags & (BTRFS_EXTENT_FLAG_DATA |
558 BTRFS_EXTENT_FLAG_TREE_BLOCK))) {
559 extent_err(leaf, slot,
560 "invalid extent flag, have 0x%llx expect 1 bit set in 0x%llx",
561 flags, BTRFS_EXTENT_FLAG_DATA |
562 BTRFS_EXTENT_FLAG_TREE_BLOCK);
563 return -EUCLEAN;
564 }
565 is_tree_block = !!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK);
566 if (is_tree_block) {
567 if (key->type == BTRFS_EXTENT_ITEM_KEY &&
568 key->offset != fs_info->nodesize) {
569 extent_err(leaf, slot,
570 "invalid extent length, have %llu expect %u",
571 key->offset, fs_info->nodesize);
572 return -EUCLEAN;
573 }
574 } else {
575 if (key->type != BTRFS_EXTENT_ITEM_KEY) {
576 extent_err(leaf, slot,
577 "invalid key type, have %u expect %u for data backref",
578 key->type, BTRFS_EXTENT_ITEM_KEY);
579 return -EUCLEAN;
580 }
581 if (!IS_ALIGNED(key->offset, fs_info->sectorsize)) {
582 extent_err(leaf, slot,
583 "invalid extent length, have %llu expect aligned to %u",
584 key->offset, fs_info->sectorsize);
585 return -EUCLEAN;
586 }
587 }
588 ptr = (unsigned long)(struct btrfs_extent_item *)(ei + 1);
589
590 /* Check the special case of btrfs_tree_block_info */
591 if (is_tree_block && key->type != BTRFS_METADATA_ITEM_KEY) {
592 struct btrfs_tree_block_info *info;
593
594 info = (struct btrfs_tree_block_info *)ptr;
595 if (btrfs_tree_block_level(leaf, info) >= BTRFS_MAX_LEVEL) {
596 extent_err(leaf, slot,
597 "invalid tree block info level, have %u expect [0, %u]",
598 btrfs_tree_block_level(leaf, info),
599 BTRFS_MAX_LEVEL - 1);
600 return -EUCLEAN;
601 }
602 ptr = (unsigned long)(struct btrfs_tree_block_info *)(info + 1);
603 }
604
605 /* Check inline refs */
606 while (ptr < end) {
607 struct btrfs_extent_inline_ref *iref;
608 struct btrfs_extent_data_ref *dref;
609 struct btrfs_shared_data_ref *sref;
610 u64 dref_offset;
611 u64 inline_offset;
612 u8 inline_type;
613
614 if (ptr + sizeof(*iref) > end) {
615 extent_err(leaf, slot,
616 "inline ref item overflows extent item, ptr %lu iref size %zu end %lu",
617 ptr, sizeof(*iref), end);
618 return -EUCLEAN;
619 }
620 iref = (struct btrfs_extent_inline_ref *)ptr;
621 inline_type = btrfs_extent_inline_ref_type(leaf, iref);
622 inline_offset = btrfs_extent_inline_ref_offset(leaf, iref);
623 if (ptr + btrfs_extent_inline_ref_size(inline_type) > end) {
624 extent_err(leaf, slot,
625 "inline ref item overflows extent item, ptr %lu iref size %u end %lu",
626 ptr, btrfs_extent_inline_ref_size(inline_type), end);
627 return -EUCLEAN;
628 }
629
630 switch (inline_type) {
631 /* inline_offset is subvolid of the owner, no need to check */
632 case BTRFS_TREE_BLOCK_REF_KEY:
633 inline_refs++;
634 break;
635 /* Contains parent bytenr */
636 case BTRFS_SHARED_BLOCK_REF_KEY:
637 if (!IS_ALIGNED(inline_offset, fs_info->sectorsize)) {
638 extent_err(leaf, slot,
639 "invalid tree parent bytenr, have %llu expect aligned to %u",
640 inline_offset, fs_info->sectorsize);
641 return -EUCLEAN;
642 }
643 inline_refs++;
644 break;
645 /*
646 * Contains owner subvolid, owner key objectid and the adjusted offset.
647 * The only obvious corruption that can happen here is in that offset.
648 */
649 case BTRFS_EXTENT_DATA_REF_KEY:
650 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
651 dref_offset = btrfs_extent_data_ref_offset(leaf, dref);
652 if (!IS_ALIGNED(dref_offset, fs_info->sectorsize)) {
653 extent_err(leaf, slot,
654 "invalid data ref offset, have %llu expect aligned to %u",
655 dref_offset, fs_info->sectorsize);
656 return -EUCLEAN;
657 }
658 inline_refs += btrfs_extent_data_ref_count(leaf, dref);
659 break;
660 /* Contains parent bytenr and ref count */
661 case BTRFS_SHARED_DATA_REF_KEY:
662 sref = (struct btrfs_shared_data_ref *)(iref + 1);
663 if (!IS_ALIGNED(inline_offset, fs_info->sectorsize)) {
664 extent_err(leaf, slot,
665 "invalid data parent bytenr, have %llu expect aligned to %u",
666 inline_offset, fs_info->sectorsize);
667 return -EUCLEAN;
668 }
669 inline_refs += btrfs_shared_data_ref_count(leaf, sref);
670 break;
671 default:
672 extent_err(leaf, slot, "unknown inline ref type: %u",
673 inline_type);
674 return -EUCLEAN;
675 }
676 ptr += btrfs_extent_inline_ref_size(inline_type);
677 }
678 /* No padding is allowed */
679 if (ptr != end) {
680 extent_err(leaf, slot,
681 "invalid extent item size, padding bytes found");
682 return -EUCLEAN;
683 }
684
685 /* Finally, check the inline refs against total refs */
686 if (inline_refs > total_refs) {
687 extent_err(leaf, slot,
688 "invalid extent refs, have %llu expect >= inline %llu",
689 total_refs, inline_refs);
690 return -EUCLEAN;
691 }
692 return 0;
693 }
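/*
 * Worked example of the layout checked above, assuming the common structure
 * sizes (btrfs_extent_item is 24 bytes, an inline ref header is 9 bytes):
 * a skinny METADATA_ITEM for a tree block with a single inline
 * TREE_BLOCK_REF has item_size == 24 + 9 == 33, inline_refs == 1 and
 * therefore needs total_refs >= 1; any leftover bytes after the last inline
 * ref would trip the "padding bytes found" check.
 */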
694
695 /* Inode item error output has the same format as dir_item_err() */
696 #define inode_item_err(fs_info, eb, slot, fmt, ...) \
697 dir_item_err(fs_info, eb, slot, fmt, __VA_ARGS__)
698
699 static int check_inode_item(struct btrfs_fs_info *fs_info,
700 struct extent_buffer *leaf,
701 struct btrfs_key *key, int slot)
702 {
703 struct btrfs_inode_item *iitem;
704 u64 super_gen = btrfs_super_generation(fs_info->super_copy);
705 u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777);
706 u32 mode;
707
708 if ((key->objectid < BTRFS_FIRST_FREE_OBJECTID ||
709 key->objectid > BTRFS_LAST_FREE_OBJECTID) &&
710 key->objectid != BTRFS_ROOT_TREE_DIR_OBJECTID &&
711 key->objectid != BTRFS_FREE_INO_OBJECTID) {
712 generic_err(fs_info, leaf, slot,
713 "invalid key objectid: has %llu expect %llu or [%llu, %llu] or %llu",
714 key->objectid, BTRFS_ROOT_TREE_DIR_OBJECTID,
715 BTRFS_FIRST_FREE_OBJECTID,
716 BTRFS_LAST_FREE_OBJECTID,
717 BTRFS_FREE_INO_OBJECTID);
718 return -EUCLEAN;
719 }
720 if (key->offset != 0) {
721 inode_item_err(fs_info, leaf, slot,
722 "invalid key offset: has %llu expect 0",
723 key->offset);
724 return -EUCLEAN;
725 }
726 iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);
727
728 /* Here we use super block generation + 1 to handle log tree */
729 if (btrfs_inode_generation(leaf, iitem) > super_gen + 1) {
730 inode_item_err(fs_info, leaf, slot,
731 "invalid inode generation: has %llu expect (0, %llu]",
732 btrfs_inode_generation(leaf, iitem),
733 super_gen + 1);
734 return -EUCLEAN;
735 }
736 /* Note for ROOT_TREE_DIR_ITEM, mkfs could set its transid 0 */
737 if (btrfs_inode_transid(leaf, iitem) > super_gen + 1) {
738 inode_item_err(fs_info, leaf, slot,
739 "invalid inode generation: has %llu expect [0, %llu]",
740 btrfs_inode_transid(leaf, iitem), super_gen + 1);
741 return -EUCLEAN;
742 }
743
744 /*
745 * For size and nbytes it's better not to be too strict, as for a dir
746 * item its size/nbytes can easily be wrong without affecting
747 * anything in the fs. So here we skip the check.
748 */
749 mode = btrfs_inode_mode(leaf, iitem);
750 if (mode & ~valid_mask) {
751 inode_item_err(fs_info, leaf, slot,
752 "unknown mode bit detected: 0x%x",
753 mode & ~valid_mask);
754 return -EUCLEAN;
755 }
756
757 /*
758 * S_IFMT is not bit mapped, so we can't completely rely on is_power_of_2(),
759 * but is_power_of_2() does cover FIFO/CHR/DIR/REG.
760 * We only need to check BLK, LNK and SOCK explicitly.
761 */
762 if (!is_power_of_2(mode & S_IFMT)) {
763 if (!S_ISLNK(mode) && !S_ISBLK(mode) && !S_ISSOCK(mode)) {
764 inode_item_err(fs_info, leaf, slot,
765 "invalid mode: has 0%o expect valid S_IF* bit(s)",
766 mode & S_IFMT);
767 return -EUCLEAN;
768 }
769 }
770 if (S_ISDIR(mode) && btrfs_inode_nlink(leaf, iitem) > 1) {
771 inode_item_err(fs_info, leaf, slot,
772 "invalid nlink: has %u expect no more than 1 for dir",
773 btrfs_inode_nlink(leaf, iitem));
774 return -EUCLEAN;
775 }
776 if (btrfs_inode_flags(leaf, iitem) & ~BTRFS_INODE_FLAG_MASK) {
777 inode_item_err(fs_info, leaf, slot,
778 "unknown flags detected: 0x%llx",
779 btrfs_inode_flags(leaf, iitem) &
780 ~BTRFS_INODE_FLAG_MASK);
781 return -EUCLEAN;
782 }
783 return 0;
784 }
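/*
 * Illustrative sketch of the mode checks above (hypothetical inode): a
 * regular file with mode 0100644 (S_IFREG | 0644) passes, since no bits
 * outside valid_mask are set and S_IFREG is a single-bit file type; an
 * S_IFMT field of 0110000 (an impossible REG|CHR combination) is neither a
 * power of two nor LNK/BLK/SOCK, so it is rejected, as is a directory inode
 * whose nlink is greater than 1.
 */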
785
786 /*
787 * Common entry point to dispatch the item-specific validation.
788 */
789 static int check_leaf_item(struct btrfs_fs_info *fs_info,
790 struct extent_buffer *leaf,
791 struct btrfs_key *key, int slot)
792 {
793 int ret = 0;
794
795 switch (key->type) {
796 case BTRFS_EXTENT_DATA_KEY:
797 ret = check_extent_data_item(fs_info, leaf, key, slot);
798 break;
799 case BTRFS_EXTENT_CSUM_KEY:
800 ret = check_csum_item(fs_info, leaf, key, slot);
801 break;
802 case BTRFS_DIR_ITEM_KEY:
803 case BTRFS_DIR_INDEX_KEY:
804 case BTRFS_XATTR_ITEM_KEY:
805 ret = check_dir_item(fs_info, leaf, key, slot);
806 break;
807 case BTRFS_BLOCK_GROUP_ITEM_KEY:
808 ret = check_block_group_item(fs_info, leaf, key, slot);
809 break;
810 case BTRFS_EXTENT_ITEM_KEY:
811 case BTRFS_METADATA_ITEM_KEY:
812 ret = check_extent_item(leaf, key, slot);
813 break;
814 case BTRFS_INODE_ITEM_KEY:
815 ret = check_inode_item(fs_info, leaf, key, slot);
816 break;
817 }
818 return ret;
819 }
820
821 static int check_leaf(struct btrfs_fs_info *fs_info, struct extent_buffer *leaf,
822 bool check_item_data)
823 {
824 /* No valid key type is 0, so all keys should be larger than this key */
825 struct btrfs_key prev_key = {0, 0, 0};
826 struct btrfs_key key;
827 u32 nritems = btrfs_header_nritems(leaf);
828 int slot;
829
830 if (btrfs_header_level(leaf) != 0) {
831 generic_err(fs_info, leaf, 0,
832 "invalid level for leaf, have %d expect 0",
833 btrfs_header_level(leaf));
834 return -EUCLEAN;
835 }
836
837 /*
838 * Extent buffers from a relocation tree have an owner field that
839 * corresponds to the subvolume tree they are based on. So just from an
840 * extent buffer alone we cannot find out the id of the
841 * corresponding subvolume tree, so we cannot figure out if the extent
842 * buffer corresponds to the root of the relocation tree or not. So
843 * skip this check for relocation trees.
844 */
845 if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
846 u64 owner = btrfs_header_owner(leaf);
847
848 /* These trees must never be empty */
849 if (owner == BTRFS_ROOT_TREE_OBJECTID ||
850 owner == BTRFS_CHUNK_TREE_OBJECTID ||
851 owner == BTRFS_EXTENT_TREE_OBJECTID ||
852 owner == BTRFS_DEV_TREE_OBJECTID ||
853 owner == BTRFS_FS_TREE_OBJECTID ||
854 owner == BTRFS_DATA_RELOC_TREE_OBJECTID) {
855 generic_err(fs_info, leaf, 0,
856 "invalid root, root %llu must never be empty",
857 owner);
858 return -EUCLEAN;
859 }
860 /* Unknown tree */
861 if (owner == 0) {
862 generic_err(fs_info, leaf, 0,
863 "invalid owner, root 0 is not defined");
864 return -EUCLEAN;
865 }
866 return 0;
867 }
868
869 if (nritems == 0)
870 return 0;
871
872 /*
873 * Check the following things to make sure this is a good leaf, and
874 * leaf users won't need to bother with similar sanity checks:
875 *
876 * 1) key ordering
877 * 2) item offset and size
878 * No overlap, no hole, all inside the leaf.
879 * 3) item content
880 * If possible, do comprehensive sanity check.
881 * NOTE: All checks must only rely on the item data itself.
882 */
883 for (slot = 0; slot < nritems; slot++) {
884 u32 item_end_expected;
885 int ret;
886
887 btrfs_item_key_to_cpu(leaf, &key, slot);
888
889 /* Make sure the keys are in the right order */
890 if (btrfs_comp_cpu_keys(&prev_key, &key) >= 0) {
891 generic_err(fs_info, leaf, slot,
892 "bad key order, prev (%llu %u %llu) current (%llu %u %llu)",
893 prev_key.objectid, prev_key.type,
894 prev_key.offset, key.objectid, key.type,
895 key.offset);
896 return -EUCLEAN;
897 }
898
899 /*
900 * Make sure the offset and ends are right, remember that the
901 * item data starts at the end of the leaf and grows towards the
902 * front.
903 */
904 if (slot == 0)
905 item_end_expected = BTRFS_LEAF_DATA_SIZE(fs_info);
906 else
907 item_end_expected = btrfs_item_offset_nr(leaf,
908 slot - 1);
909 if (btrfs_item_end_nr(leaf, slot) != item_end_expected) {
910 generic_err(fs_info, leaf, slot,
911 "unexpected item end, have %u expect %u",
912 btrfs_item_end_nr(leaf, slot),
913 item_end_expected);
914 return -EUCLEAN;
915 }
916
917 /*
918 * Check to make sure that we don't point outside of the leaf,
919 * just in case all the items are consistent with each other, but
920 * all point outside of the leaf.
921 */
922 if (btrfs_item_end_nr(leaf, slot) >
923 BTRFS_LEAF_DATA_SIZE(fs_info)) {
924 generic_err(fs_info, leaf, slot,
925 "slot end outside of leaf, have %u expect range [0, %u]",
926 btrfs_item_end_nr(leaf, slot),
927 BTRFS_LEAF_DATA_SIZE(fs_info));
928 return -EUCLEAN;
929 }
930
931 /* Also check if the item pointer overlaps with btrfs item. */
932 if (btrfs_item_nr_offset(slot) + sizeof(struct btrfs_item) >
933 btrfs_item_ptr_offset(leaf, slot)) {
934 generic_err(fs_info, leaf, slot,
935 "slot overlaps with its data, item end %lu data start %lu",
936 btrfs_item_nr_offset(slot) +
937 sizeof(struct btrfs_item),
938 btrfs_item_ptr_offset(leaf, slot));
939 return -EUCLEAN;
940 }
941
942 if (check_item_data) {
943 /*
944 * Check if the item size and content meet other
945 * criteria
946 */
947 ret = check_leaf_item(fs_info, leaf, &key, slot);
948 if (ret < 0)
949 return ret;
950 }
951
952 prev_key.objectid = key.objectid;
953 prev_key.type = key.type;
954 prev_key.offset = key.offset;
955 }
956
957 return 0;
958 }
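/*
 * Worked example of the item geometry check above (hypothetical leaf,
 * assuming a 16K nodesize where BTRFS_LEAF_DATA_SIZE is roughly 16283
 * bytes): slot 0 with item size 160 must have offset 16123 so that its end
 * equals BTRFS_LEAF_DATA_SIZE; slot 1 with item size 32 must then end at
 * 16123, i.e. start at 16091. Any gap or overlap between consecutive item
 * data regions fails the "unexpected item end" check.
 */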
959
960 int btrfs_check_leaf_full(struct btrfs_fs_info *fs_info,
961 struct extent_buffer *leaf)
962 {
963 return check_leaf(fs_info, leaf, true);
964 }
965
966 int btrfs_check_leaf_relaxed(struct btrfs_fs_info *fs_info,
967 struct extent_buffer *leaf)
968 {
969 return check_leaf(fs_info, leaf, false);
970 }
971
972 int btrfs_check_node(struct btrfs_fs_info *fs_info, struct extent_buffer *node)
973 {
974 unsigned long nr = btrfs_header_nritems(node);
975 struct btrfs_key key, next_key;
976 int slot;
977 int level = btrfs_header_level(node);
978 u64 bytenr;
979 int ret = 0;
980
981 if (level <= 0 || level >= BTRFS_MAX_LEVEL) {
982 generic_err(fs_info, node, 0,
983 "invalid level for node, have %d expect [1, %d]",
984 level, BTRFS_MAX_LEVEL - 1);
985 return -EUCLEAN;
986 }
987 if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(fs_info)) {
988 btrfs_crit(fs_info,
989 "corrupt node: root=%llu block=%llu, nritems too %s, have %lu expect range [1,%u]",
990 btrfs_header_owner(node), node->start,
991 nr == 0 ? "small" : "large", nr,
992 BTRFS_NODEPTRS_PER_BLOCK(fs_info));
993 return -EUCLEAN;
994 }
995
996 for (slot = 0; slot < nr - 1; slot++) {
997 bytenr = btrfs_node_blockptr(node, slot);
998 btrfs_node_key_to_cpu(node, &key, slot);
999 btrfs_node_key_to_cpu(node, &next_key, slot + 1);
1000
1001 if (!bytenr) {
1002 generic_err(fs_info, node, slot,
1003 "invalid NULL node pointer");
1004 ret = -EUCLEAN;
1005 goto out;
1006 }
1007 if (!IS_ALIGNED(bytenr, fs_info->sectorsize)) {
1008 generic_err(fs_info, node, slot,
1009 "unaligned pointer, have %llu should be aligned to %u",
1010 bytenr, fs_info->sectorsize);
1011 ret = -EUCLEAN;
1012 goto out;
1013 }
1014
1015 if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
1016 generic_err(fs_info, node, slot,
1017 "bad key order, current (%llu %u %llu) next (%llu %u %llu)",
1018 key.objectid, key.type, key.offset,
1019 next_key.objectid, next_key.type,
1020 next_key.offset);
1021 ret = -EUCLEAN;
1022 goto out;
1023 }
1024 }
1025 out:
1026 return ret;
1027 }
1028