1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) Qu Wenruo 2017.  All rights reserved.
4  */
5 
6 /*
7  * The module is used to catch unexpected/corrupted tree block data.
8  * Such behavior can be caused either by a fuzzed image or bugs.
9  *
10  * The objective is to do leaf/node validation checks when a tree block is
11  * read from disk, and to check *every* possible member, so other code won't
12  * need to check them again.
13  *
14  * Due to the potential and unwanted damage, every checker needs to be
15  * carefully reviewed, otherwise it may prevent mounting of valid images.
16  */
17 
18 #include <linux/types.h>
19 #include <linux/stddef.h>
20 #include <linux/error-injection.h>
21 #include "ctree.h"
22 #include "tree-checker.h"
23 #include "disk-io.h"
24 #include "compression.h"
25 #include "volumes.h"
26 
27 /*
28  * Error message should follow the following format:
29  * corrupt <type>: <identifier>, <reason>[, <bad_value>]
30  *
31  * @type:	leaf or node
32  * @identifier:	the necessary info to locate the leaf/node.
33  * 		It's recommended to decode key.objectid/offset if it's
34  * 		meaningful.
35  * @reason:	describe the error
36  * @bad_value:	optional, it's recommended to output bad value and its
37  *		expected value (range).
38  *
39  * Since comma is used to separate the components, only space is allowed
40  * inside each component.
41  */
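/*
 * For example, a bad key order caught by check_leaf() is reported as
 * (illustrative values only):
 *
 *   corrupt leaf: root=5 block=29360128 slot=1, bad key order, prev (257 1 0) current (256 1 0)
 */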
42 
43 /*
44  * Prepend the generic "corrupt leaf/node root=%llu block=%llu slot=%d, "
45  * prefix to the message built from @fmt.  Allows callers to customize the output.
46  */
47 __printf(3, 4)
48 __cold
49 static void generic_err(const struct extent_buffer *eb, int slot,
50 			const char *fmt, ...)
51 {
52 	const struct btrfs_fs_info *fs_info = eb->fs_info;
53 	struct va_format vaf;
54 	va_list args;
55 
56 	va_start(args, fmt);
57 
58 	vaf.fmt = fmt;
59 	vaf.va = &args;
60 
61 	btrfs_crit(fs_info,
62 		"corrupt %s: root=%llu block=%llu slot=%d, %pV",
63 		btrfs_header_level(eb) == 0 ? "leaf" : "node",
64 		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot, &vaf);
65 	va_end(args);
66 }
67 
68 /*
69  * Customized reporter for extent data item, since its key objectid and
70  * offset have their own meaning.
71  */
72 __printf(3, 4)
73 __cold
74 static void file_extent_err(const struct extent_buffer *eb, int slot,
75 			    const char *fmt, ...)
76 {
77 	const struct btrfs_fs_info *fs_info = eb->fs_info;
78 	struct btrfs_key key;
79 	struct va_format vaf;
80 	va_list args;
81 
82 	btrfs_item_key_to_cpu(eb, &key, slot);
83 	va_start(args, fmt);
84 
85 	vaf.fmt = fmt;
86 	vaf.va = &args;
87 
88 	btrfs_crit(fs_info,
89 	"corrupt %s: root=%llu block=%llu slot=%d ino=%llu file_offset=%llu, %pV",
90 		btrfs_header_level(eb) == 0 ? "leaf" : "node",
91 		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
92 		key.objectid, key.offset, &vaf);
93 	va_end(args);
94 }
95 
96 /*
97  * Return 0 if the btrfs_file_extent_##name is aligned to @alignment
98  * Else return 1
99  */
100 #define CHECK_FE_ALIGNED(leaf, slot, fi, name, alignment)		      \
101 ({									      \
102 	if (!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment))) \
103 		file_extent_err((leaf), (slot),				      \
104 	"invalid %s for file extent, have %llu, should be aligned to %u",     \
105 			(#name), btrfs_file_extent_##name((leaf), (fi)),      \
106 			(alignment));					      \
107 	(!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment)));   \
108 })
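/*
 * Typical use, as in check_extent_data_item() below:
 *
 *   if (CHECK_FE_ALIGNED(leaf, slot, fi, num_bytes, sectorsize))
 *           return -EUCLEAN;
 *
 * which both logs the misalignment and tells the caller to reject the leaf.
 */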
109 
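/*
 * Return the exclusive end offset of a file extent item.  For inline extents
 * the length is ram_bytes rounded up to sectorsize, for regular/prealloc
 * extents it is num_bytes.  E.g. an inline extent with ram_bytes=100 on a 4K
 * sectorsize fs ends at offset 4096 (inline extents always start at offset 0).
 */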
110 static u64 file_extent_end(struct extent_buffer *leaf,
111 			   struct btrfs_key *key,
112 			   struct btrfs_file_extent_item *extent)
113 {
114 	u64 end;
115 	u64 len;
116 
117 	if (btrfs_file_extent_type(leaf, extent) == BTRFS_FILE_EXTENT_INLINE) {
118 		len = btrfs_file_extent_ram_bytes(leaf, extent);
119 		end = ALIGN(key->offset + len, leaf->fs_info->sectorsize);
120 	} else {
121 		len = btrfs_file_extent_num_bytes(leaf, extent);
122 		end = key->offset + len;
123 	}
124 	return end;
125 }
126 
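/*
 * Sanity check an EXTENT_DATA item: key offset alignment, extent type,
 * compression/encryption fields, item size (inline vs regular/prealloc),
 * per-field sector alignment, extent end overflow, and overlap with the
 * previous file extent in the same leaf.
 */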
127 static int check_extent_data_item(struct extent_buffer *leaf,
128 				  struct btrfs_key *key, int slot,
129 				  struct btrfs_key *prev_key)
130 {
131 	struct btrfs_fs_info *fs_info = leaf->fs_info;
132 	struct btrfs_file_extent_item *fi;
133 	u32 sectorsize = fs_info->sectorsize;
134 	u32 item_size = btrfs_item_size_nr(leaf, slot);
135 	u64 extent_end;
136 
137 	if (!IS_ALIGNED(key->offset, sectorsize)) {
138 		file_extent_err(leaf, slot,
139 "unaligned file_offset for file extent, have %llu should be aligned to %u",
140 			key->offset, sectorsize);
141 		return -EUCLEAN;
142 	}
143 
144 	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
145 
146 	if (btrfs_file_extent_type(leaf, fi) > BTRFS_FILE_EXTENT_TYPES) {
147 		file_extent_err(leaf, slot,
148 		"invalid type for file extent, have %u expect range [0, %u]",
149 			btrfs_file_extent_type(leaf, fi),
150 			BTRFS_FILE_EXTENT_TYPES);
151 		return -EUCLEAN;
152 	}
153 
154 	/*
155 	 * Support for new compression/encryption must introduce an incompat flag,
156 	 * and must be caught in open_ctree().
157 	 */
158 	if (btrfs_file_extent_compression(leaf, fi) > BTRFS_COMPRESS_TYPES) {
159 		file_extent_err(leaf, slot,
160 	"invalid compression for file extent, have %u expect range [0, %u]",
161 			btrfs_file_extent_compression(leaf, fi),
162 			BTRFS_COMPRESS_TYPES);
163 		return -EUCLEAN;
164 	}
165 	if (btrfs_file_extent_encryption(leaf, fi)) {
166 		file_extent_err(leaf, slot,
167 			"invalid encryption for file extent, have %u expect 0",
168 			btrfs_file_extent_encryption(leaf, fi));
169 		return -EUCLEAN;
170 	}
171 	if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) {
172 		/* Inline extent must have 0 as key offset */
173 		if (key->offset) {
174 			file_extent_err(leaf, slot,
175 		"invalid file_offset for inline file extent, have %llu expect 0",
176 				key->offset);
177 			return -EUCLEAN;
178 		}
179 
180 		/* Compressed inline extent has no on-disk size, skip it */
181 		if (btrfs_file_extent_compression(leaf, fi) !=
182 		    BTRFS_COMPRESS_NONE)
183 			return 0;
184 
185 		/* Uncompressed inline extent size must match item size */
186 		if (item_size != BTRFS_FILE_EXTENT_INLINE_DATA_START +
187 		    btrfs_file_extent_ram_bytes(leaf, fi)) {
188 			file_extent_err(leaf, slot,
189 	"invalid ram_bytes for uncompressed inline extent, have %u expect %llu",
190 				item_size, BTRFS_FILE_EXTENT_INLINE_DATA_START +
191 				btrfs_file_extent_ram_bytes(leaf, fi));
192 			return -EUCLEAN;
193 		}
194 		return 0;
195 	}
196 
197 	/* Regular or preallocated extent has fixed item size */
198 	if (item_size != sizeof(*fi)) {
199 		file_extent_err(leaf, slot,
200 	"invalid item size for reg/prealloc file extent, have %u expect %zu",
201 			item_size, sizeof(*fi));
202 		return -EUCLEAN;
203 	}
204 	if (CHECK_FE_ALIGNED(leaf, slot, fi, ram_bytes, sectorsize) ||
205 	    CHECK_FE_ALIGNED(leaf, slot, fi, disk_bytenr, sectorsize) ||
206 	    CHECK_FE_ALIGNED(leaf, slot, fi, disk_num_bytes, sectorsize) ||
207 	    CHECK_FE_ALIGNED(leaf, slot, fi, offset, sectorsize) ||
208 	    CHECK_FE_ALIGNED(leaf, slot, fi, num_bytes, sectorsize))
209 		return -EUCLEAN;
210 
211 	/* Catch extent end overflow */
212 	if (check_add_overflow(btrfs_file_extent_num_bytes(leaf, fi),
213 			       key->offset, &extent_end)) {
214 		file_extent_err(leaf, slot,
215 	"extent end overflow, have file offset %llu extent num bytes %llu",
216 				key->offset,
217 				btrfs_file_extent_num_bytes(leaf, fi));
218 		return -EUCLEAN;
219 	}
220 
221 	/*
222 	 * Check that no two consecutive file extent items, in the same leaf,
223 	 * present ranges that overlap each other.
224 	 */
225 	if (slot > 0 &&
226 	    prev_key->objectid == key->objectid &&
227 	    prev_key->type == BTRFS_EXTENT_DATA_KEY) {
228 		struct btrfs_file_extent_item *prev_fi;
229 		u64 prev_end;
230 
231 		prev_fi = btrfs_item_ptr(leaf, slot - 1,
232 					 struct btrfs_file_extent_item);
233 		prev_end = file_extent_end(leaf, prev_key, prev_fi);
234 		if (prev_end > key->offset) {
235 			file_extent_err(leaf, slot - 1,
236 "file extent end range (%llu) goes beyond start offset (%llu) of the next file extent",
237 					prev_end, key->offset);
238 			return -EUCLEAN;
239 		}
240 	}
241 
242 	return 0;
243 }
244 
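/*
 * Check an EXTENT_CSUM item: key objectid, offset alignment, item size
 * alignment, and overlap with the previous csum item.  The covered range is
 * (item_size / csumsize) * sectorsize bytes; e.g. with 4-byte crc32c csums, a
 * 4K sectorsize and a 40 byte item, the item covers 10 blocks (40960 bytes)
 * starting at key->offset.
 */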
245 static int check_csum_item(struct extent_buffer *leaf, struct btrfs_key *key,
246 			   int slot, struct btrfs_key *prev_key)
247 {
248 	struct btrfs_fs_info *fs_info = leaf->fs_info;
249 	u32 sectorsize = fs_info->sectorsize;
250 	u32 csumsize = btrfs_super_csum_size(fs_info->super_copy);
251 
252 	if (key->objectid != BTRFS_EXTENT_CSUM_OBJECTID) {
253 		generic_err(leaf, slot,
254 		"invalid key objectid for csum item, have %llu expect %llu",
255 			key->objectid, BTRFS_EXTENT_CSUM_OBJECTID);
256 		return -EUCLEAN;
257 	}
258 	if (!IS_ALIGNED(key->offset, sectorsize)) {
259 		generic_err(leaf, slot,
260 	"unaligned key offset for csum item, have %llu should be aligned to %u",
261 			key->offset, sectorsize);
262 		return -EUCLEAN;
263 	}
264 	if (!IS_ALIGNED(btrfs_item_size_nr(leaf, slot), csumsize)) {
265 		generic_err(leaf, slot,
266 	"unaligned item size for csum item, have %u should be aligned to %u",
267 			btrfs_item_size_nr(leaf, slot), csumsize);
268 		return -EUCLEAN;
269 	}
270 	if (slot > 0 && prev_key->type == BTRFS_EXTENT_CSUM_KEY) {
271 		u64 prev_csum_end;
272 		u32 prev_item_size;
273 
274 		prev_item_size = btrfs_item_size_nr(leaf, slot - 1);
275 		prev_csum_end = (prev_item_size / csumsize) * sectorsize;
276 		prev_csum_end += prev_key->offset;
277 		if (prev_csum_end > key->offset) {
278 			generic_err(leaf, slot - 1,
279 "csum end range (%llu) goes beyond the start range (%llu) of the next csum item",
280 				    prev_csum_end, key->offset);
281 			return -EUCLEAN;
282 		}
283 	}
284 	return 0;
285 }
286 
287 /*
288  * Customized reporter for dir_item; the only important new info is
289  * key->objectid, which represents the inode number
290  */
291 __printf(3, 4)
292 __cold
293 static void dir_item_err(const struct extent_buffer *eb, int slot,
294 			 const char *fmt, ...)
295 {
296 	const struct btrfs_fs_info *fs_info = eb->fs_info;
297 	struct btrfs_key key;
298 	struct va_format vaf;
299 	va_list args;
300 
301 	btrfs_item_key_to_cpu(eb, &key, slot);
302 	va_start(args, fmt);
303 
304 	vaf.fmt = fmt;
305 	vaf.va = &args;
306 
307 	btrfs_crit(fs_info,
308 	"corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV",
309 		btrfs_header_level(eb) == 0 ? "leaf" : "node",
310 		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
311 		key.objectid, &vaf);
312 	va_end(args);
313 }
314 
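/*
 * A DIR_ITEM/DIR_INDEX/XATTR_ITEM item can pack several btrfs_dir_item
 * entries (header + name [+ data]) back to back.  Walk them all and validate
 * each header, dir type, name/data length and, for hashed keys, the name
 * hash against key->offset.
 */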
315 static int check_dir_item(struct extent_buffer *leaf,
316 			  struct btrfs_key *key, int slot)
317 {
318 	struct btrfs_fs_info *fs_info = leaf->fs_info;
319 	struct btrfs_dir_item *di;
320 	u32 item_size = btrfs_item_size_nr(leaf, slot);
321 	u32 cur = 0;
322 
323 	di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
324 	while (cur < item_size) {
325 		u32 name_len;
326 		u32 data_len;
327 		u32 max_name_len;
328 		u32 total_size;
329 		u32 name_hash;
330 		u8 dir_type;
331 
332 		/* header itself should not cross item boundary */
333 		if (cur + sizeof(*di) > item_size) {
334 			dir_item_err(leaf, slot,
335 		"dir item header crosses item boundary, have %zu boundary %u",
336 				cur + sizeof(*di), item_size);
337 			return -EUCLEAN;
338 		}
339 
340 		/* dir type check */
341 		dir_type = btrfs_dir_type(leaf, di);
342 		if (dir_type >= BTRFS_FT_MAX) {
343 			dir_item_err(leaf, slot,
344 			"invalid dir item type, have %u expect [0, %u)",
345 				dir_type, BTRFS_FT_MAX);
346 			return -EUCLEAN;
347 		}
348 
349 		if (key->type == BTRFS_XATTR_ITEM_KEY &&
350 		    dir_type != BTRFS_FT_XATTR) {
351 			dir_item_err(leaf, slot,
352 		"invalid dir item type for XATTR key, have %u expect %u",
353 				dir_type, BTRFS_FT_XATTR);
354 			return -EUCLEAN;
355 		}
356 		if (dir_type == BTRFS_FT_XATTR &&
357 		    key->type != BTRFS_XATTR_ITEM_KEY) {
358 			dir_item_err(leaf, slot,
359 			"xattr dir type found for non-XATTR key");
360 			return -EUCLEAN;
361 		}
362 		if (dir_type == BTRFS_FT_XATTR)
363 			max_name_len = XATTR_NAME_MAX;
364 		else
365 			max_name_len = BTRFS_NAME_LEN;
366 
367 		/* Name/data length check */
368 		name_len = btrfs_dir_name_len(leaf, di);
369 		data_len = btrfs_dir_data_len(leaf, di);
370 		if (name_len > max_name_len) {
371 			dir_item_err(leaf, slot,
372 			"dir item name len too long, have %u max %u",
373 				name_len, max_name_len);
374 			return -EUCLEAN;
375 		}
376 		if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(fs_info)) {
377 			dir_item_err(leaf, slot,
378 			"dir item name and data len too long, have %u max %u",
379 				name_len + data_len,
380 				BTRFS_MAX_XATTR_SIZE(fs_info));
381 			return -EUCLEAN;
382 		}
383 
384 		if (data_len && dir_type != BTRFS_FT_XATTR) {
385 			dir_item_err(leaf, slot,
386 			"dir item with invalid data len, have %u expect 0",
387 				data_len);
388 			return -EUCLEAN;
389 		}
390 
391 		total_size = sizeof(*di) + name_len + data_len;
392 
393 		/* header and name/data should not cross item boundary */
394 		if (cur + total_size > item_size) {
395 			dir_item_err(leaf, slot,
396 		"dir item data crosses item boundary, have %u boundary %u",
397 				cur + total_size, item_size);
398 			return -EUCLEAN;
399 		}
400 
401 		/*
402 		 * Special check for XATTR/DIR_ITEM, as key->offset is name
403 		 * hash, should match its name
404 		 */
405 		if (key->type == BTRFS_DIR_ITEM_KEY ||
406 		    key->type == BTRFS_XATTR_ITEM_KEY) {
407 			char namebuf[max(BTRFS_NAME_LEN, XATTR_NAME_MAX)];
408 
409 			read_extent_buffer(leaf, namebuf,
410 					(unsigned long)(di + 1), name_len);
411 			name_hash = btrfs_name_hash(namebuf, name_len);
412 			if (key->offset != name_hash) {
413 				dir_item_err(leaf, slot,
414 		"name hash mismatch with key, have 0x%016x expect 0x%016llx",
415 					name_hash, key->offset);
416 				return -EUCLEAN;
417 			}
418 		}
419 		cur += total_size;
420 		di = (struct btrfs_dir_item *)((void *)di + total_size);
421 	}
422 	return 0;
423 }
424 
425 __printf(3, 4)
426 __cold
427 static void block_group_err(const struct extent_buffer *eb, int slot,
428 			    const char *fmt, ...)
429 {
430 	const struct btrfs_fs_info *fs_info = eb->fs_info;
431 	struct btrfs_key key;
432 	struct va_format vaf;
433 	va_list args;
434 
435 	btrfs_item_key_to_cpu(eb, &key, slot);
436 	va_start(args, fmt);
437 
438 	vaf.fmt = fmt;
439 	vaf.va = &args;
440 
441 	btrfs_crit(fs_info,
442 	"corrupt %s: root=%llu block=%llu slot=%d bg_start=%llu bg_len=%llu, %pV",
443 		btrfs_header_level(eb) == 0 ? "leaf" : "node",
444 		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
445 		key.objectid, key.offset, &vaf);
446 	va_end(args);
447 }
448 
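/*
 * Validate a BLOCK_GROUP_ITEM: non-zero length (key->offset), exact item
 * size, chunk objectid, used <= length, at most one RAID profile bit and a
 * known block group type in the flags.
 */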
449 static int check_block_group_item(struct extent_buffer *leaf,
450 				  struct btrfs_key *key, int slot)
451 {
452 	struct btrfs_block_group_item bgi;
453 	u32 item_size = btrfs_item_size_nr(leaf, slot);
454 	u64 flags;
455 	u64 type;
456 
457 	/*
458 	 * Here we don't really care about alignment since extent allocator can
459 	 * handle it.  We care more about the size.
460 	 */
461 	if (key->offset == 0) {
462 		block_group_err(leaf, slot,
463 				"invalid block group size 0");
464 		return -EUCLEAN;
465 	}
466 
467 	if (item_size != sizeof(bgi)) {
468 		block_group_err(leaf, slot,
469 			"invalid item size, have %u expect %zu",
470 				item_size, sizeof(bgi));
471 		return -EUCLEAN;
472 	}
473 
474 	read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
475 			   sizeof(bgi));
476 	if (btrfs_block_group_chunk_objectid(&bgi) !=
477 	    BTRFS_FIRST_CHUNK_TREE_OBJECTID) {
478 		block_group_err(leaf, slot,
479 		"invalid block group chunk objectid, have %llu expect %llu",
480 				btrfs_block_group_chunk_objectid(&bgi),
481 				BTRFS_FIRST_CHUNK_TREE_OBJECTID);
482 		return -EUCLEAN;
483 	}
484 
485 	if (btrfs_block_group_used(&bgi) > key->offset) {
486 		block_group_err(leaf, slot,
487 			"invalid block group used, have %llu expect [0, %llu)",
488 				btrfs_block_group_used(&bgi), key->offset);
489 		return -EUCLEAN;
490 	}
491 
492 	flags = btrfs_block_group_flags(&bgi);
493 	if (hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1) {
494 		block_group_err(leaf, slot,
495 "invalid profile flags, have 0x%llx (%lu bits set) expect no more than 1 bit set",
496 			flags & BTRFS_BLOCK_GROUP_PROFILE_MASK,
497 			hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK));
498 		return -EUCLEAN;
499 	}
500 
501 	type = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
502 	if (type != BTRFS_BLOCK_GROUP_DATA &&
503 	    type != BTRFS_BLOCK_GROUP_METADATA &&
504 	    type != BTRFS_BLOCK_GROUP_SYSTEM &&
505 	    type != (BTRFS_BLOCK_GROUP_METADATA |
506 			   BTRFS_BLOCK_GROUP_DATA)) {
507 		block_group_err(leaf, slot,
508 "invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llx or 0x%llx",
509 			type, hweight64(type),
510 			BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA,
511 			BTRFS_BLOCK_GROUP_SYSTEM,
512 			BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA);
513 		return -EUCLEAN;
514 	}
515 	return 0;
516 }
517 
518 __printf(4, 5)
519 __cold
520 static void chunk_err(const struct extent_buffer *leaf,
521 		      const struct btrfs_chunk *chunk, u64 logical,
522 		      const char *fmt, ...)
523 {
524 	const struct btrfs_fs_info *fs_info = leaf->fs_info;
525 	bool is_sb;
526 	struct va_format vaf;
527 	va_list args;
528 	int i;
529 	int slot = -1;
530 
531 	/* Only the superblock eb can have such a small offset */
532 	is_sb = (leaf->start == BTRFS_SUPER_INFO_OFFSET);
533 
534 	if (!is_sb) {
535 		/*
536 		 * Get the slot number by iterating through all slots; this
537 		 * provides better readability.
538 		 */
539 		for (i = 0; i < btrfs_header_nritems(leaf); i++) {
540 			if (btrfs_item_ptr_offset(leaf, i) ==
541 					(unsigned long)chunk) {
542 				slot = i;
543 				break;
544 			}
545 		}
546 	}
547 	va_start(args, fmt);
548 	vaf.fmt = fmt;
549 	vaf.va = &args;
550 
551 	if (is_sb)
552 		btrfs_crit(fs_info,
553 		"corrupt superblock syschunk array: chunk_start=%llu, %pV",
554 			   logical, &vaf);
555 	else
556 		btrfs_crit(fs_info,
557 	"corrupt leaf: root=%llu block=%llu slot=%d chunk_start=%llu, %pV",
558 			   BTRFS_CHUNK_TREE_OBJECTID, leaf->start, slot,
559 			   logical, &vaf);
560 	va_end(args);
561 }
562 
563 /*
564  * The common chunk check which could also work on super block sys chunk array.
565  *
566  * Return -EUCLEAN if anything is corrupted.
567  * Return 0 if everything is OK.
568  */
569 int btrfs_check_chunk_valid(struct extent_buffer *leaf,
570 			    struct btrfs_chunk *chunk, u64 logical)
571 {
572 	struct btrfs_fs_info *fs_info = leaf->fs_info;
573 	u64 length;
574 	u64 stripe_len;
575 	u16 num_stripes;
576 	u16 sub_stripes;
577 	u64 type;
578 	u64 features;
579 	bool mixed = false;
580 
581 	length = btrfs_chunk_length(leaf, chunk);
582 	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
583 	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
584 	sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
585 	type = btrfs_chunk_type(leaf, chunk);
586 
587 	if (!num_stripes) {
588 		chunk_err(leaf, chunk, logical,
589 			  "invalid chunk num_stripes, have %u", num_stripes);
590 		return -EUCLEAN;
591 	}
592 	if (!IS_ALIGNED(logical, fs_info->sectorsize)) {
593 		chunk_err(leaf, chunk, logical,
594 		"invalid chunk logical, have %llu should be aligned to %u",
595 			  logical, fs_info->sectorsize);
596 		return -EUCLEAN;
597 	}
598 	if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) {
599 		chunk_err(leaf, chunk, logical,
600 			  "invalid chunk sectorsize, have %u expect %u",
601 			  btrfs_chunk_sector_size(leaf, chunk),
602 			  fs_info->sectorsize);
603 		return -EUCLEAN;
604 	}
605 	if (!length || !IS_ALIGNED(length, fs_info->sectorsize)) {
606 		chunk_err(leaf, chunk, logical,
607 			  "invalid chunk length, have %llu", length);
608 		return -EUCLEAN;
609 	}
610 	if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
611 		chunk_err(leaf, chunk, logical,
612 			  "invalid chunk stripe length: %llu",
613 			  stripe_len);
614 		return -EUCLEAN;
615 	}
616 	if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
617 	    type) {
618 		chunk_err(leaf, chunk, logical,
619 			  "unrecognized chunk type: 0x%llx",
620 			  ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
621 			    BTRFS_BLOCK_GROUP_PROFILE_MASK) &
622 			  btrfs_chunk_type(leaf, chunk));
623 		return -EUCLEAN;
624 	}
625 
626 	if (!is_power_of_2(type & BTRFS_BLOCK_GROUP_PROFILE_MASK) &&
627 	    (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) != 0) {
628 		chunk_err(leaf, chunk, logical,
629 		"invalid chunk profile flag: 0x%llx, expect 0 or 1 bit set",
630 			  type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
631 		return -EUCLEAN;
632 	}
633 	if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) {
634 		chunk_err(leaf, chunk, logical,
635 	"missing chunk type flag, have 0x%llx one bit must be set in 0x%llx",
636 			  type, BTRFS_BLOCK_GROUP_TYPE_MASK);
637 		return -EUCLEAN;
638 	}
639 
640 	if ((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
641 	    (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) {
642 		chunk_err(leaf, chunk, logical,
643 			  "system chunk with data or metadata type: 0x%llx",
644 			  type);
645 		return -EUCLEAN;
646 	}
647 
648 	features = btrfs_super_incompat_flags(fs_info->super_copy);
649 	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
650 		mixed = true;
651 
652 	if (!mixed) {
653 		if ((type & BTRFS_BLOCK_GROUP_METADATA) &&
654 		    (type & BTRFS_BLOCK_GROUP_DATA)) {
655 			chunk_err(leaf, chunk, logical,
656 			"mixed chunk type in non-mixed mode: 0x%llx", type);
657 			return -EUCLEAN;
658 		}
659 	}
660 
661 	if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
662 	    (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) ||
663 	    (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
664 	    (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
665 	    (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) ||
666 	    ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 && num_stripes != 1)) {
667 		chunk_err(leaf, chunk, logical,
668 			"invalid num_stripes:sub_stripes %u:%u for profile %llu",
669 			num_stripes, sub_stripes,
670 			type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
671 		return -EUCLEAN;
672 	}
673 
674 	return 0;
675 }
676 
677 __printf(3, 4)
678 __cold
679 static void dev_item_err(const struct extent_buffer *eb, int slot,
680 			 const char *fmt, ...)
681 {
682 	struct btrfs_key key;
683 	struct va_format vaf;
684 	va_list args;
685 
686 	btrfs_item_key_to_cpu(eb, &key, slot);
687 	va_start(args, fmt);
688 
689 	vaf.fmt = fmt;
690 	vaf.va = &args;
691 
692 	btrfs_crit(eb->fs_info,
693 	"corrupt %s: root=%llu block=%llu slot=%d devid=%llu %pV",
694 		btrfs_header_level(eb) == 0 ? "leaf" : "node",
695 		btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
696 		key.objectid, &vaf);
697 	va_end(args);
698 }
699 
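/*
 * Validate a DEV_ITEM: objectid must be BTRFS_DEV_ITEMS_OBJECTID, the devid
 * stored in the item must match key->offset, and bytes_used must not exceed
 * total_bytes.
 */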
700 static int check_dev_item(struct extent_buffer *leaf,
701 			  struct btrfs_key *key, int slot)
702 {
703 	struct btrfs_dev_item *ditem;
704 
705 	if (key->objectid != BTRFS_DEV_ITEMS_OBJECTID) {
706 		dev_item_err(leaf, slot,
707 			     "invalid objectid: has=%llu expect=%llu",
708 			     key->objectid, BTRFS_DEV_ITEMS_OBJECTID);
709 		return -EUCLEAN;
710 	}
711 	ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item);
712 	if (btrfs_device_id(leaf, ditem) != key->offset) {
713 		dev_item_err(leaf, slot,
714 			     "devid mismatch: key has=%llu item has=%llu",
715 			     key->offset, btrfs_device_id(leaf, ditem));
716 		return -EUCLEAN;
717 	}
718 
719 	/*
720 	 * For device total_bytes, we don't have a reliable way to check it, as
721 	 * it can be 0 for device removal. The device size check can only be
722 	 * done by the dev extents check.
723 	 */
724 	if (btrfs_device_bytes_used(leaf, ditem) >
725 	    btrfs_device_total_bytes(leaf, ditem)) {
726 		dev_item_err(leaf, slot,
727 			     "invalid bytes used: have %llu expect [0, %llu]",
728 			     btrfs_device_bytes_used(leaf, ditem),
729 			     btrfs_device_total_bytes(leaf, ditem));
730 		return -EUCLEAN;
731 	}
732 	/*
733 	 * Remaining members like io_align/type/gen/dev_group aren't really
734 	 * utilized.  Skip them to make later usage of them easier.
735 	 */
736 	return 0;
737 }
738 
739 /* Inode item error output has the same format as dir_item_err() */
740 #define inode_item_err(fs_info, eb, slot, fmt, ...)			\
741 	dir_item_err(eb, slot, fmt, __VA_ARGS__)
742 
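/*
 * Validate an INODE_ITEM: key objectid range, zero key offset, generation
 * and transid against the superblock generation, mode bits, nlink for
 * directories, and inode flags.
 */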
743 static int check_inode_item(struct extent_buffer *leaf,
744 			    struct btrfs_key *key, int slot)
745 {
746 	struct btrfs_fs_info *fs_info = leaf->fs_info;
747 	struct btrfs_inode_item *iitem;
748 	u64 super_gen = btrfs_super_generation(fs_info->super_copy);
749 	u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777);
750 	u32 mode;
751 
752 	if ((key->objectid < BTRFS_FIRST_FREE_OBJECTID ||
753 	     key->objectid > BTRFS_LAST_FREE_OBJECTID) &&
754 	    key->objectid != BTRFS_ROOT_TREE_DIR_OBJECTID &&
755 	    key->objectid != BTRFS_FREE_INO_OBJECTID) {
756 		generic_err(leaf, slot,
757 	"invalid key objectid: has %llu expect %llu or [%llu, %llu] or %llu",
758 			    key->objectid, BTRFS_ROOT_TREE_DIR_OBJECTID,
759 			    BTRFS_FIRST_FREE_OBJECTID,
760 			    BTRFS_LAST_FREE_OBJECTID,
761 			    BTRFS_FREE_INO_OBJECTID);
762 		return -EUCLEAN;
763 	}
764 	if (key->offset != 0) {
765 		inode_item_err(fs_info, leaf, slot,
766 			"invalid key offset: has %llu expect 0",
767 			key->offset);
768 		return -EUCLEAN;
769 	}
770 	iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);
771 
772 	/* Here we use super block generation + 1 to handle log tree */
773 	if (btrfs_inode_generation(leaf, iitem) > super_gen + 1) {
774 		inode_item_err(fs_info, leaf, slot,
775 			"invalid inode generation: has %llu expect (0, %llu]",
776 			       btrfs_inode_generation(leaf, iitem),
777 			       super_gen + 1);
778 		return -EUCLEAN;
779 	}
780 	/* Note: for ROOT_TREE_DIR_ITEM, mkfs could set its transid to 0 */
781 	if (btrfs_inode_transid(leaf, iitem) > super_gen + 1) {
782 		inode_item_err(fs_info, leaf, slot,
783 			"invalid inode transid: has %llu expect [0, %llu]",
784 			       btrfs_inode_transid(leaf, iitem), super_gen + 1);
785 		return -EUCLEAN;
786 	}
787 
788 	/*
789 	 * For size and nbytes it's better not to be too strict, as for a dir
790 	 * item its size/nbytes can easily get out of sync without affecting
791 	 * anything in the fs. So here we skip the check.
792 	 */
793 	mode = btrfs_inode_mode(leaf, iitem);
794 	if (mode & ~valid_mask) {
795 		inode_item_err(fs_info, leaf, slot,
796 			       "unknown mode bit detected: 0x%x",
797 			       mode & ~valid_mask);
798 		return -EUCLEAN;
799 	}
800 
801 	/*
802 	 * S_IFMT is not bit mapped so we can't completely rely on is_power_of_2,
803 	 * but is_power_of_2() can save us from checking FIFO/CHR/DIR/REG.
804 	 * We only need to check BLK, LNK and SOCK.
805 	 */
806 	if (!is_power_of_2(mode & S_IFMT)) {
807 		if (!S_ISLNK(mode) && !S_ISBLK(mode) && !S_ISSOCK(mode)) {
808 			inode_item_err(fs_info, leaf, slot,
809 			"invalid mode: has 0%o expect valid S_IF* bit(s)",
810 				       mode & S_IFMT);
811 			return -EUCLEAN;
812 		}
813 	}
814 	if (S_ISDIR(mode) && btrfs_inode_nlink(leaf, iitem) > 1) {
815 		inode_item_err(fs_info, leaf, slot,
816 		       "invalid nlink: has %u expect no more than 1 for dir",
817 			btrfs_inode_nlink(leaf, iitem));
818 		return -EUCLEAN;
819 	}
820 	if (btrfs_inode_flags(leaf, iitem) & ~BTRFS_INODE_FLAG_MASK) {
821 		inode_item_err(fs_info, leaf, slot,
822 			       "unknown flags detected: 0x%llx",
823 			       btrfs_inode_flags(leaf, iitem) &
824 			       ~BTRFS_INODE_FLAG_MASK);
825 		return -EUCLEAN;
826 	}
827 	return 0;
828 }
829 
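/*
 * Validate a ROOT_ITEM: tree id, reloc tree offset, item size, the various
 * generation fields against the superblock generation, bytenr alignment,
 * levels, and root flags.
 */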
830 static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
831 			   int slot)
832 {
833 	struct btrfs_fs_info *fs_info = leaf->fs_info;
834 	struct btrfs_root_item ri;
835 	const u64 valid_root_flags = BTRFS_ROOT_SUBVOL_RDONLY |
836 				     BTRFS_ROOT_SUBVOL_DEAD;
837 
838 	/* No such tree id */
839 	if (key->objectid == 0) {
840 		generic_err(leaf, slot, "invalid root id 0");
841 		return -EUCLEAN;
842 	}
843 
844 	/*
845 	 * Some older kernels may create ROOT_ITEMs with a non-zero offset, so
846 	 * here we only check the offset for reloc trees, whose key->offset
847 	 * must be a valid tree id.
848 	 */
849 	if (key->objectid == BTRFS_TREE_RELOC_OBJECTID && key->offset == 0) {
850 		generic_err(leaf, slot, "invalid root id 0 for reloc tree");
851 		return -EUCLEAN;
852 	}
853 
854 	if (btrfs_item_size_nr(leaf, slot) != sizeof(ri)) {
855 		generic_err(leaf, slot,
856 			    "invalid root item size, have %u expect %zu",
857 			    btrfs_item_size_nr(leaf, slot), sizeof(ri));
		return -EUCLEAN;
858 	}
859 
860 	read_extent_buffer(leaf, &ri, btrfs_item_ptr_offset(leaf, slot),
861 			   sizeof(ri));
862 
863 	/* Generation related */
864 	if (btrfs_root_generation(&ri) >
865 	    btrfs_super_generation(fs_info->super_copy) + 1) {
866 		generic_err(leaf, slot,
867 			"invalid root generation, have %llu expect (0, %llu]",
868 			    btrfs_root_generation(&ri),
869 			    btrfs_super_generation(fs_info->super_copy) + 1);
870 		return -EUCLEAN;
871 	}
872 	if (btrfs_root_generation_v2(&ri) >
873 	    btrfs_super_generation(fs_info->super_copy) + 1) {
874 		generic_err(leaf, slot,
875 		"invalid root v2 generation, have %llu expect (0, %llu]",
876 			    btrfs_root_generation_v2(&ri),
877 			    btrfs_super_generation(fs_info->super_copy) + 1);
878 		return -EUCLEAN;
879 	}
880 	if (btrfs_root_last_snapshot(&ri) >
881 	    btrfs_super_generation(fs_info->super_copy) + 1) {
882 		generic_err(leaf, slot,
883 		"invalid root last_snapshot, have %llu expect (0, %llu]",
884 			    btrfs_root_last_snapshot(&ri),
885 			    btrfs_super_generation(fs_info->super_copy) + 1);
886 		return -EUCLEAN;
887 	}
888 
889 	/* Alignment and level check */
890 	if (!IS_ALIGNED(btrfs_root_bytenr(&ri), fs_info->sectorsize)) {
891 		generic_err(leaf, slot,
892 		"invalid root bytenr, have %llu expect to be aligned to %u",
893 			    btrfs_root_bytenr(&ri), fs_info->sectorsize);
894 		return -EUCLEAN;
895 	}
896 	if (btrfs_root_level(&ri) >= BTRFS_MAX_LEVEL) {
897 		generic_err(leaf, slot,
898 			    "invalid root level, have %u expect [0, %u]",
899 			    btrfs_root_level(&ri), BTRFS_MAX_LEVEL - 1);
900 		return -EUCLEAN;
901 	}
902 	if (ri.drop_level >= BTRFS_MAX_LEVEL) {
903 		generic_err(leaf, slot,
904 			    "invalid root level, have %u expect [0, %u]",
905 			    ri.drop_level, BTRFS_MAX_LEVEL - 1);
906 		return -EUCLEAN;
907 	}
908 
909 	/* Flags check */
910 	if (btrfs_root_flags(&ri) & ~valid_root_flags) {
911 		generic_err(leaf, slot,
912 			    "invalid root flags, have 0x%llx expect mask 0x%llx",
913 			    btrfs_root_flags(&ri), valid_root_flags);
914 		return -EUCLEAN;
915 	}
916 	return 0;
917 }
918 
919 __printf(3, 4)
920 __cold
921 static void extent_err(const struct extent_buffer *eb, int slot,
922 		       const char *fmt, ...)
923 {
924 	struct btrfs_key key;
925 	struct va_format vaf;
926 	va_list args;
927 	u64 bytenr;
928 	u64 len;
929 
930 	btrfs_item_key_to_cpu(eb, &key, slot);
931 	bytenr = key.objectid;
932 	if (key.type == BTRFS_METADATA_ITEM_KEY ||
933 	    key.type == BTRFS_TREE_BLOCK_REF_KEY ||
934 	    key.type == BTRFS_SHARED_BLOCK_REF_KEY)
935 		len = eb->fs_info->nodesize;
936 	else
937 		len = key.offset;
938 	va_start(args, fmt);
939 
940 	vaf.fmt = fmt;
941 	vaf.va = &args;
942 
943 	btrfs_crit(eb->fs_info,
944 	"corrupt %s: block=%llu slot=%d extent bytenr=%llu len=%llu %pV",
945 		btrfs_header_level(eb) == 0 ? "leaf" : "node",
946 		eb->start, slot, bytenr, len, &vaf);
947 	va_end(args);
948 }
949 
950 static int check_extent_item(struct extent_buffer *leaf,
951 			     struct btrfs_key *key, int slot)
952 {
953 	struct btrfs_fs_info *fs_info = leaf->fs_info;
954 	struct btrfs_extent_item *ei;
955 	bool is_tree_block = false;
956 	unsigned long ptr;	/* Current pointer inside inline refs */
957 	unsigned long end;	/* Extent item end */
958 	const u32 item_size = btrfs_item_size_nr(leaf, slot);
959 	u64 flags;
960 	u64 generation;
961 	u64 total_refs;		/* Total refs in btrfs_extent_item */
962 	u64 inline_refs = 0;	/* found total inline refs */
963 
964 	if (key->type == BTRFS_METADATA_ITEM_KEY &&
965 	    !btrfs_fs_incompat(fs_info, SKINNY_METADATA)) {
966 		generic_err(leaf, slot,
967 "invalid key type, METADATA_ITEM type invalid when SKINNY_METADATA feature disabled");
968 		return -EUCLEAN;
969 	}
970 	/* key->objectid is the bytenr for both key types */
971 	if (!IS_ALIGNED(key->objectid, fs_info->sectorsize)) {
972 		generic_err(leaf, slot,
973 		"invalid key objectid, have %llu expect to be aligned to %u",
974 			   key->objectid, fs_info->sectorsize);
975 		return -EUCLEAN;
976 	}
977 
978 	/* key->offset is tree level for METADATA_ITEM_KEY */
979 	if (key->type == BTRFS_METADATA_ITEM_KEY &&
980 	    key->offset >= BTRFS_MAX_LEVEL) {
981 		extent_err(leaf, slot,
982 			   "invalid tree level, have %llu expect [0, %u]",
983 			   key->offset, BTRFS_MAX_LEVEL - 1);
984 		return -EUCLEAN;
985 	}
986 
987 	/*
988 	 * EXTENT/METADATA_ITEM consists of:
989 	 * 1) One btrfs_extent_item
990 	 *    Records the total refs, type and generation of the extent.
991 	 *
992 	 * 2) One btrfs_tree_block_info (for EXTENT_ITEM and tree backref only)
993 	 *    Records the first key and level of the tree block.
994 	 *
995 	 * 3) Zero or more btrfs_extent_inline_ref(s)
996 	 *    Each inline ref is one btrfs_extent_inline_ref, which shows:
997 	 *    3.1) The ref type, one of the 4:
998 	 *         TREE_BLOCK_REF	Tree block only
999 	 *         SHARED_BLOCK_REF	Tree block only
1000 	 *         EXTENT_DATA_REF	Data only
1001 	 *         SHARED_DATA_REF	Data only
1002 	 *    3.2) Ref type specific data
1003 	 *         Either using btrfs_extent_inline_ref::offset, or specific
1004 	 *         data structure.
1005 	 */
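	/*
	 * For example, a tree block owned by a single tree typically looks
	 * like this with SKINNY_METADATA enabled (illustrative):
	 *   key:  (bytenr, METADATA_ITEM, level)
	 *   data: btrfs_extent_item { refs=1, generation, flags=TREE_BLOCK }
	 *         followed by one inline TREE_BLOCK_REF whose offset is the
	 *         owning root id
	 */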
1006 	if (item_size < sizeof(*ei)) {
1007 		extent_err(leaf, slot,
1008 			   "invalid item size, have %u expect [%zu, %u)",
1009 			   item_size, sizeof(*ei),
1010 			   BTRFS_LEAF_DATA_SIZE(fs_info));
1011 		return -EUCLEAN;
1012 	}
1013 	end = item_size + btrfs_item_ptr_offset(leaf, slot);
1014 
1015 	/* Checks against extent_item */
1016 	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
1017 	flags = btrfs_extent_flags(leaf, ei);
1018 	total_refs = btrfs_extent_refs(leaf, ei);
1019 	generation = btrfs_extent_generation(leaf, ei);
1020 	if (generation > btrfs_super_generation(fs_info->super_copy) + 1) {
1021 		extent_err(leaf, slot,
1022 			   "invalid generation, have %llu expect (0, %llu]",
1023 			   generation,
1024 			   btrfs_super_generation(fs_info->super_copy) + 1);
1025 		return -EUCLEAN;
1026 	}
1027 	if (!is_power_of_2(flags & (BTRFS_EXTENT_FLAG_DATA |
1028 				    BTRFS_EXTENT_FLAG_TREE_BLOCK))) {
1029 		extent_err(leaf, slot,
1030 		"invalid extent flag, have 0x%llx expect 1 bit set in 0x%llx",
1031 			flags, BTRFS_EXTENT_FLAG_DATA |
1032 			BTRFS_EXTENT_FLAG_TREE_BLOCK);
1033 		return -EUCLEAN;
1034 	}
1035 	is_tree_block = !!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK);
1036 	if (is_tree_block) {
1037 		if (key->type == BTRFS_EXTENT_ITEM_KEY &&
1038 		    key->offset != fs_info->nodesize) {
1039 			extent_err(leaf, slot,
1040 				   "invalid extent length, have %llu expect %u",
1041 				   key->offset, fs_info->nodesize);
1042 			return -EUCLEAN;
1043 		}
1044 	} else {
1045 		if (key->type != BTRFS_EXTENT_ITEM_KEY) {
1046 			extent_err(leaf, slot,
1047 			"invalid key type, have %u expect %u for data backref",
1048 				   key->type, BTRFS_EXTENT_ITEM_KEY);
1049 			return -EUCLEAN;
1050 		}
1051 		if (!IS_ALIGNED(key->offset, fs_info->sectorsize)) {
1052 			extent_err(leaf, slot,
1053 			"invalid extent length, have %llu expect aligned to %u",
1054 				   key->offset, fs_info->sectorsize);
1055 			return -EUCLEAN;
1056 		}
1057 	}
1058 	ptr = (unsigned long)(struct btrfs_extent_item *)(ei + 1);
1059 
1060 	/* Check the special case of btrfs_tree_block_info */
1061 	if (is_tree_block && key->type != BTRFS_METADATA_ITEM_KEY) {
1062 		struct btrfs_tree_block_info *info;
1063 
1064 		info = (struct btrfs_tree_block_info *)ptr;
1065 		if (btrfs_tree_block_level(leaf, info) >= BTRFS_MAX_LEVEL) {
1066 			extent_err(leaf, slot,
1067 			"invalid tree block info level, have %u expect [0, %u]",
1068 				   btrfs_tree_block_level(leaf, info),
1069 				   BTRFS_MAX_LEVEL - 1);
1070 			return -EUCLEAN;
1071 		}
1072 		ptr = (unsigned long)(struct btrfs_tree_block_info *)(info + 1);
1073 	}
1074 
1075 	/* Check inline refs */
1076 	while (ptr < end) {
1077 		struct btrfs_extent_inline_ref *iref;
1078 		struct btrfs_extent_data_ref *dref;
1079 		struct btrfs_shared_data_ref *sref;
1080 		u64 dref_offset;
1081 		u64 inline_offset;
1082 		u8 inline_type;
1083 
1084 		if (ptr + sizeof(*iref) > end) {
1085 			extent_err(leaf, slot,
1086 "inline ref item overflows extent item, ptr %lu iref size %zu end %lu",
1087 				   ptr, sizeof(*iref), end);
1088 			return -EUCLEAN;
1089 		}
1090 		iref = (struct btrfs_extent_inline_ref *)ptr;
1091 		inline_type = btrfs_extent_inline_ref_type(leaf, iref);
1092 		inline_offset = btrfs_extent_inline_ref_offset(leaf, iref);
1093 		if (ptr + btrfs_extent_inline_ref_size(inline_type) > end) {
1094 			extent_err(leaf, slot,
1095 "inline ref item overflows extent item, ptr %lu iref size %u end %lu",
1096 				   ptr, inline_type, end);
1097 			return -EUCLEAN;
1098 		}
1099 
1100 		switch (inline_type) {
1101 		/* inline_offset is subvolid of the owner, no need to check */
1102 		case BTRFS_TREE_BLOCK_REF_KEY:
1103 			inline_refs++;
1104 			break;
1105 		/* Contains parent bytenr */
1106 		case BTRFS_SHARED_BLOCK_REF_KEY:
1107 			if (!IS_ALIGNED(inline_offset, fs_info->sectorsize)) {
1108 				extent_err(leaf, slot,
1109 		"invalid tree parent bytenr, have %llu expect aligned to %u",
1110 					   inline_offset, fs_info->sectorsize);
1111 				return -EUCLEAN;
1112 			}
1113 			inline_refs++;
1114 			break;
1115 		/*
1116 		 * Contains owner subvolid, owner key objectid, adjusted offset.
1117 		 * The only obvious corruption that can happen is in that offset.
1118 		 */
1119 		case BTRFS_EXTENT_DATA_REF_KEY:
1120 			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1121 			dref_offset = btrfs_extent_data_ref_offset(leaf, dref);
1122 			if (!IS_ALIGNED(dref_offset, fs_info->sectorsize)) {
1123 				extent_err(leaf, slot,
1124 		"invalid data ref offset, have %llu expect aligned to %u",
1125 					   dref_offset, fs_info->sectorsize);
1126 				return -EUCLEAN;
1127 			}
1128 			inline_refs += btrfs_extent_data_ref_count(leaf, dref);
1129 			break;
1130 		/* Contains parent bytenr and ref count */
1131 		case BTRFS_SHARED_DATA_REF_KEY:
1132 			sref = (struct btrfs_shared_data_ref *)(iref + 1);
1133 			if (!IS_ALIGNED(inline_offset, fs_info->sectorsize)) {
1134 				extent_err(leaf, slot,
1135 		"invalid data parent bytenr, have %llu expect aligned to %u",
1136 					   inline_offset, fs_info->sectorsize);
1137 				return -EUCLEAN;
1138 			}
1139 			inline_refs += btrfs_shared_data_ref_count(leaf, sref);
1140 			break;
1141 		default:
1142 			extent_err(leaf, slot, "unknown inline ref type: %u",
1143 				   inline_type);
1144 			return -EUCLEAN;
1145 		}
1146 		ptr += btrfs_extent_inline_ref_size(inline_type);
1147 	}
1148 	/* No padding is allowed */
1149 	if (ptr != end) {
1150 		extent_err(leaf, slot,
1151 			   "invalid extent item size, padding bytes found");
1152 		return -EUCLEAN;
1153 	}
1154 
1155 	/* Finally, check the inline refs against total refs */
1156 	if (inline_refs > total_refs) {
1157 		extent_err(leaf, slot,
1158 			"invalid extent refs, have %llu expect >= inline %llu",
1159 			   total_refs, inline_refs);
1160 		return -EUCLEAN;
1161 	}
1162 	return 0;
1163 }
1164 
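/*
 * Keyed backrefs with trivial payloads: TREE_BLOCK_REF and SHARED_BLOCK_REF
 * carry no item data at all, SHARED_DATA_REF carries only a
 * btrfs_shared_data_ref (the ref count).  Everything else lives in the key,
 * so only the key and the item size need checking.
 */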
1165 static int check_simple_keyed_refs(struct extent_buffer *leaf,
1166 				   struct btrfs_key *key, int slot)
1167 {
1168 	u32 expect_item_size = 0;
1169 
1170 	if (key->type == BTRFS_SHARED_DATA_REF_KEY)
1171 		expect_item_size = sizeof(struct btrfs_shared_data_ref);
1172 
1173 	if (btrfs_item_size_nr(leaf, slot) != expect_item_size) {
1174 		generic_err(leaf, slot,
1175 		"invalid item size, have %u expect %u for key type %u",
1176 			    btrfs_item_size_nr(leaf, slot),
1177 			    expect_item_size, key->type);
1178 		return -EUCLEAN;
1179 	}
1180 	if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize)) {
1181 		generic_err(leaf, slot,
1182 "invalid key objectid for shared block ref, have %llu expect aligned to %u",
1183 			    key->objectid, leaf->fs_info->sectorsize);
1184 		return -EUCLEAN;
1185 	}
1186 	if (key->type != BTRFS_TREE_BLOCK_REF_KEY &&
1187 	    !IS_ALIGNED(key->offset, leaf->fs_info->sectorsize)) {
1188 		extent_err(leaf, slot,
1189 		"invalid tree parent bytenr, have %llu expect aligned to %u",
1190 			   key->offset, leaf->fs_info->sectorsize);
1191 		return -EUCLEAN;
1192 	}
1193 	return 0;
1194 }
1195 
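/*
 * An EXTENT_DATA_REF item may hold several btrfs_extent_data_ref structs
 * back to back; key->offset is the hash of (root, objectid, offset) and
 * every ref packed in the item must hash to that same value.
 */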
1196 static int check_extent_data_ref(struct extent_buffer *leaf,
1197 				 struct btrfs_key *key, int slot)
1198 {
1199 	struct btrfs_extent_data_ref *dref;
1200 	unsigned long ptr = btrfs_item_ptr_offset(leaf, slot);
1201 	const unsigned long end = ptr + btrfs_item_size_nr(leaf, slot);
1202 
1203 	if (btrfs_item_size_nr(leaf, slot) % sizeof(*dref) != 0) {
1204 		generic_err(leaf, slot,
1205 	"invalid item size, have %u expect aligned to %zu for key type %u",
1206 			    btrfs_item_size_nr(leaf, slot),
1207 			    sizeof(*dref), key->type);
		return -EUCLEAN;
1208 	}
1209 	if (!IS_ALIGNED(key->objectid, leaf->fs_info->sectorsize)) {
1210 		generic_err(leaf, slot,
1211 "invalid key objectid for shared block ref, have %llu expect aligned to %u",
1212 			    key->objectid, leaf->fs_info->sectorsize);
1213 		return -EUCLEAN;
1214 	}
1215 	for (; ptr < end; ptr += sizeof(*dref)) {
1216 		u64 root_objectid;
1217 		u64 owner;
1218 		u64 offset;
1219 		u64 hash;
1220 
1221 		dref = (struct btrfs_extent_data_ref *)ptr;
1222 		root_objectid = btrfs_extent_data_ref_root(leaf, dref);
1223 		owner = btrfs_extent_data_ref_objectid(leaf, dref);
1224 		offset = btrfs_extent_data_ref_offset(leaf, dref);
1225 		hash = hash_extent_data_ref(root_objectid, owner, offset);
1226 		if (hash != key->offset) {
1227 			extent_err(leaf, slot,
1228 	"invalid extent data ref hash, item has 0x%016llx key has 0x%016llx",
1229 				   hash, key->offset);
1230 			return -EUCLEAN;
1231 		}
1232 		if (!IS_ALIGNED(offset, leaf->fs_info->sectorsize)) {
1233 			extent_err(leaf, slot,
1234 	"invalid extent data backref offset, have %llu expect aligned to %u",
1235 				   offset, leaf->fs_info->sectorsize);
			return -EUCLEAN;
1236 		}
1237 	}
1238 	return 0;
1239 }
1240 
1241 /*
1242  * Common point to switch the item-specific validation.
1243  */
1244 static int check_leaf_item(struct extent_buffer *leaf,
1245 			   struct btrfs_key *key, int slot,
1246 			   struct btrfs_key *prev_key)
1247 {
1248 	int ret = 0;
1249 	struct btrfs_chunk *chunk;
1250 
1251 	switch (key->type) {
1252 	case BTRFS_EXTENT_DATA_KEY:
1253 		ret = check_extent_data_item(leaf, key, slot, prev_key);
1254 		break;
1255 	case BTRFS_EXTENT_CSUM_KEY:
1256 		ret = check_csum_item(leaf, key, slot, prev_key);
1257 		break;
1258 	case BTRFS_DIR_ITEM_KEY:
1259 	case BTRFS_DIR_INDEX_KEY:
1260 	case BTRFS_XATTR_ITEM_KEY:
1261 		ret = check_dir_item(leaf, key, slot);
1262 		break;
1263 	case BTRFS_BLOCK_GROUP_ITEM_KEY:
1264 		ret = check_block_group_item(leaf, key, slot);
1265 		break;
1266 	case BTRFS_CHUNK_ITEM_KEY:
1267 		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
1268 		ret = btrfs_check_chunk_valid(leaf, chunk, key->offset);
1269 		break;
1270 	case BTRFS_DEV_ITEM_KEY:
1271 		ret = check_dev_item(leaf, key, slot);
1272 		break;
1273 	case BTRFS_INODE_ITEM_KEY:
1274 		ret = check_inode_item(leaf, key, slot);
1275 		break;
1276 	case BTRFS_ROOT_ITEM_KEY:
1277 		ret = check_root_item(leaf, key, slot);
1278 		break;
1279 	case BTRFS_EXTENT_ITEM_KEY:
1280 	case BTRFS_METADATA_ITEM_KEY:
1281 		ret = check_extent_item(leaf, key, slot);
1282 		break;
1283 	case BTRFS_TREE_BLOCK_REF_KEY:
1284 	case BTRFS_SHARED_DATA_REF_KEY:
1285 	case BTRFS_SHARED_BLOCK_REF_KEY:
1286 		ret = check_simple_keyed_refs(leaf, key, slot);
1287 		break;
1288 	case BTRFS_EXTENT_DATA_REF_KEY:
1289 		ret = check_extent_data_ref(leaf, key, slot);
1290 		break;
1291 	}
1292 	return ret;
1293 }
1294 
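/*
 * Validate a leaf: header level, empty-leaf rules per tree owner, key
 * ordering, item offsets/sizes, and (only when @check_item_data is true) the
 * per-item content via check_leaf_item().
 */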
1295 static int check_leaf(struct extent_buffer *leaf, bool check_item_data)
1296 {
1297 	struct btrfs_fs_info *fs_info = leaf->fs_info;
1298 	/* No valid key type is 0, so all keys should be larger than this key */
1299 	struct btrfs_key prev_key = {0, 0, 0};
1300 	struct btrfs_key key;
1301 	u32 nritems = btrfs_header_nritems(leaf);
1302 	int slot;
1303 
1304 	if (btrfs_header_level(leaf) != 0) {
1305 		generic_err(leaf, 0,
1306 			"invalid level for leaf, have %d expect 0",
1307 			btrfs_header_level(leaf));
1308 		return -EUCLEAN;
1309 	}
1310 
1311 	/*
1312 	 * Extent buffers from a relocation tree have an owner field that
1313 	 * corresponds to the subvolume tree they are based on. So just from an
1314 	 * extent buffer alone we cannot find out the id of the corresponding
1315 	 * subvolume tree, and thus cannot figure out whether the extent buffer
1316 	 * corresponds to the root of the relocation tree or not. So skip this
1317 	 * check for relocation trees.
1318 	 */
1319 	if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
1320 		u64 owner = btrfs_header_owner(leaf);
1321 
1322 		/* These trees must never be empty */
1323 		if (owner == BTRFS_ROOT_TREE_OBJECTID ||
1324 		    owner == BTRFS_CHUNK_TREE_OBJECTID ||
1325 		    owner == BTRFS_EXTENT_TREE_OBJECTID ||
1326 		    owner == BTRFS_DEV_TREE_OBJECTID ||
1327 		    owner == BTRFS_FS_TREE_OBJECTID ||
1328 		    owner == BTRFS_DATA_RELOC_TREE_OBJECTID) {
1329 			generic_err(leaf, 0,
1330 			"invalid root, root %llu must never be empty",
1331 				    owner);
1332 			return -EUCLEAN;
1333 		}
1334 		/* Unknown tree */
1335 		if (owner == 0) {
1336 			generic_err(leaf, 0,
1337 				"invalid owner, root 0 is not defined");
1338 			return -EUCLEAN;
1339 		}
1340 		return 0;
1341 	}
1342 
1343 	if (nritems == 0)
1344 		return 0;
1345 
1346 	/*
1347 	 * Check the following things to make sure this is a good leaf, and
1348 	 * leaf users won't need to bother with similar sanity checks:
1349 	 *
1350 	 * 1) key ordering
1351 	 * 2) item offset and size
1352 	 *    No overlap, no hole, all inside the leaf.
1353 	 * 3) item content
1354 	 *    If possible, do comprehensive sanity check.
1355 	 *    NOTE: All checks must only rely on the item data itself.
1356 	 */
1357 	for (slot = 0; slot < nritems; slot++) {
1358 		u32 item_end_expected;
1359 		int ret;
1360 
1361 		btrfs_item_key_to_cpu(leaf, &key, slot);
1362 
1363 		/* Make sure the keys are in the right order */
1364 		if (btrfs_comp_cpu_keys(&prev_key, &key) >= 0) {
1365 			generic_err(leaf, slot,
1366 	"bad key order, prev (%llu %u %llu) current (%llu %u %llu)",
1367 				prev_key.objectid, prev_key.type,
1368 				prev_key.offset, key.objectid, key.type,
1369 				key.offset);
1370 			return -EUCLEAN;
1371 		}
1372 
1373 		/*
1374 		 * Make sure the offset and ends are right, remember that the
1375 		 * item data starts at the end of the leaf and grows towards the
1376 		 * front.
1377 		 */
1378 		if (slot == 0)
1379 			item_end_expected = BTRFS_LEAF_DATA_SIZE(fs_info);
1380 		else
1381 			item_end_expected = btrfs_item_offset_nr(leaf,
1382 								 slot - 1);
1383 		if (btrfs_item_end_nr(leaf, slot) != item_end_expected) {
1384 			generic_err(leaf, slot,
1385 				"unexpected item end, have %u expect %u",
1386 				btrfs_item_end_nr(leaf, slot),
1387 				item_end_expected);
1388 			return -EUCLEAN;
1389 		}
1390 
1391 		/*
1392 		 * Check to make sure that we don't point outside of the leaf,
1393 		 * just in case all the items are consistent with each other, but
1394 		 * all point outside of the leaf.
1395 		 */
1396 		if (btrfs_item_end_nr(leaf, slot) >
1397 		    BTRFS_LEAF_DATA_SIZE(fs_info)) {
1398 			generic_err(leaf, slot,
1399 			"slot end outside of leaf, have %u expect range [0, %u]",
1400 				btrfs_item_end_nr(leaf, slot),
1401 				BTRFS_LEAF_DATA_SIZE(fs_info));
1402 			return -EUCLEAN;
1403 		}
1404 
1405 		/* Also check if the item pointer overlaps with the btrfs_item. */
1406 		if (btrfs_item_nr_offset(slot) + sizeof(struct btrfs_item) >
1407 		    btrfs_item_ptr_offset(leaf, slot)) {
1408 			generic_err(leaf, slot,
1409 		"slot overlaps with its data, item end %lu data start %lu",
1410 				btrfs_item_nr_offset(slot) +
1411 				sizeof(struct btrfs_item),
1412 				btrfs_item_ptr_offset(leaf, slot));
1413 			return -EUCLEAN;
1414 		}
1415 
1416 		if (check_item_data) {
1417 			/*
1418 			 * Check if the item size and content meet other
1419 			 * criteria
1420 			 */
1421 			ret = check_leaf_item(leaf, &key, slot, &prev_key);
1422 			if (ret < 0)
1423 				return ret;
1424 		}
1425 
1426 		prev_key.objectid = key.objectid;
1427 		prev_key.type = key.type;
1428 		prev_key.offset = key.offset;
1429 	}
1430 
1431 	return 0;
1432 }
1433 
1434 int btrfs_check_leaf_full(struct extent_buffer *leaf)
1435 {
1436 	return check_leaf(leaf, true);
1437 }
1438 ALLOW_ERROR_INJECTION(btrfs_check_leaf_full, ERRNO);
1439 
1440 int btrfs_check_leaf_relaxed(struct extent_buffer *leaf)
1441 {
1442 	return check_leaf(leaf, false);
1443 }
1444 
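/*
 * Validate a node: level must be within [1, BTRFS_MAX_LEVEL - 1], nritems
 * within bounds, and every child pointer must be non-zero, sector aligned
 * and in strictly increasing key order.
 */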
1445 int btrfs_check_node(struct extent_buffer *node)
1446 {
1447 	struct btrfs_fs_info *fs_info = node->fs_info;
1448 	unsigned long nr = btrfs_header_nritems(node);
1449 	struct btrfs_key key, next_key;
1450 	int slot;
1451 	int level = btrfs_header_level(node);
1452 	u64 bytenr;
1453 	int ret = 0;
1454 
1455 	if (level <= 0 || level >= BTRFS_MAX_LEVEL) {
1456 		generic_err(node, 0,
1457 			"invalid level for node, have %d expect [1, %d]",
1458 			level, BTRFS_MAX_LEVEL - 1);
1459 		return -EUCLEAN;
1460 	}
1461 	if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(fs_info)) {
1462 		btrfs_crit(fs_info,
1463 "corrupt node: root=%llu block=%llu, nritems too %s, have %lu expect range [1,%u]",
1464 			   btrfs_header_owner(node), node->start,
1465 			   nr == 0 ? "small" : "large", nr,
1466 			   BTRFS_NODEPTRS_PER_BLOCK(fs_info));
1467 		return -EUCLEAN;
1468 	}
1469 
1470 	for (slot = 0; slot < nr - 1; slot++) {
1471 		bytenr = btrfs_node_blockptr(node, slot);
1472 		btrfs_node_key_to_cpu(node, &key, slot);
1473 		btrfs_node_key_to_cpu(node, &next_key, slot + 1);
1474 
1475 		if (!bytenr) {
1476 			generic_err(node, slot,
1477 				"invalid NULL node pointer");
1478 			ret = -EUCLEAN;
1479 			goto out;
1480 		}
1481 		if (!IS_ALIGNED(bytenr, fs_info->sectorsize)) {
1482 			generic_err(node, slot,
1483 			"unaligned pointer, have %llu should be aligned to %u",
1484 				bytenr, fs_info->sectorsize);
1485 			ret = -EUCLEAN;
1486 			goto out;
1487 		}
1488 
1489 		if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
1490 			generic_err(node, slot,
1491 	"bad key order, current (%llu %u %llu) next (%llu %u %llu)",
1492 				key.objectid, key.type, key.offset,
1493 				next_key.objectid, next_key.type,
1494 				next_key.offset);
1495 			ret = -EUCLEAN;
1496 			goto out;
1497 		}
1498 	}
1499 out:
1500 	return ret;
1501 }
1502 ALLOW_ERROR_INJECTION(btrfs_check_node, ERRNO);
1503