Lines matching refs: start
(Cross-reference hits for the identifier `start`; the function names point to fs/btrfs/extent_io.c in the Linux kernel. The leading number on each line is that file's own line number; `argument`, `member`, and `local` mark the kind of reference.)
80 eb->start, eb->len, atomic_read(&eb->refs), eb->bflags, in btrfs_extent_buffer_leak_debug_check()
95 state->start, state->end, state->state, in btrfs_extent_state_leak_debug_check()
103 #define btrfs_debug_check_extent_io_range(tree, start, end) \ argument
104 __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
106 struct extent_io_tree *tree, u64 start, u64 end) in __btrfs_debug_check_extent_io_range() argument
118 caller, btrfs_ino(BTRFS_I(inode)), isize, start, end); in __btrfs_debug_check_extent_io_range()
129 u64 start; member
157 changeset->bytes_changed += state->end - state->start + 1; in add_extent_changeset()
158 ret = ulist_add(&changeset->range_changed, state->start, state->end, in add_extent_changeset()
377 if (offset < entry->start) in tree_insert()
427 if (offset < entry->start) in __etree_search()
452 while (prev && offset < prev_entry->start) { in __etree_search()
503 if (other->end == state->start - 1 && in merge_state()
509 state->start = other->start; in merge_state()
518 if (other->start == state->end + 1 && in merge_state()
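The merge_state() hits (file lines 503–518) show both adjacency tests: a state absorbs a left neighbour ending at state->start - 1 and a right neighbour starting at state->end + 1. A minimal userspace sketch of that test, assuming (as the kernel also requires) that the two states carry identical bits:

```c
#include <stdint.h>
#include <stdbool.h>

struct range { uint64_t start, end; unsigned bits; };

/* Mirror of merge_state()'s adjacency test: two ranges may merge only
 * when they carry identical state bits and touch with no gap. */
static bool can_merge(const struct range *left, const struct range *right)
{
    return left->bits == right->bits && left->end == right->start - 1;
}

/* Absorb the right-adjacent range into the left one. */
static void merge(struct range *left, const struct range *right)
{
    left->end = right->end;   /* [left->start, right->end] is now one range */
}
```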
547 struct extent_state *state, u64 start, u64 end, in insert_state() argument
554 if (end < start) { in insert_state()
556 "insert state: end < start %llu %llu", end, start); in insert_state()
559 state->start = start; in insert_state()
570 found->start, found->end, start, end); in insert_state()
599 prealloc->start = orig->start; in split_state()
602 orig->start = split; in split_state()
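split_state() (file lines 599–602) cuts one state in two at `split`: the preallocated state takes the low half and the original keeps the high half. The matching `prealloc->end = split - 1` assignment is absent above only because it contains no reference to `start`. A sketch of the arithmetic:

```c
#include <stdint.h>

struct range { uint64_t start, end; };

/* Sketch of split_state(): carve [orig->start, split - 1] into 'lo'
 * and shrink 'orig' to [split, orig->end]. The caller guarantees
 * orig->start < split <= orig->end. */
static void split_range(struct range *orig, struct range *lo, uint64_t split)
{
    lo->start   = orig->start;
    lo->end     = split - 1;
    orig->start = split;
}
```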
639 u64 range = state->end - state->start + 1; in clear_state_bit()
695 int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in __clear_extent_bit() argument
708 btrfs_debug_check_extent_io_range(tree, start, end); in __clear_extent_bit()
709 trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits); in __clear_extent_bit()
741 cached->start <= start && cached->end > start) { in __clear_extent_bit()
754 node = tree_search(tree, start); in __clear_extent_bit()
759 if (state->start > end) in __clear_extent_bit()
761 WARN_ON(state->end < start); in __clear_extent_bit()
786 if (state->start < start) { in __clear_extent_bit()
789 err = split_state(tree, state, prealloc, start); in __clear_extent_bit()
809 if (state->start <= end && state->end > end) { in __clear_extent_bit()
829 start = last_end + 1; in __clear_extent_bit()
830 if (start <= end && state && !need_resched()) in __clear_extent_bit()
834 if (start > end) in __clear_extent_bit()
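__clear_extent_bit() (file lines 695–834) walks every state intersecting [start, end], splitting partially covered states at both edges, and advances with start = last_end + 1 until start > end. A compilable sketch of that walk over a sorted array instead of the kernel's rbtree, with the edge-splitting elided:

```c
#include <stdint.h>
#include <stddef.h>

struct state { uint64_t start, end; unsigned bits; };

/* Array-backed stand-in for the extent state tree (sorted, non-overlapping).
 * Clears 'bits' in every state intersecting [start, end], advancing exactly
 * the way __clear_extent_bit() does: start = last_end + 1 after each state. */
static void clear_range_bits(struct state *states, size_t n,
                             uint64_t start, uint64_t end, unsigned bits)
{
    for (size_t i = 0; i < n && start <= end; i++) {
        struct state *st = &states[i];

        if (st->end < start)
            continue;              /* wholly before the range: skip  */
        if (st->start > end)
            break;                 /* wholly after the range: done   */

        uint64_t last_end = st->end;
        st->bits &= ~bits;         /* the actual clear               */
        start = last_end + 1;      /* resume past the handled state  */
    }
}
```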
868 static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in wait_extent_bit() argument
874 btrfs_debug_check_extent_io_range(tree, start, end); in wait_extent_bit()
883 node = tree_search(tree, start); in wait_extent_bit()
890 if (state->start > end) in wait_extent_bit()
894 start = state->start; in wait_extent_bit()
900 start = state->end + 1; in wait_extent_bit()
902 if (start > end) in wait_extent_bit()
925 u64 range = state->end - state->start + 1; in set_state_bits()
964 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in __set_extent_bit() argument
978 btrfs_debug_check_extent_io_range(tree, start, end); in __set_extent_bit()
979 trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits); in __set_extent_bit()
996 if (state->start <= start && state->end > start && in __set_extent_bit()
1006 node = tree_search_for_insert(tree, start, &p, &parent); in __set_extent_bit()
1010 err = insert_state(tree, prealloc, start, end, in __set_extent_bit()
1021 last_start = state->start; in __set_extent_bit()
1030 if (state->start == start && state->end <= end) { in __set_extent_bit()
1032 *failed_start = state->start; in __set_extent_bit()
1042 start = last_end + 1; in __set_extent_bit()
1044 if (start < end && state && state->start == start && in __set_extent_bit()
1066 if (state->start < start) { in __set_extent_bit()
1068 *failed_start = start; in __set_extent_bit()
1078 start = state->end + 1; in __set_extent_bit()
1085 err = split_state(tree, state, prealloc, start); in __set_extent_bit()
1098 start = last_end + 1; in __set_extent_bit()
1100 if (start < end && state && state->start == start && in __set_extent_bit()
1113 if (state->start > start) { in __set_extent_bit()
1127 err = insert_state(tree, prealloc, start, this_end, in __set_extent_bit()
1134 start = this_end + 1; in __set_extent_bit()
1143 if (state->start <= end && state->end > end) { in __set_extent_bit()
1145 *failed_start = start; in __set_extent_bit()
1164 if (start > end) in __set_extent_bit()
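__set_extent_bit() (file lines 964–1164) distinguishes four shapes of overlap between the candidate state and the requested range, visible at file lines 1030, 1066, 1113 and 1143. A classification-only sketch; the kernel then merges, splits, or inserts a new state accordingly:

```c
#include <stdint.h>

enum overlap {
    COVERED_FROM_START,  /* state->start == start && state->end <= end */
    STRADDLES_START,     /* state->start <  start: split at 'start'    */
    GAP_BEFORE_STATE,    /* state->start >  start: insert a new state  */
    STRADDLES_END,       /* state->start <= end  && state->end  >  end */
};

/* The four shapes __set_extent_bit() handles for a candidate state
 * [s_start, s_end] against the requested [start, end]. */
static enum overlap classify(uint64_t s_start, uint64_t s_end,
                             uint64_t start, uint64_t end)
{
    if (s_start < start)
        return s_end > end ? STRADDLES_END : STRADDLES_START;
    if (s_start > start)
        return GAP_BEFORE_STATE;
    return s_end <= end ? COVERED_FROM_START : STRADDLES_END;
}
```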
1180 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_bit() argument
1184 return __set_extent_bit(tree, start, end, bits, 0, failed_start, in set_extent_bit()
1207 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in convert_extent_bit() argument
1221 btrfs_debug_check_extent_io_range(tree, start, end); in convert_extent_bit()
1222 trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits, in convert_extent_bit()
1242 if (state->start <= start && state->end > start && in convert_extent_bit()
1253 node = tree_search_for_insert(tree, start, &p, &parent); in convert_extent_bit()
1260 err = insert_state(tree, prealloc, start, end, in convert_extent_bit()
1270 last_start = state->start; in convert_extent_bit()
1279 if (state->start == start && state->end <= end) { in convert_extent_bit()
1285 start = last_end + 1; in convert_extent_bit()
1286 if (start < end && state && state->start == start && in convert_extent_bit()
1308 if (state->start < start) { in convert_extent_bit()
1314 err = split_state(tree, state, prealloc, start); in convert_extent_bit()
1327 start = last_end + 1; in convert_extent_bit()
1328 if (start < end && state && state->start == start && in convert_extent_bit()
1341 if (state->start > start) { in convert_extent_bit()
1358 err = insert_state(tree, prealloc, start, this_end, in convert_extent_bit()
1364 start = this_end + 1; in convert_extent_bit()
1373 if (state->start <= end && state->end > end) { in convert_extent_bit()
1392 if (start > end) in convert_extent_bit()
1408 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in set_record_extent_bits() argument
1419 return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS, in set_record_extent_bits()
1423 int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_bits_nowait() argument
1426 return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, in set_extent_bits_nowait()
1430 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_bit() argument
1434 return __clear_extent_bit(tree, start, end, bits, wake, delete, in clear_extent_bit()
1438 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in clear_record_extent_bits() argument
1447 return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS, in clear_record_extent_bits()
1455 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in lock_extent_bits() argument
1462 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, in lock_extent_bits()
1467 start = failed_start; in lock_extent_bits()
1470 WARN_ON(start > end); in lock_extent_bits()
1475 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end) in try_lock_extent() argument
1480 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED, in try_lock_extent()
1483 if (failed_start > start) in try_lock_extent()
1484 clear_extent_bit(tree, start, failed_start - 1, in try_lock_extent()
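try_lock_extent() (file lines 1475–1484) tries to set EXTENT_LOCKED exclusively; on a conflict it receives failed_start back and unwinds whatever prefix it did manage to lock (file lines 1483–1484). A byte-per-block userspace analogue of that contract:

```c
#include <stdint.h>
#include <stdbool.h>
#include <string.h>

/* Byte-per-block stand-in for EXTENT_LOCKED. Mirrors try_lock_extent():
 * lock all of [start, end], or roll back the partially locked prefix
 * and report failure. */
static bool try_lock_range(uint8_t *locked, uint64_t start, uint64_t end)
{
    for (uint64_t i = start; i <= end; i++) {
        if (locked[i]) {                  /* conflict: failed_start == i */
            if (i > start)                /* undo [start, failed_start-1] */
                memset(&locked[start], 0, i - start);
            return false;
        }
        locked[i] = 1;
    }
    return true;
}
```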
1491 void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end) in extent_range_clear_dirty_for_io() argument
1493 unsigned long index = start >> PAGE_SHIFT; in extent_range_clear_dirty_for_io()
1506 void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end) in extent_range_redirty_for_io() argument
1508 unsigned long index = start >> PAGE_SHIFT; in extent_range_redirty_for_io()
1528 u64 start, unsigned bits) in find_first_extent_bit_state() argument
1537 node = tree_search(tree, start); in find_first_extent_bit_state()
1543 if (state->end >= start && (state->state & bits)) in find_first_extent_bit_state()
1561 int find_first_extent_bit(struct extent_io_tree *tree, u64 start, in find_first_extent_bit() argument
1571 if (state->end == start - 1 && extent_state_in_tree(state)) { in find_first_extent_bit()
1584 state = find_first_extent_bit_state(tree, start, bits); in find_first_extent_bit()
1588 *start_ret = state->start; in find_first_extent_bit()
1612 int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start, in find_contiguous_extent_bit() argument
1619 state = find_first_extent_bit_state(tree, start, bits); in find_contiguous_extent_bit()
1621 *start_ret = state->start; in find_contiguous_extent_bit()
1624 if (state->start > (*end_ret + 1)) in find_contiguous_extent_bit()
1649 void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start, in find_first_clear_extent_bit() argument
1659 node = __etree_search(tree, start, &next, &prev, NULL, NULL); in find_first_clear_extent_bit()
1686 if (in_range(start, state->start, state->end - state->start + 1)) { in find_first_clear_extent_bit()
1693 start = state->end + 1; in find_first_clear_extent_bit()
1704 *start_ret = state->start; in find_first_clear_extent_bit()
1736 if (state->end >= start && !(state->state & bits)) { in find_first_clear_extent_bit()
1739 *end_ret = state->start - 1; in find_first_clear_extent_bit()
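The test at file line 1686 uses in_range() to ask whether `start` falls inside an existing state. An equivalent helper, written here in the overflow-safe form (the kernel macro computes first + len directly):

```c
#include <stdint.h>
#include <stdbool.h>

/* Equivalent of in_range(x, base, len) as used at file line 1686:
 * true when x lies inside [base, base + len). */
static inline bool in_range(uint64_t x, uint64_t base, uint64_t len)
{
    return x >= base && x - base < len;
}
```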
1757 bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start, in btrfs_find_delalloc_range() argument
1763 u64 cur_start = *start; in btrfs_find_delalloc_range()
1781 if (found && (state->start != cur_start || in btrfs_find_delalloc_range()
1791 *start = state->start; in btrfs_find_delalloc_range()
1799 total_bytes += state->end - state->start + 1; in btrfs_find_delalloc_range()
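btrfs_find_delalloc_range() (file lines 1757–1799) rewinds *start to the first delalloc state it finds, then extends over strictly contiguous delalloc states until it hits a gap, a non-delalloc state, or max_bytes. A sketch over a sorted array, with the EXTENT_DELALLOC bit reduced to a boolean:

```c
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

struct state { uint64_t start, end; bool delalloc; };

/* Sketch of btrfs_find_delalloc_range() over a sorted state array. */
static bool find_delalloc(const struct state *s, size_t n,
                          uint64_t *start, uint64_t *end, uint64_t max_bytes)
{
    uint64_t total = 0, cur = *start;
    bool found = false;

    for (size_t i = 0; i < n; i++) {
        if (s[i].end < cur)
            continue;               /* before the search point        */
        if (!s[i].delalloc)
            break;                  /* non-delalloc state ends it     */
        if (found && s[i].start != cur)
            break;                  /* gap between delalloc states    */
        if (!found) {
            *start = s[i].start;    /* first hit rewinds *start       */
            found = true;
        }
        *end = s[i].end;
        total += s[i].end - s[i].start + 1;
        if (total >= max_bytes)
            break;                  /* cap the accumulated range      */
        cur = s[i].end + 1;
    }
    return found;
}
```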
1817 u64 start, u64 end) in __unlock_for_delalloc() argument
1819 unsigned long index = start >> PAGE_SHIFT; in __unlock_for_delalloc()
1861 struct page *locked_page, u64 *start, in find_lock_delalloc_range() argument
1875 delalloc_start = *start; in find_lock_delalloc_range()
1879 if (!found || delalloc_end <= *start) { in find_lock_delalloc_range()
1880 *start = delalloc_start; in find_lock_delalloc_range()
1891 if (delalloc_start < *start) in find_lock_delalloc_range()
1892 delalloc_start = *start; in find_lock_delalloc_range()
1935 *start = delalloc_start; in find_lock_delalloc_range()
2019 void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end, in extent_clear_unlock_delalloc() argument
2024 clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL); in extent_clear_unlock_delalloc()
2027 start >> PAGE_SHIFT, end >> PAGE_SHIFT, in extent_clear_unlock_delalloc()
2037 u64 *start, u64 search_end, u64 max_bytes, in count_range_bits() argument
2042 u64 cur_start = *start; in count_range_bits()
2065 if (state->start > search_end) in count_range_bits()
2067 if (contig && found && state->start > last + 1) in count_range_bits()
2071 max(cur_start, state->start); in count_range_bits()
2075 *start = max(cur_start, state->start); in count_range_bits()
2095 int set_state_failrec(struct extent_io_tree *tree, u64 start, in set_state_failrec() argument
2107 node = tree_search(tree, start); in set_state_failrec()
2113 if (state->start != start) { in set_state_failrec()
2123 struct io_failure_record *get_state_failrec(struct extent_io_tree *tree, u64 start) in get_state_failrec() argument
2134 node = tree_search(tree, start); in get_state_failrec()
2140 if (state->start != start) { in get_state_failrec()
2157 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, in test_range_bit() argument
2165 if (cached && extent_state_in_tree(cached) && cached->start <= start && in test_range_bit()
2166 cached->end > start) in test_range_bit()
2169 node = tree_search(tree, start); in test_range_bit()
2170 while (node && start <= end) { in test_range_bit()
2173 if (filled && state->start > start) { in test_range_bit()
2178 if (state->start > end) in test_range_bit()
2193 start = state->end + 1; in test_range_bit()
2194 if (start > end) in test_range_bit()
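test_range_bit() (file lines 2157–2194) has two modes: with `filled` set, every block of [start, end] must be covered by states carrying the bit; without it, one intersecting state carrying the bit suffices. A sketch of both modes over a sorted array:

```c
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

struct state { uint64_t start, end; unsigned bits; };

/* Sketch of test_range_bit(): 'filled' demands gap-free coverage of
 * [start, end] with 'bit' set everywhere; otherwise any hit wins. */
static bool range_has_bit(const struct state *s, size_t n,
                          uint64_t start, uint64_t end,
                          unsigned bit, bool filled)
{
    for (size_t i = 0; i < n && start <= end; i++) {
        if (s[i].end < start)
            continue;
        if (filled && s[i].start > start)
            return false;           /* hole in the coverage        */
        if (s[i].start > end)
            break;
        if (s[i].bits & bit) {
            if (!filled)
                return true;        /* 'any' mode: one hit is enough */
        } else if (filled) {
            return false;           /* covered, but bit missing    */
        }
        start = s[i].end + 1;       /* advance past this state     */
    }
    return filled && start > end;   /* filled: reached past 'end'? */
}
```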
2213 u64 start = page_offset(page); in check_page_uptodate() local
2214 u64 end = start + PAGE_SIZE - 1; in check_page_uptodate()
2215 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL)) in check_page_uptodate()
2226 set_state_failrec(failure_tree, rec->start, NULL); in free_io_failure()
2227 ret = clear_extent_bits(failure_tree, rec->start, in free_io_failure()
2228 rec->start + rec->len - 1, in free_io_failure()
2233 ret = clear_extent_bits(io_tree, rec->start, in free_io_failure()
2234 rec->start + rec->len - 1, in free_io_failure()
2253 int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start, in repair_io_failure() argument
2327 ino, start, in repair_io_failure()
2337 u64 start = eb->start; in btrfs_repair_eb_io_failure() local
2347 ret = repair_io_failure(fs_info, 0, start, PAGE_SIZE, start, p, in btrfs_repair_eb_io_failure()
2348 start - page_offset(p), mirror_num); in btrfs_repair_eb_io_failure()
2351 start += PAGE_SIZE; in btrfs_repair_eb_io_failure()
2363 struct extent_io_tree *io_tree, u64 start, in clean_io_failure() argument
2378 failrec = get_state_failrec(failure_tree, start); in clean_io_failure()
2388 failrec->start); in clean_io_failure()
2396 failrec->start, in clean_io_failure()
2400 if (state && state->start <= failrec->start && in clean_io_failure()
2401 state->end >= failrec->start + failrec->len - 1) { in clean_io_failure()
2405 repair_io_failure(fs_info, ino, start, failrec->len, in clean_io_failure()
2423 void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end) in btrfs_free_io_failure_record() argument
2433 state = find_first_extent_bit_state(failure_tree, start, EXTENT_DIRTY); in btrfs_free_io_failure_record()
2435 if (state->start > end) in btrfs_free_io_failure_record()
2452 u64 start, u64 end) in btrfs_get_io_failure_record() argument
2463 failrec = get_state_failrec(failure_tree, start); in btrfs_get_io_failure_record()
2467 failrec->logical, failrec->start, failrec->len, in btrfs_get_io_failure_record()
2482 failrec->start = start; in btrfs_get_io_failure_record()
2483 failrec->len = end - start + 1; in btrfs_get_io_failure_record()
2489 em = lookup_extent_mapping(em_tree, start, failrec->len); in btrfs_get_io_failure_record()
2496 if (em->start > start || em->start + em->len <= start) { in btrfs_get_io_failure_record()
2506 logical = start - em->start; in btrfs_get_io_failure_record()
2516 logical, start, failrec->len); in btrfs_get_io_failure_record()
2522 ret = set_extent_bits(failure_tree, start, end, in btrfs_get_io_failure_record()
2525 ret = set_state_failrec(failure_tree, start, failrec); in btrfs_get_io_failure_record()
2527 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED); in btrfs_get_io_failure_record()
2645 u64 start, u64 end, int failed_mirror, in btrfs_submit_read_repair() argument
2660 "repair read error: read error at %llu", start); in btrfs_submit_read_repair()
2664 failrec = btrfs_get_io_failure_record(inode, start, end); in btrfs_submit_read_repair()
2694 repair_io_bio->logical = failrec->start; in btrfs_submit_read_repair()
2712 void end_extent_writepage(struct page *page, int err, u64 start, u64 end) in end_extent_writepage() argument
2717 btrfs_writepage_endio_finish_ordered(page, start, end, uptodate); in end_extent_writepage()
2740 u64 start; in end_bio_extent_writepage() local
2766 start = page_offset(page); in end_bio_extent_writepage()
2767 end = start + bvec->bv_offset + bvec->bv_len - 1; in end_bio_extent_writepage()
2769 end_extent_writepage(page, error, start, end); in end_bio_extent_writepage()
2777 endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len, in endio_readpage_release_extent() argument
2781 u64 end = start + len - 1; in endio_readpage_release_extent()
2784 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC); in endio_readpage_release_extent()
2785 unlock_extent_cached_atomic(tree, start, end, &cached); in endio_readpage_release_extent()
2806 u64 start; in end_bio_extent_readpage() local
2844 start = page_offset(page); in end_bio_extent_readpage()
2845 end = start + bvec->bv_offset + bvec->bv_len - 1; in end_bio_extent_readpage()
2852 start, end, mirror); in end_bio_extent_readpage()
2855 offset, page, start, end, mirror); in end_bio_extent_readpage()
2860 failure_tree, tree, start, in end_bio_extent_readpage()
2881 start - page_offset(page), in end_bio_extent_readpage()
2882 start, end, mirror, in end_bio_extent_readpage()
2925 endio_readpage_release_extent(tree, start, in end_bio_extent_readpage()
2926 end - start + 1, 0); in end_bio_extent_readpage()
2928 extent_start = start; in end_bio_extent_readpage()
2929 extent_len = end + 1 - start; in end_bio_extent_readpage()
2930 } else if (extent_start + extent_len == start) { in end_bio_extent_readpage()
2931 extent_len += end + 1 - start; in end_bio_extent_readpage()
2935 extent_start = start; in end_bio_extent_readpage()
2936 extent_len = end + 1 - start; in end_bio_extent_readpage()
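end_bio_extent_readpage() (file lines 2925–2936) batches per-page completions: a page contiguous with the accumulated extent extends it, anything else flushes the batch through endio_readpage_release_extent() and starts a new one. The same coalescing pattern in isolation, with a printf standing in for the release call:

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for endio_readpage_release_extent(). */
static void release(uint64_t start, uint64_t len)
{
    printf("release [%llu, %llu)\n",
           (unsigned long long)start, (unsigned long long)(start + len));
}

/* Merge adjacent per-page ranges so contiguous pages are released in
 * one call instead of one call per page. */
static void coalesce_pages(const uint64_t *page_start, int nr,
                           uint64_t page_size)
{
    uint64_t extent_start = 0, extent_len = 0;

    for (int i = 0; i < nr; i++) {
        uint64_t start = page_start[i];

        if (extent_len && extent_start + extent_len == start) {
            extent_len += page_size;        /* contiguous: extend batch */
        } else {
            if (extent_len)
                release(extent_start, extent_len);
            extent_start = start;           /* start a new batch */
            extent_len = page_size;
        }
    }
    if (extent_len)
        release(extent_start, extent_len);  /* flush the tail */
}
```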
3111 u64 start, u64 len, struct extent_map **em_cached) in __get_extent_map() argument
3117 if (extent_map_in_tree(em) && start >= em->start && in __get_extent_map()
3118 start < extent_map_end(em)) { in __get_extent_map()
3127 em = btrfs_get_extent(BTRFS_I(inode), page, pg_offset, start, len); in __get_extent_map()
3147 u64 start = page_offset(page); in btrfs_do_readpage() local
3148 const u64 end = start + PAGE_SIZE - 1; in btrfs_do_readpage()
3149 u64 cur = start; in btrfs_do_readpage()
3169 unlock_extent(tree, start, end); in btrfs_do_readpage()
3212 extent_offset = cur - em->start; in btrfs_do_readpage()
3272 *prev_em_start != em->start) in btrfs_do_readpage()
3276 *prev_em_start = em->start; in btrfs_do_readpage()
3347 u64 start, u64 end, in contiguous_readpages() argument
3356 btrfs_lock_and_flush_ordered_range(inode, start, end, NULL); in contiguous_readpages()
3463 u64 start = page_offset(page); in __extent_writepage_io() local
3464 u64 page_end = start + PAGE_SIZE - 1; in __extent_writepage_io()
3466 u64 cur = start; in __extent_writepage_io()
3478 ret = btrfs_writepage_cow_fixup(page, start, page_end); in __extent_writepage_io()
3512 extent_offset = cur - em->start; in __extent_writepage_io()
3579 u64 start = page_offset(page); in __extent_writepage() local
3580 u64 page_end = start + PAGE_SIZE - 1; in __extent_writepage()
3615 ret = writepage_delalloc(BTRFS_I(inode), page, wbc, start, in __extent_writepage()
3636 end_extent_writepage(page, ret, start, page_end); in __extent_writepage()
3875 u64 offset = eb->start; in write_one_eb()
3878 unsigned long start, end; in write_one_eb() local
3897 start = btrfs_item_nr_offset(nritems); in write_one_eb()
3899 memzero_extent_buffer(eb, start, end - start); in write_one_eb()
4281 int extent_write_locked_range(struct inode *inode, u64 start, u64 end, in extent_write_locked_range() argument
4287 unsigned long nr_pages = (end - start + PAGE_SIZE) >> in extent_write_locked_range()
4298 .range_start = start, in extent_write_locked_range()
4306 while (start <= end) { in extent_write_locked_range()
4307 page = find_get_page(mapping, start >> PAGE_SHIFT); in extent_write_locked_range()
4311 btrfs_writepage_endio_finish_ordered(page, start, in extent_write_locked_range()
4312 start + PAGE_SIZE - 1, 1); in extent_write_locked_range()
4316 start += PAGE_SIZE; in extent_write_locked_range()
4386 u64 start = page_offset(page); in extent_invalidatepage() local
4387 u64 end = start + PAGE_SIZE - 1; in extent_invalidatepage()
4390 start += ALIGN(offset, blocksize); in extent_invalidatepage()
4391 if (start > end) in extent_invalidatepage()
4394 lock_extent_bits(tree, start, end, &cached_state); in extent_invalidatepage()
4396 clear_extent_bit(tree, start, end, EXTENT_LOCKED | EXTENT_DELALLOC | in extent_invalidatepage()
4409 u64 start = page_offset(page); in try_release_extent_state() local
4410 u64 end = start + PAGE_SIZE - 1; in try_release_extent_state()
4413 if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) { in try_release_extent_state()
4420 ret = __clear_extent_bit(tree, start, end, in try_release_extent_state()
4443 u64 start = page_offset(page); in try_release_extent_mapping() local
4444 u64 end = start + PAGE_SIZE - 1; in try_release_extent_mapping()
4452 while (start <= end) { in try_release_extent_mapping()
4456 len = end - start + 1; in try_release_extent_mapping()
4458 em = lookup_extent_mapping(map, start, len); in try_release_extent_mapping()
4464 em->start != start) { in try_release_extent_mapping()
4469 if (test_range_bit(tree, em->start, in try_release_extent_mapping()
4508 start = extent_map_end(em); in try_release_extent_mapping()
4669 u64 start, u64 len) in extent_fiemap() argument
4673 u64 max = start + len; in extent_fiemap()
4713 start = round_down(start, btrfs_inode_sectorsize(inode)); in extent_fiemap()
4714 len = round_up(max, btrfs_inode_sectorsize(inode)) - start; in extent_fiemap()
4761 lock_extent_bits(&inode->io_tree, start, start + len - 1, in extent_fiemap()
4764 em = get_extent_skip_holes(inode, start, last_for_get_extent); in extent_fiemap()
4776 if (em->start >= max || extent_map_end(em) < off) in extent_fiemap()
4785 em_start = max(em->start, off); in extent_fiemap()
4794 offset_in_extent = em_start - em->start; in extent_fiemap()
4821 (em->start - em->orig_start); in extent_fiemap()
4874 unlock_extent_cached(&inode->io_tree, start, start + len - 1, in extent_fiemap()
4953 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start, in __alloc_extent_buffer() argument
4959 eb->start = start; in __alloc_extent_buffer()
5001 new = __alloc_extent_buffer(src->fs_info, src->start, src->len); in btrfs_clone_extent_buffer()
5025 u64 start, unsigned long len) in __alloc_dummy_extent_buffer() argument
5031 eb = __alloc_extent_buffer(fs_info, start, len); in __alloc_dummy_extent_buffer()
5054 u64 start) in alloc_dummy_extent_buffer() argument
5056 return __alloc_dummy_extent_buffer(fs_info, start, fs_info->nodesize); in alloc_dummy_extent_buffer()
5112 u64 start) in find_extent_buffer() argument
5118 start >> PAGE_SHIFT); in find_extent_buffer()
5150 u64 start) in alloc_test_extent_buffer() argument
5155 eb = find_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
5158 eb = alloc_dummy_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
5170 start >> PAGE_SHIFT, eb); in alloc_test_extent_buffer()
5174 exists = find_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
5191 u64 start) in alloc_extent_buffer() argument
5196 unsigned long index = start >> PAGE_SHIFT; in alloc_extent_buffer()
5204 if (!IS_ALIGNED(start, fs_info->sectorsize)) { in alloc_extent_buffer()
5205 btrfs_err(fs_info, "bad tree block start %llu", start); in alloc_extent_buffer()
5209 eb = find_extent_buffer(fs_info, start); in alloc_extent_buffer()
5213 eb = __alloc_extent_buffer(fs_info, start, len); in alloc_extent_buffer()
5278 start >> PAGE_SHIFT, eb); in alloc_extent_buffer()
5282 exists = find_extent_buffer(fs_info, start); in alloc_extent_buffer()
5334 eb->start >> PAGE_SHIFT); in release_extent_buffer()
5597 static bool report_eb_range(const struct extent_buffer *eb, unsigned long start, in report_eb_range() argument
5602 eb->start, eb->len, start, len); in report_eb_range()
5616 unsigned long start, unsigned long len) in check_eb_range() argument
5621 if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len)) in check_eb_range()
5622 return report_eb_range(eb, start, len); in check_eb_range()
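check_eb_range() (file lines 5616–5622) rejects ranges that overflow or run past eb->len. The same check in plain C; __builtin_add_overflow() is what the kernel's check_add_overflow() resolves to on GCC and Clang:

```c
#include <stdint.h>
#include <stdbool.h>

/* Overflow-safe bounds check in the shape of check_eb_range():
 * accept [start, start + len) only if the sum does not wrap and
 * stays inside a buffer of eb_len bytes. */
static bool range_ok(uint64_t start, uint64_t len, uint64_t eb_len)
{
    uint64_t end;

    if (__builtin_add_overflow(start, len, &end))
        return false;          /* start + len wrapped around */
    return end <= eb_len;      /* must stay inside the buffer */
}
```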
5628 unsigned long start, unsigned long len) in read_extent_buffer() argument
5635 unsigned long i = start >> PAGE_SHIFT; in read_extent_buffer()
5637 if (check_eb_range(eb, start, len)) { in read_extent_buffer()
5646 offset = offset_in_page(start); in read_extent_buffer()
5664 unsigned long start, unsigned long len) in read_extent_buffer_to_user_nofault() argument
5671 unsigned long i = start >> PAGE_SHIFT; in read_extent_buffer_to_user_nofault()
5674 WARN_ON(start > eb->len); in read_extent_buffer_to_user_nofault()
5675 WARN_ON(start + len > eb->start + eb->len); in read_extent_buffer_to_user_nofault()
5677 offset = offset_in_page(start); in read_extent_buffer_to_user_nofault()
5699 unsigned long start, unsigned long len) in memcmp_extent_buffer() argument
5706 unsigned long i = start >> PAGE_SHIFT; in memcmp_extent_buffer()
5709 if (check_eb_range(eb, start, len)) in memcmp_extent_buffer()
5712 offset = offset_in_page(start); in memcmp_extent_buffer()
5754 unsigned long start, unsigned long len) in write_extent_buffer() argument
5761 unsigned long i = start >> PAGE_SHIFT; in write_extent_buffer()
5763 if (check_eb_range(eb, start, len)) in write_extent_buffer()
5766 offset = offset_in_page(start); in write_extent_buffer()
5783 void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start, in memzero_extent_buffer() argument
5790 unsigned long i = start >> PAGE_SHIFT; in memzero_extent_buffer()
5792 if (check_eb_range(eb, start, len)) in memzero_extent_buffer()
5795 offset = offset_in_page(start); in memzero_extent_buffer()
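read/write/memzero_extent_buffer() (file lines 5628–5795) all share one page walk: split the logical offset into a page index and an in-page offset, then move min(len, PAGE_SIZE - offset) per page. A self-contained read-side sketch, assuming the backing pages are already mapped:

```c
#include <string.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* The page walk shared by read/write/memzero_extent_buffer():
 * 'pages' stands in for eb->pages. Only the first page is read from
 * a non-zero offset; every later page starts at 0. */
static void eb_read(char *const *pages, unsigned long start,
                    void *dst, unsigned long len)
{
    unsigned long i = start >> PAGE_SHIFT;          /* first page index  */
    unsigned long offset = start & (PAGE_SIZE - 1); /* offset_in_page()  */
    char *out = dst;

    while (len > 0) {
        unsigned long cur = PAGE_SIZE - offset;     /* room in this page */
        if (cur > len)
            cur = len;
        memcpy(out, pages[i] + offset, cur);
        out += cur;
        len -= cur;
        offset = 0;                                 /* next pages: from 0 */
        i++;
    }
}
```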
5875 unsigned long start, unsigned long nr, in eb_bitmap_offset() argument
5887 offset = start + byte_offset; in eb_bitmap_offset()
5899 int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start, in extent_buffer_test_bit() argument
5907 eb_bitmap_offset(eb, start, nr, &i, &offset); in extent_buffer_test_bit()
5921 void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start, in extent_buffer_bitmap_set() argument
5932 eb_bitmap_offset(eb, start, pos, &i, &offset); in extent_buffer_bitmap_set()
5964 unsigned long start, unsigned long pos, in extent_buffer_bitmap_clear() argument
5975 eb_bitmap_offset(eb, start, pos, &i, &offset); in extent_buffer_bitmap_clear()
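eb_bitmap_offset() (file lines 5875–5887) locates bit `nr` of a bitmap that begins at byte `start` of the buffer: the bit's byte lives at start + nr/8, which is then split into a page index and an in-page offset for the callers above (test_bit, bitmap_set, bitmap_clear). A direct sketch:

```c
#include <stddef.h>

#define PAGE_SHIFT    12
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define BITS_PER_BYTE 8

/* Mirror of eb_bitmap_offset(): map (bitmap base 'start', bit 'nr')
 * to the page index and in-page byte offset holding that bit. */
static void bitmap_offset(unsigned long start, unsigned long nr,
                          unsigned long *page_index, size_t *page_offset)
{
    size_t byte = start + nr / BITS_PER_BYTE;  /* BIT_BYTE(nr) in-lined */

    *page_index  = byte >> PAGE_SHIFT;
    *page_offset = byte & (PAGE_SIZE - 1);     /* offset_in_page()      */
}
```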