Lines matching refs: end — fs/btrfs/extent_io.c. Each entry shows the file line number, the matching source line, the enclosing function, and (where present) whether this occurrence of "end" is an argument, a local, or a struct member.
95 state->start, state->end, state->state, in btrfs_extent_state_leak_debug_check()
103 #define btrfs_debug_check_extent_io_range(tree, start, end) \ argument
104 __btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
106 struct extent_io_tree *tree, u64 start, u64 end) in __btrfs_debug_check_extent_io_range() argument
115 if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) { in __btrfs_debug_check_extent_io_range()
118 caller, btrfs_ino(BTRFS_I(inode)), isize, start, end); in __btrfs_debug_check_extent_io_range()
130 u64 end; member
157 changeset->bytes_changed += state->end - state->start + 1; in add_extent_changeset()
158 ret = ulist_add(&changeset->range_changed, state->start, state->end, in add_extent_changeset()
379 else if (offset > entry->end) in tree_insert()
429 else if (offset > entry->end) in __etree_search()
442 while (prev && offset > prev_entry->end) { in __etree_search()
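The tree_insert()/__etree_search() hits above show that the extent state tree is ordered by each state's inclusive end offset: both descend by comparing the query offset against entry->end, and a miss resolves to the next state whose end is >= the offset. A minimal userspace sketch of that lookup contract, using a sorted array and binary search instead of the kernel's rbtree (all names here are illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct state { uint64_t start, end; };         /* inclusive range */

/* Find the first state whose end is >= offset, or NULL. */
static const struct state *find_first_end_ge(const struct state *s,
                                             size_t n, uint64_t offset)
{
    size_t lo = 0, hi = n;                     /* binary search */

    while (lo < hi) {
        size_t mid = lo + (hi - lo) / 2;

        if (s[mid].end < offset)               /* entirely before offset */
            lo = mid + 1;
        else
            hi = mid;
    }
    return lo < n ? &s[lo] : NULL;
}

int main(void)
{
    const struct state states[] = { {0, 4095}, {8192, 12287} };
    const struct state *st = find_first_end_ge(states, 2, 5000);

    /* 5000 falls in the gap, so the next state (8192..12287) is found */
    printf("%llu..%llu\n", (unsigned long long)st->start,
           (unsigned long long)st->end);
    return 0;
}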
503 if (other->end == state->start - 1 && in merge_state()
518 if (other->start == state->end + 1 && in merge_state()
524 state->end = other->end; in merge_state()
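merge_state() (lines 503-524 above) only coalesces states that touch exactly (other->end == state->start - 1 on one side, other->start == state->end + 1 on the other) and carry identical bits. A small sketch of the forward-merge case, on hypothetical userspace types rather than the kernel structs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct state { uint64_t start, end; unsigned bits; };

/* Forward merge: extend 'state' over 'other' if they touch exactly. */
static bool try_absorb_next(struct state *state, const struct state *other)
{
    if (other->start != state->end + 1 || other->bits != state->bits)
        return false;
    state->end = other->end;                   /* state->end = other->end */
    return true;
}

int main(void)
{
    struct state a = { 0, 4095, 0x1 };
    const struct state b = { 4096, 8191, 0x1 };

    if (try_absorb_next(&a, &b))
        printf("merged: %llu..%llu\n", (unsigned long long)a.start,
               (unsigned long long)a.end);
    return 0;
}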
547 struct extent_state *state, u64 start, u64 end, in insert_state() argument
554 if (end < start) { in insert_state()
556 "insert state: end < start %llu %llu", end, start); in insert_state()
560 state->end = end; in insert_state()
564 node = tree_insert(&tree->state, NULL, end, &state->rb_node, p, parent); in insert_state()
570 found->start, found->end, start, end); in insert_state()
600 prealloc->end = split - 1; in split_state()
604 node = tree_insert(&tree->state, &orig->rb_node, prealloc->end, in split_state()
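split_state() (lines 600-604) cuts one state in two without changing what the range covers: the preallocated state takes [start, split - 1] and the original keeps [split, end]. The arithmetic, as a standalone sketch:

#include <stdint.h>
#include <assert.h>

struct state { uint64_t start, end; };

static void split_at(struct state *orig, struct state *prealloc,
                     uint64_t split)
{
    assert(split > orig->start && split <= orig->end);
    prealloc->start = orig->start;
    prealloc->end = split - 1;      /* matches prealloc->end = split - 1 */
    orig->start = split;
}

int main(void)
{
    struct state orig = { 0, 8191 }, pre;

    split_at(&orig, &pre, 4096);    /* -> [0,4095] and [4096,8191] */
    return 0;
}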
639 u64 range = state->end - state->start + 1; in clear_state_bit()
695 int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in __clear_extent_bit() argument
708 btrfs_debug_check_extent_io_range(tree, start, end); in __clear_extent_bit()
709 trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits); in __clear_extent_bit()
741 cached->start <= start && cached->end > start) { in __clear_extent_bit()
759 if (state->start > end) in __clear_extent_bit()
761 WARN_ON(state->end < start); in __clear_extent_bit()
762 last_end = state->end; in __clear_extent_bit()
796 if (state->end <= end) { in __clear_extent_bit()
809 if (state->start <= end && state->end > end) { in __clear_extent_bit()
812 err = split_state(tree, state, prealloc, end + 1); in __clear_extent_bit()
830 if (start <= end && state && !need_resched()) in __clear_extent_bit()
834 if (start > end) in __clear_extent_bit()
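The __clear_extent_bit() hits sketch its control flow: find the first state overlapping start, split it if the range begins or ends inside it, clear the bits, then advance with start = last_end + 1 until start > end. A toy model of that walk over a sorted array, with the boundary splitting (the split_state() calls shown above) omitted for brevity; illustrative types, not the kernel API:

#include <stdint.h>
#include <stddef.h>

struct state { uint64_t start, end; unsigned bits; };

static void clear_bits_range(struct state *s, size_t n,
                             uint64_t start, uint64_t end, unsigned bits)
{
    for (size_t i = 0; i < n && start <= end; i++) {
        if (s[i].end < start)          /* ends before the range */
            continue;
        if (s[i].start > end)          /* past the range: done */
            break;
        s[i].bits &= ~bits;            /* clear_state_bit() analogue */
        start = s[i].end + 1;          /* start = last_end + 1 */
    }
}

int main(void)
{
    struct state s[] = { {0, 4095, 0x3}, {4096, 8191, 0x1} };

    clear_bits_range(s, 2, 0, 8191, 0x1);  /* leaves 0x2 on the first */
    return 0;
}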
868 static void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in wait_extent_bit() argument
874 btrfs_debug_check_extent_io_range(tree, start, end); in wait_extent_bit()
890 if (state->start > end) in wait_extent_bit()
900 start = state->end + 1; in wait_extent_bit()
902 if (start > end) in wait_extent_bit()
925 u64 range = state->end - state->start + 1; in set_state_bits()
964 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in __set_extent_bit() argument
978 btrfs_debug_check_extent_io_range(tree, start, end); in __set_extent_bit()
979 trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits); in __set_extent_bit()
996 if (state->start <= start && state->end > start && in __set_extent_bit()
1010 err = insert_state(tree, prealloc, start, end, in __set_extent_bit()
1022 last_end = state->end; in __set_extent_bit()
1030 if (state->start == start && state->end <= end) { in __set_extent_bit()
1044 if (start < end && state && state->start == start && in __set_extent_bit()
1078 start = state->end + 1; in __set_extent_bit()
1092 if (state->end <= end) { in __set_extent_bit()
1100 if (start < end && state && state->start == start && in __set_extent_bit()
1115 if (end < last_start) in __set_extent_bit()
1116 this_end = end; in __set_extent_bit()
1143 if (state->start <= end && state->end > end) { in __set_extent_bit()
1152 err = split_state(tree, state, prealloc, end + 1); in __set_extent_bit()
1164 if (start > end) in __set_extent_bit()
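One step unique to the set path is gap filling: when [start, end] runs into a hole between existing states, __set_extent_bit() inserts a fresh state over the hole, clipped at either end or just before the next existing state (the this_end computation at lines 1115-1116 above). The clipping arithmetic in isolation:

#include <stdint.h>
#include <stdio.h>

/* last_start is the start of the next existing state after the hole. */
static uint64_t clip_gap_end(uint64_t end, uint64_t last_start)
{
    return end < last_start ? end : last_start - 1;
}

int main(void)
{
    /* hole from 4096, next state begins at 16384, request ends at 8191 */
    printf("%llu\n", (unsigned long long)clip_gap_end(8191, 16384));
    /* request runs past the next state: clip to 16383 */
    printf("%llu\n", (unsigned long long)clip_gap_end(65535, 16384));
    return 0;
}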
1180 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_bit() argument
1184 return __set_extent_bit(tree, start, end, bits, 0, failed_start, in set_extent_bit()
1207 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in convert_extent_bit() argument
1221 btrfs_debug_check_extent_io_range(tree, start, end); in convert_extent_bit()
1222 trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits, in convert_extent_bit()
1242 if (state->start <= start && state->end > start && in convert_extent_bit()
1260 err = insert_state(tree, prealloc, start, end, in convert_extent_bit()
1271 last_end = state->end; in convert_extent_bit()
1279 if (state->start == start && state->end <= end) { in convert_extent_bit()
1286 if (start < end && state && state->start == start && in convert_extent_bit()
1320 if (state->end <= end) { in convert_extent_bit()
1328 if (start < end && state && state->start == start && in convert_extent_bit()
1343 if (end < last_start) in convert_extent_bit()
1344 this_end = end; in convert_extent_bit()
1373 if (state->start <= end && state->end > end) { in convert_extent_bit()
1380 err = split_state(tree, state, prealloc, end + 1); in convert_extent_bit()
1392 if (start > end) in convert_extent_bit()
1408 int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in set_record_extent_bits() argument
1419 return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, GFP_NOFS, in set_record_extent_bits()
1423 int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end, in set_extent_bits_nowait() argument
1426 return __set_extent_bit(tree, start, end, bits, 0, NULL, NULL, in set_extent_bits_nowait()
1430 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, in clear_extent_bit() argument
1434 return __clear_extent_bit(tree, start, end, bits, wake, delete, in clear_extent_bit()
1438 int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in clear_record_extent_bits() argument
1447 return __clear_extent_bit(tree, start, end, bits, 0, 0, NULL, GFP_NOFS, in clear_record_extent_bits()
1455 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, in lock_extent_bits() argument
1462 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, in lock_extent_bits()
1466 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED); in lock_extent_bits()
1470 WARN_ON(start > end); in lock_extent_bits()
1475 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end) in try_lock_extent() argument
1480 err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED, in try_lock_extent()
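lock_extent_bits() and try_lock_extent() build a range lock out of an ordinary bit: locking sets EXTENT_LOCKED over [start, end]; on contention the setter reports failed_start, the locker waits for that position to unlock and retries from there (try_lock simply gives up instead). A single-threaded toy of that retry loop — the bool array and the conflict-releasing "wait" are stand-ins for the kernel's state tree and waitqueue, not real behavior:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPACE 64
static bool locked[SPACE];

/* Set the lock bit on [start,end]; on conflict report where we hit it.
 * Bytes locked before the conflict stay locked, as in the kernel. */
static bool try_set_locked(uint64_t start, uint64_t end, uint64_t *failed)
{
    for (uint64_t i = start; i <= end; i++) {
        if (locked[i]) {
            *failed = i;
            return false;
        }
        locked[i] = true;
    }
    return true;
}

static void lock_range(uint64_t start, uint64_t end)
{
    uint64_t failed;

    while (!try_set_locked(start, end, &failed)) {
        /* a real caller sleeps here until 'failed' unlocks; the toy
         * releases the conflict itself so the retry can proceed */
        locked[failed] = false;
        start = failed;                /* resume at the contended byte */
    }
}

int main(void)
{
    locked[10] = true;                 /* pre-existing holder */
    lock_range(5, 20);
    printf("locked 5..20\n");
    return 0;
}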
1491 void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end) in extent_range_clear_dirty_for_io() argument
1494 unsigned long end_index = end >> PAGE_SHIFT; in extent_range_clear_dirty_for_io()
1506 void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end) in extent_range_redirty_for_io() argument
1509 unsigned long end_index = end >> PAGE_SHIFT; in extent_range_redirty_for_io()
1543 if (state->end >= start && (state->state & bits)) in find_first_extent_bit_state()
1571 if (state->end == start - 1 && extent_state_in_tree(state)) { in find_first_extent_bit()
1589 *end_ret = state->end; in find_first_extent_bit()
1622 *end_ret = state->end; in find_contiguous_extent_bit()
1626 *end_ret = state->end; in find_contiguous_extent_bit()
1674 *start_ret = state->end + 1; in find_first_clear_extent_bit()
1686 if (in_range(start, state->start, state->end - state->start + 1)) { in find_first_clear_extent_bit()
1693 start = state->end + 1; in find_first_clear_extent_bit()
1722 *start_ret = state->end + 1; in find_first_clear_extent_bit()
1736 if (state->end >= start && !(state->state & bits)) { in find_first_clear_extent_bit()
1737 *end_ret = state->end; in find_first_clear_extent_bit()
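find_first_clear_extent_bit() scans forward from start, hopping over every state that carries the bits (*start_ret = state->end + 1 each time) until it reaches a position no such state covers. The same scan over a sorted array (illustrative types):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct state { uint64_t start, end; unsigned bits; };

static uint64_t first_clear(const struct state *s, size_t n,
                            uint64_t start, unsigned bits)
{
    for (size_t i = 0; i < n; i++) {
        if (s[i].end < start || !(s[i].bits & bits))
            continue;                  /* before us, or already clear */
        if (s[i].start > start)
            break;                     /* hole in front of this state */
        start = s[i].end + 1;          /* *start_ret = state->end + 1 */
    }
    return start;
}

int main(void)
{
    const struct state s[] = { {0, 4095, 0x1}, {4096, 8191, 0x1} };

    /* prints 8192: the first offset where the bit is clear */
    printf("%llu\n", (unsigned long long)first_clear(s, 2, 0, 0x1));
    return 0;
}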
1758 u64 *end, u64 max_bytes, in btrfs_find_delalloc_range() argument
1775 *end = (u64)-1; in btrfs_find_delalloc_range()
1787 *end = state->end; in btrfs_find_delalloc_range()
1796 *end = state->end; in btrfs_find_delalloc_range()
1797 cur_start = state->end + 1; in btrfs_find_delalloc_range()
1799 total_bytes += state->end - state->start + 1; in btrfs_find_delalloc_range()
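btrfs_find_delalloc_range() grows the found region only while each next state is delalloc and begins exactly where the previous one ended, stopping at a gap, a clean state, or max_bytes. A compact userspace model of that accumulation (hypothetical types; the kernel walks the state tree, not an array):

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct state { uint64_t start, end; bool delalloc; };

static bool find_delalloc(const struct state *s, size_t n,
                          uint64_t *start, uint64_t *end, uint64_t max_bytes)
{
    uint64_t cur = *start, total = 0;
    bool found = false;

    for (size_t i = 0; i < n; i++) {
        if (s[i].end < cur)
            continue;
        if ((found && s[i].start != cur) || !s[i].delalloc)
            break;                     /* discontiguous or not dirty */
        if (!found)
            *start = s[i].start;
        found = true;
        *end = s[i].end;               /* *end = state->end */
        cur = s[i].end + 1;            /* cur_start = state->end + 1 */
        total += s[i].end - s[i].start + 1;
        if (total >= max_bytes)
            break;
    }
    return found;
}

int main(void)
{
    const struct state s[] = { {0, 4095, true}, {4096, 8191, true},
                               {12288, 16383, true} };
    uint64_t start = 0, end = 0;

    find_delalloc(s, 3, &start, &end, ~0ULL);
    printf("%llu..%llu\n", (unsigned long long)start,
           (unsigned long long)end);   /* 0..8191, the gap stops the walk */
    return 0;
}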
1817 u64 start, u64 end) in __unlock_for_delalloc() argument
1820 unsigned long end_index = end >> PAGE_SHIFT; in __unlock_for_delalloc()
1862 u64 *end) in find_lock_delalloc_range() argument
1881 *end = delalloc_end; in find_lock_delalloc_range()
1936 *end = delalloc_end; in find_lock_delalloc_range()
2019 void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end, in extent_clear_unlock_delalloc() argument
2024 clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL); in extent_clear_unlock_delalloc()
2027 start >> PAGE_SHIFT, end >> PAGE_SHIFT, in extent_clear_unlock_delalloc()
2069 if (state->end >= cur_start && (state->state & bits) == bits) { in count_range_bits()
2070 total_bytes += min(search_end, state->end) + 1 - in count_range_bits()
2078 last = state->end; in count_range_bits()
2157 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, in test_range_bit() argument
2166 cached->end > start) in test_range_bit()
2170 while (node && start <= end) { in test_range_bit()
2178 if (state->start > end) in test_range_bit()
2190 if (state->end == (u64)-1) in test_range_bit()
2193 start = state->end + 1; in test_range_bit()
2194 if (start > end) in test_range_bit()
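test_range_bit() has two modes: with filled set, every byte of [start, end] must lie in a state carrying the bit; without it, one intersecting state with the bit suffices. A sketch of both on a sorted array (names are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct state { uint64_t start, end; unsigned bits; };

static bool range_has_bit(const struct state *s, size_t n, uint64_t start,
                          uint64_t end, unsigned bit, bool filled)
{
    for (size_t i = 0; i < n && start <= end; i++) {
        if (s[i].end < start)
            continue;                  /* entirely before the range */
        if (filled && s[i].start > start)
            return false;              /* uncovered hole */
        if (s[i].start > end)
            break;                     /* past the range */
        if (s[i].bits & bit) {
            if (!filled)
                return true;           /* any intersection is enough */
        } else if (filled) {
            return false;              /* covered byte lacks the bit */
        }
        start = s[i].end + 1;          /* start = state->end + 1 */
    }
    return filled && start > end;      /* whole range consumed? */
}

int main(void)
{
    const struct state s[] = { {0, 4095, 0x1}, {4096, 8191, 0x2} };

    printf("%d %d\n", range_has_bit(s, 2, 0, 8191, 0x1, true),
           range_has_bit(s, 2, 0, 8191, 0x1, false));   /* 0 1 */
    return 0;
}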
2214 u64 end = start + PAGE_SIZE - 1; in check_page_uptodate() local
2215 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL)) in check_page_uptodate()
2401 state->end >= failrec->start + failrec->len - 1) { in clean_io_failure()
2423 void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end) in btrfs_free_io_failure_record() argument
2435 if (state->start > end) in btrfs_free_io_failure_record()
2438 ASSERT(state->end <= end); in btrfs_free_io_failure_record()
2452 u64 start, u64 end) in btrfs_get_io_failure_record() argument
2483 failrec->len = end - start + 1; in btrfs_get_io_failure_record()
2522 ret = set_extent_bits(failure_tree, start, end, in btrfs_get_io_failure_record()
2527 ret = set_extent_bits(tree, start, end, EXTENT_DAMAGED); in btrfs_get_io_failure_record()
2645 u64 start, u64 end, int failed_mirror, in btrfs_submit_read_repair() argument
2664 failrec = btrfs_get_io_failure_record(inode, start, end); in btrfs_submit_read_repair()
2712 void end_extent_writepage(struct page *page, int err, u64 start, u64 end) in end_extent_writepage() argument
2717 btrfs_writepage_endio_finish_ordered(page, start, end, uptodate); in end_extent_writepage()
2741 u64 end; in end_bio_extent_writepage() local
2767 end = start + bvec->bv_offset + bvec->bv_len - 1; in end_bio_extent_writepage()
2769 end_extent_writepage(page, error, start, end); in end_bio_extent_writepage()
2781 u64 end = start + len - 1; in endio_readpage_release_extent() local
2784 set_extent_uptodate(tree, start, end, &cached, GFP_ATOMIC); in endio_readpage_release_extent()
2785 unlock_extent_cached_atomic(tree, start, end, &cached); in endio_readpage_release_extent()
2807 u64 end; in end_bio_extent_readpage() local
2845 end = start + bvec->bv_offset + bvec->bv_len - 1; in end_bio_extent_readpage()
2852 start, end, mirror); in end_bio_extent_readpage()
2855 offset, page, start, end, mirror); in end_bio_extent_readpage()
2882 start, end, mirror, in end_bio_extent_readpage()
2926 end - start + 1, 0); in end_bio_extent_readpage()
2929 extent_len = end + 1 - start; in end_bio_extent_readpage()
2931 extent_len += end + 1 - start; in end_bio_extent_readpage()
2936 extent_len = end + 1 - start; in end_bio_extent_readpage()
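The extent_len arithmetic at lines 2929-2936 coalesces completions: contiguous bvec ranges are merged into one (extent_start, extent_len) run and handed to endio_readpage_release_extent() only when a discontiguous range arrives, so one unlock can cover many pages. The pattern in isolation:

#include <stdint.h>
#include <stdio.h>

static uint64_t extent_start, extent_len;

static void release_extent(uint64_t start, uint64_t len)
{
    printf("release %llu +%llu\n", (unsigned long long)start,
           (unsigned long long)len);
}

static void add_completed(uint64_t start, uint64_t end)
{
    if (extent_len == 0) {
        extent_start = start;
        extent_len = end + 1 - start;      /* extent_len = end + 1 - start */
    } else if (extent_start + extent_len == start) {
        extent_len += end + 1 - start;     /* contiguous: grow the run */
    } else {
        release_extent(extent_start, extent_len);
        extent_start = start;
        extent_len = end + 1 - start;
    }
}

int main(void)
{
    add_completed(0, 4095);
    add_completed(4096, 8191);     /* merges with the first */
    add_completed(16384, 20479);   /* gap: flushes 0 +8192 first */
    release_extent(extent_start, extent_len);
    return 0;
}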
3148 const u64 end = start + PAGE_SIZE - 1; in btrfs_do_readpage() local
3169 unlock_extent(tree, start, end); in btrfs_do_readpage()
3186 while (cur <= end) { in btrfs_do_readpage()
3206 end - cur + 1, em_cached); in btrfs_do_readpage()
3209 unlock_extent(tree, cur, end); in btrfs_do_readpage()
3214 BUG_ON(end < cur); in btrfs_do_readpage()
3222 iosize = min(extent_map_end(em) - cur, end - cur + 1); in btrfs_do_readpage()
3223 cur_end = min(extent_map_end(em) - 1, end); in btrfs_do_readpage()
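Inside btrfs_do_readpage()'s loop, each iteration maps the extent at cur and issues I/O for the smaller of what remains of the extent and what remains of the page: iosize = min(extent_map_end(em) - cur, end - cur + 1) (line 3222). The chunking arithmetic as a toy walk, with a made-up extent layout:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t start = 0, end = 4095;        /* one page, inclusive range */
    uint64_t em_end = 1536;                /* first (toy) extent ends mid-page */
    uint64_t cur = start;

    while (cur <= end) {
        uint64_t a = em_end - cur, b = end - cur + 1;
        uint64_t iosize = a < b ? a : b;   /* min(extent left, page left) */

        printf("I/O at %llu, %llu bytes\n", (unsigned long long)cur,
               (unsigned long long)iosize);
        cur += iosize;
        em_end = end + 1;                  /* next (toy) extent covers the rest */
    }
    return 0;
}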
3347 u64 start, u64 end, in contiguous_readpages() argument
3356 btrfs_lock_and_flush_ordered_range(inode, start, end, NULL); in contiguous_readpages()
3465 u64 end; in __extent_writepage_io() local
3493 end = page_end; in __extent_writepage_io()
3496 while (cur <= end) { in __extent_writepage_io()
3505 em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1); in __extent_writepage_io()
3515 BUG_ON(end < cur); in __extent_writepage_io()
3516 iosize = min(em_end - cur, end - cur + 1); in __extent_writepage_io()
3544 page->index, cur, end); in __extent_writepage_io()
3878 unsigned long start, end; in write_one_eb() local
3889 end = btrfs_node_key_ptr_offset(nritems); in write_one_eb()
3891 memzero_extent_buffer(eb, end, eb->len - end); in write_one_eb()
3898 end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb); in write_one_eb()
3899 memzero_extent_buffer(eb, start, end - start); in write_one_eb()
3953 pgoff_t end; /* Inclusive */ in btree_write_cache_pages() local
3960 end = -1; in btree_write_cache_pages()
3968 end = wbc->range_end >> PAGE_SHIFT; in btree_write_cache_pages()
3977 tag_pages_for_writeback(mapping, index, end); in btree_write_cache_pages()
3978 while (!done && !nr_to_write_done && (index <= end) && in btree_write_cache_pages()
3979 (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end, in btree_write_cache_pages()
4122 pgoff_t end; /* Inclusive */ in extent_write_cache_pages() local
4143 end = -1; in extent_write_cache_pages()
4151 end = wbc->range_end >> PAGE_SHIFT; in extent_write_cache_pages()
4175 tag_pages_for_writeback(mapping, index, end); in extent_write_cache_pages()
4177 while (!done && !nr_to_write_done && (index <= end) && in extent_write_cache_pages()
4179 &index, end, tag))) { in extent_write_cache_pages()
4281 int extent_write_locked_range(struct inode *inode, u64 start, u64 end, in extent_write_locked_range() argument
4287 unsigned long nr_pages = (end - start + PAGE_SIZE) >> in extent_write_locked_range()
4299 .range_end = end + 1, in extent_write_locked_range()
4306 while (start <= end) { in extent_write_locked_range()
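extent_write_locked_range() computes its page count as (end - start + PAGE_SIZE) >> PAGE_SHIFT (line 4287), which is exact when start is page-aligned and end is the inclusive last byte of the range. A two-line check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
    uint64_t start = 0, end = 3 * PAGE_SIZE - 1;   /* bytes 0..12287 */
    unsigned long nr = (end - start + PAGE_SIZE) >> PAGE_SHIFT;

    printf("%lu pages\n", nr);                     /* prints 3 */
    return 0;
}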
4387 u64 end = start + PAGE_SIZE - 1; in extent_invalidatepage() local
4391 if (start > end) in extent_invalidatepage()
4394 lock_extent_bits(tree, start, end, &cached_state); in extent_invalidatepage()
4396 clear_extent_bit(tree, start, end, EXTENT_LOCKED | EXTENT_DELALLOC | in extent_invalidatepage()
4410 u64 end = start + PAGE_SIZE - 1; in try_release_extent_state() local
4413 if (test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL)) { in try_release_extent_state()
4420 ret = __clear_extent_bit(tree, start, end, in try_release_extent_state()
4444 u64 end = start + PAGE_SIZE - 1; in try_release_extent_mapping() local
4452 while (start <= end) { in try_release_extent_mapping()
4456 len = end - start + 1; in try_release_extent_mapping()
4688 int end = 0; in extent_fiemap() local
4772 while (!end) { in extent_fiemap()
4808 end = 1; in extent_fiemap()
4811 end = 1; in extent_fiemap()
4848 end = 1; in extent_fiemap()
4859 end = 1; in extent_fiemap()