Lines Matching +full:1 +full:eb

64 	struct extent_buffer *eb;  in btrfs_extent_buffer_leak_debug_check()  local
76 eb = list_first_entry(&fs_info->allocated_ebs, in btrfs_extent_buffer_leak_debug_check()
80 eb->start, eb->len, atomic_read(&eb->refs), eb->bflags, in btrfs_extent_buffer_leak_debug_check()
81 btrfs_header_owner(eb)); in btrfs_extent_buffer_leak_debug_check()
82 list_del(&eb->leak_list); in btrfs_extent_buffer_leak_debug_check()
83 kmem_cache_free(extent_buffer_cache, eb); in btrfs_extent_buffer_leak_debug_check()
115 if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) { in __btrfs_debug_check_extent_io_range()
139 unsigned int extent_locked:1;
142 unsigned int sync_io:1;
157 changeset->bytes_changed += state->end - state->start + 1; in add_extent_changeset()
337 refcount_set(&state->refs, 1); in alloc_extent_state()
503 if (other->end == state->start - 1 && in merge_state()
518 if (other->start == state->end + 1 && in merge_state()
557 WARN_ON(1); in insert_state()
585 * prealloc: [orig->start, split - 1]
600 prealloc->end = split - 1; in split_state()
624 * it will optionally wake up anyone waiting on this state (wake == 1).
639 u64 range = state->end - state->start + 1; in clear_state_bit()
659 WARN_ON(1); in clear_state_bit()
688 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
709 trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits); in __clear_extent_bit()
718 clear = 1; in __clear_extent_bit()
812 err = split_state(tree, state, prealloc, end + 1); in __clear_extent_bit()
827 if (last_end == (u64)-1) in __clear_extent_bit()
829 start = last_end + 1; in __clear_extent_bit()
878 while (1) { in wait_extent_bit()
900 start = state->end + 1; in wait_extent_bit()
925 u64 range = state->end - state->start + 1; in set_state_bits()
928 ret = add_extent_changeset(state, bits_to_set, changeset, 1); in set_state_bits()
979 trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits); in __set_extent_bit()
1040 if (last_end == (u64)-1) in __set_extent_bit()
1042 start = last_end + 1; in __set_extent_bit()
1078 start = state->end + 1; in __set_extent_bit()
1096 if (last_end == (u64)-1) in __set_extent_bit()
1098 start = last_end + 1; in __set_extent_bit()
1118 this_end = last_start - 1; in __set_extent_bit()
1134 start = this_end + 1; in __set_extent_bit()
1152 err = split_state(tree, state, prealloc, end + 1); in __set_extent_bit()
1222 trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits, in convert_extent_bit()
1283 if (last_end == (u64)-1) in convert_extent_bit()
1285 start = last_end + 1; in convert_extent_bit()
1325 if (last_end == (u64)-1) in convert_extent_bit()
1327 start = last_end + 1; in convert_extent_bit()
1346 this_end = last_start - 1; in convert_extent_bit()
1364 start = this_end + 1; in convert_extent_bit()
1380 err = split_state(tree, state, prealloc, end + 1); in convert_extent_bit()
1461 while (1) { in lock_extent_bits()
1484 clear_extent_bit(tree, start, failed_start - 1, in try_lock_extent()
1485 EXTENT_LOCKED, 1, 0, NULL); in try_lock_extent()
1488 return 1; in try_lock_extent()
1541 while (1) { in find_first_extent_bit_state()
1559 * If nothing was found, 1 is returned. If found something, return 0.
1566 int ret = 1; in find_first_extent_bit()
1571 if (state->end == start - 1 && extent_state_in_tree(state)) { in find_first_extent_bit()
1616 int ret = 1; in find_contiguous_extent_bit()
1624 if (state->start > (*end_ret + 1)) in find_contiguous_extent_bit()
1645 * set it's possible that @end_ret contains -1, this happens in case the range
1658 while (1) { in find_first_clear_extent_bit()
1666 *end_ret = -1; in find_first_clear_extent_bit()
1674 *start_ret = state->end + 1; in find_first_clear_extent_bit()
1675 *end_ret = -1; in find_first_clear_extent_bit()
1686 if (in_range(start, state->start, state->end - state->start + 1)) { in find_first_clear_extent_bit()
1693 start = state->end + 1; in find_first_clear_extent_bit()
1722 *start_ret = state->end + 1; in find_first_clear_extent_bit()
1734 while (1) { in find_first_clear_extent_bit()
1739 *end_ret = state->start - 1; in find_first_clear_extent_bit()
1775 *end = (u64)-1; in btrfs_find_delalloc_range()
1779 while (1) { in btrfs_find_delalloc_range()
1797 cur_start = state->end + 1; in btrfs_find_delalloc_range()
1799 total_bytes += state->end - state->start + 1; in btrfs_find_delalloc_range()
1897 if (delalloc_end + 1 - delalloc_start > max_bytes) in find_lock_delalloc_range()
1898 delalloc_end = delalloc_start + max_bytes - 1; in find_lock_delalloc_range()
1912 loops = 1; in find_lock_delalloc_range()
1925 EXTENT_DELALLOC, 1, cached_state); in find_lock_delalloc_range()
1946 unsigned long nr_pages = end_index - start_index + 1; in __process_pages_contig()
2015 *index_ret = start_index + pages_locked - 1; in __process_pages_contig()
2024 clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL); in extent_clear_unlock_delalloc()
2063 while (1) { in count_range_bits()
2067 if (contig && found && state->start > last + 1) in count_range_bits()
2070 total_bytes += min(search_end, state->end) + 1 - in count_range_bits()
2076 found = 1; in count_range_bits()
2153 * If 'filled' == 1, this returns 1 only if every extent in the tree
2154 * has the bits set. Otherwise, 1 is returned if any bit in the
2182 bitset = 1; in test_range_bit()
2190 if (state->end == (u64)-1) in test_range_bit()
2193 start = state->end + 1; in test_range_bit()
2214 u64 end = start + PAGE_SIZE - 1; in check_page_uptodate()
2215 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL)) in check_page_uptodate()
2228 rec->start + rec->len - 1, in free_io_failure()
2234 rec->start + rec->len - 1, in free_io_failure()
2267 bio = btrfs_io_bio_alloc(1); in repair_io_failure()
2291 ASSERT(bbio->mirror_num == 1); in repair_io_failure()
2303 sector = bbio->stripes[bbio->mirror_num - 1].physical >> 9; in repair_io_failure()
2305 dev = bbio->stripes[bbio->mirror_num - 1].dev; in repair_io_failure()
2334 int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num) in btrfs_repair_eb_io_failure() argument
2336 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_repair_eb_io_failure()
2337 u64 start = eb->start; in btrfs_repair_eb_io_failure()
2338 int i, num_pages = num_extent_pages(eb); in btrfs_repair_eb_io_failure()
2345 struct page *p = eb->pages[i]; in btrfs_repair_eb_io_failure()
2373 ret = count_range_bits(failure_tree, &private, (u64)-1, 1, in clean_io_failure()
2401 state->end >= failrec->start + failrec->len - 1) { in clean_io_failure()
2404 if (num_copies > 1) { in clean_io_failure()
2483 failrec->len = end - start + 1; in btrfs_get_io_failure_record()
2544 if (num_copies == 1) { in btrfs_check_repairable()
2571 failrec->in_validation = 1; in btrfs_check_repairable()
2618 * 1. A buffered read bio, which is not cloned. in btrfs_io_needs_validation()
2623 * btrfs_io_bio->iter; for non-cloned bios (cases 1 and 3), we can get in btrfs_io_needs_validation()
2676 repair_bio = btrfs_io_bio_alloc(1); in btrfs_submit_read_repair()
2767 end = start + bvec->bv_offset + bvec->bv_len - 1; in end_bio_extent_writepage()
2781 u64 end = start + len - 1; in endio_readpage_release_extent()
2845 end = start + bvec->bv_offset + bvec->bv_len - 1; in end_bio_extent_readpage()
2889 struct extent_buffer *eb; in end_bio_extent_readpage() local
2891 eb = (struct extent_buffer *)page->private; in end_bio_extent_readpage()
2892 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); in end_bio_extent_readpage()
2893 eb->read_mirror = mirror; in end_bio_extent_readpage()
2894 atomic_dec(&eb->io_pages); in end_bio_extent_readpage()
2896 &eb->bflags)) in end_bio_extent_readpage()
2897 btree_readahead_hook(eb, -EIO); in end_bio_extent_readpage()
2921 extent_len, 1); in end_bio_extent_readpage()
2926 end - start + 1, 0); in end_bio_extent_readpage()
2929 extent_len = end + 1 - start; in end_bio_extent_readpage()
2931 extent_len += end + 1 - start; in end_bio_extent_readpage()
2936 extent_len = end + 1 - start; in end_bio_extent_readpage()
3094 static void attach_extent_buffer_page(struct extent_buffer *eb, in attach_extent_buffer_page() argument
3098 attach_page_private(page, eb); in attach_extent_buffer_page()
3100 WARN_ON(page->private != (unsigned long)eb); in attach_extent_buffer_page()
3148 const u64 end = start + PAGE_SIZE - 1; in btrfs_do_readpage()
3199 set_extent_uptodate(tree, cur, cur + iosize - 1, in btrfs_do_readpage()
3202 cur + iosize - 1, &cached); in btrfs_do_readpage()
3206 end - cur + 1, em_cached); in btrfs_do_readpage()
3222 iosize = min(extent_map_end(em) - cur, end - cur + 1); in btrfs_do_readpage()
3223 cur_end = min(extent_map_end(em) - 1, end); in btrfs_do_readpage()
3271 prev_em_start && *prev_em_start != (u64)-1 && in btrfs_do_readpage()
3291 set_extent_uptodate(tree, cur, cur + iosize - 1, in btrfs_do_readpage()
3294 cur + iosize - 1, &cached); in btrfs_do_readpage()
3301 EXTENT_UPTODATE, 1, NULL)) { in btrfs_do_readpage()
3303 unlock_extent(tree, cur, cur + iosize - 1); in btrfs_do_readpage()
3313 unlock_extent(tree, cur, cur + iosize - 1); in btrfs_do_readpage()
3331 unlock_extent(tree, cur, cur + iosize - 1); in btrfs_do_readpage()
3374 * This returns 1 if btrfs_run_delalloc_range function did all the work required
3385 u64 page_end = delalloc_start + PAGE_SIZE - 1; in writepage_delalloc()
3398 delalloc_start = delalloc_end + 1; in writepage_delalloc()
3419 delalloc_start = delalloc_end + 1; in writepage_delalloc()
3440 return 1; in writepage_delalloc()
3450 * We return 1 if the IO is started and the page is unlocked,
3464 u64 page_end = start + PAGE_SIZE - 1; in __extent_writepage_io()
3484 return 1; in __extent_writepage_io()
3491 update_nr_written(wbc, nr_written + 1); in __extent_writepage_io()
3502 page_end, 1); in __extent_writepage_io()
3505 em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1); in __extent_writepage_io()
3516 iosize = min(em_end - cur, end - cur + 1); in __extent_writepage_io()
3534 cur + iosize - 1, 1); in __extent_writepage_io()
3540 btrfs_set_range_writeback(tree, cur, cur + iosize - 1); in __extent_writepage_io()
3580 u64 page_end = start + PAGE_SIZE - 1; in __extent_writepage()
3617 if (ret == 1) in __extent_writepage()
3625 if (ret == 1) in __extent_writepage()
3643 void wait_on_extent_buffer_writeback(struct extent_buffer *eb) in wait_on_extent_buffer_writeback() argument
3645 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK, in wait_on_extent_buffer_writeback()
3649 static void end_extent_buffer_writeback(struct extent_buffer *eb) in end_extent_buffer_writeback() argument
3651 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); in end_extent_buffer_writeback()
3653 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK); in end_extent_buffer_writeback()
3657 * Lock eb pages and flush the bio if we can't get the locks in lock_extent_buffer_for_io()
3663 static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb, in lock_extent_buffer_for_io() argument
3666 struct btrfs_fs_info *fs_info = eb->fs_info; in lock_extent_buffer_for_io()
3671 if (!btrfs_try_tree_write_lock(eb)) { in lock_extent_buffer_for_io()
3675 flush = 1; in lock_extent_buffer_for_io()
3676 btrfs_tree_lock(eb); in lock_extent_buffer_for_io()
3679 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) { in lock_extent_buffer_for_io()
3680 btrfs_tree_unlock(eb); in lock_extent_buffer_for_io()
3687 flush = 1; in lock_extent_buffer_for_io()
3689 while (1) { in lock_extent_buffer_for_io()
3690 wait_on_extent_buffer_writeback(eb); in lock_extent_buffer_for_io()
3691 btrfs_tree_lock(eb); in lock_extent_buffer_for_io()
3692 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) in lock_extent_buffer_for_io()
3694 btrfs_tree_unlock(eb); in lock_extent_buffer_for_io()
3699 * We need to do this to prevent races in people who check if the eb is in lock_extent_buffer_for_io()
3703 spin_lock(&eb->refs_lock); in lock_extent_buffer_for_io()
3704 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { in lock_extent_buffer_for_io()
3705 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); in lock_extent_buffer_for_io()
3706 spin_unlock(&eb->refs_lock); in lock_extent_buffer_for_io()
3707 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); in lock_extent_buffer_for_io()
3709 -eb->len, in lock_extent_buffer_for_io()
3711 ret = 1; in lock_extent_buffer_for_io()
3713 spin_unlock(&eb->refs_lock); in lock_extent_buffer_for_io()
3716 btrfs_tree_unlock(eb); in lock_extent_buffer_for_io()
3721 num_pages = num_extent_pages(eb); in lock_extent_buffer_for_io()
3723 struct page *p = eb->pages[i]; in lock_extent_buffer_for_io()
3735 flush = 1; in lock_extent_buffer_for_io()
3745 unlock_page(eb->pages[i]); in lock_extent_buffer_for_io()
3748 * Also set back EXTENT_BUFFER_DIRTY so future attempts to this eb can in lock_extent_buffer_for_io()
3751 btrfs_tree_lock(eb); in lock_extent_buffer_for_io()
3752 spin_lock(&eb->refs_lock); in lock_extent_buffer_for_io()
3753 set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); in lock_extent_buffer_for_io()
3754 end_extent_buffer_writeback(eb); in lock_extent_buffer_for_io()
3755 spin_unlock(&eb->refs_lock); in lock_extent_buffer_for_io()
3756 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len, in lock_extent_buffer_for_io()
3758 btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); in lock_extent_buffer_for_io()
3759 btrfs_tree_unlock(eb); in lock_extent_buffer_for_io()
3765 struct extent_buffer *eb = (struct extent_buffer *)page->private; in set_btree_ioerr() local
3769 if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) in set_btree_ioerr()
3776 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in set_btree_ioerr()
3782 fs_info = eb->fs_info; in set_btree_ioerr()
3784 eb->len, fs_info->dirty_metadata_batch); in set_btree_ioerr()
3810 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is in set_btree_ioerr()
3811 * not done and would not be reliable - the eb might have been released in set_btree_ioerr()
3824 switch (eb->log_index) { in set_btree_ioerr()
3825 case -1: in set_btree_ioerr()
3826 set_bit(BTRFS_FS_BTREE_ERR, &eb->fs_info->flags); in set_btree_ioerr()
3829 set_bit(BTRFS_FS_LOG1_ERR, &eb->fs_info->flags); in set_btree_ioerr()
3831 case 1: in set_btree_ioerr()
3832 set_bit(BTRFS_FS_LOG2_ERR, &eb->fs_info->flags); in set_btree_ioerr()
3842 struct extent_buffer *eb; in end_bio_extent_buffer_writepage() local
3850 eb = (struct extent_buffer *)page->private; in end_bio_extent_buffer_writepage()
3851 BUG_ON(!eb); in end_bio_extent_buffer_writepage()
3852 done = atomic_dec_and_test(&eb->io_pages); in end_bio_extent_buffer_writepage()
3855 test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) { in end_bio_extent_buffer_writepage()
3865 end_extent_buffer_writeback(eb); in end_bio_extent_buffer_writepage()
3871 static noinline_for_stack int write_one_eb(struct extent_buffer *eb, in write_one_eb() argument
3875 u64 offset = eb->start; in write_one_eb()
3882 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); in write_one_eb()
3883 num_pages = num_extent_pages(eb); in write_one_eb()
3884 atomic_set(&eb->io_pages, num_pages); in write_one_eb()
3887 nritems = btrfs_header_nritems(eb); in write_one_eb()
3888 if (btrfs_header_level(eb) > 0) { in write_one_eb()
3891 memzero_extent_buffer(eb, end, eb->len - end); in write_one_eb()
3895 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0 in write_one_eb()
3898 end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb); in write_one_eb()
3899 memzero_extent_buffer(eb, start, end - start); in write_one_eb()
3903 struct page *p = eb->pages[i]; in write_one_eb()
3916 if (atomic_sub_and_test(num_pages - i, &eb->io_pages)) in write_one_eb()
3917 end_extent_buffer_writeback(eb); in write_one_eb()
3922 update_nr_written(wbc, 1); in write_one_eb()
3928 struct page *p = eb->pages[i]; in write_one_eb()
3940 struct extent_buffer *eb, *prev_eb = NULL; in btree_write_cache_pages() local
3960 end = -1; in btree_write_cache_pages()
3969 scanned = 1; in btree_write_cache_pages()
3995 eb = (struct extent_buffer *)page->private; in btree_write_cache_pages()
4002 if (WARN_ON(!eb)) { in btree_write_cache_pages()
4007 if (eb == prev_eb) { in btree_write_cache_pages()
4012 ret = atomic_inc_not_zero(&eb->refs); in btree_write_cache_pages()
4017 prev_eb = eb; in btree_write_cache_pages()
4018 ret = lock_extent_buffer_for_io(eb, &epd); in btree_write_cache_pages()
4020 free_extent_buffer(eb); in btree_write_cache_pages()
4023 done = 1; in btree_write_cache_pages()
4024 free_extent_buffer(eb); in btree_write_cache_pages()
4028 ret = write_one_eb(eb, wbc, &epd); in btree_write_cache_pages()
4030 done = 1; in btree_write_cache_pages()
4031 free_extent_buffer(eb); in btree_write_cache_pages()
4034 free_extent_buffer(eb); in btree_write_cache_pages()
4052 scanned = 1; in btree_write_cache_pages()
4085 * extent io tree. Thus we don't want to submit such wild eb in btree_write_cache_pages()
4143 end = -1; in extent_write_cache_pages()
4153 range_whole = 1; in extent_write_cache_pages()
4154 scanned = 1; in extent_write_cache_pages()
4167 wbc->tagged_writepages = 1; in extent_write_cache_pages()
4185 done_index = page->index + 1; in extent_write_cache_pages()
4220 done = 1; in extent_write_cache_pages()
4239 scanned = 1; in extent_write_cache_pages()
4292 .extent_locked = 1, in extent_write_locked_range()
4299 .range_end = end + 1, in extent_write_locked_range()
4301 .punt_to_cgroup = 1, in extent_write_locked_range()
4302 .no_cgroup_owner = 1, in extent_write_locked_range()
4312 start + PAGE_SIZE - 1, 1); in extent_write_locked_range()
4355 u64 prev_em_start = (u64)-1; in extent_readahead()
4360 u64 contig_end = page_offset(pagepool[nr - 1]) + PAGE_SIZE - 1; in extent_readahead()
4362 ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end); in extent_readahead()
4387 u64 end = start + PAGE_SIZE - 1; in extent_invalidatepage()
4397 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state); in extent_invalidatepage()
4410 u64 end = start + PAGE_SIZE - 1; in try_release_extent_state()
4411 int ret = 1; in try_release_extent_state()
4430 ret = 1; in try_release_extent_state()
4444 u64 end = start + PAGE_SIZE - 1; in try_release_extent_mapping()
4456 len = end - start + 1; in try_release_extent_mapping()
4470 extent_map_end(em) - 1, in try_release_extent_mapping()
4534 while (1) { in get_extent_skip_holes()
4596 WARN_ON(1); in emit_fiemap_extent()
4602 * 1) Their logical addresses are continuous in emit_fiemap_extent()
4699 path->leave_spinning = 1; in extent_fiemap()
4720 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), -1, in extent_fiemap()
4726 if (ret == 1) in extent_fiemap()
4738 last = (u64)-1; in extent_fiemap()
4747 last_for_get_extent = last + 1; in extent_fiemap()
4757 last = (u64)-1; in extent_fiemap()
4761 lock_extent_bits(&inode->io_tree, start, start + len - 1, in extent_fiemap()
4808 end = 1; in extent_fiemap()
4811 end = 1; in extent_fiemap()
4845 if ((em_start >= last) || em_len == (u64)-1 || in extent_fiemap()
4846 (last == (u64)-1 && isize <= em_end)) { in extent_fiemap()
4848 end = 1; in extent_fiemap()
4859 end = 1; in extent_fiemap()
4864 if (ret == 1) in extent_fiemap()
4874 unlock_extent_cached(&inode->io_tree, start, start + len - 1, in extent_fiemap()
4884 static void __free_extent_buffer(struct extent_buffer *eb) in __free_extent_buffer() argument
4886 kmem_cache_free(extent_buffer_cache, eb); in __free_extent_buffer()
4889 int extent_buffer_under_io(const struct extent_buffer *eb) in extent_buffer_under_io() argument
4891 return (atomic_read(&eb->io_pages) || in extent_buffer_under_io()
4892 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) || in extent_buffer_under_io()
4893 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); in extent_buffer_under_io()
4899 static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb) in btrfs_release_extent_buffer_pages() argument
4903 int mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in btrfs_release_extent_buffer_pages()
4905 BUG_ON(extent_buffer_under_io(eb)); in btrfs_release_extent_buffer_pages()
4907 num_pages = num_extent_pages(eb); in btrfs_release_extent_buffer_pages()
4909 struct page *page = eb->pages[i]; in btrfs_release_extent_buffer_pages()
4917 * removed the eb from the radix tree, so we could race in btrfs_release_extent_buffer_pages()
4918 * and have this page now attached to the new eb. So in btrfs_release_extent_buffer_pages()
4920 * this eb. in btrfs_release_extent_buffer_pages()
4923 page->private == (unsigned long)eb) { in btrfs_release_extent_buffer_pages()
4924 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); in btrfs_release_extent_buffer_pages()
4929 * to a new eb. in btrfs_release_extent_buffer_pages()
4945 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb) in btrfs_release_extent_buffer() argument
4947 btrfs_release_extent_buffer_pages(eb); in btrfs_release_extent_buffer()
4948 btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list); in btrfs_release_extent_buffer()
4949 __free_extent_buffer(eb); in btrfs_release_extent_buffer()
4956 struct extent_buffer *eb = NULL; in __alloc_extent_buffer() local
4958 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL); in __alloc_extent_buffer()
4959 eb->start = start; in __alloc_extent_buffer()
4960 eb->len = len; in __alloc_extent_buffer()
4961 eb->fs_info = fs_info; in __alloc_extent_buffer()
4962 eb->bflags = 0; in __alloc_extent_buffer()
4963 rwlock_init(&eb->lock); in __alloc_extent_buffer()
4964 atomic_set(&eb->blocking_readers, 0); in __alloc_extent_buffer()
4965 eb->blocking_writers = 0; in __alloc_extent_buffer()
4966 eb->lock_recursed = false; in __alloc_extent_buffer()
4967 init_waitqueue_head(&eb->write_lock_wq); in __alloc_extent_buffer()
4968 init_waitqueue_head(&eb->read_lock_wq); in __alloc_extent_buffer()
4970 btrfs_leak_debug_add(&fs_info->eb_leak_lock, &eb->leak_list, in __alloc_extent_buffer()
4973 spin_lock_init(&eb->refs_lock); in __alloc_extent_buffer()
4974 atomic_set(&eb->refs, 1); in __alloc_extent_buffer()
4975 atomic_set(&eb->io_pages, 0); in __alloc_extent_buffer()
4985 eb->spinning_writers = 0; in __alloc_extent_buffer()
4986 atomic_set(&eb->spinning_readers, 0); in __alloc_extent_buffer()
4987 atomic_set(&eb->read_locks, 0); in __alloc_extent_buffer()
4988 eb->write_locks = 0; in __alloc_extent_buffer()
4991 return eb; in __alloc_extent_buffer()
5027 struct extent_buffer *eb; in __alloc_dummy_extent_buffer() local
5031 eb = __alloc_extent_buffer(fs_info, start, len); in __alloc_dummy_extent_buffer()
5032 if (!eb) in __alloc_dummy_extent_buffer()
5035 num_pages = num_extent_pages(eb); in __alloc_dummy_extent_buffer()
5037 eb->pages[i] = alloc_page(GFP_NOFS); in __alloc_dummy_extent_buffer()
5038 if (!eb->pages[i]) in __alloc_dummy_extent_buffer()
5041 set_extent_buffer_uptodate(eb); in __alloc_dummy_extent_buffer()
5042 btrfs_set_header_nritems(eb, 0); in __alloc_dummy_extent_buffer()
5043 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in __alloc_dummy_extent_buffer()
5045 return eb; in __alloc_dummy_extent_buffer()
5048 __free_page(eb->pages[i - 1]); in __alloc_dummy_extent_buffer()
5049 __free_extent_buffer(eb); in __alloc_dummy_extent_buffer()
5059 static void check_buffer_tree_ref(struct extent_buffer *eb) in check_buffer_tree_ref() argument
5081 * which trigger io after they set eb->io_pages. Note that once io is in check_buffer_tree_ref()
5085 refs = atomic_read(&eb->refs); in check_buffer_tree_ref()
5086 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in check_buffer_tree_ref()
5089 spin_lock(&eb->refs_lock); in check_buffer_tree_ref()
5090 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in check_buffer_tree_ref()
5091 atomic_inc(&eb->refs); in check_buffer_tree_ref()
5092 spin_unlock(&eb->refs_lock); in check_buffer_tree_ref()
5095 static void mark_extent_buffer_accessed(struct extent_buffer *eb, in mark_extent_buffer_accessed() argument
5100 check_buffer_tree_ref(eb); in mark_extent_buffer_accessed()
5102 num_pages = num_extent_pages(eb); in mark_extent_buffer_accessed()
5104 struct page *p = eb->pages[i]; in mark_extent_buffer_accessed()
5114 struct extent_buffer *eb; in find_extent_buffer() local
5117 eb = radix_tree_lookup(&fs_info->buffer_radix, in find_extent_buffer()
5119 if (eb && atomic_inc_not_zero(&eb->refs)) { in find_extent_buffer()
5122 * Lock our eb's refs_lock to avoid races with in find_extent_buffer()
5123 * free_extent_buffer. When we get our eb it might be flagged in find_extent_buffer()
5126 * eb->refs == 2, that the buffer isn't under IO (dirty and in find_extent_buffer()
5130 * So here we could race and increment the eb's reference count, in find_extent_buffer()
5136 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) { in find_extent_buffer()
5137 spin_lock(&eb->refs_lock); in find_extent_buffer()
5138 spin_unlock(&eb->refs_lock); in find_extent_buffer()
5140 mark_extent_buffer_accessed(eb, NULL); in find_extent_buffer()
5141 return eb; in find_extent_buffer()
5152 struct extent_buffer *eb, *exists = NULL; in alloc_test_extent_buffer() local
5155 eb = find_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
5156 if (eb) in alloc_test_extent_buffer()
5157 return eb; in alloc_test_extent_buffer()
5158 eb = alloc_dummy_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
5159 if (!eb) in alloc_test_extent_buffer()
5161 eb->fs_info = fs_info; in alloc_test_extent_buffer()
5170 start >> PAGE_SHIFT, eb); in alloc_test_extent_buffer()
5180 check_buffer_tree_ref(eb); in alloc_test_extent_buffer()
5181 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); in alloc_test_extent_buffer()
5183 return eb; in alloc_test_extent_buffer()
5185 btrfs_release_extent_buffer(eb); in alloc_test_extent_buffer()
5197 struct extent_buffer *eb; in alloc_extent_buffer() local
5201 int uptodate = 1; in alloc_extent_buffer()
5209 eb = find_extent_buffer(fs_info, start); in alloc_extent_buffer()
5210 if (eb) in alloc_extent_buffer()
5211 return eb; in alloc_extent_buffer()
5213 eb = __alloc_extent_buffer(fs_info, start, len); in alloc_extent_buffer()
5214 if (!eb) in alloc_extent_buffer()
5217 num_pages = num_extent_pages(eb); in alloc_extent_buffer()
5228 * We could have already allocated an eb for this page in alloc_extent_buffer()
5230 * the existing eb, and if we can we know it's good and in alloc_extent_buffer()
5252 attach_extent_buffer_page(eb, p); in alloc_extent_buffer()
5255 eb->pages[i] = p; in alloc_extent_buffer()
5268 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in alloc_extent_buffer()
5278 start >> PAGE_SHIFT, eb); in alloc_extent_buffer()
5289 check_buffer_tree_ref(eb); in alloc_extent_buffer()
5290 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); in alloc_extent_buffer()
5298 unlock_page(eb->pages[i]); in alloc_extent_buffer()
5299 return eb; in alloc_extent_buffer()
5302 WARN_ON(!atomic_dec_and_test(&eb->refs)); in alloc_extent_buffer()
5304 if (eb->pages[i]) in alloc_extent_buffer()
5305 unlock_page(eb->pages[i]); in alloc_extent_buffer()
5308 btrfs_release_extent_buffer(eb); in alloc_extent_buffer()
5314 struct extent_buffer *eb = in btrfs_release_extent_buffer_rcu() local
5317 __free_extent_buffer(eb); in btrfs_release_extent_buffer_rcu()
5320 static int release_extent_buffer(struct extent_buffer *eb) in release_extent_buffer() argument
5321 __releases(&eb->refs_lock) in release_extent_buffer()
5323 lockdep_assert_held(&eb->refs_lock); in release_extent_buffer()
5325 WARN_ON(atomic_read(&eb->refs) == 0); in release_extent_buffer()
5326 if (atomic_dec_and_test(&eb->refs)) { in release_extent_buffer()
5327 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) { in release_extent_buffer()
5328 struct btrfs_fs_info *fs_info = eb->fs_info; in release_extent_buffer()
5330 spin_unlock(&eb->refs_lock); in release_extent_buffer()
5334 eb->start >> PAGE_SHIFT); in release_extent_buffer()
5337 spin_unlock(&eb->refs_lock); in release_extent_buffer()
5340 btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list); in release_extent_buffer()
5342 btrfs_release_extent_buffer_pages(eb); in release_extent_buffer()
5344 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) { in release_extent_buffer()
5345 __free_extent_buffer(eb); in release_extent_buffer()
5346 return 1; in release_extent_buffer()
5349 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); in release_extent_buffer()
5350 return 1; in release_extent_buffer()
5352 spin_unlock(&eb->refs_lock); in release_extent_buffer()
5357 void free_extent_buffer(struct extent_buffer *eb) in free_extent_buffer() argument
5361 if (!eb) in free_extent_buffer()
5364 while (1) { in free_extent_buffer()
5365 refs = atomic_read(&eb->refs); in free_extent_buffer()
5366 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3) in free_extent_buffer()
5367 || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && in free_extent_buffer()
5368 refs == 1)) in free_extent_buffer()
5370 old = atomic_cmpxchg(&eb->refs, refs, refs - 1); in free_extent_buffer()
5375 spin_lock(&eb->refs_lock); in free_extent_buffer()
5376 if (atomic_read(&eb->refs) == 2 && in free_extent_buffer()
5377 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) && in free_extent_buffer()
5378 !extent_buffer_under_io(eb) && in free_extent_buffer()
5379 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in free_extent_buffer()
5380 atomic_dec(&eb->refs); in free_extent_buffer()
5386 release_extent_buffer(eb); in free_extent_buffer()
5389 void free_extent_buffer_stale(struct extent_buffer *eb) in free_extent_buffer_stale() argument
5391 if (!eb) in free_extent_buffer_stale()
5394 spin_lock(&eb->refs_lock); in free_extent_buffer_stale()
5395 set_bit(EXTENT_BUFFER_STALE, &eb->bflags); in free_extent_buffer_stale()
5397 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) && in free_extent_buffer_stale()
5398 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in free_extent_buffer_stale()
5399 atomic_dec(&eb->refs); in free_extent_buffer_stale()
5400 release_extent_buffer(eb); in free_extent_buffer_stale()
5403 void clear_extent_buffer_dirty(const struct extent_buffer *eb) in clear_extent_buffer_dirty() argument
5409 num_pages = num_extent_pages(eb); in clear_extent_buffer_dirty()
5412 page = eb->pages[i]; in clear_extent_buffer_dirty()
5428 WARN_ON(atomic_read(&eb->refs) == 0); in clear_extent_buffer_dirty()
5431 bool set_extent_buffer_dirty(struct extent_buffer *eb) in set_extent_buffer_dirty() argument
5437 check_buffer_tree_ref(eb); in set_extent_buffer_dirty()
5439 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); in set_extent_buffer_dirty()
5441 num_pages = num_extent_pages(eb); in set_extent_buffer_dirty()
5442 WARN_ON(atomic_read(&eb->refs) == 0); in set_extent_buffer_dirty()
5443 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)); in set_extent_buffer_dirty()
5447 set_page_dirty(eb->pages[i]); in set_extent_buffer_dirty()
5451 ASSERT(PageDirty(eb->pages[i])); in set_extent_buffer_dirty()
5457 void clear_extent_buffer_uptodate(struct extent_buffer *eb) in clear_extent_buffer_uptodate() argument
5463 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in clear_extent_buffer_uptodate()
5464 num_pages = num_extent_pages(eb); in clear_extent_buffer_uptodate()
5466 page = eb->pages[i]; in clear_extent_buffer_uptodate()
5472 void set_extent_buffer_uptodate(struct extent_buffer *eb) in set_extent_buffer_uptodate() argument
5478 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in set_extent_buffer_uptodate()
5479 num_pages = num_extent_pages(eb); in set_extent_buffer_uptodate()
5481 page = eb->pages[i]; in set_extent_buffer_uptodate()
5486 int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num) in read_extent_buffer_pages() argument
5493 int all_uptodate = 1; in read_extent_buffer_pages()
5499 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) in read_extent_buffer_pages()
5502 num_pages = num_extent_pages(eb); in read_extent_buffer_pages()
5504 page = eb->pages[i]; in read_extent_buffer_pages()
5519 page = eb->pages[i]; in read_extent_buffer_pages()
5527 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in read_extent_buffer_pages()
5531 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); in read_extent_buffer_pages()
5532 eb->read_mirror = 0; in read_extent_buffer_pages()
5533 atomic_set(&eb->io_pages, num_reads); in read_extent_buffer_pages()
5538 check_buffer_tree_ref(eb); in read_extent_buffer_pages()
5540 page = eb->pages[i]; in read_extent_buffer_pages()
5544 atomic_dec(&eb->io_pages); in read_extent_buffer_pages()
5563 atomic_dec(&eb->io_pages); in read_extent_buffer_pages()
5580 page = eb->pages[i]; in read_extent_buffer_pages()
5591 page = eb->pages[locked_pages]; in read_extent_buffer_pages()
5597 static bool report_eb_range(const struct extent_buffer *eb, unsigned long start, in report_eb_range() argument
5600 btrfs_warn(eb->fs_info, in report_eb_range()
5601 "access to eb bytenr %llu len %lu out of range start %lu len %lu", in report_eb_range()
5602 eb->start, eb->len, start, len); in report_eb_range()
5610 * the eb.
5611 * NOTE: @start and @len are offset inside the eb, not logical address.
5615 static inline int check_eb_range(const struct extent_buffer *eb, in check_eb_range() argument
5620 /* start, start + len should not go beyond eb->len nor overflow */ in check_eb_range()
5621 if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len)) in check_eb_range()
5622 return report_eb_range(eb, start, len); in check_eb_range()
5627 void read_extent_buffer(const struct extent_buffer *eb, void *dstv, in read_extent_buffer() argument
5637 if (check_eb_range(eb, start, len)) { in read_extent_buffer()
5649 page = eb->pages[i]; in read_extent_buffer()
5662 int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb, in read_extent_buffer_to_user_nofault() argument
5674 WARN_ON(start > eb->len); in read_extent_buffer_to_user_nofault()
5675 WARN_ON(start + len > eb->start + eb->len); in read_extent_buffer_to_user_nofault()
5680 page = eb->pages[i]; in read_extent_buffer_to_user_nofault()
5698 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv, in memcmp_extent_buffer() argument
5709 if (check_eb_range(eb, start, len)) in memcmp_extent_buffer()
5715 page = eb->pages[i]; in memcmp_extent_buffer()
5732 void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb, in write_extent_buffer_chunk_tree_uuid() argument
5737 WARN_ON(!PageUptodate(eb->pages[0])); in write_extent_buffer_chunk_tree_uuid()
5738 kaddr = page_address(eb->pages[0]); in write_extent_buffer_chunk_tree_uuid()
5743 void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *srcv) in write_extent_buffer_fsid() argument
5747 WARN_ON(!PageUptodate(eb->pages[0])); in write_extent_buffer_fsid()
5748 kaddr = page_address(eb->pages[0]); in write_extent_buffer_fsid()
5753 void write_extent_buffer(const struct extent_buffer *eb, const void *srcv, in write_extent_buffer() argument
5763 if (check_eb_range(eb, start, len)) in write_extent_buffer()
5769 page = eb->pages[i]; in write_extent_buffer()
5783 void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start, in memzero_extent_buffer() argument
5792 if (check_eb_range(eb, start, len)) in memzero_extent_buffer()
5798 page = eb->pages[i]; in memzero_extent_buffer()
5864 * @eb: the extent buffer
5874 static inline void eb_bitmap_offset(const struct extent_buffer *eb, in eb_bitmap_offset() argument
5895 * @eb: the extent buffer
5899 int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start, in extent_buffer_test_bit() argument
5907 eb_bitmap_offset(eb, start, nr, &i, &offset); in extent_buffer_test_bit()
5908 page = eb->pages[i]; in extent_buffer_test_bit()
5911 return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1))); in extent_buffer_test_bit()
5916 * @eb: the extent buffer
5921 void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start, in extent_buffer_bitmap_set() argument
5932 eb_bitmap_offset(eb, start, pos, &i, &offset); in extent_buffer_bitmap_set()
5933 page = eb->pages[i]; in extent_buffer_bitmap_set()
5944 page = eb->pages[++i]; in extent_buffer_bitmap_set()
5958 * @eb: the extent buffer
5963 void extent_buffer_bitmap_clear(const struct extent_buffer *eb, in extent_buffer_bitmap_clear() argument
5975 eb_bitmap_offset(eb, start, pos, &i, &offset); in extent_buffer_bitmap_clear()
5976 page = eb->pages[i]; in extent_buffer_bitmap_clear()
5987 page = eb->pages[++i]; in extent_buffer_bitmap_clear()
6017 must_memmove = 1; in copy_pages()
6068 unsigned long dst_end = dst_offset + len - 1; in memmove_extent_buffer()
6069 unsigned long src_end = src_offset + len - 1; in memmove_extent_buffer()
6087 cur = min_t(unsigned long, len, src_off_in_page + 1); in memmove_extent_buffer()
6088 cur = min(cur, dst_off_in_page + 1); in memmove_extent_buffer()
6090 dst_off_in_page - cur + 1, in memmove_extent_buffer()
6091 src_off_in_page - cur + 1, cur); in memmove_extent_buffer()
6101 struct extent_buffer *eb; in try_release_extent_buffer() local
6104 * We need to make sure nobody is attaching this page to an eb right in try_release_extent_buffer()
6110 return 1; in try_release_extent_buffer()
6113 eb = (struct extent_buffer *)page->private; in try_release_extent_buffer()
6114 BUG_ON(!eb); in try_release_extent_buffer()
6118 * the eb doesn't disappear out from under us while we're looking at in try_release_extent_buffer()
6121 spin_lock(&eb->refs_lock); in try_release_extent_buffer()
6122 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { in try_release_extent_buffer()
6123 spin_unlock(&eb->refs_lock); in try_release_extent_buffer()
6130 * If tree ref isn't set then we know the ref on this eb is a real ref, in try_release_extent_buffer()
6133 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { in try_release_extent_buffer()
6134 spin_unlock(&eb->refs_lock); in try_release_extent_buffer()
6138 return release_extent_buffer(eb); in try_release_extent_buffer()
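
A note on the "+ 1" / "- 1" arithmetic that dominates the matches above (state->end - state->start + 1, start = last_end + 1, start + PAGE_SIZE - 1, and so on): the btrfs extent io tree tracks inclusive [start, end] byte ranges, so a range's length is end - start + 1 and the byte immediately following it is end + 1. The sketch below only illustrates that convention; it is not code from extent_io.c, and the struct range, range_len() and ranges_adjacent() names are hypothetical.

/*
 * Minimal, standalone illustration of the inclusive-range arithmetic
 * seen throughout the matched lines. Assumed names, not btrfs code.
 */
#include <stdio.h>

typedef unsigned long long u64;

struct range {
	u64 start;	/* first byte covered, inclusive */
	u64 end;	/* last byte covered, inclusive */
};

/* Length of an inclusive range: end - start + 1. */
static u64 range_len(const struct range *r)
{
	return r->end - r->start + 1;
}

/* Two inclusive ranges touch when one ends right before the other begins. */
static int ranges_adjacent(const struct range *a, const struct range *b)
{
	return a->end + 1 == b->start;
}

int main(void)
{
	/* One 4 KiB page starting at byte 0 covers [0, 4095]. */
	struct range page = { .start = 0, .end = 4096 - 1 };
	/* The next page starts at page.end + 1, exactly as the tree walks do. */
	struct range next = { .start = page.end + 1, .end = page.end + 4096 };

	printf("len=%llu adjacent=%d\n", range_len(&page),
	       ranges_adjacent(&page, &next));
	return 0;
}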