Lines Matching +full:1 +full:eb

64 	struct extent_buffer *eb;  in btrfs_extent_buffer_leak_debug_check()  local
76 eb = list_first_entry(&fs_info->allocated_ebs, in btrfs_extent_buffer_leak_debug_check()
80 eb->start, eb->len, atomic_read(&eb->refs), eb->bflags, in btrfs_extent_buffer_leak_debug_check()
81 btrfs_header_owner(eb)); in btrfs_extent_buffer_leak_debug_check()
82 list_del(&eb->leak_list); in btrfs_extent_buffer_leak_debug_check()
83 kmem_cache_free(extent_buffer_cache, eb); in btrfs_extent_buffer_leak_debug_check()
115 if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) { in __btrfs_debug_check_extent_io_range()
139 unsigned int extent_locked:1;
142 unsigned int sync_io:1;
157 changeset->bytes_changed += state->end - state->start + 1; in add_extent_changeset()
337 refcount_set(&state->refs, 1); in alloc_extent_state()
503 if (other->end == state->start - 1 && in merge_state()
518 if (other->start == state->end + 1 && in merge_state()
557 WARN_ON(1); in insert_state()
585 * prealloc: [orig->start, split - 1]
600 prealloc->end = split - 1; in split_state()
624 * it will optionally wake up anyone waiting on this state (wake == 1).
639 u64 range = state->end - state->start + 1; in clear_state_bit()
659 WARN_ON(1); in clear_state_bit()
688 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
709 trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits); in __clear_extent_bit()
718 clear = 1; in __clear_extent_bit()
812 err = split_state(tree, state, prealloc, end + 1); in __clear_extent_bit()
827 if (last_end == (u64)-1) in __clear_extent_bit()
829 start = last_end + 1; in __clear_extent_bit()
878 while (1) { in wait_extent_bit()
900 start = state->end + 1; in wait_extent_bit()
925 u64 range = state->end - state->start + 1; in set_state_bits()
928 ret = add_extent_changeset(state, bits_to_set, changeset, 1); in set_state_bits()
979 trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits); in __set_extent_bit()
1040 if (last_end == (u64)-1) in __set_extent_bit()
1042 start = last_end + 1; in __set_extent_bit()
1078 start = state->end + 1; in __set_extent_bit()
1096 if (last_end == (u64)-1) in __set_extent_bit()
1098 start = last_end + 1; in __set_extent_bit()
1118 this_end = last_start - 1; in __set_extent_bit()
1134 start = this_end + 1; in __set_extent_bit()
1152 err = split_state(tree, state, prealloc, end + 1); in __set_extent_bit()
1222 trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits, in convert_extent_bit()
1283 if (last_end == (u64)-1) in convert_extent_bit()
1285 start = last_end + 1; in convert_extent_bit()
1325 if (last_end == (u64)-1) in convert_extent_bit()
1327 start = last_end + 1; in convert_extent_bit()
1346 this_end = last_start - 1; in convert_extent_bit()
1364 start = this_end + 1; in convert_extent_bit()
1380 err = split_state(tree, state, prealloc, end + 1); in convert_extent_bit()
1461 while (1) { in lock_extent_bits()
1484 clear_extent_bit(tree, start, failed_start - 1, in try_lock_extent()
1485 EXTENT_LOCKED, 1, 0, NULL); in try_lock_extent()
1488 return 1; in try_lock_extent()
1541 while (1) { in find_first_extent_bit_state()
1559 * If nothing was found, 1 is returned. If found something, return 0.
1566 int ret = 1; in find_first_extent_bit()
1571 if (state->end == start - 1 && extent_state_in_tree(state)) { in find_first_extent_bit()
1616 int ret = 1; in find_contiguous_extent_bit()
1624 if (state->start > (*end_ret + 1)) in find_contiguous_extent_bit()
1645 * set it's possible that @end_ret contains -1, this happens in case the range
1658 while (1) { in find_first_clear_extent_bit()
1666 *end_ret = -1; in find_first_clear_extent_bit()
1674 *start_ret = state->end + 1; in find_first_clear_extent_bit()
1675 *end_ret = -1; in find_first_clear_extent_bit()
1686 if (in_range(start, state->start, state->end - state->start + 1)) { in find_first_clear_extent_bit()
1693 start = state->end + 1; in find_first_clear_extent_bit()
1722 *start_ret = state->end + 1; in find_first_clear_extent_bit()
1734 while (1) { in find_first_clear_extent_bit()
1739 *end_ret = state->start - 1; in find_first_clear_extent_bit()
1775 *end = (u64)-1; in btrfs_find_delalloc_range()
1779 while (1) { in btrfs_find_delalloc_range()
1797 cur_start = state->end + 1; in btrfs_find_delalloc_range()
1799 total_bytes += state->end - state->start + 1; in btrfs_find_delalloc_range()
1897 if (delalloc_end + 1 - delalloc_start > max_bytes) in find_lock_delalloc_range()
1898 delalloc_end = delalloc_start + max_bytes - 1; in find_lock_delalloc_range()
1912 loops = 1; in find_lock_delalloc_range()
1925 EXTENT_DELALLOC, 1, cached_state); in find_lock_delalloc_range()
1946 unsigned long nr_pages = end_index - start_index + 1; in __process_pages_contig()
2015 *index_ret = start_index + pages_locked - 1; in __process_pages_contig()
2024 clear_extent_bit(&inode->io_tree, start, end, clear_bits, 1, 0, NULL); in extent_clear_unlock_delalloc()
2063 while (1) { in count_range_bits()
2067 if (contig && found && state->start > last + 1) in count_range_bits()
2070 total_bytes += min(search_end, state->end) + 1 - in count_range_bits()
2076 found = 1; in count_range_bits()
2153 * If 'filled' == 1, this returns 1 only if every extent in the tree
2154 * has the bits set. Otherwise, 1 is returned if any bit in the
2182 bitset = 1; in test_range_bit()
2190 if (state->end == (u64)-1) in test_range_bit()
2193 start = state->end + 1; in test_range_bit()
2214 u64 end = start + PAGE_SIZE - 1; in check_page_uptodate()
2215 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL)) in check_page_uptodate()
2228 rec->start + rec->len - 1, in free_io_failure()
2234 rec->start + rec->len - 1, in free_io_failure()
2267 bio = btrfs_io_bio_alloc(1); in repair_io_failure()
2291 ASSERT(bbio->mirror_num == 1); in repair_io_failure()
2303 sector = bbio->stripes[bbio->mirror_num - 1].physical >> 9; in repair_io_failure()
2305 dev = bbio->stripes[bbio->mirror_num - 1].dev; in repair_io_failure()
2334 int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num) in btrfs_repair_eb_io_failure() argument
2336 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_repair_eb_io_failure()
2337 u64 start = eb->start; in btrfs_repair_eb_io_failure()
2338 int i, num_pages = num_extent_pages(eb); in btrfs_repair_eb_io_failure()
2345 struct page *p = eb->pages[i]; in btrfs_repair_eb_io_failure()
2373 ret = count_range_bits(failure_tree, &private, (u64)-1, 1, in clean_io_failure()
2401 state->end >= failrec->start + failrec->len - 1) { in clean_io_failure()
2404 if (num_copies > 1) { in clean_io_failure()
2483 failrec->len = end - start + 1; in btrfs_get_io_failure_record()
2544 if (num_copies == 1) { in btrfs_check_repairable()
2571 failrec->in_validation = 1; in btrfs_check_repairable()
2618 * 1. A buffered read bio, which is not cloned. in btrfs_io_needs_validation()
2623 * btrfs_io_bio->iter; for non-cloned bios (cases 1 and 3), we can get in btrfs_io_needs_validation()
2676 repair_bio = btrfs_io_bio_alloc(1); in btrfs_submit_read_repair()
2767 end = start + bvec->bv_offset + bvec->bv_len - 1; in end_bio_extent_writepage()
2781 u64 end = start + len - 1; in endio_readpage_release_extent()
2845 end = start + bvec->bv_offset + bvec->bv_len - 1; in end_bio_extent_readpage()
2889 struct extent_buffer *eb; in end_bio_extent_readpage() local
2891 eb = (struct extent_buffer *)page->private; in end_bio_extent_readpage()
2892 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); in end_bio_extent_readpage()
2893 eb->read_mirror = mirror; in end_bio_extent_readpage()
2894 atomic_dec(&eb->io_pages); in end_bio_extent_readpage()
2896 &eb->bflags)) in end_bio_extent_readpage()
2897 btree_readahead_hook(eb, -EIO); in end_bio_extent_readpage()
2921 extent_len, 1); in end_bio_extent_readpage()
2926 end - start + 1, 0); in end_bio_extent_readpage()
2929 extent_len = end + 1 - start; in end_bio_extent_readpage()
2931 extent_len += end + 1 - start; in end_bio_extent_readpage()
2936 extent_len = end + 1 - start; in end_bio_extent_readpage()
3094 static void attach_extent_buffer_page(struct extent_buffer *eb, in attach_extent_buffer_page() argument
3098 attach_page_private(page, eb); in attach_extent_buffer_page()
3100 WARN_ON(page->private != (unsigned long)eb); in attach_extent_buffer_page()
3148 const u64 end = start + PAGE_SIZE - 1; in btrfs_do_readpage()
3199 set_extent_uptodate(tree, cur, cur + iosize - 1, in btrfs_do_readpage()
3202 cur + iosize - 1, &cached); in btrfs_do_readpage()
3206 end - cur + 1, em_cached); in btrfs_do_readpage()
3222 iosize = min(extent_map_end(em) - cur, end - cur + 1); in btrfs_do_readpage()
3223 cur_end = min(extent_map_end(em) - 1, end); in btrfs_do_readpage()
3271 prev_em_start && *prev_em_start != (u64)-1 && in btrfs_do_readpage()
3291 set_extent_uptodate(tree, cur, cur + iosize - 1, in btrfs_do_readpage()
3294 cur + iosize - 1, &cached); in btrfs_do_readpage()
3301 EXTENT_UPTODATE, 1, NULL)) { in btrfs_do_readpage()
3303 unlock_extent(tree, cur, cur + iosize - 1); in btrfs_do_readpage()
3313 unlock_extent(tree, cur, cur + iosize - 1); in btrfs_do_readpage()
3331 unlock_extent(tree, cur, cur + iosize - 1); in btrfs_do_readpage()
3374 * This returns 1 if btrfs_run_delalloc_range function did all the work required
3385 u64 page_end = delalloc_start + PAGE_SIZE - 1; in writepage_delalloc()
3398 delalloc_start = delalloc_end + 1; in writepage_delalloc()
3419 delalloc_start = delalloc_end + 1; in writepage_delalloc()
3440 return 1; in writepage_delalloc()
3450 * We return 1 if the IO is started and the page is unlocked,
3464 u64 page_end = start + PAGE_SIZE - 1; in __extent_writepage_io()
3484 return 1; in __extent_writepage_io()
3491 update_nr_written(wbc, nr_written + 1); in __extent_writepage_io()
3502 page_end, 1); in __extent_writepage_io()
3505 em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1); in __extent_writepage_io()
3516 iosize = min(em_end - cur, end - cur + 1); in __extent_writepage_io()
3534 cur + iosize - 1, 1); in __extent_writepage_io()
3540 btrfs_set_range_writeback(tree, cur, cur + iosize - 1); in __extent_writepage_io()
3580 u64 page_end = start + PAGE_SIZE - 1; in __extent_writepage()
3617 if (ret == 1) in __extent_writepage()
3625 if (ret == 1) in __extent_writepage()
3643 void wait_on_extent_buffer_writeback(struct extent_buffer *eb) in wait_on_extent_buffer_writeback() argument
3645 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK, in wait_on_extent_buffer_writeback()
3649 static void end_extent_buffer_writeback(struct extent_buffer *eb) in end_extent_buffer_writeback() argument
3651 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); in end_extent_buffer_writeback()
3653 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK); in end_extent_buffer_writeback()
3657 * Lock eb pages and flush the bio if we can't get the locks
3663 static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb, in lock_extent_buffer_for_io() argument
3666 struct btrfs_fs_info *fs_info = eb->fs_info; in lock_extent_buffer_for_io()
3671 if (!btrfs_try_tree_write_lock(eb)) { in lock_extent_buffer_for_io()
3675 flush = 1; in lock_extent_buffer_for_io()
3676 btrfs_tree_lock(eb); in lock_extent_buffer_for_io()
3679 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) { in lock_extent_buffer_for_io()
3680 btrfs_tree_unlock(eb); in lock_extent_buffer_for_io()
3687 flush = 1; in lock_extent_buffer_for_io()
3689 while (1) { in lock_extent_buffer_for_io()
3690 wait_on_extent_buffer_writeback(eb); in lock_extent_buffer_for_io()
3691 btrfs_tree_lock(eb); in lock_extent_buffer_for_io()
3692 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) in lock_extent_buffer_for_io()
3694 btrfs_tree_unlock(eb); in lock_extent_buffer_for_io()
3699 * We need to do this to prevent races in people who check if the eb is in lock_extent_buffer_for_io()
3703 spin_lock(&eb->refs_lock); in lock_extent_buffer_for_io()
3704 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { in lock_extent_buffer_for_io()
3705 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); in lock_extent_buffer_for_io()
3706 spin_unlock(&eb->refs_lock); in lock_extent_buffer_for_io()
3707 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); in lock_extent_buffer_for_io()
3709 -eb->len, in lock_extent_buffer_for_io()
3711 ret = 1; in lock_extent_buffer_for_io()
3713 spin_unlock(&eb->refs_lock); in lock_extent_buffer_for_io()
3716 btrfs_tree_unlock(eb); in lock_extent_buffer_for_io()
3721 num_pages = num_extent_pages(eb); in lock_extent_buffer_for_io()
3723 struct page *p = eb->pages[i]; in lock_extent_buffer_for_io()
3735 flush = 1; in lock_extent_buffer_for_io()
3745 unlock_page(eb->pages[i]); in lock_extent_buffer_for_io()
3748 * Also set back EXTENT_BUFFER_DIRTY so future attempts to this eb can in lock_extent_buffer_for_io()
3751 btrfs_tree_lock(eb); in lock_extent_buffer_for_io()
3752 spin_lock(&eb->refs_lock); in lock_extent_buffer_for_io()
3753 set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); in lock_extent_buffer_for_io()
3754 end_extent_buffer_writeback(eb); in lock_extent_buffer_for_io()
3755 spin_unlock(&eb->refs_lock); in lock_extent_buffer_for_io()
3756 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len, in lock_extent_buffer_for_io()
3758 btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); in lock_extent_buffer_for_io()
3759 btrfs_tree_unlock(eb); in lock_extent_buffer_for_io()
3765 struct extent_buffer *eb = (struct extent_buffer *)page->private; in set_btree_ioerr() local
3769 if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) in set_btree_ioerr()
3776 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in set_btree_ioerr()
3782 fs_info = eb->fs_info; in set_btree_ioerr()
3784 eb->len, fs_info->dirty_metadata_batch); in set_btree_ioerr()
3810 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is in set_btree_ioerr()
3811 * not done and would not be reliable - the eb might have been released in set_btree_ioerr()
3824 switch (eb->log_index) { in set_btree_ioerr()
3825 case -1: in set_btree_ioerr()
3826 set_bit(BTRFS_FS_BTREE_ERR, &eb->fs_info->flags); in set_btree_ioerr()
3829 set_bit(BTRFS_FS_LOG1_ERR, &eb->fs_info->flags); in set_btree_ioerr()
3831 case 1: in set_btree_ioerr()
3832 set_bit(BTRFS_FS_LOG2_ERR, &eb->fs_info->flags); in set_btree_ioerr()
3842 struct extent_buffer *eb; in end_bio_extent_buffer_writepage() local
3850 eb = (struct extent_buffer *)page->private; in end_bio_extent_buffer_writepage()
3851 BUG_ON(!eb); in end_bio_extent_buffer_writepage()
3852 done = atomic_dec_and_test(&eb->io_pages); in end_bio_extent_buffer_writepage()
3855 test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) { in end_bio_extent_buffer_writepage()
3865 end_extent_buffer_writeback(eb); in end_bio_extent_buffer_writepage()
3871 static noinline_for_stack int write_one_eb(struct extent_buffer *eb, in write_one_eb() argument
3875 u64 offset = eb->start; in write_one_eb()
3882 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); in write_one_eb()
3883 num_pages = num_extent_pages(eb); in write_one_eb()
3884 atomic_set(&eb->io_pages, num_pages); in write_one_eb()
3887 nritems = btrfs_header_nritems(eb); in write_one_eb()
3888 if (btrfs_header_level(eb) > 0) { in write_one_eb()
3891 memzero_extent_buffer(eb, end, eb->len - end); in write_one_eb()
3895 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0 in write_one_eb()
3898 end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb); in write_one_eb()
3899 memzero_extent_buffer(eb, start, end - start); in write_one_eb()
3903 struct page *p = eb->pages[i]; in write_one_eb()
3916 if (atomic_sub_and_test(num_pages - i, &eb->io_pages)) in write_one_eb()
3917 end_extent_buffer_writeback(eb); in write_one_eb()
3922 update_nr_written(wbc, 1); in write_one_eb()
3928 struct page *p = eb->pages[i]; in write_one_eb()
3940 struct extent_buffer *eb, *prev_eb = NULL; in btree_write_cache_pages() local
3960 end = -1; in btree_write_cache_pages()
3969 scanned = 1; in btree_write_cache_pages()
3995 eb = (struct extent_buffer *)page->private; in btree_write_cache_pages()
4002 if (WARN_ON(!eb)) { in btree_write_cache_pages()
4007 if (eb == prev_eb) { in btree_write_cache_pages()
4012 ret = atomic_inc_not_zero(&eb->refs); in btree_write_cache_pages()
4017 prev_eb = eb; in btree_write_cache_pages()
4018 ret = lock_extent_buffer_for_io(eb, &epd); in btree_write_cache_pages()
4020 free_extent_buffer(eb); in btree_write_cache_pages()
4023 done = 1; in btree_write_cache_pages()
4024 free_extent_buffer(eb); in btree_write_cache_pages()
4028 ret = write_one_eb(eb, wbc, &epd); in btree_write_cache_pages()
4030 done = 1; in btree_write_cache_pages()
4031 free_extent_buffer(eb); in btree_write_cache_pages()
4034 free_extent_buffer(eb); in btree_write_cache_pages()
4051 scanned = 1; in btree_write_cache_pages()
4084 * extent io tree. Thus we don't want to submit such wild eb in btree_write_cache_pages()
4142 end = -1; in extent_write_cache_pages()
4152 range_whole = 1; in extent_write_cache_pages()
4153 scanned = 1; in extent_write_cache_pages()
4166 wbc->tagged_writepages = 1; in extent_write_cache_pages()
4184 done_index = page->index + 1; in extent_write_cache_pages()
4219 done = 1; in extent_write_cache_pages()
4238 scanned = 1; in extent_write_cache_pages()
4291 .extent_locked = 1, in extent_write_locked_range()
4298 .range_end = end + 1, in extent_write_locked_range()
4300 .punt_to_cgroup = 1, in extent_write_locked_range()
4301 .no_cgroup_owner = 1, in extent_write_locked_range()
4311 start + PAGE_SIZE - 1, 1); in extent_write_locked_range()
4354 u64 prev_em_start = (u64)-1; in extent_readahead()
4359 u64 contig_end = page_offset(pagepool[nr - 1]) + PAGE_SIZE - 1; in extent_readahead()
4361 ASSERT(contig_start + nr * PAGE_SIZE - 1 == contig_end); in extent_readahead()
4386 u64 end = start + PAGE_SIZE - 1; in extent_invalidatepage()
4396 EXTENT_DO_ACCOUNTING, 1, 1, &cached_state); in extent_invalidatepage()
4409 u64 end = start + PAGE_SIZE - 1; in try_release_extent_state()
4410 int ret = 1; in try_release_extent_state()
4429 ret = 1; in try_release_extent_state()
4443 u64 end = start + PAGE_SIZE - 1; in try_release_extent_mapping()
4455 len = end - start + 1; in try_release_extent_mapping()
4469 extent_map_end(em) - 1, in try_release_extent_mapping()
4533 while (1) { in get_extent_skip_holes()
4595 WARN_ON(1); in emit_fiemap_extent()
4601 * 1) Their logical addresses are continuous in emit_fiemap_extent()
4698 path->leave_spinning = 1; in extent_fiemap()
4719 ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode), -1, in extent_fiemap()
4725 if (ret == 1) in extent_fiemap()
4737 last = (u64)-1; in extent_fiemap()
4746 last_for_get_extent = last + 1; in extent_fiemap()
4756 last = (u64)-1; in extent_fiemap()
4760 lock_extent_bits(&inode->io_tree, start, start + len - 1, in extent_fiemap()
4807 end = 1; in extent_fiemap()
4810 end = 1; in extent_fiemap()
4844 if ((em_start >= last) || em_len == (u64)-1 || in extent_fiemap()
4845 (last == (u64)-1 && isize <= em_end)) { in extent_fiemap()
4847 end = 1; in extent_fiemap()
4858 end = 1; in extent_fiemap()
4863 if (ret == 1) in extent_fiemap()
4873 unlock_extent_cached(&inode->io_tree, start, start + len - 1, in extent_fiemap()
4883 static void __free_extent_buffer(struct extent_buffer *eb) in __free_extent_buffer() argument
4885 kmem_cache_free(extent_buffer_cache, eb); in __free_extent_buffer()
4888 int extent_buffer_under_io(const struct extent_buffer *eb) in extent_buffer_under_io() argument
4890 return (atomic_read(&eb->io_pages) || in extent_buffer_under_io()
4891 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) || in extent_buffer_under_io()
4892 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); in extent_buffer_under_io()
4898 static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb) in btrfs_release_extent_buffer_pages() argument
4902 int mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in btrfs_release_extent_buffer_pages()
4904 BUG_ON(extent_buffer_under_io(eb)); in btrfs_release_extent_buffer_pages()
4906 num_pages = num_extent_pages(eb); in btrfs_release_extent_buffer_pages()
4908 struct page *page = eb->pages[i]; in btrfs_release_extent_buffer_pages()
4916 * removed the eb from the radix tree, so we could race in btrfs_release_extent_buffer_pages()
4917 * and have this page now attached to the new eb. So in btrfs_release_extent_buffer_pages()
4919 * this eb. in btrfs_release_extent_buffer_pages()
4922 page->private == (unsigned long)eb) { in btrfs_release_extent_buffer_pages()
4923 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); in btrfs_release_extent_buffer_pages()
4928 * to a new eb. in btrfs_release_extent_buffer_pages()
4944 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb) in btrfs_release_extent_buffer() argument
4946 btrfs_release_extent_buffer_pages(eb); in btrfs_release_extent_buffer()
4947 btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list); in btrfs_release_extent_buffer()
4948 __free_extent_buffer(eb); in btrfs_release_extent_buffer()
4955 struct extent_buffer *eb = NULL; in __alloc_extent_buffer() local
4957 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL); in __alloc_extent_buffer()
4958 eb->start = start; in __alloc_extent_buffer()
4959 eb->len = len; in __alloc_extent_buffer()
4960 eb->fs_info = fs_info; in __alloc_extent_buffer()
4961 eb->bflags = 0; in __alloc_extent_buffer()
4962 rwlock_init(&eb->lock); in __alloc_extent_buffer()
4963 atomic_set(&eb->blocking_readers, 0); in __alloc_extent_buffer()
4964 eb->blocking_writers = 0; in __alloc_extent_buffer()
4965 eb->lock_recursed = false; in __alloc_extent_buffer()
4966 init_waitqueue_head(&eb->write_lock_wq); in __alloc_extent_buffer()
4967 init_waitqueue_head(&eb->read_lock_wq); in __alloc_extent_buffer()
4969 btrfs_leak_debug_add(&fs_info->eb_leak_lock, &eb->leak_list, in __alloc_extent_buffer()
4972 spin_lock_init(&eb->refs_lock); in __alloc_extent_buffer()
4973 atomic_set(&eb->refs, 1); in __alloc_extent_buffer()
4974 atomic_set(&eb->io_pages, 0); in __alloc_extent_buffer()
4984 eb->spinning_writers = 0; in __alloc_extent_buffer()
4985 atomic_set(&eb->spinning_readers, 0); in __alloc_extent_buffer()
4986 atomic_set(&eb->read_locks, 0); in __alloc_extent_buffer()
4987 eb->write_locks = 0; in __alloc_extent_buffer()
4990 return eb; in __alloc_extent_buffer()
5026 struct extent_buffer *eb; in __alloc_dummy_extent_buffer() local
5030 eb = __alloc_extent_buffer(fs_info, start, len); in __alloc_dummy_extent_buffer()
5031 if (!eb) in __alloc_dummy_extent_buffer()
5034 num_pages = num_extent_pages(eb); in __alloc_dummy_extent_buffer()
5036 eb->pages[i] = alloc_page(GFP_NOFS); in __alloc_dummy_extent_buffer()
5037 if (!eb->pages[i]) in __alloc_dummy_extent_buffer()
5040 set_extent_buffer_uptodate(eb); in __alloc_dummy_extent_buffer()
5041 btrfs_set_header_nritems(eb, 0); in __alloc_dummy_extent_buffer()
5042 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in __alloc_dummy_extent_buffer()
5044 return eb; in __alloc_dummy_extent_buffer()
5047 __free_page(eb->pages[i - 1]); in __alloc_dummy_extent_buffer()
5048 __free_extent_buffer(eb); in __alloc_dummy_extent_buffer()
5058 static void check_buffer_tree_ref(struct extent_buffer *eb) in check_buffer_tree_ref() argument
5080 * which trigger io after they set eb->io_pages. Note that once io is in check_buffer_tree_ref()
5084 refs = atomic_read(&eb->refs); in check_buffer_tree_ref()
5085 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in check_buffer_tree_ref()
5088 spin_lock(&eb->refs_lock); in check_buffer_tree_ref()
5089 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in check_buffer_tree_ref()
5090 atomic_inc(&eb->refs); in check_buffer_tree_ref()
5091 spin_unlock(&eb->refs_lock); in check_buffer_tree_ref()
5094 static void mark_extent_buffer_accessed(struct extent_buffer *eb, in mark_extent_buffer_accessed() argument
5099 check_buffer_tree_ref(eb); in mark_extent_buffer_accessed()
5101 num_pages = num_extent_pages(eb); in mark_extent_buffer_accessed()
5103 struct page *p = eb->pages[i]; in mark_extent_buffer_accessed()
5113 struct extent_buffer *eb; in find_extent_buffer() local
5116 eb = radix_tree_lookup(&fs_info->buffer_radix, in find_extent_buffer()
5118 if (eb && atomic_inc_not_zero(&eb->refs)) { in find_extent_buffer()
5121 * Lock our eb's refs_lock to avoid races with in find_extent_buffer()
5122 * free_extent_buffer. When we get our eb it might be flagged in find_extent_buffer()
5125 * eb->refs == 2, that the buffer isn't under IO (dirty and in find_extent_buffer()
5129 * So here we could race and increment the eb's reference count, in find_extent_buffer()
5135 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) { in find_extent_buffer()
5136 spin_lock(&eb->refs_lock); in find_extent_buffer()
5137 spin_unlock(&eb->refs_lock); in find_extent_buffer()
5139 mark_extent_buffer_accessed(eb, NULL); in find_extent_buffer()
5140 return eb; in find_extent_buffer()
5151 struct extent_buffer *eb, *exists = NULL; in alloc_test_extent_buffer() local
5154 eb = find_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
5155 if (eb) in alloc_test_extent_buffer()
5156 return eb; in alloc_test_extent_buffer()
5157 eb = alloc_dummy_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
5158 if (!eb) in alloc_test_extent_buffer()
5160 eb->fs_info = fs_info; in alloc_test_extent_buffer()
5169 start >> PAGE_SHIFT, eb); in alloc_test_extent_buffer()
5179 check_buffer_tree_ref(eb); in alloc_test_extent_buffer()
5180 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); in alloc_test_extent_buffer()
5182 return eb; in alloc_test_extent_buffer()
5184 btrfs_release_extent_buffer(eb); in alloc_test_extent_buffer()
5196 struct extent_buffer *eb; in alloc_extent_buffer() local
5200 int uptodate = 1; in alloc_extent_buffer()
5208 eb = find_extent_buffer(fs_info, start); in alloc_extent_buffer()
5209 if (eb) in alloc_extent_buffer()
5210 return eb; in alloc_extent_buffer()
5212 eb = __alloc_extent_buffer(fs_info, start, len); in alloc_extent_buffer()
5213 if (!eb) in alloc_extent_buffer()
5216 num_pages = num_extent_pages(eb); in alloc_extent_buffer()
5227 * We could have already allocated an eb for this page in alloc_extent_buffer()
5229 * the existing eb, and if we can we know it's good and in alloc_extent_buffer()
5251 attach_extent_buffer_page(eb, p); in alloc_extent_buffer()
5254 eb->pages[i] = p; in alloc_extent_buffer()
5267 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in alloc_extent_buffer()
5277 start >> PAGE_SHIFT, eb); in alloc_extent_buffer()
5288 check_buffer_tree_ref(eb); in alloc_extent_buffer()
5289 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); in alloc_extent_buffer()
5297 unlock_page(eb->pages[i]); in alloc_extent_buffer()
5298 return eb; in alloc_extent_buffer()
5301 WARN_ON(!atomic_dec_and_test(&eb->refs)); in alloc_extent_buffer()
5303 if (eb->pages[i]) in alloc_extent_buffer()
5304 unlock_page(eb->pages[i]); in alloc_extent_buffer()
5307 btrfs_release_extent_buffer(eb); in alloc_extent_buffer()
5313 struct extent_buffer *eb = in btrfs_release_extent_buffer_rcu() local
5316 __free_extent_buffer(eb); in btrfs_release_extent_buffer_rcu()
5319 static int release_extent_buffer(struct extent_buffer *eb) in release_extent_buffer() argument
5320 __releases(&eb->refs_lock) in release_extent_buffer()
5322 lockdep_assert_held(&eb->refs_lock); in release_extent_buffer()
5324 WARN_ON(atomic_read(&eb->refs) == 0); in release_extent_buffer()
5325 if (atomic_dec_and_test(&eb->refs)) { in release_extent_buffer()
5326 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) { in release_extent_buffer()
5327 struct btrfs_fs_info *fs_info = eb->fs_info; in release_extent_buffer()
5329 spin_unlock(&eb->refs_lock); in release_extent_buffer()
5333 eb->start >> PAGE_SHIFT); in release_extent_buffer()
5336 spin_unlock(&eb->refs_lock); in release_extent_buffer()
5339 btrfs_leak_debug_del(&eb->fs_info->eb_leak_lock, &eb->leak_list); in release_extent_buffer()
5341 btrfs_release_extent_buffer_pages(eb); in release_extent_buffer()
5343 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) { in release_extent_buffer()
5344 __free_extent_buffer(eb); in release_extent_buffer()
5345 return 1; in release_extent_buffer()
5348 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); in release_extent_buffer()
5349 return 1; in release_extent_buffer()
5351 spin_unlock(&eb->refs_lock); in release_extent_buffer()
5356 void free_extent_buffer(struct extent_buffer *eb) in free_extent_buffer() argument
5360 if (!eb) in free_extent_buffer()
5363 while (1) { in free_extent_buffer()
5364 refs = atomic_read(&eb->refs); in free_extent_buffer()
5365 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3) in free_extent_buffer()
5366 || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && in free_extent_buffer()
5367 refs == 1)) in free_extent_buffer()
5369 old = atomic_cmpxchg(&eb->refs, refs, refs - 1); in free_extent_buffer()
5374 spin_lock(&eb->refs_lock); in free_extent_buffer()
5375 if (atomic_read(&eb->refs) == 2 && in free_extent_buffer()
5376 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) && in free_extent_buffer()
5377 !extent_buffer_under_io(eb) && in free_extent_buffer()
5378 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in free_extent_buffer()
5379 atomic_dec(&eb->refs); in free_extent_buffer()
5385 release_extent_buffer(eb); in free_extent_buffer()
5388 void free_extent_buffer_stale(struct extent_buffer *eb) in free_extent_buffer_stale() argument
5390 if (!eb) in free_extent_buffer_stale()
5393 spin_lock(&eb->refs_lock); in free_extent_buffer_stale()
5394 set_bit(EXTENT_BUFFER_STALE, &eb->bflags); in free_extent_buffer_stale()
5396 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) && in free_extent_buffer_stale()
5397 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in free_extent_buffer_stale()
5398 atomic_dec(&eb->refs); in free_extent_buffer_stale()
5399 release_extent_buffer(eb); in free_extent_buffer_stale()
5402 void clear_extent_buffer_dirty(const struct extent_buffer *eb) in clear_extent_buffer_dirty() argument
5408 num_pages = num_extent_pages(eb); in clear_extent_buffer_dirty()
5411 page = eb->pages[i]; in clear_extent_buffer_dirty()
5427 WARN_ON(atomic_read(&eb->refs) == 0); in clear_extent_buffer_dirty()
5430 bool set_extent_buffer_dirty(struct extent_buffer *eb) in set_extent_buffer_dirty() argument
5436 check_buffer_tree_ref(eb); in set_extent_buffer_dirty()
5438 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); in set_extent_buffer_dirty()
5440 num_pages = num_extent_pages(eb); in set_extent_buffer_dirty()
5441 WARN_ON(atomic_read(&eb->refs) == 0); in set_extent_buffer_dirty()
5442 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)); in set_extent_buffer_dirty()
5446 set_page_dirty(eb->pages[i]); in set_extent_buffer_dirty()
5450 ASSERT(PageDirty(eb->pages[i])); in set_extent_buffer_dirty()
5456 void clear_extent_buffer_uptodate(struct extent_buffer *eb) in clear_extent_buffer_uptodate() argument
5462 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in clear_extent_buffer_uptodate()
5463 num_pages = num_extent_pages(eb); in clear_extent_buffer_uptodate()
5465 page = eb->pages[i]; in clear_extent_buffer_uptodate()
5471 void set_extent_buffer_uptodate(struct extent_buffer *eb) in set_extent_buffer_uptodate() argument
5477 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in set_extent_buffer_uptodate()
5478 num_pages = num_extent_pages(eb); in set_extent_buffer_uptodate()
5480 page = eb->pages[i]; in set_extent_buffer_uptodate()
5485 int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num) in read_extent_buffer_pages() argument
5492 int all_uptodate = 1; in read_extent_buffer_pages()
5498 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) in read_extent_buffer_pages()
5501 num_pages = num_extent_pages(eb); in read_extent_buffer_pages()
5503 page = eb->pages[i]; in read_extent_buffer_pages()
5518 page = eb->pages[i]; in read_extent_buffer_pages()
5526 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in read_extent_buffer_pages()
5530 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); in read_extent_buffer_pages()
5531 eb->read_mirror = 0; in read_extent_buffer_pages()
5532 atomic_set(&eb->io_pages, num_reads); in read_extent_buffer_pages()
5537 check_buffer_tree_ref(eb); in read_extent_buffer_pages()
5539 page = eb->pages[i]; in read_extent_buffer_pages()
5543 atomic_dec(&eb->io_pages); in read_extent_buffer_pages()
5562 atomic_dec(&eb->io_pages); in read_extent_buffer_pages()
5579 page = eb->pages[i]; in read_extent_buffer_pages()
5590 page = eb->pages[locked_pages]; in read_extent_buffer_pages()
5596 static bool report_eb_range(const struct extent_buffer *eb, unsigned long start, in report_eb_range() argument
5599 btrfs_warn(eb->fs_info, in report_eb_range()
5600 "access to eb bytenr %llu len %lu out of range start %lu len %lu", in report_eb_range()
5601 eb->start, eb->len, start, len); in report_eb_range()
5609 * the eb.
5610 * NOTE: @start and @len are offsets inside the eb, not logical addresses.
5614 static inline int check_eb_range(const struct extent_buffer *eb, in check_eb_range() argument
5619 /* start, start + len should not go beyond eb->len nor overflow */ in check_eb_range()
5620 if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len)) in check_eb_range()
5621 return report_eb_range(eb, start, len); in check_eb_range()
5626 void read_extent_buffer(const struct extent_buffer *eb, void *dstv, in read_extent_buffer() argument
5636 if (check_eb_range(eb, start, len)) in read_extent_buffer()
5642 page = eb->pages[i]; in read_extent_buffer()
5655 int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb, in read_extent_buffer_to_user_nofault() argument
5667 WARN_ON(start > eb->len); in read_extent_buffer_to_user_nofault()
5668 WARN_ON(start + len > eb->start + eb->len); in read_extent_buffer_to_user_nofault()
5673 page = eb->pages[i]; in read_extent_buffer_to_user_nofault()
5691 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv, in memcmp_extent_buffer() argument
5702 if (check_eb_range(eb, start, len)) in memcmp_extent_buffer()
5708 page = eb->pages[i]; in memcmp_extent_buffer()
5725 void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb, in write_extent_buffer_chunk_tree_uuid() argument
5730 WARN_ON(!PageUptodate(eb->pages[0])); in write_extent_buffer_chunk_tree_uuid()
5731 kaddr = page_address(eb->pages[0]); in write_extent_buffer_chunk_tree_uuid()
5736 void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *srcv) in write_extent_buffer_fsid() argument
5740 WARN_ON(!PageUptodate(eb->pages[0])); in write_extent_buffer_fsid()
5741 kaddr = page_address(eb->pages[0]); in write_extent_buffer_fsid()
5746 void write_extent_buffer(const struct extent_buffer *eb, const void *srcv, in write_extent_buffer() argument
5756 if (check_eb_range(eb, start, len)) in write_extent_buffer()
5762 page = eb->pages[i]; in write_extent_buffer()
5776 void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start, in memzero_extent_buffer() argument
5785 if (check_eb_range(eb, start, len)) in memzero_extent_buffer()
5791 page = eb->pages[i]; in memzero_extent_buffer()
5857 * @eb: the extent buffer
5867 static inline void eb_bitmap_offset(const struct extent_buffer *eb, in eb_bitmap_offset() argument
5888 * @eb: the extent buffer
5892 int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start, in extent_buffer_test_bit() argument
5900 eb_bitmap_offset(eb, start, nr, &i, &offset); in extent_buffer_test_bit()
5901 page = eb->pages[i]; in extent_buffer_test_bit()
5904 return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1))); in extent_buffer_test_bit()
5909 * @eb: the extent buffer
5914 void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start, in extent_buffer_bitmap_set() argument
5925 eb_bitmap_offset(eb, start, pos, &i, &offset); in extent_buffer_bitmap_set()
5926 page = eb->pages[i]; in extent_buffer_bitmap_set()
5937 page = eb->pages[++i]; in extent_buffer_bitmap_set()
5951 * @eb: the extent buffer
5956 void extent_buffer_bitmap_clear(const struct extent_buffer *eb, in extent_buffer_bitmap_clear() argument
5968 eb_bitmap_offset(eb, start, pos, &i, &offset); in extent_buffer_bitmap_clear()
5969 page = eb->pages[i]; in extent_buffer_bitmap_clear()
5980 page = eb->pages[++i]; in extent_buffer_bitmap_clear()
6010 must_memmove = 1; in copy_pages()
6061 unsigned long dst_end = dst_offset + len - 1; in memmove_extent_buffer()
6062 unsigned long src_end = src_offset + len - 1; in memmove_extent_buffer()
6080 cur = min_t(unsigned long, len, src_off_in_page + 1); in memmove_extent_buffer()
6081 cur = min(cur, dst_off_in_page + 1); in memmove_extent_buffer()
6083 dst_off_in_page - cur + 1, in memmove_extent_buffer()
6084 src_off_in_page - cur + 1, cur); in memmove_extent_buffer()
6094 struct extent_buffer *eb; in try_release_extent_buffer() local
6097 * We need to make sure nobody is attaching this page to an eb right in try_release_extent_buffer()
6103 return 1; in try_release_extent_buffer()
6106 eb = (struct extent_buffer *)page->private; in try_release_extent_buffer()
6107 BUG_ON(!eb); in try_release_extent_buffer()
6111 * the eb doesn't disappear out from under us while we're looking at in try_release_extent_buffer()
6114 spin_lock(&eb->refs_lock); in try_release_extent_buffer()
6115 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { in try_release_extent_buffer()
6116 spin_unlock(&eb->refs_lock); in try_release_extent_buffer()
6123 * If tree ref isn't set then we know the ref on this eb is a real ref, in try_release_extent_buffer()
6126 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { in try_release_extent_buffer()
6127 spin_unlock(&eb->refs_lock); in try_release_extent_buffer()
6131 return release_extent_buffer(eb); in try_release_extent_buffer()
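
The matched lines above repeatedly touch the extent buffer reference count: the buffer is allocated with refs == 1 for the caller, check_buffer_tree_ref() takes a second reference when the buffer is inserted into the lookup tree, free_extent_buffer() drops the caller's reference, and the memory is released only when the last reference goes away. The following userspace sketch is a simplified illustration of that pattern, not the kernel implementation; the struct layout and helper bodies are assumptions made only to show the "one reference for the caller, one for the tree" lifecycle.

/*
 * Minimal userspace sketch (NOT the kernel implementation) of the
 * extent buffer reference-counting pattern seen in the matches above.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct extent_buffer {
	unsigned long long start;	/* logical offset of the buffer */
	unsigned long len;		/* length in bytes */
	atomic_int refs;		/* reference count */
	bool in_tree;			/* stands in for EXTENT_BUFFER_TREE_REF */
};

static struct extent_buffer *alloc_extent_buffer(unsigned long long start,
						 unsigned long len)
{
	struct extent_buffer *eb = calloc(1, sizeof(*eb));

	if (!eb)
		return NULL;
	eb->start = start;
	eb->len = len;
	atomic_init(&eb->refs, 1);	/* the caller's reference */
	return eb;
}

/* Inserting the buffer into the tree takes its own reference. */
static void check_buffer_tree_ref(struct extent_buffer *eb)
{
	if (!eb->in_tree) {
		eb->in_tree = true;
		atomic_fetch_add(&eb->refs, 1);
	}
}

/* Drop one reference; free the buffer when the count reaches zero. */
static void free_extent_buffer(struct extent_buffer *eb)
{
	if (!eb)
		return;
	if (atomic_fetch_sub(&eb->refs, 1) == 1) {
		printf("freeing eb start=%llu len=%lu\n", eb->start, eb->len);
		free(eb);
	}
}

int main(void)
{
	struct extent_buffer *eb = alloc_extent_buffer(16384, 4096);

	if (!eb)
		return 1;
	check_buffer_tree_ref(eb);	/* refs == 2: caller + tree */
	free_extent_buffer(eb);		/* caller drops its reference, refs == 1 */
	eb->in_tree = false;		/* pretend the tree removed the buffer... */
	free_extent_buffer(eb);		/* ...and drops the last reference: freed */
	return 0;
}

In the real code the tree's reference is dropped by release_extent_buffer() under eb->refs_lock, and buffers are looked up through fs_info->buffer_radix keyed by eb->start >> PAGE_SHIFT; both of those details are omitted from this sketch.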