Lines matching +full:1 +full:eb (identifier search results, apparently from the btrfs extent I/O code, fs/btrfs/extent_io.c). Each line below shows its file line number, the matched code, and the enclosing function.

44 static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)  in btrfs_leak_debug_add_eb()  argument
46 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_leak_debug_add_eb()
50 list_add(&eb->leak_list, &fs_info->allocated_ebs); in btrfs_leak_debug_add_eb()
54 static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb) in btrfs_leak_debug_del_eb() argument
56 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_leak_debug_del_eb()
60 list_del(&eb->leak_list); in btrfs_leak_debug_del_eb()
66 struct extent_buffer *eb; in btrfs_extent_buffer_leak_debug_check() local
79 eb = list_first_entry(&fs_info->allocated_ebs, in btrfs_extent_buffer_leak_debug_check()
83 eb->start, eb->len, atomic_read(&eb->refs), eb->bflags, in btrfs_extent_buffer_leak_debug_check()
84 btrfs_header_owner(eb)); in btrfs_extent_buffer_leak_debug_check()
85 list_del(&eb->leak_list); in btrfs_extent_buffer_leak_debug_check()
86 kmem_cache_free(extent_buffer_cache, eb); in btrfs_extent_buffer_leak_debug_check()
91 #define btrfs_leak_debug_add_eb(eb) do {} while (0) argument
92 #define btrfs_leak_debug_del_eb(eb) do {} while (0) argument
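
The two btrfs_leak_debug_*_eb() helpers above thread every live extent_buffer onto fs_info->allocated_ebs so that btrfs_extent_buffer_leak_debug_check() can report (and reclaim) anything still allocated at teardown; with debugging disabled they compile away to no-ops. A minimal userspace sketch of the same leak-tracking idea, with hypothetical names and a pthread mutex standing in for the kernel spinlock:

    #include <stdio.h>
    #include <stdlib.h>
    #include <pthread.h>

    struct buf {
            long start;
            struct buf *leak_next;      /* intrusive "leak_list" link */
    };

    static struct buf *allocated;       /* head of the live-object list */
    static pthread_mutex_t leak_lock = PTHREAD_MUTEX_INITIALIZER;

    static struct buf *buf_alloc(long start)
    {
            struct buf *b = calloc(1, sizeof(*b));

            b->start = start;
            pthread_mutex_lock(&leak_lock);
            b->leak_next = allocated;   /* list_add() analog */
            allocated = b;
            pthread_mutex_unlock(&leak_lock);
            return b;
    }

    static void leak_check(void)
    {
            /* Anything still linked at shutdown was never freed.
             * (The kernel version uses a doubly linked list so the
             * free path can list_del() in O(1).) */
            for (struct buf *b = allocated; b; b = b->leak_next)
                    fprintf(stderr, "leaked buf, start %ld\n", b->start);
    }
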
190 ASSERT(end + 1 - start != 0 && end + 1 - start < U32_MAX); in process_one_page()
191 len = end + 1 - start; in process_one_page()
276 u32 len = end + 1 - start; in lock_delalloc_pages()
291 processed_end = page_offset(page) + PAGE_SIZE - 1; in lock_delalloc_pages()
353 /* @delalloc_end can be -1, never go beyond @orig_end */ in find_lock_delalloc_range()
370 if (delalloc_end + 1 - delalloc_start > max_bytes) in find_lock_delalloc_range()
371 delalloc_end = delalloc_start + max_bytes - 1; in find_lock_delalloc_range()
385 loops = 1; in find_lock_delalloc_range()
398 EXTENT_DELALLOC, 1, cached_state); in find_lock_delalloc_range()
541 * Thus we need to do the processed->end + 1 >= start check in endio_readpage_release_extent()
544 processed->end + 1 >= start && end >= processed->end) { in endio_readpage_release_extent()
631 end = start + bvec->bv_len - 1; in end_bio_extent_readpage()
652 offset_in_page(end) + 1); in end_bio_extent_readpage()
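
A convention worth noting for all the arithmetic above: ranges in this file carry an inclusive end, so end = start + len - 1 and len = end + 1 - start, and two ranges are contiguous exactly when prev_end + 1 == next_start (which is what the processed->end + 1 >= start test checks). A tiny standalone illustration, with hypothetical helper names:

    #include <assert.h>
    #include <stdint.h>

    /* Inclusive [start, end] <-> (start, len) conversions. */
    static inline uint64_t range_end(uint64_t start, uint64_t len)
    {
            return start + len - 1;         /* last byte covered */
    }

    static inline uint64_t range_len(uint64_t start, uint64_t end)
    {
            return end + 1 - start;         /* end is inclusive */
    }

    int main(void)
    {
            uint64_t start = 4096, len = 4096;      /* one 4K page */
            uint64_t end = range_end(start, len);   /* 8191, not 8192 */

            assert(range_len(start, end) == len);
            assert(end + 1 == start + len);         /* contiguity test */
            return 0;
    }
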
730 * 1) The pages belong to the same inode in btrfs_bio_is_contig()
865 static int attach_extent_buffer_page(struct extent_buffer *eb, in attach_extent_buffer_page() argument
869 struct btrfs_fs_info *fs_info = eb->fs_info; in attach_extent_buffer_page()
883 attach_page_private(page, eb); in attach_extent_buffer_page()
885 WARN_ON(page->private != (unsigned long)eb); in attach_extent_buffer_page()
978 const u64 end = start + PAGE_SIZE - 1; in btrfs_do_readpage()
1016 unlock_extent(tree, cur, cur + iosize - 1, NULL); in btrfs_do_readpage()
1021 end - cur + 1, em_cached); in btrfs_do_readpage()
1024 end_page_read(page, false, cur, end + 1 - cur); in btrfs_do_readpage()
1034 iosize = min(extent_map_end(em) - cur, end - cur + 1); in btrfs_do_readpage()
1079 prev_em_start && *prev_em_start != (u64)-1 && in btrfs_do_readpage()
1093 unlock_extent(tree, cur, cur + iosize - 1, NULL); in btrfs_do_readpage()
1101 unlock_extent(tree, cur, cur + iosize - 1, NULL); in btrfs_do_readpage()
1129 u64 end = start + PAGE_SIZE - 1; in btrfs_read_folio()
1165 * This returns 1 if btrfs_run_delalloc_range function did all the work required
1176 const u64 page_end = page_start + PAGE_SIZE - 1; in writepage_delalloc()
1186 delalloc_start = delalloc_end + 1; in writepage_delalloc()
1195 delalloc_start = delalloc_end + 1; in writepage_delalloc()
1203 DIV_ROUND_UP(delalloc_end + 1 - page_start, PAGE_SIZE); in writepage_delalloc()
1209 if (ret == 1) { in writepage_delalloc()
1211 return 1; in writepage_delalloc()
1282 * We return 1 if the IO is started and the page is unlocked,
1294 u64 end = cur + PAGE_SIZE - 1; in __extent_writepage_io()
1306 return 1; in __extent_writepage_io()
1311 u32 len = end - cur + 1; in __extent_writepage_io()
1364 iosize = min(min(em_end, end + 1), dirty_range_end) - cur; in __extent_writepage_io()
1368 btrfs_set_range_writeback(inode, cur, cur + iosize - 1); in __extent_writepage_io()
1442 if (ret == 1) in __extent_writepage()
1448 if (ret == 1) in __extent_writepage()
1469 void wait_on_extent_buffer_writeback(struct extent_buffer *eb) in wait_on_extent_buffer_writeback() argument
1471 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK, in wait_on_extent_buffer_writeback()
1482 static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb, in lock_extent_buffer_for_io() argument
1485 struct btrfs_fs_info *fs_info = eb->fs_info; in lock_extent_buffer_for_io()
1488 btrfs_tree_lock(eb); in lock_extent_buffer_for_io()
1489 while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) { in lock_extent_buffer_for_io()
1490 btrfs_tree_unlock(eb); in lock_extent_buffer_for_io()
1493 wait_on_extent_buffer_writeback(eb); in lock_extent_buffer_for_io()
1494 btrfs_tree_lock(eb); in lock_extent_buffer_for_io()
1498 * We need to do this to prevent races with people who check if the eb is in lock_extent_buffer_for_io()
1502 spin_lock(&eb->refs_lock); in lock_extent_buffer_for_io()
1503 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { in lock_extent_buffer_for_io()
1504 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); in lock_extent_buffer_for_io()
1505 spin_unlock(&eb->refs_lock); in lock_extent_buffer_for_io()
1506 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); in lock_extent_buffer_for_io()
1508 -eb->len, in lock_extent_buffer_for_io()
1512 spin_unlock(&eb->refs_lock); in lock_extent_buffer_for_io()
1514 btrfs_tree_unlock(eb); in lock_extent_buffer_for_io()
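
lock_extent_buffer_for_io() has to take the tree lock, but if the buffer is already under writeback it drops the lock, sleeps until the WRITEBACK bit clears, and retries; it then claims the buffer by converting DIRTY into WRITEBACK under refs_lock. A pthread sketch of that drop-wait-relock shape, with hypothetical names (pthread_cond_wait() drops and retakes the mutex atomically, which the kernel version does by hand with btrfs_tree_unlock(), wait_on_extent_buffer_writeback(), btrfs_tree_lock()):

    #include <pthread.h>
    #include <stdbool.h>

    struct ebuf {
            pthread_mutex_t lock;       /* stands in for the tree lock */
            pthread_cond_t  wb_done;    /* signaled when writeback ends */
            bool writeback;             /* EXTENT_BUFFER_WRITEBACK analog */
            bool dirty;                 /* EXTENT_BUFFER_DIRTY analog */
    };

    /* Returns true if the caller claimed the buffer for writeback. */
    static bool lock_for_io(struct ebuf *eb)
    {
            bool claimed = false;

            pthread_mutex_lock(&eb->lock);
            while (eb->writeback)
                    pthread_cond_wait(&eb->wb_done, &eb->lock);
            if (eb->dirty) {
                    /* dirty -> writeback transition, done atomically
                     * with respect to anyone testing the flags. */
                    eb->dirty = false;
                    eb->writeback = true;
                    claimed = true;
            }
            pthread_mutex_unlock(&eb->lock);
            return claimed;
    }
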
1518 static void set_btree_ioerr(struct extent_buffer *eb) in set_btree_ioerr() argument
1520 struct btrfs_fs_info *fs_info = eb->fs_info; in set_btree_ioerr()
1522 set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); in set_btree_ioerr()
1528 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in set_btree_ioerr()
1536 mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO); in set_btree_ioerr()
1562 * for the eb flag EXTENT_BUFFER_WRITE_ERR at transaction commit time is in set_btree_ioerr()
1563 * not done and would not be reliable - the eb might have been released in set_btree_ioerr()
1576 switch (eb->log_index) { in set_btree_ioerr()
1577 case -1: in set_btree_ioerr()
1583 case 1: in set_btree_ioerr()
1598 struct extent_buffer *eb; in find_extent_buffer_nolock() local
1601 eb = radix_tree_lookup(&fs_info->buffer_radix, in find_extent_buffer_nolock()
1603 if (eb && atomic_inc_not_zero(&eb->refs)) { in find_extent_buffer_nolock()
1605 return eb; in find_extent_buffer_nolock()
1613 struct extent_buffer *eb = bbio->private; in extent_buffer_write_end_io() local
1614 struct btrfs_fs_info *fs_info = eb->fs_info; in extent_buffer_write_end_io()
1621 set_btree_ioerr(eb); in extent_buffer_write_end_io()
1624 u64 start = eb->start + bio_offset; in extent_buffer_write_end_io()
1632 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); in extent_buffer_write_end_io()
1634 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK); in extent_buffer_write_end_io()
1639 static void prepare_eb_write(struct extent_buffer *eb) in prepare_eb_write() argument
1645 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); in prepare_eb_write()
1648 nritems = btrfs_header_nritems(eb); in prepare_eb_write()
1649 if (btrfs_header_level(eb) > 0) { in prepare_eb_write()
1650 end = btrfs_node_key_ptr_offset(eb, nritems); in prepare_eb_write()
1651 memzero_extent_buffer(eb, end, eb->len - end); in prepare_eb_write()
1655 * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0 in prepare_eb_write()
1657 start = btrfs_item_nr_offset(eb, nritems); in prepare_eb_write()
1658 end = btrfs_item_nr_offset(eb, 0); in prepare_eb_write()
1660 end += BTRFS_LEAF_DATA_SIZE(eb->fs_info); in prepare_eb_write()
1662 end += btrfs_item_offset(eb, nritems - 1); in prepare_eb_write()
1663 memzero_extent_buffer(eb, start, end - start); in prepare_eb_write()
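
prepare_eb_write() zeroes the unused middle of the block so stale memory never reaches disk: per the layout comment above, item headers grow forward from the block header while item data grows backward from the end of the block, so the hole sits between the last item header and the lowest item data offset. A simplified sketch of that gap computation, with made-up sizes and with offsets treated as absolute (the kernel stores item offsets relative to the start of the data area):

    #include <stdint.h>
    #include <string.h>

    #define NODE_SIZE   16384u  /* assumed metadata block size */
    #define HDR_SIZE    101u    /* assumed header size, illustration only */
    #define ITEM_SIZE   25u     /* assumed per-item header size */

    /* Zero the gap between the item-header array (growing up) and
     * the item data (growing down from NODE_SIZE). last_data_off is
     * the absolute offset of item nritems-1's data, i.e. the lowest
     * used data byte. */
    static void zero_leaf_gap(uint8_t *leaf, uint32_t nritems,
                              uint32_t last_data_off)
    {
            uint32_t start = HDR_SIZE + nritems * ITEM_SIZE;
            uint32_t end = nritems ? last_data_off : NODE_SIZE;

            memset(leaf + start, 0, end - start);
    }
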
1667 static noinline_for_stack void write_one_eb(struct extent_buffer *eb, in write_one_eb() argument
1670 struct btrfs_fs_info *fs_info = eb->fs_info; in write_one_eb()
1673 prepare_eb_write(eb); in write_one_eb()
1677 eb->fs_info, extent_buffer_write_end_io, eb); in write_one_eb()
1678 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT; in write_one_eb()
1681 bbio->inode = BTRFS_I(eb->fs_info->btree_inode); in write_one_eb()
1682 bbio->file_offset = eb->start; in write_one_eb()
1684 struct page *p = eb->pages[0]; in write_one_eb()
1687 btrfs_subpage_set_writeback(fs_info, p, eb->start, eb->len); in write_one_eb()
1688 if (btrfs_subpage_clear_and_test_dirty(fs_info, p, eb->start, in write_one_eb()
1689 eb->len)) { in write_one_eb()
1693 __bio_add_page(&bbio->bio, p, eb->len, eb->start - page_offset(p)); in write_one_eb()
1694 wbc_account_cgroup_owner(wbc, p, eb->len); in write_one_eb()
1697 for (int i = 0; i < num_extent_pages(eb); i++) { in write_one_eb()
1698 struct page *p = eb->pages[i]; in write_one_eb()
1737 struct extent_buffer *eb; in submit_eb_subpage() local
1763 * Here we just want to grab the eb without touching extra in submit_eb_subpage()
1766 eb = find_extent_buffer_nolock(fs_info, start); in submit_eb_subpage()
1771 * The eb has already reached 0 refs thus find_extent_buffer() in submit_eb_subpage()
1772 * doesn't return it. We don't need to write back such an eb in submit_eb_subpage()
1775 if (!eb) in submit_eb_subpage()
1778 if (lock_extent_buffer_for_io(eb, wbc)) { in submit_eb_subpage()
1779 write_one_eb(eb, wbc); in submit_eb_subpage()
1782 free_extent_buffer(eb); in submit_eb_subpage()
1792 * belongs to this eb, we don't need to submit
1811 struct extent_buffer *eb; in submit_eb_page() local
1826 eb = (struct extent_buffer *)page->private; in submit_eb_page()
1832 if (WARN_ON(!eb)) { in submit_eb_page()
1837 if (eb == ctx->eb) { in submit_eb_page()
1841 ret = atomic_inc_not_zero(&eb->refs); in submit_eb_page()
1846 ctx->eb = eb; in submit_eb_page()
1848 ret = btrfs_check_meta_write_pointer(eb->fs_info, ctx); in submit_eb_page()
1852 free_extent_buffer(eb); in submit_eb_page()
1856 if (!lock_extent_buffer_for_io(eb, wbc)) { in submit_eb_page()
1857 free_extent_buffer(eb); in submit_eb_page()
1862 /* Mark the last eb in the block group. */ in submit_eb_page()
1863 btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb); in submit_eb_page()
1864 ctx->zoned_bg->meta_write_pointer += eb->len; in submit_eb_page()
1866 write_one_eb(eb, wbc); in submit_eb_page()
1867 free_extent_buffer(eb); in submit_eb_page()
1868 return 1; in submit_eb_page()
1889 end = -1; in btree_write_cache_pages()
1898 scanned = 1; in btree_write_cache_pages()
1920 done = 1; in btree_write_cache_pages()
1939 scanned = 1; in btree_write_cache_pages()
1967 * extent io tree. Thus we don't want to submit such a wild eb in btree_write_cache_pages()
2031 end = -1; in extent_write_cache_pages()
2041 range_whole = 1; in extent_write_cache_pages()
2042 scanned = 1; in extent_write_cache_pages()
2055 wbc->tagged_writepages = 1; in extent_write_cache_pages()
2111 done = 1; in extent_write_cache_pages()
2131 scanned = 1; in extent_write_cache_pages()
2175 ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize)); in extent_write_locked_range()
2178 u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end); in extent_write_locked_range()
2179 u32 cur_len = cur_end + 1 - cur; in extent_write_locked_range()
2192 if (ret == 1) in extent_write_locked_range()
2210 cur = cur_end + 1; in extent_write_locked_range()
2242 u64 prev_em_start = (u64)-1; in extent_readahead()
2247 u64 contig_end = contig_start + readahead_batch_length(rac) - 1; in extent_readahead()
2268 u64 end = start + folio_size(folio) - 1; in extent_invalidate_folio()
2299 u64 end = start + PAGE_SIZE - 1; in try_release_extent_state()
2300 int ret = 1; in try_release_extent_state()
2323 ret = 1; in try_release_extent_state()
2337 u64 end = start + PAGE_SIZE - 1; in try_release_extent_mapping()
2349 len = end - start + 1; in try_release_extent_mapping()
2363 extent_map_end(em) - 1, in try_release_extent_mapping()
2457 * file extent item for file range [512K, 1M[, and after in emit_fiemap_extent()
2520 * 1) The file extent item's range ends at or behind the in emit_fiemap_extent()
2550 * 1) Their logical addresses are continuous in emit_fiemap_extent()
2630 return 1; in fiemap_next_leaf_item()
2648 * Returns: 0 on success, < 0 on error, 1 if not found.
2669 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1); in fiemap_search_slot()
2681 return 1; in fiemap_search_slot()
2760 prealloc_start = last_delalloc_end + 1; in fiemap_process_hole()
2787 delalloc_end + 1 - delalloc_start, in fiemap_process_hole()
2794 cur_offset = delalloc_end + 1; in fiemap_process_hole()
2809 prealloc_len = end + 1 - start; in fiemap_process_hole()
2811 prealloc_start = last_delalloc_end + 1; in fiemap_process_hole()
2812 prealloc_len = end + 1 - prealloc_start; in fiemap_process_hole()
2851 ret = btrfs_lookup_file_extent(NULL, root, path, ino, (u64)-1, 0); in fiemap_find_last_extent_offset()
2852 /* There can't be a file extent item at offset (u64)-1 */ in fiemap_find_last_extent_offset()
2989 const u64 range_end = min(key.offset, lockend) - 1; in extent_fiemap()
3037 extent_end - 1); in extent_fiemap()
3043 key.offset, extent_end - 1); in extent_fiemap()
3101 0, 0, 0, prev_extent_end, lockend - 1); in extent_fiemap()
3117 i_size - 1, in extent_fiemap()
3140 static void __free_extent_buffer(struct extent_buffer *eb) in __free_extent_buffer() argument
3142 kmem_cache_free(extent_buffer_cache, eb); in __free_extent_buffer()
3145 static int extent_buffer_under_io(const struct extent_buffer *eb) in extent_buffer_under_io() argument
3147 return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) || in extent_buffer_under_io()
3148 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); in extent_buffer_under_io()
3162 * Even if there are no eb refs here, we may still have in page_range_has_eb()
3171 static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *page) in detach_extent_buffer_page() argument
3173 struct btrfs_fs_info *fs_info = eb->fs_info; in detach_extent_buffer_page()
3174 const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in detach_extent_buffer_page()
3177 * For mapped eb, we're going to change the page private, which should in detach_extent_buffer_page()
3192 * removed the eb from the radix tree, so we could race in detach_extent_buffer_page()
3193 * and have this page now attached to the new eb. So in detach_extent_buffer_page()
3195 * this eb. in detach_extent_buffer_page()
3198 page->private == (unsigned long)eb) { in detach_extent_buffer_page()
3199 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); in detach_extent_buffer_page()
3204 * to a new eb. in detach_extent_buffer_page()
3214 * For subpage, we can have a dummy eb with page private. In this case, in detach_extent_buffer_page()
3216 * one dummy eb, no sharing. in detach_extent_buffer_page()
3236 static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb) in btrfs_release_extent_buffer_pages() argument
3241 ASSERT(!extent_buffer_under_io(eb)); in btrfs_release_extent_buffer_pages()
3243 num_pages = num_extent_pages(eb); in btrfs_release_extent_buffer_pages()
3245 struct page *page = eb->pages[i]; in btrfs_release_extent_buffer_pages()
3250 detach_extent_buffer_page(eb, page); in btrfs_release_extent_buffer_pages()
3260 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb) in btrfs_release_extent_buffer() argument
3262 btrfs_release_extent_buffer_pages(eb); in btrfs_release_extent_buffer()
3263 btrfs_leak_debug_del_eb(eb); in btrfs_release_extent_buffer()
3264 __free_extent_buffer(eb); in btrfs_release_extent_buffer()
3271 struct extent_buffer *eb = NULL; in __alloc_extent_buffer() local
3273 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL); in __alloc_extent_buffer()
3274 eb->start = start; in __alloc_extent_buffer()
3275 eb->len = len; in __alloc_extent_buffer()
3276 eb->fs_info = fs_info; in __alloc_extent_buffer()
3277 init_rwsem(&eb->lock); in __alloc_extent_buffer()
3279 btrfs_leak_debug_add_eb(eb); in __alloc_extent_buffer()
3281 spin_lock_init(&eb->refs_lock); in __alloc_extent_buffer()
3282 atomic_set(&eb->refs, 1); in __alloc_extent_buffer()
3286 return eb; in __alloc_extent_buffer()
3333 struct extent_buffer *eb; in __alloc_dummy_extent_buffer() local
3338 eb = __alloc_extent_buffer(fs_info, start, len); in __alloc_dummy_extent_buffer()
3339 if (!eb) in __alloc_dummy_extent_buffer()
3342 num_pages = num_extent_pages(eb); in __alloc_dummy_extent_buffer()
3343 ret = btrfs_alloc_page_array(num_pages, eb->pages); in __alloc_dummy_extent_buffer()
3348 struct page *p = eb->pages[i]; in __alloc_dummy_extent_buffer()
3350 ret = attach_extent_buffer_page(eb, p, NULL); in __alloc_dummy_extent_buffer()
3355 set_extent_buffer_uptodate(eb); in __alloc_dummy_extent_buffer()
3356 btrfs_set_header_nritems(eb, 0); in __alloc_dummy_extent_buffer()
3357 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in __alloc_dummy_extent_buffer()
3359 return eb; in __alloc_dummy_extent_buffer()
3362 if (eb->pages[i]) { in __alloc_dummy_extent_buffer()
3363 detach_extent_buffer_page(eb, eb->pages[i]); in __alloc_dummy_extent_buffer()
3364 __free_page(eb->pages[i]); in __alloc_dummy_extent_buffer()
3367 __free_extent_buffer(eb); in __alloc_dummy_extent_buffer()
3377 static void check_buffer_tree_ref(struct extent_buffer *eb) in check_buffer_tree_ref() argument
3403 refs = atomic_read(&eb->refs); in check_buffer_tree_ref()
3404 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in check_buffer_tree_ref()
3407 spin_lock(&eb->refs_lock); in check_buffer_tree_ref()
3408 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in check_buffer_tree_ref()
3409 atomic_inc(&eb->refs); in check_buffer_tree_ref()
3410 spin_unlock(&eb->refs_lock); in check_buffer_tree_ref()
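
check_buffer_tree_ref() is a double-checked fast path: if refs >= 2 and TREE_REF is already set there is nothing to do, and only otherwise does it take refs_lock and do a test_and_set plus an extra reference. The same shape in portable C11 atomics, with hypothetical names (atomic_exchange() plays the role of test_and_set_bit(), and this sketch omits the refs_lock the real code holds around the slow path):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct ebuf {
            atomic_int  refs;
            atomic_bool tree_ref;       /* EXTENT_BUFFER_TREE_REF analog */
    };

    static void check_tree_ref(struct ebuf *eb)
    {
            /* Fast path: the tree's ref is already accounted for. */
            if (atomic_load(&eb->refs) >= 2 && atomic_load(&eb->tree_ref))
                    return;

            /* Slow path: set the flag and take the ref exactly once,
             * no matter how many callers race here. */
            if (!atomic_exchange(&eb->tree_ref, true))
                    atomic_fetch_add(&eb->refs, 1);
    }
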
3413 static void mark_extent_buffer_accessed(struct extent_buffer *eb, in mark_extent_buffer_accessed() argument
3418 check_buffer_tree_ref(eb); in mark_extent_buffer_accessed()
3420 num_pages = num_extent_pages(eb); in mark_extent_buffer_accessed()
3422 struct page *p = eb->pages[i]; in mark_extent_buffer_accessed()
3432 struct extent_buffer *eb; in find_extent_buffer() local
3434 eb = find_extent_buffer_nolock(fs_info, start); in find_extent_buffer()
3435 if (!eb) in find_extent_buffer()
3438 * Lock our eb's refs_lock to avoid races with free_extent_buffer(). in find_extent_buffer()
3439 * When we get our eb it might be flagged with EXTENT_BUFFER_STALE and in find_extent_buffer()
3441 * set, eb->refs == 2, that the buffer isn't under IO (dirty and in find_extent_buffer()
3445 * could race and increment the eb's reference count, clear its stale in find_extent_buffer()
3450 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) { in find_extent_buffer()
3451 spin_lock(&eb->refs_lock); in find_extent_buffer()
3452 spin_unlock(&eb->refs_lock); in find_extent_buffer()
3454 mark_extent_buffer_accessed(eb, NULL); in find_extent_buffer()
3455 return eb; in find_extent_buffer()
3462 struct extent_buffer *eb, *exists = NULL; in alloc_test_extent_buffer() local
3465 eb = find_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
3466 if (eb) in alloc_test_extent_buffer()
3467 return eb; in alloc_test_extent_buffer()
3468 eb = alloc_dummy_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
3469 if (!eb) in alloc_test_extent_buffer()
3471 eb->fs_info = fs_info; in alloc_test_extent_buffer()
3480 start >> fs_info->sectorsize_bits, eb); in alloc_test_extent_buffer()
3490 check_buffer_tree_ref(eb); in alloc_test_extent_buffer()
3491 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); in alloc_test_extent_buffer()
3493 return eb; in alloc_test_extent_buffer()
3495 btrfs_release_extent_buffer(eb); in alloc_test_extent_buffer()
3518 * We could have already allocated an eb for this page and attached one in grab_extent_buffer()
3519 * so let's see if we can get a ref on the existing eb, and if we can we in grab_extent_buffer()
3563 struct extent_buffer *eb; in alloc_extent_buffer() local
3569 int uptodate = 1; in alloc_extent_buffer()
3586 eb = find_extent_buffer(fs_info, start); in alloc_extent_buffer()
3587 if (eb) in alloc_extent_buffer()
3588 return eb; in alloc_extent_buffer()
3590 eb = __alloc_extent_buffer(fs_info, start, len); in alloc_extent_buffer()
3591 if (!eb) in alloc_extent_buffer()
3601 btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level); in alloc_extent_buffer()
3603 num_pages = num_extent_pages(eb); in alloc_extent_buffer()
3639 ret = attach_extent_buffer_page(eb, p, prealloc); in alloc_extent_buffer()
3642 * To inform that we have an extra eb under allocation, so that in alloc_extent_buffer()
3644 * when the eb hasn't yet been inserted into the radix tree. in alloc_extent_buffer()
3646 * The ref will be decreased when the eb releases the page, in in alloc_extent_buffer()
3653 WARN_ON(btrfs_page_test_dirty(fs_info, p, eb->start, eb->len)); in alloc_extent_buffer()
3654 eb->pages[i] = p; in alloc_extent_buffer()
3655 if (!btrfs_page_test_uptodate(fs_info, p, eb->start, eb->len)) in alloc_extent_buffer()
3667 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in alloc_extent_buffer()
3677 start >> fs_info->sectorsize_bits, eb); in alloc_extent_buffer()
3688 check_buffer_tree_ref(eb); in alloc_extent_buffer()
3689 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); in alloc_extent_buffer()
3697 unlock_page(eb->pages[i]); in alloc_extent_buffer()
3698 return eb; in alloc_extent_buffer()
3701 WARN_ON(!atomic_dec_and_test(&eb->refs)); in alloc_extent_buffer()
3703 if (eb->pages[i]) in alloc_extent_buffer()
3704 unlock_page(eb->pages[i]); in alloc_extent_buffer()
3707 btrfs_release_extent_buffer(eb); in alloc_extent_buffer()
3713 struct extent_buffer *eb = in btrfs_release_extent_buffer_rcu() local
3716 __free_extent_buffer(eb); in btrfs_release_extent_buffer_rcu()
3719 static int release_extent_buffer(struct extent_buffer *eb) in release_extent_buffer() argument
3720 __releases(&eb->refs_lock) in release_extent_buffer()
3722 lockdep_assert_held(&eb->refs_lock); in release_extent_buffer()
3724 WARN_ON(atomic_read(&eb->refs) == 0); in release_extent_buffer()
3725 if (atomic_dec_and_test(&eb->refs)) { in release_extent_buffer()
3726 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) { in release_extent_buffer()
3727 struct btrfs_fs_info *fs_info = eb->fs_info; in release_extent_buffer()
3729 spin_unlock(&eb->refs_lock); in release_extent_buffer()
3733 eb->start >> fs_info->sectorsize_bits); in release_extent_buffer()
3736 spin_unlock(&eb->refs_lock); in release_extent_buffer()
3739 btrfs_leak_debug_del_eb(eb); in release_extent_buffer()
3741 btrfs_release_extent_buffer_pages(eb); in release_extent_buffer()
3743 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) { in release_extent_buffer()
3744 __free_extent_buffer(eb); in release_extent_buffer()
3745 return 1; in release_extent_buffer()
3748 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); in release_extent_buffer()
3749 return 1; in release_extent_buffer()
3751 spin_unlock(&eb->refs_lock); in release_extent_buffer()
3756 void free_extent_buffer(struct extent_buffer *eb) in free_extent_buffer() argument
3759 if (!eb) in free_extent_buffer()
3762 refs = atomic_read(&eb->refs); in free_extent_buffer()
3763 while (1) { in free_extent_buffer()
3764 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3) in free_extent_buffer()
3765 || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && in free_extent_buffer()
3766 refs == 1)) in free_extent_buffer()
3768 if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1)) in free_extent_buffer()
3772 spin_lock(&eb->refs_lock); in free_extent_buffer()
3773 if (atomic_read(&eb->refs) == 2 && in free_extent_buffer()
3774 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) && in free_extent_buffer()
3775 !extent_buffer_under_io(eb) && in free_extent_buffer()
3776 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in free_extent_buffer()
3777 atomic_dec(&eb->refs); in free_extent_buffer()
3783 release_extent_buffer(eb); in free_extent_buffer()
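
free_extent_buffer() drops references without a lock when it safely can: it loops with atomic_try_cmpxchg() decrementing refs, and only falls through to the locked release path once refs reaches the threshold (3 for mapped buffers, 1 for unmapped ones) where teardown may be needed. A compilable C11 sketch of that pattern, with hypothetical names:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct ebuf {
            atomic_int refs;
            bool unmapped;              /* EXTENT_BUFFER_UNMAPPED analog */
    };

    static void release_slowpath(struct ebuf *eb)
    {
            /* Real code: take eb->refs_lock, drop the final ref and,
             * if it hit zero, unhook the eb and free it. */
            (void)eb;
    }

    static void ebuf_put(struct ebuf *eb)
    {
            int threshold = eb->unmapped ? 1 : 3;
            int refs = atomic_load(&eb->refs);

            while (refs > threshold) {
                    /* Lock-free decrement; on failure 'refs' is
                     * reloaded with the current value and we retry,
                     * mirroring atomic_try_cmpxchg(). */
                    if (atomic_compare_exchange_weak(&eb->refs, &refs,
                                                     refs - 1))
                            return;
            }
            release_slowpath(eb);
    }
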
3786 void free_extent_buffer_stale(struct extent_buffer *eb) in free_extent_buffer_stale() argument
3788 if (!eb) in free_extent_buffer_stale()
3791 spin_lock(&eb->refs_lock); in free_extent_buffer_stale()
3792 set_bit(EXTENT_BUFFER_STALE, &eb->bflags); in free_extent_buffer_stale()
3794 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) && in free_extent_buffer_stale()
3795 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in free_extent_buffer_stale()
3796 atomic_dec(&eb->refs); in free_extent_buffer_stale()
3797 release_extent_buffer(eb); in free_extent_buffer_stale()
3812 static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb) in clear_subpage_extent_buffer_dirty() argument
3814 struct btrfs_fs_info *fs_info = eb->fs_info; in clear_subpage_extent_buffer_dirty()
3815 struct page *page = eb->pages[0]; in clear_subpage_extent_buffer_dirty()
3820 last = btrfs_subpage_clear_and_test_dirty(fs_info, page, eb->start, in clear_subpage_extent_buffer_dirty()
3821 eb->len); in clear_subpage_extent_buffer_dirty()
3825 WARN_ON(atomic_read(&eb->refs) == 0); in clear_subpage_extent_buffer_dirty()
3829 struct extent_buffer *eb) in btrfs_clear_buffer_dirty() argument
3831 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_clear_buffer_dirty()
3836 btrfs_assert_tree_write_locked(eb); in btrfs_clear_buffer_dirty()
3838 if (trans && btrfs_header_generation(eb) != trans->transid) in btrfs_clear_buffer_dirty()
3841 if (!test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) in btrfs_clear_buffer_dirty()
3844 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len, in btrfs_clear_buffer_dirty()
3847 if (eb->fs_info->nodesize < PAGE_SIZE) in btrfs_clear_buffer_dirty()
3848 return clear_subpage_extent_buffer_dirty(eb); in btrfs_clear_buffer_dirty()
3850 num_pages = num_extent_pages(eb); in btrfs_clear_buffer_dirty()
3853 page = eb->pages[i]; in btrfs_clear_buffer_dirty()
3860 WARN_ON(atomic_read(&eb->refs) == 0); in btrfs_clear_buffer_dirty()
3863 void set_extent_buffer_dirty(struct extent_buffer *eb) in set_extent_buffer_dirty() argument
3869 check_buffer_tree_ref(eb); in set_extent_buffer_dirty()
3871 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); in set_extent_buffer_dirty()
3873 num_pages = num_extent_pages(eb); in set_extent_buffer_dirty()
3874 WARN_ON(atomic_read(&eb->refs) == 0); in set_extent_buffer_dirty()
3875 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)); in set_extent_buffer_dirty()
3878 bool subpage = eb->fs_info->nodesize < PAGE_SIZE; in set_extent_buffer_dirty()
3892 lock_page(eb->pages[0]); in set_extent_buffer_dirty()
3894 btrfs_page_set_dirty(eb->fs_info, eb->pages[i], in set_extent_buffer_dirty()
3895 eb->start, eb->len); in set_extent_buffer_dirty()
3897 unlock_page(eb->pages[0]); in set_extent_buffer_dirty()
3898 percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes, in set_extent_buffer_dirty()
3899 eb->len, in set_extent_buffer_dirty()
3900 eb->fs_info->dirty_metadata_batch); in set_extent_buffer_dirty()
3904 ASSERT(PageDirty(eb->pages[i])); in set_extent_buffer_dirty()
3908 void clear_extent_buffer_uptodate(struct extent_buffer *eb) in clear_extent_buffer_uptodate() argument
3910 struct btrfs_fs_info *fs_info = eb->fs_info; in clear_extent_buffer_uptodate()
3915 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in clear_extent_buffer_uptodate()
3916 num_pages = num_extent_pages(eb); in clear_extent_buffer_uptodate()
3918 page = eb->pages[i]; in clear_extent_buffer_uptodate()
3929 btrfs_subpage_clear_uptodate(fs_info, page, eb->start, in clear_extent_buffer_uptodate()
3930 eb->len); in clear_extent_buffer_uptodate()
3934 void set_extent_buffer_uptodate(struct extent_buffer *eb) in set_extent_buffer_uptodate() argument
3936 struct btrfs_fs_info *fs_info = eb->fs_info; in set_extent_buffer_uptodate()
3941 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in set_extent_buffer_uptodate()
3942 num_pages = num_extent_pages(eb); in set_extent_buffer_uptodate()
3944 page = eb->pages[i]; in set_extent_buffer_uptodate()
3953 btrfs_subpage_set_uptodate(fs_info, page, eb->start, in set_extent_buffer_uptodate()
3954 eb->len); in set_extent_buffer_uptodate()
3960 struct extent_buffer *eb = bbio->private; in extent_buffer_read_end_io() local
3961 struct btrfs_fs_info *fs_info = eb->fs_info; in extent_buffer_read_end_io()
3967 eb->read_mirror = bbio->mirror_num; in extent_buffer_read_end_io()
3970 btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0) in extent_buffer_read_end_io()
3974 set_extent_buffer_uptodate(eb); in extent_buffer_read_end_io()
3976 clear_extent_buffer_uptodate(eb); in extent_buffer_read_end_io()
3977 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); in extent_buffer_read_end_io()
3981 u64 start = eb->start + bio_offset; in extent_buffer_read_end_io()
3993 clear_bit(EXTENT_BUFFER_READING, &eb->bflags); in extent_buffer_read_end_io()
3995 wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING); in extent_buffer_read_end_io()
3996 free_extent_buffer(eb); in extent_buffer_read_end_io()
4001 int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num, in read_extent_buffer_pages() argument
4004 int num_pages = num_extent_pages(eb), i; in read_extent_buffer_pages()
4007 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) in read_extent_buffer_pages()
4015 if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))) in read_extent_buffer_pages()
4019 if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags)) in read_extent_buffer_pages()
4022 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); in read_extent_buffer_pages()
4023 eb->read_mirror = 0; in read_extent_buffer_pages()
4024 check_buffer_tree_ref(eb); in read_extent_buffer_pages()
4025 atomic_inc(&eb->refs); in read_extent_buffer_pages()
4028 REQ_OP_READ | REQ_META, eb->fs_info, in read_extent_buffer_pages()
4029 extent_buffer_read_end_io, eb); in read_extent_buffer_pages()
4030 bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT; in read_extent_buffer_pages()
4031 bbio->inode = BTRFS_I(eb->fs_info->btree_inode); in read_extent_buffer_pages()
4032 bbio->file_offset = eb->start; in read_extent_buffer_pages()
4034 if (eb->fs_info->nodesize < PAGE_SIZE) { in read_extent_buffer_pages()
4035 __bio_add_page(&bbio->bio, eb->pages[0], eb->len, in read_extent_buffer_pages()
4036 eb->start - page_offset(eb->pages[0])); in read_extent_buffer_pages()
4039 __bio_add_page(&bbio->bio, eb->pages[i], PAGE_SIZE, 0); in read_extent_buffer_pages()
4045 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE); in read_extent_buffer_pages()
4046 if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) in read_extent_buffer_pages()
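
read_extent_buffer_pages() elects exactly one submitter: test_and_set_bit(EXTENT_BUFFER_READING) lets the first caller build and submit the bio, while every other caller (and the submitter itself, when waiting is requested) sleeps on that bit until the end_io handler clears it and wakes everyone. A pthread/C11 sketch of the election, with hypothetical names and a synchronous stub in place of the real asynchronous end_io path:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct ebuf {
            atomic_bool uptodate;       /* EXTENT_BUFFER_UPTODATE analog */
            atomic_bool reading;        /* EXTENT_BUFFER_READING analog */
            pthread_mutex_t m;
            pthread_cond_t  done;       /* stands in for wake_up_bit() */
    };

    /* Stand-in for bio submission: the real end_io handler clears
     * 'reading' and wakes waiters after the device finishes. */
    static void submit_read(struct ebuf *eb)
    {
            pthread_mutex_lock(&eb->m);
            atomic_store(&eb->uptodate, true);
            atomic_store(&eb->reading, false);
            pthread_cond_broadcast(&eb->done);
            pthread_mutex_unlock(&eb->m);
    }

    static int read_pages(struct ebuf *eb, bool wait)
    {
            if (atomic_load(&eb->uptodate))
                    return 0;
            /* Whoever flips reading false->true submits; everyone
             * else skips straight to waiting. */
            if (!atomic_exchange(&eb->reading, true))
                    submit_read(eb);
            if (!wait)
                    return 0;
            pthread_mutex_lock(&eb->m);
            while (atomic_load(&eb->reading))
                    pthread_cond_wait(&eb->done, &eb->m);
            pthread_mutex_unlock(&eb->m);
            return atomic_load(&eb->uptodate) ? 0 : -1; /* -EIO in the kernel */
    }
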
4053 static bool report_eb_range(const struct extent_buffer *eb, unsigned long start, in report_eb_range() argument
4056 btrfs_warn(eb->fs_info, in report_eb_range()
4057 "access to eb bytenr %llu len %lu out of range start %lu len %lu", in report_eb_range()
4058 eb->start, eb->len, start, len); in report_eb_range()
4066 * the eb.
4067 * NOTE: @start and @len are offsets inside the eb, not logical addresses.
4071 static inline int check_eb_range(const struct extent_buffer *eb, in check_eb_range() argument
4076 /* start, start + len should not go beyond eb->len nor overflow */ in check_eb_range()
4077 if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len)) in check_eb_range()
4078 return report_eb_range(eb, start, len); in check_eb_range()
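
check_eb_range() rejects both overflow and out-of-range access with a single branch: check_add_overflow() catches start + len wrapping, and the computed sum is then compared against eb->len. The same pattern in plain GCC/Clang C (check_add_overflow() is built on __builtin_add_overflow()); the wrapper name here is hypothetical:

    #include <stdbool.h>
    #include <stddef.h>

    /* Reject any [start, start + len) that wraps or runs past 'size'. */
    static bool range_ok(size_t start, size_t len, size_t size)
    {
            size_t end;

            if (__builtin_add_overflow(start, len, &end))
                    return false;       /* start + len wrapped around */
            return end <= size;
    }
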
4083 void read_extent_buffer(const struct extent_buffer *eb, void *dstv, in read_extent_buffer() argument
4093 if (check_eb_range(eb, start, len)) { in read_extent_buffer()
4102 offset = get_eb_offset_in_page(eb, start); in read_extent_buffer()
4105 page = eb->pages[i]; in read_extent_buffer()
4118 int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb, in read_extent_buffer_to_user_nofault() argument
4130 WARN_ON(start > eb->len); in read_extent_buffer_to_user_nofault()
4131 WARN_ON(start + len > eb->start + eb->len); in read_extent_buffer_to_user_nofault()
4133 offset = get_eb_offset_in_page(eb, start); in read_extent_buffer_to_user_nofault()
4136 page = eb->pages[i]; in read_extent_buffer_to_user_nofault()
4154 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv, in memcmp_extent_buffer() argument
4165 if (check_eb_range(eb, start, len)) in memcmp_extent_buffer()
4168 offset = get_eb_offset_in_page(eb, start); in memcmp_extent_buffer()
4171 page = eb->pages[i]; in memcmp_extent_buffer()
4192 * For subpage case, check if the range covered by the eb has EXTENT_UPTODATE.
4194 static void assert_eb_page_uptodate(const struct extent_buffer *eb, in assert_eb_page_uptodate() argument
4197 struct btrfs_fs_info *fs_info = eb->fs_info; in assert_eb_page_uptodate()
4207 if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) in assert_eb_page_uptodate()
4212 eb->start, eb->len))) in assert_eb_page_uptodate()
4213 btrfs_subpage_dump_bitmap(fs_info, page, eb->start, eb->len); in assert_eb_page_uptodate()
4219 static void __write_extent_buffer(const struct extent_buffer *eb, in __write_extent_buffer() argument
4230 const bool check_uptodate = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in __write_extent_buffer()
4232 WARN_ON(test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags)); in __write_extent_buffer()
4234 if (check_eb_range(eb, start, len)) in __write_extent_buffer()
4237 offset = get_eb_offset_in_page(eb, start); in __write_extent_buffer()
4240 page = eb->pages[i]; in __write_extent_buffer()
4242 assert_eb_page_uptodate(eb, page); in __write_extent_buffer()
4258 void write_extent_buffer(const struct extent_buffer *eb, const void *srcv, in write_extent_buffer() argument
4261 return __write_extent_buffer(eb, srcv, start, len, false); in write_extent_buffer()
4264 static void memset_extent_buffer(const struct extent_buffer *eb, int c, in memset_extent_buffer() argument
4271 unsigned int offset = get_eb_offset_in_page(eb, cur); in memset_extent_buffer()
4273 struct page *page = eb->pages[index]; in memset_extent_buffer()
4275 assert_eb_page_uptodate(eb, page); in memset_extent_buffer()
4282 void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start, in memzero_extent_buffer() argument
4285 if (check_eb_range(eb, start, len)) in memzero_extent_buffer()
4287 return memset_extent_buffer(eb, 0, start, len); in memzero_extent_buffer()
4348 * @eb: the extent buffer
4358 static inline void eb_bitmap_offset(const struct extent_buffer *eb, in eb_bitmap_offset() argument
4371 offset = start + offset_in_page(eb->start) + byte_offset; in eb_bitmap_offset()
4380 * @eb: the extent buffer
4384 int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start, in extent_buffer_test_bit() argument
4392 eb_bitmap_offset(eb, start, nr, &i, &offset); in extent_buffer_test_bit()
4393 page = eb->pages[i]; in extent_buffer_test_bit()
4394 assert_eb_page_uptodate(eb, page); in extent_buffer_test_bit()
4396 return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1))); in extent_buffer_test_bit()
4399 static u8 *extent_buffer_get_byte(const struct extent_buffer *eb, unsigned long bytenr) in extent_buffer_get_byte() argument
4403 if (check_eb_range(eb, bytenr, 1)) in extent_buffer_get_byte()
4405 return page_address(eb->pages[index]) + get_eb_offset_in_page(eb, bytenr); in extent_buffer_get_byte()
4409 * Set an area of a bitmap to 1.
4411 * @eb: the extent buffer
4416 void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start, in extent_buffer_bitmap_set() argument
4420 unsigned int last_byte = start + BIT_BYTE(pos + len - 1); in extent_buffer_bitmap_set()
4429 kaddr = extent_buffer_get_byte(eb, first_byte); in extent_buffer_bitmap_set()
4435 ASSERT(first_byte + 1 <= last_byte); in extent_buffer_bitmap_set()
4436 memset_extent_buffer(eb, 0xff, first_byte + 1, last_byte - first_byte - 1); in extent_buffer_bitmap_set()
4439 kaddr = extent_buffer_get_byte(eb, last_byte); in extent_buffer_bitmap_set()
4447 * @eb: the extent buffer
4452 void extent_buffer_bitmap_clear(const struct extent_buffer *eb, in extent_buffer_bitmap_clear() argument
4457 unsigned int last_byte = start + BIT_BYTE(pos + len - 1); in extent_buffer_bitmap_clear()
4466 kaddr = extent_buffer_get_byte(eb, first_byte); in extent_buffer_bitmap_clear()
4472 ASSERT(first_byte + 1 <= last_byte); in extent_buffer_bitmap_clear()
4473 memset_extent_buffer(eb, 0, first_byte + 1, last_byte - first_byte - 1); in extent_buffer_bitmap_clear()
4476 kaddr = extent_buffer_get_byte(eb, last_byte); in extent_buffer_bitmap_clear()
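
extent_buffer_bitmap_set() and _clear() avoid a per-bit loop: they mask the partial first byte, memset the run of whole bytes in between, and mask the partial last byte (hence the first_byte + 1 <= last_byte assertions above). A freestanding C version of that three-part shape; the mask macros are hypothetical analogs of the kernel's BITMAP_FIRST/LAST_BYTE_MASK:

    #include <stdint.h>
    #include <string.h>

    /* Bits below 'pos' in the first byte stay untouched. */
    #define FIRST_BYTE_MASK(pos)  ((uint8_t)(0xff << ((pos) & 7)))
    /* Bits at or below the last set bit in the last byte. */
    #define LAST_BYTE_MASK(end)   ((uint8_t)(0xff >> (7 - (((end) - 1) & 7))))

    static void bitmap_set_range(uint8_t *map, unsigned pos, unsigned len)
    {
            unsigned first = pos / 8, last = (pos + len - 1) / 8;

            if (!len)
                    return;
            if (first == last) {        /* whole range in one byte */
                    map[first] |= FIRST_BYTE_MASK(pos) &
                                  LAST_BYTE_MASK(pos + len);
                    return;
            }
            map[first] |= FIRST_BYTE_MASK(pos);
            memset(&map[first + 1], 0xff, last - first - 1); /* whole bytes */
            map[last] |= LAST_BYTE_MASK(pos + len);
    }
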
4516 unsigned long dst_end = dst_offset + len - 1; in memmove_extent_buffer()
4517 unsigned long src_end = src_offset + len - 1; in memmove_extent_buffer()
4541 cur = min_t(unsigned long, len, src_off_in_page + 1); in memmove_extent_buffer()
4542 cur = min(cur, dst_off_in_page + 1); in memmove_extent_buffer()
4545 cur + 1; in memmove_extent_buffer()
4546 use_memmove = areas_overlap(src_end - cur + 1, dst_end - cur + 1, in memmove_extent_buffer()
4549 __write_extent_buffer(dst, src_addr, dst_end - cur + 1, cur, in memmove_extent_buffer()
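
memmove_extent_buffer() copies chunk by chunk within pages and, for each chunk, picks memcpy() or memmove() depending on whether the source and destination ranges actually intersect (the areas_overlap() test above). A small standalone sketch of that choice, with a hypothetical wrapper name:

    #include <stdbool.h>
    #include <stddef.h>
    #include <string.h>

    /* True if [src, src + len) and [dst, dst + len) intersect. */
    static bool areas_overlap(size_t src, size_t dst, size_t len)
    {
            size_t distance = src > dst ? src - dst : dst - src;

            return distance < len;
    }

    static void copy_chunk(char *page, size_t dst, size_t src, size_t len)
    {
            /* memmove() tolerates overlap but costs more; only pay
             * for it when the ranges really do overlap. */
            if (areas_overlap(src, dst, len))
                    memmove(page + dst, page + src, len);
            else
                    memcpy(page + dst, page + src, len);
    }
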
4590 cur = gang[ret - 1]->start + gang[ret - 1]->len; in get_next_extent_buffer()
4604 struct extent_buffer *eb = NULL; in try_release_subpage_extent_buffer() local
4615 eb = get_next_extent_buffer(fs_info, page, cur); in try_release_subpage_extent_buffer()
4616 if (!eb) { in try_release_subpage_extent_buffer()
4617 /* No more eb in the page range after or at cur */ in try_release_subpage_extent_buffer()
4621 cur = eb->start + eb->len; in try_release_subpage_extent_buffer()
4624 * The same as try_release_extent_buffer(), to ensure the eb in try_release_subpage_extent_buffer()
4627 spin_lock(&eb->refs_lock); in try_release_subpage_extent_buffer()
4628 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { in try_release_subpage_extent_buffer()
4629 spin_unlock(&eb->refs_lock); in try_release_subpage_extent_buffer()
4636 * If tree ref isn't set then we know the ref on this eb is a in try_release_subpage_extent_buffer()
4637 * real ref, so just return, this eb will likely be freed soon in try_release_subpage_extent_buffer()
4640 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { in try_release_subpage_extent_buffer()
4641 spin_unlock(&eb->refs_lock); in try_release_subpage_extent_buffer()
4650 release_extent_buffer(eb); in try_release_subpage_extent_buffer()
4658 ret = 1; in try_release_subpage_extent_buffer()
4668 struct extent_buffer *eb; in try_release_extent_buffer() local
4680 return 1; in try_release_extent_buffer()
4683 eb = (struct extent_buffer *)page->private; in try_release_extent_buffer()
4684 BUG_ON(!eb); in try_release_extent_buffer()
4688 * the eb doesn't disappear out from under us while we're looking at in try_release_extent_buffer()
4691 spin_lock(&eb->refs_lock); in try_release_extent_buffer()
4692 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { in try_release_extent_buffer()
4693 spin_unlock(&eb->refs_lock); in try_release_extent_buffer()
4700 * If tree ref isn't set then we know the ref on this eb is a real ref, in try_release_extent_buffer()
4703 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { in try_release_extent_buffer()
4704 spin_unlock(&eb->refs_lock); in try_release_extent_buffer()
4708 return release_extent_buffer(eb); in try_release_extent_buffer()
4715 * @owner_root: objectid of the root that owns this eb
4717 * @level: level for the eb
4720 * normal uptodate check of the eb, without checking the generation. If we have
4731 struct extent_buffer *eb; in btrfs_readahead_tree_block() local
4734 eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level); in btrfs_readahead_tree_block()
4735 if (IS_ERR(eb)) in btrfs_readahead_tree_block()
4738 if (btrfs_buffer_uptodate(eb, gen, 1)) { in btrfs_readahead_tree_block()
4739 free_extent_buffer(eb); in btrfs_readahead_tree_block()
4743 ret = read_extent_buffer_pages(eb, WAIT_NONE, 0, &check); in btrfs_readahead_tree_block()
4745 free_extent_buffer_stale(eb); in btrfs_readahead_tree_block()
4747 free_extent_buffer(eb); in btrfs_readahead_tree_block()
4764 btrfs_header_level(node) - 1); in btrfs_readahead_node_child()