Lines matching refs: eb
38 static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb) in btrfs_leak_debug_add_eb() argument
40 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_leak_debug_add_eb()
44 list_add(&eb->leak_list, &fs_info->allocated_ebs); in btrfs_leak_debug_add_eb()
48 static inline void btrfs_leak_debug_del_eb(struct extent_buffer *eb) in btrfs_leak_debug_del_eb() argument
50 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_leak_debug_del_eb()
54 list_del(&eb->leak_list); in btrfs_leak_debug_del_eb()
60 struct extent_buffer *eb; in btrfs_extent_buffer_leak_debug_check() local
73 eb = list_first_entry(&fs_info->allocated_ebs, in btrfs_extent_buffer_leak_debug_check()
77 eb->start, eb->len, atomic_read(&eb->refs), eb->bflags, in btrfs_extent_buffer_leak_debug_check()
78 btrfs_header_owner(eb)); in btrfs_extent_buffer_leak_debug_check()
79 list_del(&eb->leak_list); in btrfs_extent_buffer_leak_debug_check()
80 kmem_cache_free(extent_buffer_cache, eb); in btrfs_extent_buffer_leak_debug_check()
85 #define btrfs_leak_debug_add_eb(eb) do {} while (0) argument
86 #define btrfs_leak_debug_del_eb(eb) do {} while (0) argument
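On debug builds these helpers thread every live extent_buffer onto fs_info->allocated_ebs so leaks can be reported at teardown; on non-debug builds the two #define stubs above make the calls disappear. A minimal sketch of that arrangement, assuming the usual CONFIG_BTRFS_DEBUG guard and an fs_info spinlock protecting the list (the lock name is an assumption, not taken from this listing):

#ifdef CONFIG_BTRFS_DEBUG
static inline void btrfs_leak_debug_add_eb(struct extent_buffer *eb)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	unsigned long flags;

	/* Track the buffer so a leak check at unmount can find it. */
	spin_lock_irqsave(&fs_info->eb_leak_lock, flags);
	list_add(&eb->leak_list, &fs_info->allocated_ebs);
	spin_unlock_irqrestore(&fs_info->eb_leak_lock, flags);
}
#else
/* Compiled-out stubs: callers need no #ifdefs of their own. */
#define btrfs_leak_debug_add_eb(eb)	do {} while (0)
#define btrfs_leak_debug_del_eb(eb)	do {} while (0)
#endif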
631 int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num) in btrfs_repair_eb_io_failure() argument
633 struct btrfs_fs_info *fs_info = eb->fs_info; in btrfs_repair_eb_io_failure()
634 u64 start = eb->start; in btrfs_repair_eb_io_failure()
635 int i, num_pages = num_extent_pages(eb); in btrfs_repair_eb_io_failure()
642 struct page *p = eb->pages[i]; in btrfs_repair_eb_io_failure()
1145 struct extent_buffer *eb; in find_extent_buffer_readpage() local
1158 eb = radix_tree_lookup(&fs_info->buffer_radix, in find_extent_buffer_readpage()
1161 ASSERT(eb); in find_extent_buffer_readpage()
1162 return eb; in find_extent_buffer_readpage()
1277 struct extent_buffer *eb; in end_bio_extent_readpage() local
1279 eb = find_extent_buffer_readpage(fs_info, page, start); in end_bio_extent_readpage()
1280 set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); in end_bio_extent_readpage()
1281 eb->read_mirror = mirror; in end_bio_extent_readpage()
1282 atomic_dec(&eb->io_pages); in end_bio_extent_readpage()
1634 static int attach_extent_buffer_page(struct extent_buffer *eb, in attach_extent_buffer_page() argument
1638 struct btrfs_fs_info *fs_info = eb->fs_info; in attach_extent_buffer_page()
1652 attach_page_private(page, eb); in attach_extent_buffer_page()
1654 WARN_ON(page->private != (unsigned long)eb); in attach_extent_buffer_page()
2359 void wait_on_extent_buffer_writeback(struct extent_buffer *eb) in wait_on_extent_buffer_writeback() argument
2361 wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK, in wait_on_extent_buffer_writeback()
2365 static void end_extent_buffer_writeback(struct extent_buffer *eb) in end_extent_buffer_writeback() argument
2367 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); in end_extent_buffer_writeback()
2369 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK); in end_extent_buffer_writeback()
2382 static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb, in lock_extent_buffer_for_io() argument
2385 struct btrfs_fs_info *fs_info = eb->fs_info; in lock_extent_buffer_for_io()
2390 if (!btrfs_try_tree_write_lock(eb)) { in lock_extent_buffer_for_io()
2393 btrfs_tree_lock(eb); in lock_extent_buffer_for_io()
2396 if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) { in lock_extent_buffer_for_io()
2397 btrfs_tree_unlock(eb); in lock_extent_buffer_for_io()
2405 wait_on_extent_buffer_writeback(eb); in lock_extent_buffer_for_io()
2406 btrfs_tree_lock(eb); in lock_extent_buffer_for_io()
2407 if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) in lock_extent_buffer_for_io()
2409 btrfs_tree_unlock(eb); in lock_extent_buffer_for_io()
2418 spin_lock(&eb->refs_lock); in lock_extent_buffer_for_io()
2419 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) { in lock_extent_buffer_for_io()
2420 set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); in lock_extent_buffer_for_io()
2421 spin_unlock(&eb->refs_lock); in lock_extent_buffer_for_io()
2422 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN); in lock_extent_buffer_for_io()
2424 -eb->len, in lock_extent_buffer_for_io()
2428 spin_unlock(&eb->refs_lock); in lock_extent_buffer_for_io()
2431 btrfs_tree_unlock(eb); in lock_extent_buffer_for_io()
2442 num_pages = num_extent_pages(eb); in lock_extent_buffer_for_io()
2444 struct page *p = eb->pages[i]; in lock_extent_buffer_for_io()
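In this version a positive return from lock_extent_buffer_for_io() means the buffer was dirty and has now been switched to writeback, zero means there was nothing to write, and a negative value signals an error while flushing pending IO. A sketch of how a caller such as submit_eb_page() drives it, simplified from the pattern shown further down (zoned-device handling and the epd/bio plumbing omitted):

ret = lock_extent_buffer_for_io(eb, epd);
if (ret <= 0) {
	/* Nothing dirty to write out, or flushing failed. */
	free_extent_buffer(eb);
	return ret;
}
ret = write_one_eb(eb, wbc, epd);
free_extent_buffer(eb);
return ret ? ret : 1;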
2458 static void set_btree_ioerr(struct page *page, struct extent_buffer *eb) in set_btree_ioerr() argument
2460 struct btrfs_fs_info *fs_info = eb->fs_info; in set_btree_ioerr()
2462 btrfs_page_set_error(fs_info, page, eb->start, eb->len); in set_btree_ioerr()
2463 if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) in set_btree_ioerr()
2470 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in set_btree_ioerr()
2485 eb->len, fs_info->dirty_metadata_batch); in set_btree_ioerr()
2525 switch (eb->log_index) { in set_btree_ioerr()
2547 struct extent_buffer *eb; in find_extent_buffer_nolock() local
2550 eb = radix_tree_lookup(&fs_info->buffer_radix, in find_extent_buffer_nolock()
2552 if (eb && atomic_inc_not_zero(&eb->refs)) { in find_extent_buffer_nolock()
2554 return eb; in find_extent_buffer_nolock()
2587 struct extent_buffer *eb; in end_bio_subpage_eb_writepage() local
2595 eb = find_extent_buffer_nolock(fs_info, cur_bytenr); in end_bio_subpage_eb_writepage()
2596 ASSERT(eb); in end_bio_subpage_eb_writepage()
2598 cur_bytenr = eb->start + eb->len; in end_bio_subpage_eb_writepage()
2600 ASSERT(test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)); in end_bio_subpage_eb_writepage()
2601 done = atomic_dec_and_test(&eb->io_pages); in end_bio_subpage_eb_writepage()
2605 test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) { in end_bio_subpage_eb_writepage()
2607 set_btree_ioerr(page, eb); in end_bio_subpage_eb_writepage()
2610 btrfs_subpage_clear_writeback(fs_info, page, eb->start, in end_bio_subpage_eb_writepage()
2611 eb->len); in end_bio_subpage_eb_writepage()
2612 end_extent_buffer_writeback(eb); in end_bio_subpage_eb_writepage()
2618 atomic_dec(&eb->refs); in end_bio_subpage_eb_writepage()
2628 struct extent_buffer *eb; in end_bio_extent_buffer_writepage() local
2636 eb = (struct extent_buffer *)page->private; in end_bio_extent_buffer_writepage()
2637 BUG_ON(!eb); in end_bio_extent_buffer_writepage()
2638 done = atomic_dec_and_test(&eb->io_pages); in end_bio_extent_buffer_writepage()
2641 test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) { in end_bio_extent_buffer_writepage()
2643 set_btree_ioerr(page, eb); in end_bio_extent_buffer_writepage()
2651 end_extent_buffer_writeback(eb); in end_bio_extent_buffer_writepage()
2657 static void prepare_eb_write(struct extent_buffer *eb) in prepare_eb_write() argument
2663 clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); in prepare_eb_write()
2664 atomic_set(&eb->io_pages, num_extent_pages(eb)); in prepare_eb_write()
2667 nritems = btrfs_header_nritems(eb); in prepare_eb_write()
2668 if (btrfs_header_level(eb) > 0) { in prepare_eb_write()
2670 memzero_extent_buffer(eb, end, eb->len - end); in prepare_eb_write()
2677 end = BTRFS_LEAF_DATA_OFFSET + leaf_data_end(eb); in prepare_eb_write()
2678 memzero_extent_buffer(eb, start, end - start); in prepare_eb_write()
2686 static int write_one_subpage_eb(struct extent_buffer *eb, in write_one_subpage_eb() argument
2690 struct btrfs_fs_info *fs_info = eb->fs_info; in write_one_subpage_eb()
2691 struct page *page = eb->pages[0]; in write_one_subpage_eb()
2696 prepare_eb_write(eb); in write_one_subpage_eb()
2700 btrfs_subpage_set_writeback(fs_info, page, eb->start, eb->len); in write_one_subpage_eb()
2704 eb->start, eb->len); in write_one_subpage_eb()
2711 &epd->bio_ctrl, eb->start, page, eb->len, in write_one_subpage_eb()
2712 eb->start - page_offset(page), 0, false); in write_one_subpage_eb()
2714 btrfs_subpage_clear_writeback(fs_info, page, eb->start, eb->len); in write_one_subpage_eb()
2715 set_btree_ioerr(page, eb); in write_one_subpage_eb()
2718 if (atomic_dec_and_test(&eb->io_pages)) in write_one_subpage_eb()
2719 end_extent_buffer_writeback(eb); in write_one_subpage_eb()
2732 static noinline_for_stack int write_one_eb(struct extent_buffer *eb, in write_one_eb() argument
2736 u64 disk_bytenr = eb->start; in write_one_eb()
2741 prepare_eb_write(eb); in write_one_eb()
2745 num_pages = num_extent_pages(eb); in write_one_eb()
2747 struct page *p = eb->pages[i]; in write_one_eb()
2755 set_btree_ioerr(p, eb); in write_one_eb()
2758 if (atomic_sub_and_test(num_pages - i, &eb->io_pages)) in write_one_eb()
2759 end_extent_buffer_writeback(eb); in write_one_eb()
2770 struct page *p = eb->pages[i]; in write_one_eb()
2807 struct extent_buffer *eb; in submit_eb_subpage() local
2836 eb = find_extent_buffer_nolock(fs_info, start); in submit_eb_subpage()
2845 if (!eb) in submit_eb_subpage()
2848 ret = lock_extent_buffer_for_io(eb, epd); in submit_eb_subpage()
2850 free_extent_buffer(eb); in submit_eb_subpage()
2854 free_extent_buffer(eb); in submit_eb_subpage()
2857 ret = write_one_subpage_eb(eb, wbc, epd); in submit_eb_subpage()
2858 free_extent_buffer(eb); in submit_eb_subpage()
2897 struct extent_buffer *eb; in submit_eb_page() local
2912 eb = (struct extent_buffer *)page->private; in submit_eb_page()
2918 if (WARN_ON(!eb)) { in submit_eb_page()
2923 if (eb == *eb_context) { in submit_eb_page()
2927 ret = atomic_inc_not_zero(&eb->refs); in submit_eb_page()
2932 if (!btrfs_check_meta_write_pointer(eb->fs_info, eb, &cache)) { in submit_eb_page()
2941 free_extent_buffer(eb); in submit_eb_page()
2945 *eb_context = eb; in submit_eb_page()
2947 ret = lock_extent_buffer_for_io(eb, epd); in submit_eb_page()
2949 btrfs_revert_meta_write_pointer(cache, eb); in submit_eb_page()
2952 free_extent_buffer(eb); in submit_eb_page()
2959 btrfs_schedule_zone_finish_bg(cache, eb); in submit_eb_page()
2962 ret = write_one_eb(eb, wbc, epd); in submit_eb_page()
2963 free_extent_buffer(eb); in submit_eb_page()
4154 static void __free_extent_buffer(struct extent_buffer *eb) in __free_extent_buffer() argument
4156 kmem_cache_free(extent_buffer_cache, eb); in __free_extent_buffer()
4159 int extent_buffer_under_io(const struct extent_buffer *eb) in extent_buffer_under_io() argument
4161 return (atomic_read(&eb->io_pages) || in extent_buffer_under_io()
4162 test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) || in extent_buffer_under_io()
4163 test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); in extent_buffer_under_io()
4186 static void detach_extent_buffer_page(struct extent_buffer *eb, struct page *page) in detach_extent_buffer_page() argument
4188 struct btrfs_fs_info *fs_info = eb->fs_info; in detach_extent_buffer_page()
4189 const bool mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in detach_extent_buffer_page()
4213 page->private == (unsigned long)eb) { in detach_extent_buffer_page()
4214 BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); in detach_extent_buffer_page()
4251 static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb) in btrfs_release_extent_buffer_pages() argument
4256 ASSERT(!extent_buffer_under_io(eb)); in btrfs_release_extent_buffer_pages()
4258 num_pages = num_extent_pages(eb); in btrfs_release_extent_buffer_pages()
4260 struct page *page = eb->pages[i]; in btrfs_release_extent_buffer_pages()
4265 detach_extent_buffer_page(eb, page); in btrfs_release_extent_buffer_pages()
4275 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb) in btrfs_release_extent_buffer() argument
4277 btrfs_release_extent_buffer_pages(eb); in btrfs_release_extent_buffer()
4278 btrfs_leak_debug_del_eb(eb); in btrfs_release_extent_buffer()
4279 __free_extent_buffer(eb); in btrfs_release_extent_buffer()
4286 struct extent_buffer *eb = NULL; in __alloc_extent_buffer() local
4288 eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS|__GFP_NOFAIL); in __alloc_extent_buffer()
4289 eb->start = start; in __alloc_extent_buffer()
4290 eb->len = len; in __alloc_extent_buffer()
4291 eb->fs_info = fs_info; in __alloc_extent_buffer()
4292 eb->bflags = 0; in __alloc_extent_buffer()
4293 init_rwsem(&eb->lock); in __alloc_extent_buffer()
4295 btrfs_leak_debug_add_eb(eb); in __alloc_extent_buffer()
4296 INIT_LIST_HEAD(&eb->release_list); in __alloc_extent_buffer()
4298 spin_lock_init(&eb->refs_lock); in __alloc_extent_buffer()
4299 atomic_set(&eb->refs, 1); in __alloc_extent_buffer()
4300 atomic_set(&eb->io_pages, 0); in __alloc_extent_buffer()
4304 return eb; in __alloc_extent_buffer()
4352 struct extent_buffer *eb; in __alloc_dummy_extent_buffer() local
4357 eb = __alloc_extent_buffer(fs_info, start, len); in __alloc_dummy_extent_buffer()
4358 if (!eb) in __alloc_dummy_extent_buffer()
4361 num_pages = num_extent_pages(eb); in __alloc_dummy_extent_buffer()
4362 ret = btrfs_alloc_page_array(num_pages, eb->pages); in __alloc_dummy_extent_buffer()
4367 struct page *p = eb->pages[i]; in __alloc_dummy_extent_buffer()
4369 ret = attach_extent_buffer_page(eb, p, NULL); in __alloc_dummy_extent_buffer()
4374 set_extent_buffer_uptodate(eb); in __alloc_dummy_extent_buffer()
4375 btrfs_set_header_nritems(eb, 0); in __alloc_dummy_extent_buffer()
4376 set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags); in __alloc_dummy_extent_buffer()
4378 return eb; in __alloc_dummy_extent_buffer()
4381 if (eb->pages[i]) { in __alloc_dummy_extent_buffer()
4382 detach_extent_buffer_page(eb, eb->pages[i]); in __alloc_dummy_extent_buffer()
4383 __free_page(eb->pages[i]); in __alloc_dummy_extent_buffer()
4386 __free_extent_buffer(eb); in __alloc_dummy_extent_buffer()
4396 static void check_buffer_tree_ref(struct extent_buffer *eb) in check_buffer_tree_ref() argument
4422 refs = atomic_read(&eb->refs); in check_buffer_tree_ref()
4423 if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in check_buffer_tree_ref()
4426 spin_lock(&eb->refs_lock); in check_buffer_tree_ref()
4427 if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in check_buffer_tree_ref()
4428 atomic_inc(&eb->refs); in check_buffer_tree_ref()
4429 spin_unlock(&eb->refs_lock); in check_buffer_tree_ref()
4432 static void mark_extent_buffer_accessed(struct extent_buffer *eb, in mark_extent_buffer_accessed() argument
4437 check_buffer_tree_ref(eb); in mark_extent_buffer_accessed()
4439 num_pages = num_extent_pages(eb); in mark_extent_buffer_accessed()
4441 struct page *p = eb->pages[i]; in mark_extent_buffer_accessed()
4451 struct extent_buffer *eb; in find_extent_buffer() local
4453 eb = find_extent_buffer_nolock(fs_info, start); in find_extent_buffer()
4454 if (!eb) in find_extent_buffer()
4469 if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) { in find_extent_buffer()
4470 spin_lock(&eb->refs_lock); in find_extent_buffer()
4471 spin_unlock(&eb->refs_lock); in find_extent_buffer()
4473 mark_extent_buffer_accessed(eb, NULL); in find_extent_buffer()
4474 return eb; in find_extent_buffer()
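find_extent_buffer() returns with an extra reference taken on the buffer (via atomic_inc_not_zero() in find_extent_buffer_nolock()), so every successful lookup must be paired with free_extent_buffer(). Illustrative caller pattern; bytenr is a placeholder block address:

struct extent_buffer *eb;

eb = find_extent_buffer(fs_info, bytenr);
if (eb) {
	/* ... inspect the cached tree block ... */
	free_extent_buffer(eb);		/* drop the reference the lookup took */
}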
4481 struct extent_buffer *eb, *exists = NULL; in alloc_test_extent_buffer() local
4484 eb = find_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
4485 if (eb) in alloc_test_extent_buffer()
4486 return eb; in alloc_test_extent_buffer()
4487 eb = alloc_dummy_extent_buffer(fs_info, start); in alloc_test_extent_buffer()
4488 if (!eb) in alloc_test_extent_buffer()
4490 eb->fs_info = fs_info; in alloc_test_extent_buffer()
4499 start >> fs_info->sectorsize_bits, eb); in alloc_test_extent_buffer()
4509 check_buffer_tree_ref(eb); in alloc_test_extent_buffer()
4510 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); in alloc_test_extent_buffer()
4512 return eb; in alloc_test_extent_buffer()
4514 btrfs_release_extent_buffer(eb); in alloc_test_extent_buffer()
4582 struct extent_buffer *eb; in alloc_extent_buffer() local
4604 eb = find_extent_buffer(fs_info, start); in alloc_extent_buffer()
4605 if (eb) in alloc_extent_buffer()
4606 return eb; in alloc_extent_buffer()
4608 eb = __alloc_extent_buffer(fs_info, start, len); in alloc_extent_buffer()
4609 if (!eb) in alloc_extent_buffer()
4619 btrfs_set_buffer_lockdep_class(lockdep_owner, eb, level); in alloc_extent_buffer()
4621 num_pages = num_extent_pages(eb); in alloc_extent_buffer()
4663 ret = attach_extent_buffer_page(eb, p, prealloc); in alloc_extent_buffer()
4677 WARN_ON(btrfs_page_test_dirty(fs_info, p, eb->start, eb->len)); in alloc_extent_buffer()
4678 eb->pages[i] = p; in alloc_extent_buffer()
4691 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in alloc_extent_buffer()
4701 start >> fs_info->sectorsize_bits, eb); in alloc_extent_buffer()
4712 check_buffer_tree_ref(eb); in alloc_extent_buffer()
4713 set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags); in alloc_extent_buffer()
4721 unlock_page(eb->pages[i]); in alloc_extent_buffer()
4722 return eb; in alloc_extent_buffer()
4725 WARN_ON(!atomic_dec_and_test(&eb->refs)); in alloc_extent_buffer()
4727 if (eb->pages[i]) in alloc_extent_buffer()
4728 unlock_page(eb->pages[i]); in alloc_extent_buffer()
4731 btrfs_release_extent_buffer(eb); in alloc_extent_buffer()
4737 struct extent_buffer *eb = in btrfs_release_extent_buffer_rcu() local
4740 __free_extent_buffer(eb); in btrfs_release_extent_buffer_rcu()
4743 static int release_extent_buffer(struct extent_buffer *eb) in release_extent_buffer() argument
4744 __releases(&eb->refs_lock) in release_extent_buffer()
4746 lockdep_assert_held(&eb->refs_lock); in release_extent_buffer()
4748 WARN_ON(atomic_read(&eb->refs) == 0); in release_extent_buffer()
4749 if (atomic_dec_and_test(&eb->refs)) { in release_extent_buffer()
4750 if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) { in release_extent_buffer()
4751 struct btrfs_fs_info *fs_info = eb->fs_info; in release_extent_buffer()
4753 spin_unlock(&eb->refs_lock); in release_extent_buffer()
4757 eb->start >> fs_info->sectorsize_bits); in release_extent_buffer()
4760 spin_unlock(&eb->refs_lock); in release_extent_buffer()
4763 btrfs_leak_debug_del_eb(eb); in release_extent_buffer()
4765 btrfs_release_extent_buffer_pages(eb); in release_extent_buffer()
4767 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) { in release_extent_buffer()
4768 __free_extent_buffer(eb); in release_extent_buffer()
4772 call_rcu(&eb->rcu_head, btrfs_release_extent_buffer_rcu); in release_extent_buffer()
4775 spin_unlock(&eb->refs_lock); in release_extent_buffer()
4780 void free_extent_buffer(struct extent_buffer *eb) in free_extent_buffer() argument
4783 if (!eb) in free_extent_buffer()
4786 refs = atomic_read(&eb->refs); in free_extent_buffer()
4788 if ((!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && refs <= 3) in free_extent_buffer()
4789 || (test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags) && in free_extent_buffer()
4792 if (atomic_try_cmpxchg(&eb->refs, &refs, refs - 1)) in free_extent_buffer()
4796 spin_lock(&eb->refs_lock); in free_extent_buffer()
4797 if (atomic_read(&eb->refs) == 2 && in free_extent_buffer()
4798 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) && in free_extent_buffer()
4799 !extent_buffer_under_io(eb) && in free_extent_buffer()
4800 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in free_extent_buffer()
4801 atomic_dec(&eb->refs); in free_extent_buffer()
4807 release_extent_buffer(eb); in free_extent_buffer()
4810 void free_extent_buffer_stale(struct extent_buffer *eb) in free_extent_buffer_stale() argument
4812 if (!eb) in free_extent_buffer_stale()
4815 spin_lock(&eb->refs_lock); in free_extent_buffer_stale()
4816 set_bit(EXTENT_BUFFER_STALE, &eb->bflags); in free_extent_buffer_stale()
4818 if (atomic_read(&eb->refs) == 2 && !extent_buffer_under_io(eb) && in free_extent_buffer_stale()
4819 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) in free_extent_buffer_stale()
4820 atomic_dec(&eb->refs); in free_extent_buffer_stale()
4821 release_extent_buffer(eb); in free_extent_buffer_stale()
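Taken together, the release paths above implement a two-owner model: the radix tree holds one reference (EXTENT_BUFFER_TREE_REF) and each user holds another, with the final free deferred through RCU. A condensed lifetime sketch built only from calls that appear in this listing:

/* Illustrative lifetime, not verbatim kernel code:
 *
 *   eb = alloc_extent_buffer(fs_info, start, ...);  refs == 2 (tree + caller)
 *   ... use the buffer ...
 *   free_extent_buffer(eb);                         caller ref dropped, eb stays cached
 *
 * free_extent_buffer_stale() additionally sets EXTENT_BUFFER_STALE and drops
 * the tree reference, so the buffer leaves the radix tree and is freed (via
 * call_rcu()) once the last user reference goes away.
 */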
4836 static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb) in clear_subpage_extent_buffer_dirty() argument
4838 struct btrfs_fs_info *fs_info = eb->fs_info; in clear_subpage_extent_buffer_dirty()
4839 struct page *page = eb->pages[0]; in clear_subpage_extent_buffer_dirty()
4844 last = btrfs_subpage_clear_and_test_dirty(fs_info, page, eb->start, in clear_subpage_extent_buffer_dirty()
4845 eb->len); in clear_subpage_extent_buffer_dirty()
4849 WARN_ON(atomic_read(&eb->refs) == 0); in clear_subpage_extent_buffer_dirty()
4852 void clear_extent_buffer_dirty(const struct extent_buffer *eb) in clear_extent_buffer_dirty() argument
4858 if (eb->fs_info->nodesize < PAGE_SIZE) in clear_extent_buffer_dirty()
4859 return clear_subpage_extent_buffer_dirty(eb); in clear_extent_buffer_dirty()
4861 num_pages = num_extent_pages(eb); in clear_extent_buffer_dirty()
4864 page = eb->pages[i]; in clear_extent_buffer_dirty()
4872 WARN_ON(atomic_read(&eb->refs) == 0); in clear_extent_buffer_dirty()
4875 bool set_extent_buffer_dirty(struct extent_buffer *eb) in set_extent_buffer_dirty() argument
4881 check_buffer_tree_ref(eb); in set_extent_buffer_dirty()
4883 was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags); in set_extent_buffer_dirty()
4885 num_pages = num_extent_pages(eb); in set_extent_buffer_dirty()
4886 WARN_ON(atomic_read(&eb->refs) == 0); in set_extent_buffer_dirty()
4887 WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)); in set_extent_buffer_dirty()
4890 bool subpage = eb->fs_info->nodesize < PAGE_SIZE; in set_extent_buffer_dirty()
4904 lock_page(eb->pages[0]); in set_extent_buffer_dirty()
4906 btrfs_page_set_dirty(eb->fs_info, eb->pages[i], in set_extent_buffer_dirty()
4907 eb->start, eb->len); in set_extent_buffer_dirty()
4909 unlock_page(eb->pages[0]); in set_extent_buffer_dirty()
4913 ASSERT(PageDirty(eb->pages[i])); in set_extent_buffer_dirty()
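set_extent_buffer_dirty() is always preceded by modifying the buffer's contents; its return value reports whether the buffer was already dirty, which callers use for dirty-metadata accounting. Minimal write-side sketch using only helpers from this listing (src, offset and len are placeholders):

write_extent_buffer(eb, src, offset, len);	/* copy the new bytes in */
set_extent_buffer_dirty(eb);			/* mark every backing page dirty */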
4919 void clear_extent_buffer_uptodate(struct extent_buffer *eb) in clear_extent_buffer_uptodate() argument
4921 struct btrfs_fs_info *fs_info = eb->fs_info; in clear_extent_buffer_uptodate()
4926 clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in clear_extent_buffer_uptodate()
4927 num_pages = num_extent_pages(eb); in clear_extent_buffer_uptodate()
4929 page = eb->pages[i]; in clear_extent_buffer_uptodate()
4940 btrfs_subpage_clear_uptodate(fs_info, page, eb->start, in clear_extent_buffer_uptodate()
4941 eb->len); in clear_extent_buffer_uptodate()
4945 void set_extent_buffer_uptodate(struct extent_buffer *eb) in set_extent_buffer_uptodate() argument
4947 struct btrfs_fs_info *fs_info = eb->fs_info; in set_extent_buffer_uptodate()
4952 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in set_extent_buffer_uptodate()
4953 num_pages = num_extent_pages(eb); in set_extent_buffer_uptodate()
4955 page = eb->pages[i]; in set_extent_buffer_uptodate()
4964 btrfs_subpage_set_uptodate(fs_info, page, eb->start, in set_extent_buffer_uptodate()
4965 eb->len); in set_extent_buffer_uptodate()
4969 static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait, in read_extent_buffer_subpage() argument
4972 struct btrfs_fs_info *fs_info = eb->fs_info; in read_extent_buffer_subpage()
4974 struct page *page = eb->pages[0]; in read_extent_buffer_subpage()
4980 ASSERT(!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags)); in read_extent_buffer_subpage()
4985 if (!try_lock_extent(io_tree, eb->start, eb->start + eb->len - 1)) in read_extent_buffer_subpage()
4988 ret = lock_extent(io_tree, eb->start, eb->start + eb->len - 1, NULL); in read_extent_buffer_subpage()
4994 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags) || in read_extent_buffer_subpage()
4996 btrfs_subpage_test_uptodate(fs_info, page, eb->start, eb->len)) { in read_extent_buffer_subpage()
4997 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in read_extent_buffer_subpage()
4998 unlock_extent(io_tree, eb->start, eb->start + eb->len - 1, NULL); in read_extent_buffer_subpage()
5002 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); in read_extent_buffer_subpage()
5003 eb->read_mirror = 0; in read_extent_buffer_subpage()
5004 atomic_set(&eb->io_pages, 1); in read_extent_buffer_subpage()
5005 check_buffer_tree_ref(eb); in read_extent_buffer_subpage()
5008 btrfs_subpage_clear_error(fs_info, page, eb->start, eb->len); in read_extent_buffer_subpage()
5010 btrfs_subpage_start_reader(fs_info, page, eb->start, eb->len); in read_extent_buffer_subpage()
5012 eb->start, page, eb->len, in read_extent_buffer_subpage()
5013 eb->start - page_offset(page), 0, true); in read_extent_buffer_subpage()
5020 atomic_dec(&eb->io_pages); in read_extent_buffer_subpage()
5026 wait_extent_bit(io_tree, eb->start, eb->start + eb->len - 1, EXTENT_LOCKED); in read_extent_buffer_subpage()
5027 if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) in read_extent_buffer_subpage()
5032 int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num) in read_extent_buffer_pages() argument
5046 if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) in read_extent_buffer_pages()
5054 if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))) in read_extent_buffer_pages()
5057 if (eb->fs_info->nodesize < PAGE_SIZE) in read_extent_buffer_pages()
5058 return read_extent_buffer_subpage(eb, wait, mirror_num); in read_extent_buffer_pages()
5060 num_pages = num_extent_pages(eb); in read_extent_buffer_pages()
5062 page = eb->pages[i]; in read_extent_buffer_pages()
5084 page = eb->pages[i]; in read_extent_buffer_pages()
5092 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); in read_extent_buffer_pages()
5096 clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); in read_extent_buffer_pages()
5097 eb->read_mirror = 0; in read_extent_buffer_pages()
5098 atomic_set(&eb->io_pages, num_reads); in read_extent_buffer_pages()
5103 check_buffer_tree_ref(eb); in read_extent_buffer_pages()
5106 page = eb->pages[i]; in read_extent_buffer_pages()
5110 atomic_dec(&eb->io_pages); in read_extent_buffer_pages()
5128 atomic_dec(&eb->io_pages); in read_extent_buffer_pages()
5141 page = eb->pages[i]; in read_extent_buffer_pages()
5152 page = eb->pages[locked_pages]; in read_extent_buffer_pages()
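The readahead helper at the end of this listing issues the read with WAIT_NONE; a synchronous read of a tree block follows the same shape but waits for completion. Sketch, assuming the WAIT_COMPLETE wait mode from extent_io.h and placeholder bytenr/owner_root/level/mirror_num values:

struct extent_buffer *eb;
int ret;

eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
if (IS_ERR(eb))
	return PTR_ERR(eb);

ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num);
if (ret < 0)
	free_extent_buffer_stale(eb);	/* same error treatment as the readahead path */
else
	free_extent_buffer(eb);
return ret;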
5158 static bool report_eb_range(const struct extent_buffer *eb, unsigned long start, in report_eb_range() argument
5161 btrfs_warn(eb->fs_info, in report_eb_range()
5163 eb->start, eb->len, start, len); in report_eb_range()
5176 static inline int check_eb_range(const struct extent_buffer *eb, in check_eb_range() argument
5182 if (unlikely(check_add_overflow(start, len, &offset) || offset > eb->len)) in check_eb_range()
5183 return report_eb_range(eb, start, len); in check_eb_range()
5188 void read_extent_buffer(const struct extent_buffer *eb, void *dstv, in read_extent_buffer() argument
5198 if (check_eb_range(eb, start, len)) { in read_extent_buffer()
5207 offset = get_eb_offset_in_page(eb, start); in read_extent_buffer()
5210 page = eb->pages[i]; in read_extent_buffer()
5223 int read_extent_buffer_to_user_nofault(const struct extent_buffer *eb, in read_extent_buffer_to_user_nofault() argument
5235 WARN_ON(start > eb->len); in read_extent_buffer_to_user_nofault()
5236 WARN_ON(start + len > eb->start + eb->len); in read_extent_buffer_to_user_nofault()
5238 offset = get_eb_offset_in_page(eb, start); in read_extent_buffer_to_user_nofault()
5241 page = eb->pages[i]; in read_extent_buffer_to_user_nofault()
5259 int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv, in memcmp_extent_buffer() argument
5270 if (check_eb_range(eb, start, len)) in memcmp_extent_buffer()
5273 offset = get_eb_offset_in_page(eb, start); in memcmp_extent_buffer()
5276 page = eb->pages[i]; in memcmp_extent_buffer()
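These accessors copy bytes between a caller's buffer and the (possibly discontiguous) pages backing the extent buffer, so callers never dereference eb->pages directly. Illustrative read, reusing the btrfs_header fsid offset that appears later in this listing; BTRFS_FSID_SIZE is the standard 16-byte fsid length:

u8 fsid[BTRFS_FSID_SIZE];

/* Copy the fsid field out of the block header; the helper handles
 * any page boundary inside the range.
 */
read_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid),
		   BTRFS_FSID_SIZE);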
5299 static void assert_eb_page_uptodate(const struct extent_buffer *eb, in assert_eb_page_uptodate() argument
5302 struct btrfs_fs_info *fs_info = eb->fs_info; in assert_eb_page_uptodate()
5317 eb->start, eb->len); in assert_eb_page_uptodate()
5318 error = btrfs_subpage_test_error(fs_info, page, eb->start, eb->len); in assert_eb_page_uptodate()
5325 void write_extent_buffer_chunk_tree_uuid(const struct extent_buffer *eb, in write_extent_buffer_chunk_tree_uuid() argument
5330 assert_eb_page_uptodate(eb, eb->pages[0]); in write_extent_buffer_chunk_tree_uuid()
5331 kaddr = page_address(eb->pages[0]) + in write_extent_buffer_chunk_tree_uuid()
5332 get_eb_offset_in_page(eb, offsetof(struct btrfs_header, in write_extent_buffer_chunk_tree_uuid()
5337 void write_extent_buffer_fsid(const struct extent_buffer *eb, const void *srcv) in write_extent_buffer_fsid() argument
5341 assert_eb_page_uptodate(eb, eb->pages[0]); in write_extent_buffer_fsid()
5342 kaddr = page_address(eb->pages[0]) + in write_extent_buffer_fsid()
5343 get_eb_offset_in_page(eb, offsetof(struct btrfs_header, fsid)); in write_extent_buffer_fsid()
5347 void write_extent_buffer(const struct extent_buffer *eb, const void *srcv, in write_extent_buffer() argument
5357 WARN_ON(test_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags)); in write_extent_buffer()
5359 if (check_eb_range(eb, start, len)) in write_extent_buffer()
5362 offset = get_eb_offset_in_page(eb, start); in write_extent_buffer()
5365 page = eb->pages[i]; in write_extent_buffer()
5366 assert_eb_page_uptodate(eb, page); in write_extent_buffer()
5379 void memzero_extent_buffer(const struct extent_buffer *eb, unsigned long start, in memzero_extent_buffer() argument
5388 if (check_eb_range(eb, start, len)) in memzero_extent_buffer()
5391 offset = get_eb_offset_in_page(eb, start); in memzero_extent_buffer()
5394 page = eb->pages[i]; in memzero_extent_buffer()
5395 assert_eb_page_uptodate(eb, page); in memzero_extent_buffer()
5480 static inline void eb_bitmap_offset(const struct extent_buffer *eb, in eb_bitmap_offset() argument
5493 offset = start + offset_in_page(eb->start) + byte_offset; in eb_bitmap_offset()
5505 int extent_buffer_test_bit(const struct extent_buffer *eb, unsigned long start, in extent_buffer_test_bit() argument
5513 eb_bitmap_offset(eb, start, nr, &i, &offset); in extent_buffer_test_bit()
5514 page = eb->pages[i]; in extent_buffer_test_bit()
5515 assert_eb_page_uptodate(eb, page); in extent_buffer_test_bit()
5527 void extent_buffer_bitmap_set(const struct extent_buffer *eb, unsigned long start, in extent_buffer_bitmap_set() argument
5538 eb_bitmap_offset(eb, start, pos, &i, &offset); in extent_buffer_bitmap_set()
5539 page = eb->pages[i]; in extent_buffer_bitmap_set()
5540 assert_eb_page_uptodate(eb, page); in extent_buffer_bitmap_set()
5550 page = eb->pages[++i]; in extent_buffer_bitmap_set()
5551 assert_eb_page_uptodate(eb, page); in extent_buffer_bitmap_set()
5569 void extent_buffer_bitmap_clear(const struct extent_buffer *eb, in extent_buffer_bitmap_clear() argument
5581 eb_bitmap_offset(eb, start, pos, &i, &offset); in extent_buffer_bitmap_clear()
5582 page = eb->pages[i]; in extent_buffer_bitmap_clear()
5583 assert_eb_page_uptodate(eb, page); in extent_buffer_bitmap_clear()
5593 page = eb->pages[++i]; in extent_buffer_bitmap_clear()
5594 assert_eb_page_uptodate(eb, page); in extent_buffer_bitmap_clear()
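The bitmap helpers address bits inside a bitmap that is itself stored at some byte offset within the buffer: start is that byte offset, the bit position counts from there, and eb_bitmap_offset() translates the pair into a page index and in-page offset. Hedged usage sketch; bitmap_offset and the bit numbers are placeholders:

/* Set a single bit if it is clear, then clear a run of bits. */
if (!extent_buffer_test_bit(eb, bitmap_offset, bit_nr))
	extent_buffer_bitmap_set(eb, bitmap_offset, bit_nr, 1);

extent_buffer_bitmap_clear(eb, bitmap_offset, first_bit, nr_bits);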
5751 struct extent_buffer *eb = NULL; in try_release_subpage_extent_buffer() local
5762 eb = get_next_extent_buffer(fs_info, page, cur); in try_release_subpage_extent_buffer()
5763 if (!eb) { in try_release_subpage_extent_buffer()
5768 cur = eb->start + eb->len; in try_release_subpage_extent_buffer()
5774 spin_lock(&eb->refs_lock); in try_release_subpage_extent_buffer()
5775 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { in try_release_subpage_extent_buffer()
5776 spin_unlock(&eb->refs_lock); in try_release_subpage_extent_buffer()
5787 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { in try_release_subpage_extent_buffer()
5788 spin_unlock(&eb->refs_lock); in try_release_subpage_extent_buffer()
5797 release_extent_buffer(eb); in try_release_subpage_extent_buffer()
5815 struct extent_buffer *eb; in try_release_extent_buffer() local
5830 eb = (struct extent_buffer *)page->private; in try_release_extent_buffer()
5831 BUG_ON(!eb); in try_release_extent_buffer()
5838 spin_lock(&eb->refs_lock); in try_release_extent_buffer()
5839 if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { in try_release_extent_buffer()
5840 spin_unlock(&eb->refs_lock); in try_release_extent_buffer()
5850 if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) { in try_release_extent_buffer()
5851 spin_unlock(&eb->refs_lock); in try_release_extent_buffer()
5855 return release_extent_buffer(eb); in try_release_extent_buffer()
5873 struct extent_buffer *eb; in btrfs_readahead_tree_block() local
5876 eb = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level); in btrfs_readahead_tree_block()
5877 if (IS_ERR(eb)) in btrfs_readahead_tree_block()
5880 if (btrfs_buffer_uptodate(eb, gen, 1)) { in btrfs_readahead_tree_block()
5881 free_extent_buffer(eb); in btrfs_readahead_tree_block()
5885 ret = read_extent_buffer_pages(eb, WAIT_NONE, 0); in btrfs_readahead_tree_block()
5887 free_extent_buffer_stale(eb); in btrfs_readahead_tree_block()
5889 free_extent_buffer(eb); in btrfs_readahead_tree_block()