
Lines Matching +full:oe +full:- +full:extra +full:- +full:delay

1 // SPDX-License-Identifier: GPL-2.0
16 #include <linux/backing-dev.h>
37 #include "disk-io.h"
40 #include "print-tree.h"
41 #include "ordered-data.h"
43 #include "tree-log.h"
47 #include "free-space-cache.h"
48 #include "inode-map.h"
51 #include "delalloc-space.h"
52 #include "block-group.h"
53 #include "space-info.h"
113 unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT; in btrfs_cleanup_ordered_extents()
115 u64 page_end = page_start + PAGE_SIZE - 1; in btrfs_cleanup_ordered_extents()
120 page = find_get_page(inode->vfs_inode.i_mapping, index); in btrfs_cleanup_ordered_extents()
133 if (page_start >= offset && page_end <= (offset + bytes - 1)) { in btrfs_cleanup_ordered_extents()
135 bytes -= PAGE_SIZE; in btrfs_cleanup_ordered_extents()
193 path->leave_spinning = 1; in insert_inline_extent()
199 leaf = path->nodes[0]; in insert_inline_extent()
200 ei = btrfs_item_ptr(leaf, path->slots[0], in insert_inline_extent()
202 btrfs_set_file_extent_generation(leaf, ei, trans->transid); in insert_inline_extent()
223 compressed_size -= cur_size; in insert_inline_extent()
228 page = find_get_page(inode->i_mapping, in insert_inline_extent()
244 size = ALIGN(size, root->fs_info->sectorsize); in insert_inline_extent()
258 BTRFS_I(inode)->disk_i_size = inode->i_size; in insert_inline_extent()
276 struct btrfs_root *root = inode->root; in cow_file_range_inline()
277 struct btrfs_fs_info *fs_info = root->fs_info; in cow_file_range_inline()
279 u64 isize = i_size_read(&inode->vfs_inode); in cow_file_range_inline()
281 u64 inline_len = actual_end - start; in cow_file_range_inline()
282 u64 aligned_end = ALIGN(end, fs_info->sectorsize); in cow_file_range_inline()
293 actual_end > fs_info->sectorsize || in cow_file_range_inline()
296 (actual_end & (fs_info->sectorsize - 1)) == 0) || in cow_file_range_inline()
298 data_len > fs_info->max_inline) { in cow_file_range_inline()
304 return -ENOMEM; in cow_file_range_inline()
311 trans->block_rsv = &inode->block_rsv; in cow_file_range_inline()
331 root, &inode->vfs_inode, start, in cow_file_range_inline()
334 if (ret && ret != -ENOSPC) { in cow_file_range_inline()
337 } else if (ret == -ENOSPC) { in cow_file_range_inline()
342 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags); in cow_file_range_inline()
343 btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0); in cow_file_range_inline()
395 BUG_ON(!async_extent); /* -ENOMEM */ in add_async_extent()
396 async_extent->start = start; in add_async_extent()
397 async_extent->ram_size = ram_size; in add_async_extent()
398 async_extent->compressed_size = compressed_size; in add_async_extent()
399 async_extent->pages = pages; in add_async_extent()
400 async_extent->nr_pages = nr_pages; in add_async_extent()
401 async_extent->compress_type = compress_type; in add_async_extent()
402 list_add_tail(&async_extent->list, &cow->extents); in add_async_extent()
411 if (inode->flags & BTRFS_INODE_NODATACOW || in inode_can_compress()
412 inode->flags & BTRFS_INODE_NODATASUM) in inode_can_compress()
424 struct btrfs_fs_info *fs_info = inode->root->fs_info; in inode_need_compress()
436 if (inode->defrag_compress) in inode_need_compress()
439 if (inode->flags & BTRFS_INODE_NOCOMPRESS) in inode_need_compress()
442 inode->flags & BTRFS_INODE_COMPRESS || in inode_need_compress()
443 inode->prop_compress) in inode_need_compress()
444 return btrfs_compress_heuristic(&inode->vfs_inode, start, end); in inode_need_compress()
453 (start > 0 || end + 1 < inode->disk_i_size)) in inode_should_defrag()
476 struct inode *inode = async_chunk->inode; in compress_file_range()
477 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in compress_file_range()
478 u64 blocksize = fs_info->sectorsize; in compress_file_range()
479 u64 start = async_chunk->start; in compress_file_range()
480 u64 end = async_chunk->end; in compress_file_range()
490 int compress_type = fs_info->compress_type; in compress_file_range()
494 inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1, in compress_file_range()
512 nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1; in compress_file_range()
530 total_compressed = actual_end - start; in compress_file_range()
537 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) in compress_file_range()
546 * we do compression for mount -o compress and when the in compress_file_range()
559 if (BTRFS_I(inode)->defrag_compress) in compress_file_range()
560 compress_type = BTRFS_I(inode)->defrag_compress; in compress_file_range()
561 else if (BTRFS_I(inode)->prop_compress) in compress_file_range()
562 compress_type = BTRFS_I(inode)->prop_compress; in compress_file_range()
583 compress_type | (fs_info->compress_level << 4), in compress_file_range()
584 inode->i_mapping, start, in compress_file_range()
592 struct page *page = pages[nr_pages - 1]; in compress_file_range()
601 PAGE_SIZE - offset); in compress_file_range()
657 WARN_ON(pages[i]->mapping); in compress_file_range()
707 WARN_ON(pages[i]->mapping); in compress_file_range()
717 !(BTRFS_I(inode)->prop_compress)) { in compress_file_range()
718 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS; in compress_file_range()
728 if (async_chunk->locked_page && in compress_file_range()
729 (page_offset(async_chunk->locked_page) >= start && in compress_file_range()
730 page_offset(async_chunk->locked_page) <= end)) { in compress_file_range()
731 __set_page_dirty_nobuffers(async_chunk->locked_page); in compress_file_range()
737 add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0, in compress_file_range()
748 if (!async_extent->pages) in free_async_extent_pages()
751 for (i = 0; i < async_extent->nr_pages; i++) { in free_async_extent_pages()
752 WARN_ON(async_extent->pages[i]->mapping); in free_async_extent_pages()
753 put_page(async_extent->pages[i]); in free_async_extent_pages()
755 kfree(async_extent->pages); in free_async_extent_pages()
756 async_extent->nr_pages = 0; in free_async_extent_pages()
757 async_extent->pages = NULL; in free_async_extent_pages()
768 struct btrfs_inode *inode = BTRFS_I(async_chunk->inode); in submit_compressed_extents()
769 struct btrfs_fs_info *fs_info = inode->root->fs_info; in submit_compressed_extents()
774 struct btrfs_root *root = inode->root; in submit_compressed_extents()
775 struct extent_io_tree *io_tree = &inode->io_tree; in submit_compressed_extents()
779 while (!list_empty(&async_chunk->extents)) { in submit_compressed_extents()
780 async_extent = list_entry(async_chunk->extents.next, in submit_compressed_extents()
782 list_del(&async_extent->list); in submit_compressed_extents()
785 lock_extent(io_tree, async_extent->start, in submit_compressed_extents()
786 async_extent->start + async_extent->ram_size - 1); in submit_compressed_extents()
788 if (!async_extent->pages) { in submit_compressed_extents()
793 ret = cow_file_range(inode, async_chunk->locked_page, in submit_compressed_extents()
794 async_extent->start, in submit_compressed_extents()
795 async_extent->start + in submit_compressed_extents()
796 async_extent->ram_size - 1, in submit_compressed_extents()
808 extent_write_locked_range(&inode->vfs_inode, in submit_compressed_extents()
809 async_extent->start, in submit_compressed_extents()
810 async_extent->start + in submit_compressed_extents()
811 async_extent->ram_size - 1, in submit_compressed_extents()
813 else if (ret && async_chunk->locked_page) in submit_compressed_extents()
814 unlock_page(async_chunk->locked_page); in submit_compressed_extents()
820 ret = btrfs_reserve_extent(root, async_extent->ram_size, in submit_compressed_extents()
821 async_extent->compressed_size, in submit_compressed_extents()
822 async_extent->compressed_size, in submit_compressed_extents()
827 if (ret == -ENOSPC) { in submit_compressed_extents()
828 unlock_extent(io_tree, async_extent->start, in submit_compressed_extents()
829 async_extent->start + in submit_compressed_extents()
830 async_extent->ram_size - 1); in submit_compressed_extents()
838 extent_range_redirty_for_io(&inode->vfs_inode, in submit_compressed_extents()
839 async_extent->start, in submit_compressed_extents()
840 async_extent->start + in submit_compressed_extents()
841 async_extent->ram_size - 1); in submit_compressed_extents()
851 em = create_io_em(inode, async_extent->start, in submit_compressed_extents()
852 async_extent->ram_size, /* len */ in submit_compressed_extents()
853 async_extent->start, /* orig_start */ in submit_compressed_extents()
857 async_extent->ram_size, /* ram_bytes */ in submit_compressed_extents()
858 async_extent->compress_type, in submit_compressed_extents()
866 async_extent->start, in submit_compressed_extents()
868 async_extent->ram_size, in submit_compressed_extents()
871 async_extent->compress_type); in submit_compressed_extents()
873 btrfs_drop_extent_cache(inode, async_extent->start, in submit_compressed_extents()
874 async_extent->start + in submit_compressed_extents()
875 async_extent->ram_size - 1, 0); in submit_compressed_extents()
883 extent_clear_unlock_delalloc(inode, async_extent->start, in submit_compressed_extents()
884 async_extent->start + in submit_compressed_extents()
885 async_extent->ram_size - 1, in submit_compressed_extents()
889 if (btrfs_submit_compressed_write(inode, async_extent->start, in submit_compressed_extents()
890 async_extent->ram_size, in submit_compressed_extents()
892 ins.offset, async_extent->pages, in submit_compressed_extents()
893 async_extent->nr_pages, in submit_compressed_extents()
894 async_chunk->write_flags, in submit_compressed_extents()
895 async_chunk->blkcg_css)) { in submit_compressed_extents()
896 struct page *p = async_extent->pages[0]; in submit_compressed_extents()
897 const u64 start = async_extent->start; in submit_compressed_extents()
898 const u64 end = start + async_extent->ram_size - 1; in submit_compressed_extents()
900 p->mapping = inode->vfs_inode.i_mapping; in submit_compressed_extents()
903 p->mapping = NULL; in submit_compressed_extents()
918 extent_clear_unlock_delalloc(inode, async_extent->start, in submit_compressed_extents()
919 async_extent->start + in submit_compressed_extents()
920 async_extent->ram_size - 1, in submit_compressed_extents()
935 struct extent_map_tree *em_tree = &inode->extent_tree; in get_extent_allocation_hint()
939 read_lock(&em_tree->lock); in get_extent_allocation_hint()
947 if (em->block_start >= EXTENT_MAP_LAST_BYTE) { in get_extent_allocation_hint()
950 if (em && em->block_start < EXTENT_MAP_LAST_BYTE) in get_extent_allocation_hint()
951 alloc_hint = em->block_start; in get_extent_allocation_hint()
955 alloc_hint = em->block_start; in get_extent_allocation_hint()
959 read_unlock(&em_tree->lock); in get_extent_allocation_hint()
971 * it to make sure we don't do extra locks or unlocks.
982 struct btrfs_root *root = inode->root; in cow_file_range()
983 struct btrfs_fs_info *fs_info = root->fs_info; in cow_file_range()
989 u64 blocksize = fs_info->sectorsize; in cow_file_range()
998 ret = -EINVAL; in cow_file_range()
1002 num_bytes = ALIGN(end - start + 1, blocksize); in cow_file_range()
1004 ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy)); in cow_file_range()
1026 (end - start + PAGE_SIZE) / PAGE_SIZE; in cow_file_range()
1035 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); in cow_file_range()
1048 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) in cow_file_range()
1051 min_alloc_size = fs_info->sectorsize; in cow_file_range()
1083 if (root->root_key.objectid == in cow_file_range()
1100 start + ram_size - 1, 0); in cow_file_range()
1115 extent_clear_unlock_delalloc(inode, start, start + ram_size - 1, in cow_file_range()
1122 num_bytes -= cur_alloc_size; in cow_file_range()
1139 btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0); in cow_file_range()
1160 start + cur_alloc_size - 1, in cow_file_range()
1186 btrfs_add_delayed_iput(async_chunk->inode); in async_cow_start()
1187 async_chunk->inode = NULL; in async_cow_start()
1201 nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >> in async_cow_submit()
1205 * ->inode could be NULL if async_chunk_start has failed to compress, in async_cow_submit()
1207 * always adjust ->async_delalloc_pages as it's paired with the init in async_cow_submit()
1210 if (async_chunk->inode) in async_cow_submit()
1214 if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) < in async_cow_submit()
1216 cond_wake_up_nomb(&fs_info->async_submit_wait); in async_cow_submit()
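
The comment above is about keeping a counter paired across the queue and submit paths: pages are added when a chunk is queued and always subtracted when it is submitted, even if compression was aborted, waking throttled writers once the total drops low enough. A rough userspace sketch of that back-pressure counter, using C11 atomics and an arbitrary stand-in limit in place of the kernel's wait queue and threshold:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define ASYNC_PAGES_LIMIT 1024		/* arbitrary stand-in threshold */

static atomic_long async_delalloc_pages;

static void queue_chunk(long nr_pages)
{
	/* paired increment done when the async chunk is queued */
	atomic_fetch_add(&async_delalloc_pages, nr_pages);
}

/* Returns true when writers throttled on the counter should be woken. */
static bool submit_chunk(long nr_pages)
{
	/* always subtract, even if there was nothing to submit */
	long now = atomic_fetch_sub(&async_delalloc_pages, nr_pages) - nr_pages;

	return now < ASYNC_PAGES_LIMIT;
}

int main(void)
{
	queue_chunk(2000);
	printf("wake after first submit:  %d\n", submit_chunk(500));	/* 0: 1500 still pending */
	printf("wake after second submit: %d\n", submit_chunk(600));	/* 1: down to 900 */
	return 0;
}
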
1224 if (async_chunk->inode) in async_cow_free()
1225 btrfs_add_delayed_iput(async_chunk->inode); in async_cow_free()
1226 if (async_chunk->blkcg_css) in async_cow_free()
1227 css_put(async_chunk->blkcg_css); in async_cow_free()
1232 if (atomic_dec_and_test(async_chunk->pending)) in async_cow_free()
1233 kvfree(async_chunk->pending); in async_cow_free()
1242 struct btrfs_fs_info *fs_info = inode->root->fs_info; in cow_file_range_async()
1248 u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K); in cow_file_range_async()
1254 unlock_extent(&inode->io_tree, start, end); in cow_file_range_async()
1256 if (inode->flags & BTRFS_INODE_NOCOMPRESS && in cow_file_range_async()
1278 return -ENOMEM; in cow_file_range_async()
1281 async_chunk = ctx->chunks; in cow_file_range_async()
1282 atomic_set(&ctx->num_chunks, num_chunks); in cow_file_range_async()
1286 cur_end = min(end, start + SZ_512K - 1); in cow_file_range_async()
1294 ihold(&inode->vfs_inode); in cow_file_range_async()
1295 async_chunk[i].pending = &ctx->num_chunks; in cow_file_range_async()
1296 async_chunk[i].inode = &inode->vfs_inode; in cow_file_range_async()
1322 cur_end - start); in cow_file_range_async()
1339 nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE); in cow_file_range_async()
1340 atomic_add(nr_pages, &fs_info->async_delalloc_pages); in cow_file_range_async()
1342 btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work); in cow_file_range_async()
1358 ret = btrfs_lookup_csums_range(fs_info->csum_root, bytenr, in csum_exist_in_range()
1359 bytenr + num_bytes - 1, &list, 0); in csum_exist_in_range()
1365 list_del(&sums->list); in csum_exist_in_range()
1378 const bool is_reloc_ino = (inode->root->root_key.objectid == in fallback_to_cow()
1380 const u64 range_bytes = end + 1 - start; in fallback_to_cow()
1381 struct extent_io_tree *io_tree = &inode->io_tree; in fallback_to_cow()
1421 struct btrfs_fs_info *fs_info = inode->root->fs_info; in fallback_to_cow()
1422 struct btrfs_space_info *sinfo = fs_info->data_sinfo; in fallback_to_cow()
1427 spin_lock(&sinfo->lock); in fallback_to_cow()
1429 spin_unlock(&sinfo->lock); in fallback_to_cow()
1453 struct btrfs_fs_info *fs_info = inode->root->fs_info; in run_delalloc_nocow()
1454 struct btrfs_root *root = inode->root; in run_delalloc_nocow()
1456 u64 cow_start = (u64)-1; in run_delalloc_nocow()
1474 return -ENOMEM; in run_delalloc_nocow()
1500 if (ret > 0 && path->slots[0] > 0 && check_prev) { in run_delalloc_nocow()
1501 leaf = path->nodes[0]; in run_delalloc_nocow()
1503 path->slots[0] - 1); in run_delalloc_nocow()
1506 path->slots[0]--; in run_delalloc_nocow()
1511 leaf = path->nodes[0]; in run_delalloc_nocow()
1512 if (path->slots[0] >= btrfs_header_nritems(leaf)) { in run_delalloc_nocow()
1515 if (cow_start != (u64)-1) in run_delalloc_nocow()
1521 leaf = path->nodes[0]; in run_delalloc_nocow()
1524 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in run_delalloc_nocow()
1535 path->slots[0]++; in run_delalloc_nocow()
1558 fi = btrfs_item_ptr(leaf, path->slots[0], in run_delalloc_nocow()
1576 path->slots[0]++; in run_delalloc_nocow()
1596 btrfs_root_last_snapshot(&root->root_item)) in run_delalloc_nocow()
1604 found_key.offset - in run_delalloc_nocow()
1608 * ret could be -EIO if the above fails to read in run_delalloc_nocow()
1612 if (cow_start != (u64)-1) in run_delalloc_nocow()
1621 disk_bytenr += cur_offset - found_key.offset; in run_delalloc_nocow()
1622 num_bytes = min(end + 1, extent_end) - cur_offset; in run_delalloc_nocow()
1627 if (!freespace_inode && atomic_read(&root->snapshot_force_cow)) in run_delalloc_nocow()
1638 * ret could be -EIO if the above fails to read in run_delalloc_nocow()
1642 if (cow_start != (u64)-1) in run_delalloc_nocow()
1654 extent_end = ALIGN(extent_end, fs_info->sectorsize); in run_delalloc_nocow()
1657 path->slots[0]++; in run_delalloc_nocow()
1670 if (cow_start == (u64)-1) in run_delalloc_nocow()
1675 path->slots[0]++; in run_delalloc_nocow()
1682 * COW range from cow_start to found_key.offset - 1. As the key in run_delalloc_nocow()
1686 if (cow_start != (u64)-1) { in run_delalloc_nocow()
1688 cow_start, found_key.offset - 1, in run_delalloc_nocow()
1692 cow_start = (u64)-1; in run_delalloc_nocow()
1696 u64 orig_start = found_key.offset - extent_offset; in run_delalloc_nocow()
1717 cur_offset + num_bytes - 1, in run_delalloc_nocow()
1734 if (root->root_key.objectid == in run_delalloc_nocow()
1745 cur_offset + num_bytes - 1, in run_delalloc_nocow()
1765 if (cur_offset <= end && cow_start == (u64)-1) in run_delalloc_nocow()
1768 if (cow_start != (u64)-1) { in run_delalloc_nocow()
1795 if (!(inode->flags & BTRFS_INODE_NODATACOW) && in need_force_cow()
1796 !(inode->flags & BTRFS_INODE_PREALLOC)) in need_force_cow()
1804 if (inode->defrag_bytes && in need_force_cow()
1805 test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG, 0, NULL)) in need_force_cow()
1822 if (inode->flags & BTRFS_INODE_NODATACOW && !force_cow) { in btrfs_run_delalloc_range()
1825 } else if (inode->flags & BTRFS_INODE_PREALLOC && !force_cow) { in btrfs_run_delalloc_range()
1833 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags); in btrfs_run_delalloc_range()
1839 end - start + 1); in btrfs_run_delalloc_range()
1849 if (!(orig->state & EXTENT_DELALLOC)) in btrfs_split_delalloc_extent()
1852 size = orig->end - orig->start + 1; in btrfs_split_delalloc_extent()
1861 new_size = orig->end - split + 1; in btrfs_split_delalloc_extent()
1863 new_size = split - orig->start; in btrfs_split_delalloc_extent()
1869 spin_lock(&BTRFS_I(inode)->lock); in btrfs_split_delalloc_extent()
1871 spin_unlock(&BTRFS_I(inode)->lock); in btrfs_split_delalloc_extent()
1886 if (!(other->state & EXTENT_DELALLOC)) in btrfs_merge_delalloc_extent()
1889 if (new->start > other->start) in btrfs_merge_delalloc_extent()
1890 new_size = new->end - other->start + 1; in btrfs_merge_delalloc_extent()
1892 new_size = other->end - new->start + 1; in btrfs_merge_delalloc_extent()
1896 spin_lock(&BTRFS_I(inode)->lock); in btrfs_merge_delalloc_extent()
1897 btrfs_mod_outstanding_extents(BTRFS_I(inode), -1); in btrfs_merge_delalloc_extent()
1898 spin_unlock(&BTRFS_I(inode)->lock); in btrfs_merge_delalloc_extent()
1920 old_size = other->end - other->start + 1; in btrfs_merge_delalloc_extent()
1922 old_size = new->end - new->start + 1; in btrfs_merge_delalloc_extent()
1927 spin_lock(&BTRFS_I(inode)->lock); in btrfs_merge_delalloc_extent()
1928 btrfs_mod_outstanding_extents(BTRFS_I(inode), -1); in btrfs_merge_delalloc_extent()
1929 spin_unlock(&BTRFS_I(inode)->lock); in btrfs_merge_delalloc_extent()
1935 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_add_delalloc_inodes()
1937 spin_lock(&root->delalloc_lock); in btrfs_add_delalloc_inodes()
1938 if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) { in btrfs_add_delalloc_inodes()
1939 list_add_tail(&BTRFS_I(inode)->delalloc_inodes, in btrfs_add_delalloc_inodes()
1940 &root->delalloc_inodes); in btrfs_add_delalloc_inodes()
1942 &BTRFS_I(inode)->runtime_flags); in btrfs_add_delalloc_inodes()
1943 root->nr_delalloc_inodes++; in btrfs_add_delalloc_inodes()
1944 if (root->nr_delalloc_inodes == 1) { in btrfs_add_delalloc_inodes()
1945 spin_lock(&fs_info->delalloc_root_lock); in btrfs_add_delalloc_inodes()
1946 BUG_ON(!list_empty(&root->delalloc_root)); in btrfs_add_delalloc_inodes()
1947 list_add_tail(&root->delalloc_root, in btrfs_add_delalloc_inodes()
1948 &fs_info->delalloc_roots); in btrfs_add_delalloc_inodes()
1949 spin_unlock(&fs_info->delalloc_root_lock); in btrfs_add_delalloc_inodes()
1952 spin_unlock(&root->delalloc_lock); in btrfs_add_delalloc_inodes()
1959 struct btrfs_fs_info *fs_info = root->fs_info; in __btrfs_del_delalloc_inode()
1961 if (!list_empty(&inode->delalloc_inodes)) { in __btrfs_del_delalloc_inode()
1962 list_del_init(&inode->delalloc_inodes); in __btrfs_del_delalloc_inode()
1964 &inode->runtime_flags); in __btrfs_del_delalloc_inode()
1965 root->nr_delalloc_inodes--; in __btrfs_del_delalloc_inode()
1966 if (!root->nr_delalloc_inodes) { in __btrfs_del_delalloc_inode()
1967 ASSERT(list_empty(&root->delalloc_inodes)); in __btrfs_del_delalloc_inode()
1968 spin_lock(&fs_info->delalloc_root_lock); in __btrfs_del_delalloc_inode()
1969 BUG_ON(list_empty(&root->delalloc_root)); in __btrfs_del_delalloc_inode()
1970 list_del_init(&root->delalloc_root); in __btrfs_del_delalloc_inode()
1971 spin_unlock(&fs_info->delalloc_root_lock); in __btrfs_del_delalloc_inode()
1979 spin_lock(&root->delalloc_lock); in btrfs_del_delalloc_inode()
1981 spin_unlock(&root->delalloc_lock); in btrfs_del_delalloc_inode()
1991 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_set_delalloc_extent()
2000 if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { in btrfs_set_delalloc_extent()
2001 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_set_delalloc_extent()
2002 u64 len = state->end + 1 - state->start; in btrfs_set_delalloc_extent()
2006 spin_lock(&BTRFS_I(inode)->lock); in btrfs_set_delalloc_extent()
2008 spin_unlock(&BTRFS_I(inode)->lock); in btrfs_set_delalloc_extent()
2014 percpu_counter_add_batch(&fs_info->delalloc_bytes, len, in btrfs_set_delalloc_extent()
2015 fs_info->delalloc_batch); in btrfs_set_delalloc_extent()
2016 spin_lock(&BTRFS_I(inode)->lock); in btrfs_set_delalloc_extent()
2017 BTRFS_I(inode)->delalloc_bytes += len; in btrfs_set_delalloc_extent()
2019 BTRFS_I(inode)->defrag_bytes += len; in btrfs_set_delalloc_extent()
2021 &BTRFS_I(inode)->runtime_flags)) in btrfs_set_delalloc_extent()
2023 spin_unlock(&BTRFS_I(inode)->lock); in btrfs_set_delalloc_extent()
2026 if (!(state->state & EXTENT_DELALLOC_NEW) && in btrfs_set_delalloc_extent()
2028 spin_lock(&BTRFS_I(inode)->lock); in btrfs_set_delalloc_extent()
2029 BTRFS_I(inode)->new_delalloc_bytes += state->end + 1 - in btrfs_set_delalloc_extent()
2030 state->start; in btrfs_set_delalloc_extent()
2031 spin_unlock(&BTRFS_I(inode)->lock); in btrfs_set_delalloc_extent()
2043 struct btrfs_fs_info *fs_info = btrfs_sb(vfs_inode->i_sb); in btrfs_clear_delalloc_extent()
2044 u64 len = state->end + 1 - state->start; in btrfs_clear_delalloc_extent()
2047 if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) { in btrfs_clear_delalloc_extent()
2048 spin_lock(&inode->lock); in btrfs_clear_delalloc_extent()
2049 inode->defrag_bytes -= len; in btrfs_clear_delalloc_extent()
2050 spin_unlock(&inode->lock); in btrfs_clear_delalloc_extent()
2058 if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { in btrfs_clear_delalloc_extent()
2059 struct btrfs_root *root = inode->root; in btrfs_clear_delalloc_extent()
2062 spin_lock(&inode->lock); in btrfs_clear_delalloc_extent()
2063 btrfs_mod_outstanding_extents(inode, -num_extents); in btrfs_clear_delalloc_extent()
2064 spin_unlock(&inode->lock); in btrfs_clear_delalloc_extent()
2072 root != fs_info->tree_root) in btrfs_clear_delalloc_extent()
2079 if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID && in btrfs_clear_delalloc_extent()
2080 do_list && !(state->state & EXTENT_NORESERVE) && in btrfs_clear_delalloc_extent()
2084 percpu_counter_add_batch(&fs_info->delalloc_bytes, -len, in btrfs_clear_delalloc_extent()
2085 fs_info->delalloc_batch); in btrfs_clear_delalloc_extent()
2086 spin_lock(&inode->lock); in btrfs_clear_delalloc_extent()
2087 inode->delalloc_bytes -= len; in btrfs_clear_delalloc_extent()
2088 if (do_list && inode->delalloc_bytes == 0 && in btrfs_clear_delalloc_extent()
2090 &inode->runtime_flags)) in btrfs_clear_delalloc_extent()
2092 spin_unlock(&inode->lock); in btrfs_clear_delalloc_extent()
2095 if ((state->state & EXTENT_DELALLOC_NEW) && in btrfs_clear_delalloc_extent()
2097 spin_lock(&inode->lock); in btrfs_clear_delalloc_extent()
2098 ASSERT(inode->new_delalloc_bytes >= len); in btrfs_clear_delalloc_extent()
2099 inode->new_delalloc_bytes -= len; in btrfs_clear_delalloc_extent()
2100 spin_unlock(&inode->lock); in btrfs_clear_delalloc_extent()
2105 * btrfs_bio_fits_in_stripe - Checks whether the size of the given bio will fit
2109 * @page - The page we are about to add to the bio
2110 * @size - size we want to add to the bio
2111 * @bio - bio we want to ensure is smaller than a stripe
2112 * @bio_flags - flags of the bio
2121 struct inode *inode = page->mapping->host; in btrfs_bio_fits_in_stripe()
2122 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_bio_fits_in_stripe()
2123 u64 logical = (u64)bio->bi_iter.bi_sector << 9; in btrfs_bio_fits_in_stripe()
2132 length = bio->bi_iter.bi_size; in btrfs_bio_fits_in_stripe()
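
The helper documented above only needs the distance from the bio's logical start to the end of its stripe; appending more bytes than remain in the stripe means the bio would span a stripe boundary. A minimal userspace sketch of that check, assuming a fixed 64 KiB stripe length and illustrative names (the kernel reads the real geometry from the chunk mapping):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define STRIPE_LEN (64 * 1024ULL)	/* assumed fixed stripe length */

/* True if adding 'size' bytes to a bio that starts at 'logical' and already
 * holds 'cur_bio_size' bytes would cross into the next stripe. */
static bool crosses_stripe(uint64_t logical, uint64_t cur_bio_size, uint64_t size)
{
	uint64_t stripe_end = (logical / STRIPE_LEN + 1) * STRIPE_LEN;
	uint64_t remaining = stripe_end - logical;

	return cur_bio_size + size > remaining;
}

int main(void)
{
	printf("%d\n", crosses_stripe(60 * 1024, 2 * 1024, 4 * 1024));	/* 1: spills into the next stripe */
	printf("%d\n", crosses_stripe(60 * 1024, 0, 4 * 1024));		/* 0: ends exactly on the boundary */
	return 0;
}
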
2170 * c-1) if bio is issued by fsync: sync submit
2173 * c-2) if root is reloc root: sync submit
2176 * c-3) otherwise: async submit
2182 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_submit_data_bio()
2183 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_submit_data_bio()
2187 int async = !atomic_read(&BTRFS_I(inode)->sync_writers); in btrfs_submit_data_bio()
2189 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; in btrfs_submit_data_bio()
2205 ret = btrfs_lookup_bio_sums(inode, bio, (u64)-1, NULL); in btrfs_submit_data_bio()
2212 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) in btrfs_submit_data_bio()
2229 bio->bi_status = ret; in btrfs_submit_data_bio()
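
A tiny sketch of the three-way decision described by the c-1/c-2/c-3 comment above; the flag names are stand-ins for the conditions the kernel actually tests (the sync_writers count and the relocation-root check):

#include <stdbool.h>
#include <stdio.h>

enum submit_mode { SUBMIT_SYNC, SUBMIT_ASYNC };

static enum submit_mode choose_submit(bool issued_by_fsync, bool is_reloc_root)
{
	if (issued_by_fsync)	/* c-1: fsync wants the submission done synchronously */
		return SUBMIT_SYNC;
	if (is_reloc_root)	/* c-2: relocation roots also submit synchronously */
		return SUBMIT_SYNC;
	return SUBMIT_ASYNC;	/* c-3: otherwise hand checksumming off to workers */
}

int main(void)
{
	printf("%d %d %d\n",
	       choose_submit(true, false),	/* 0 = sync */
	       choose_submit(false, true),	/* 0 = sync */
	       choose_submit(false, false));	/* 1 = async */
	return 0;
}
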
2246 trans->adding_csums = true; in add_pending_csums()
2247 ret = btrfs_csum_file_blocks(trans, trans->fs_info->csum_root, sum); in add_pending_csums()
2248 trans->adding_csums = false; in add_pending_csums()
2261 const u64 end = start + len - 1; in btrfs_find_new_delalloc_bytes()
2264 const u64 search_len = end - search_start + 1; in btrfs_find_new_delalloc_bytes()
2273 if (em->block_start != EXTENT_MAP_HOLE) in btrfs_find_new_delalloc_bytes()
2276 em_len = em->len; in btrfs_find_new_delalloc_bytes()
2277 if (em->start < search_start) in btrfs_find_new_delalloc_bytes()
2278 em_len -= search_start - em->start; in btrfs_find_new_delalloc_bytes()
2282 ret = set_extent_bit(&inode->io_tree, search_start, in btrfs_find_new_delalloc_bytes()
2283 search_start + em_len - 1, in btrfs_find_new_delalloc_bytes()
2301 if (start >= i_size_read(&inode->vfs_inode) && in btrfs_set_extent_delalloc()
2302 !(inode->flags & BTRFS_INODE_PREALLOC)) { in btrfs_set_extent_delalloc()
2312 end + 1 - start, in btrfs_set_extent_delalloc()
2318 return set_extent_delalloc(&inode->io_tree, start, end, extra_bits, in btrfs_set_extent_delalloc()
2343 page = fixup->page; in btrfs_writepage_fixup_worker()
2344 inode = BTRFS_I(fixup->inode); in btrfs_writepage_fixup_worker()
2346 page_end = page_offset(page) + PAGE_SIZE - 1; in btrfs_writepage_fixup_worker()
2359 * page->mapping may go NULL, but it shouldn't be moved to a different in btrfs_writepage_fixup_worker()
2362 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) { in btrfs_writepage_fixup_worker()
2397 lock_extent_bits(&inode->io_tree, page_start, page_end, &cached_state); in btrfs_writepage_fixup_worker()
2405 unlock_extent_cached(&inode->io_tree, page_start, page_end, in btrfs_writepage_fixup_worker()
2432 unlock_extent_cached(&inode->io_tree, page_start, page_end, in btrfs_writepage_fixup_worker()
2440 mapping_set_error(page->mapping, ret); in btrfs_writepage_fixup_worker()
2455 btrfs_add_delayed_iput(&inode->vfs_inode); in btrfs_writepage_fixup_worker()
2471 struct inode *inode = page->mapping->host; in btrfs_writepage_cow_fixup()
2472 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_writepage_cow_fixup()
2487 return -EAGAIN; in btrfs_writepage_cow_fixup()
2491 return -EAGAIN; in btrfs_writepage_cow_fixup()
2497 * page->mapping outside of the page lock. in btrfs_writepage_cow_fixup()
2502 btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL); in btrfs_writepage_cow_fixup()
2503 fixup->page = page; in btrfs_writepage_cow_fixup()
2504 fixup->inode = inode; in btrfs_writepage_cow_fixup()
2505 btrfs_queue_work(fs_info->fixup_workers, &fixup->work); in btrfs_writepage_cow_fixup()
2507 return -EAGAIN; in btrfs_writepage_cow_fixup()
2515 struct btrfs_root *root = inode->root; in insert_reserved_file_extent()
2528 return -ENOMEM; in insert_reserved_file_extent()
2550 path->leave_spinning = 1; in insert_reserved_file_extent()
2556 leaf = path->nodes[0]; in insert_reserved_file_extent()
2557 btrfs_set_stack_file_extent_generation(stack_fi, trans->transid); in insert_reserved_file_extent()
2559 btrfs_item_ptr_offset(leaf, path->slots[0]), in insert_reserved_file_extent()
2565 inode_add_bytes(&inode->vfs_inode, num_bytes); in insert_reserved_file_extent()
2591 spin_lock(&cache->lock); in btrfs_release_delalloc_bytes()
2592 cache->delalloc_bytes -= len; in btrfs_release_delalloc_bytes()
2593 spin_unlock(&cache->lock); in btrfs_release_delalloc_bytes()
2599 struct btrfs_ordered_extent *oe) in insert_ordered_extent_file_extent() argument
2606 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr); in insert_ordered_extent_file_extent()
2608 oe->disk_num_bytes); in insert_ordered_extent_file_extent()
2609 if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags)) in insert_ordered_extent_file_extent()
2610 logical_len = oe->truncated_len; in insert_ordered_extent_file_extent()
2612 logical_len = oe->num_bytes; in insert_ordered_extent_file_extent()
2615 btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type); in insert_ordered_extent_file_extent()
2618 return insert_reserved_file_extent(trans, BTRFS_I(oe->inode), in insert_ordered_extent_file_extent()
2619 oe->file_offset, &stack_fi, in insert_ordered_extent_file_extent()
2620 oe->qgroup_rsv); in insert_ordered_extent_file_extent()
2630 struct inode *inode = ordered_extent->inode; in btrfs_finish_ordered_io()
2631 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_finish_ordered_io()
2632 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_finish_ordered_io()
2634 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in btrfs_finish_ordered_io()
2639 u64 logical_len = ordered_extent->num_bytes; in btrfs_finish_ordered_io()
2647 start = ordered_extent->file_offset; in btrfs_finish_ordered_io()
2648 end = start + ordered_extent->num_bytes - 1; in btrfs_finish_ordered_io()
2650 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && in btrfs_finish_ordered_io()
2651 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) && in btrfs_finish_ordered_io()
2652 !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags)) in btrfs_finish_ordered_io()
2657 if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { in btrfs_finish_ordered_io()
2658 ret = -EIO; in btrfs_finish_ordered_io()
2664 if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { in btrfs_finish_ordered_io()
2666 logical_len = ordered_extent->truncated_len; in btrfs_finish_ordered_io()
2672 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { in btrfs_finish_ordered_io()
2673 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */ in btrfs_finish_ordered_io()
2685 trans->block_rsv = &BTRFS_I(inode)->block_rsv; in btrfs_finish_ordered_io()
2687 if (ret) /* -ENOMEM or corruption */ in btrfs_finish_ordered_io()
2705 trans->block_rsv = &BTRFS_I(inode)->block_rsv; in btrfs_finish_ordered_io()
2707 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) in btrfs_finish_ordered_io()
2708 compress_type = ordered_extent->compress_type; in btrfs_finish_ordered_io()
2709 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { in btrfs_finish_ordered_io()
2712 ordered_extent->file_offset, in btrfs_finish_ordered_io()
2713 ordered_extent->file_offset + in btrfs_finish_ordered_io()
2716 BUG_ON(root == fs_info->tree_root); in btrfs_finish_ordered_io()
2721 ordered_extent->disk_bytenr, in btrfs_finish_ordered_io()
2722 ordered_extent->disk_num_bytes); in btrfs_finish_ordered_io()
2725 unpin_extent_cache(&BTRFS_I(inode)->extent_tree, in btrfs_finish_ordered_io()
2726 ordered_extent->file_offset, in btrfs_finish_ordered_io()
2727 ordered_extent->num_bytes, trans->transid); in btrfs_finish_ordered_io()
2733 ret = add_pending_csums(trans, &ordered_extent->list); in btrfs_finish_ordered_io()
2741 if (ret) { /* -ENOMEM or corruption */ in btrfs_finish_ordered_io()
2752 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, clear_bits, in btrfs_finish_ordered_io()
2771 &ordered_extent->flags)) in btrfs_finish_ordered_io()
2772 mapping_set_error(ordered_extent->inode->i_mapping, -EIO); in btrfs_finish_ordered_io()
2793 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && in btrfs_finish_ordered_io()
2794 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { in btrfs_finish_ordered_io()
2801 ordered_extent->disk_bytenr, in btrfs_finish_ordered_io()
2802 ordered_extent->disk_num_bytes, in btrfs_finish_ordered_io()
2805 ordered_extent->disk_bytenr, in btrfs_finish_ordered_io()
2806 ordered_extent->disk_num_bytes, 1); in btrfs_finish_ordered_io()
2834 struct btrfs_inode *inode = BTRFS_I(page->mapping->host); in btrfs_writepage_endio_finish_ordered()
2835 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_writepage_endio_finish_ordered()
2843 end - start + 1, uptodate)) in btrfs_writepage_endio_finish_ordered()
2847 wq = fs_info->endio_freespace_worker; in btrfs_writepage_endio_finish_ordered()
2849 wq = fs_info->endio_write_workers; in btrfs_writepage_endio_finish_ordered()
2851 btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL); in btrfs_writepage_endio_finish_ordered()
2852 btrfs_queue_work(wq, &ordered_extent->work); in btrfs_writepage_endio_finish_ordered()
2859 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in check_data_csum()
2860 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); in check_data_csum()
2862 u16 csum_size = btrfs_super_csum_size(fs_info->super_copy); in check_data_csum()
2866 csum_expected = ((u8 *)io_bio->csum) + icsum * csum_size; in check_data_csum()
2869 shash->tfm = fs_info->csum_shash; in check_data_csum()
2880 io_bio->mirror_num); in check_data_csum()
2881 if (io_bio->device) in check_data_csum()
2882 btrfs_dev_stat_inc_and_print(io_bio->device, in check_data_csum()
2887 return -EIO; in check_data_csum()
2898 size_t offset = start - page_offset(page); in btrfs_verify_data_csum()
2899 struct inode *inode = page->mapping->host; in btrfs_verify_data_csum()
2900 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in btrfs_verify_data_csum()
2901 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_verify_data_csum()
2908 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) in btrfs_verify_data_csum()
2911 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID && in btrfs_verify_data_csum()
2917 phy_offset >>= inode->i_sb->s_blocksize_bits; in btrfs_verify_data_csum()
2919 (size_t)(end - start + 1)); in btrfs_verify_data_csum()
2923 * btrfs_add_delayed_iput - perform a delayed iput on @inode
2934 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_add_delayed_iput()
2937 if (atomic_add_unless(&inode->i_count, -1, 1)) in btrfs_add_delayed_iput()
2940 atomic_inc(&fs_info->nr_delayed_iputs); in btrfs_add_delayed_iput()
2941 spin_lock(&fs_info->delayed_iput_lock); in btrfs_add_delayed_iput()
2942 ASSERT(list_empty(&binode->delayed_iput)); in btrfs_add_delayed_iput()
2943 list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs); in btrfs_add_delayed_iput()
2944 spin_unlock(&fs_info->delayed_iput_lock); in btrfs_add_delayed_iput()
2945 if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags)) in btrfs_add_delayed_iput()
2946 wake_up_process(fs_info->cleaner_kthread); in btrfs_add_delayed_iput()
2952 list_del_init(&inode->delayed_iput); in run_delayed_iput_locked()
2953 spin_unlock(&fs_info->delayed_iput_lock); in run_delayed_iput_locked()
2954 iput(&inode->vfs_inode); in run_delayed_iput_locked()
2955 if (atomic_dec_and_test(&fs_info->nr_delayed_iputs)) in run_delayed_iput_locked()
2956 wake_up(&fs_info->delayed_iputs_wait); in run_delayed_iput_locked()
2957 spin_lock(&fs_info->delayed_iput_lock); in run_delayed_iput_locked()
2963 if (!list_empty(&inode->delayed_iput)) { in btrfs_run_delayed_iput()
2964 spin_lock(&fs_info->delayed_iput_lock); in btrfs_run_delayed_iput()
2965 if (!list_empty(&inode->delayed_iput)) in btrfs_run_delayed_iput()
2967 spin_unlock(&fs_info->delayed_iput_lock); in btrfs_run_delayed_iput()
2974 spin_lock(&fs_info->delayed_iput_lock); in btrfs_run_delayed_iputs()
2975 while (!list_empty(&fs_info->delayed_iputs)) { in btrfs_run_delayed_iputs()
2978 inode = list_first_entry(&fs_info->delayed_iputs, in btrfs_run_delayed_iputs()
2981 cond_resched_lock(&fs_info->delayed_iput_lock); in btrfs_run_delayed_iputs()
2983 spin_unlock(&fs_info->delayed_iput_lock); in btrfs_run_delayed_iputs()
2987 * btrfs_wait_on_delayed_iputs - wait on the delayed iputs to be done running
2988 * @fs_info - the fs_info for this fs
2989 * @return - EINTR if we were killed, 0 if nothing's pending
2998 int ret = wait_event_killable(fs_info->delayed_iputs_wait, in btrfs_wait_on_delayed_iputs()
2999 atomic_read(&fs_info->nr_delayed_iputs) == 0); in btrfs_wait_on_delayed_iputs()
3001 return -EINTR; in btrfs_wait_on_delayed_iputs()
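
The delayed-iput lines above describe a deferral pattern: if dropping our reference would be the final one, the inode is queued on a list instead of being released in the current context, and a later pass (the cleaner) runs the actual iputs, with a killable wait available for callers that need the queue drained. A minimal single-threaded userspace sketch of that pattern, with the wake-up and the wait omitted:

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;
	struct obj *next_delayed;	/* link on the delayed-release list */
};

static struct obj *delayed_list;
static int nr_delayed;

static void delayed_put(struct obj *o)
{
	if (o->refcount > 1) {		/* not the last reference: drop it now */
		o->refcount--;
		return;
	}
	o->next_delayed = delayed_list;	/* last reference: defer the release */
	delayed_list = o;
	nr_delayed++;
}

static void run_delayed_puts(void)
{
	while (delayed_list) {
		struct obj *o = delayed_list;

		delayed_list = o->next_delayed;
		nr_delayed--;
		free(o);		/* the deferred final release */
	}
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	o->refcount = 1;
	delayed_put(o);			/* queued, not freed yet */
	printf("pending: %d\n", nr_delayed);
	run_delayed_puts();		/* now it is released */
	printf("pending: %d\n", nr_delayed);
	return 0;
}
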
3014 ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode)); in btrfs_orphan_add()
3015 if (ret && ret != -EEXIST) { in btrfs_orphan_add()
3030 return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode)); in btrfs_orphan_del()
3039 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_orphan_cleanup()
3048 if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED)) in btrfs_orphan_cleanup()
3053 ret = -ENOMEM; in btrfs_orphan_cleanup()
3056 path->reada = READA_BACK; in btrfs_orphan_cleanup()
3060 key.offset = (u64)-1; in btrfs_orphan_cleanup()
3074 if (path->slots[0] == 0) in btrfs_orphan_cleanup()
3076 path->slots[0]--; in btrfs_orphan_cleanup()
3080 leaf = path->nodes[0]; in btrfs_orphan_cleanup()
3081 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in btrfs_orphan_cleanup()
3101 ret = -EINVAL; in btrfs_orphan_cleanup()
3110 inode = btrfs_iget(fs_info->sb, last_objectid, root); in btrfs_orphan_cleanup()
3112 if (ret && ret != -ENOENT) in btrfs_orphan_cleanup()
3115 if (ret == -ENOENT && root == fs_info->tree_root) { in btrfs_orphan_cleanup()
3131 spin_lock(&fs_info->fs_roots_radix_lock); in btrfs_orphan_cleanup()
3132 dead_root = radix_tree_lookup(&fs_info->fs_roots_radix, in btrfs_orphan_cleanup()
3134 if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0) in btrfs_orphan_cleanup()
3136 spin_unlock(&fs_info->fs_roots_radix_lock); in btrfs_orphan_cleanup()
3140 key.offset = found_key.objectid - 1; in btrfs_orphan_cleanup()
3156 * only if this filesystem was last used on a pre-v3.12 kernel in btrfs_orphan_cleanup()
3165 if (ret == -ENOENT || inode->i_nlink) { in btrfs_orphan_cleanup()
3191 root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE; in btrfs_orphan_cleanup()
3193 if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) { in btrfs_orphan_cleanup()
3233 *first_xattr_slot = -1; in acls_after_inode_item()
3243 if (*first_xattr_slot == -1) in acls_after_inode_item()
3273 if (*first_xattr_slot == -1) in acls_after_inode_item()
3279 * read an inode from the btree into the in-memory inode
3284 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_read_locked_inode()
3288 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_read_locked_inode()
3304 return -ENOMEM; in btrfs_read_locked_inode()
3307 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); in btrfs_read_locked_inode()
3316 leaf = path->nodes[0]; in btrfs_read_locked_inode()
3321 inode_item = btrfs_item_ptr(leaf, path->slots[0], in btrfs_read_locked_inode()
3323 inode->i_mode = btrfs_inode_mode(leaf, inode_item); in btrfs_read_locked_inode()
3329 round_up(i_size_read(inode), fs_info->sectorsize)); in btrfs_read_locked_inode()
3331 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime); in btrfs_read_locked_inode()
3332 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime); in btrfs_read_locked_inode()
3334 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime); in btrfs_read_locked_inode()
3335 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime); in btrfs_read_locked_inode()
3337 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime); in btrfs_read_locked_inode()
3338 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime); in btrfs_read_locked_inode()
3340 BTRFS_I(inode)->i_otime.tv_sec = in btrfs_read_locked_inode()
3341 btrfs_timespec_sec(leaf, &inode_item->otime); in btrfs_read_locked_inode()
3342 BTRFS_I(inode)->i_otime.tv_nsec = in btrfs_read_locked_inode()
3343 btrfs_timespec_nsec(leaf, &inode_item->otime); in btrfs_read_locked_inode()
3346 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); in btrfs_read_locked_inode()
3347 BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); in btrfs_read_locked_inode()
3351 inode->i_generation = BTRFS_I(inode)->generation; in btrfs_read_locked_inode()
3352 inode->i_rdev = 0; in btrfs_read_locked_inode()
3355 BTRFS_I(inode)->index_cnt = (u64)-1; in btrfs_read_locked_inode()
3356 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); in btrfs_read_locked_inode()
3361 * and then re-read we need to do a full sync since we don't have any in btrfs_read_locked_inode()
3365 * This is required for both inode re-read from disk and delayed inode in btrfs_read_locked_inode()
3368 if (BTRFS_I(inode)->last_trans == fs_info->generation) in btrfs_read_locked_inode()
3370 &BTRFS_I(inode)->runtime_flags); in btrfs_read_locked_inode()
3385 * xfs_io -c fsync mydir/foo in btrfs_read_locked_inode()
3399 BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans; in btrfs_read_locked_inode()
3407 BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans; in btrfs_read_locked_inode()
3409 path->slots[0]++; in btrfs_read_locked_inode()
3410 if (inode->i_nlink != 1 || in btrfs_read_locked_inode()
3411 path->slots[0] >= btrfs_header_nritems(leaf)) in btrfs_read_locked_inode()
3414 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]); in btrfs_read_locked_inode()
3418 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); in btrfs_read_locked_inode()
3423 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref); in btrfs_read_locked_inode()
3428 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf, in btrfs_read_locked_inode()
3436 maybe_acls = acls_after_inode_item(leaf, path->slots[0], in btrfs_read_locked_inode()
3438 if (first_xattr_slot != -1) { in btrfs_read_locked_inode()
3439 path->slots[0] = first_xattr_slot; in btrfs_read_locked_inode()
3445 root->root_key.objectid, ret); in btrfs_read_locked_inode()
3453 switch (inode->i_mode & S_IFMT) { in btrfs_read_locked_inode()
3455 inode->i_mapping->a_ops = &btrfs_aops; in btrfs_read_locked_inode()
3456 inode->i_fop = &btrfs_file_operations; in btrfs_read_locked_inode()
3457 inode->i_op = &btrfs_file_inode_operations; in btrfs_read_locked_inode()
3460 inode->i_fop = &btrfs_dir_file_operations; in btrfs_read_locked_inode()
3461 inode->i_op = &btrfs_dir_inode_operations; in btrfs_read_locked_inode()
3464 inode->i_op = &btrfs_symlink_inode_operations; in btrfs_read_locked_inode()
3466 inode->i_mapping->a_ops = &btrfs_aops; in btrfs_read_locked_inode()
3469 inode->i_op = &btrfs_special_inode_operations; in btrfs_read_locked_inode()
3470 init_special_inode(inode, inode->i_mode, rdev); in btrfs_read_locked_inode()
3492 btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size); in fill_inode_item()
3493 btrfs_set_token_inode_mode(&token, item, inode->i_mode); in fill_inode_item()
3494 btrfs_set_token_inode_nlink(&token, item, inode->i_nlink); in fill_inode_item()
3496 btrfs_set_token_timespec_sec(&token, &item->atime, in fill_inode_item()
3497 inode->i_atime.tv_sec); in fill_inode_item()
3498 btrfs_set_token_timespec_nsec(&token, &item->atime, in fill_inode_item()
3499 inode->i_atime.tv_nsec); in fill_inode_item()
3501 btrfs_set_token_timespec_sec(&token, &item->mtime, in fill_inode_item()
3502 inode->i_mtime.tv_sec); in fill_inode_item()
3503 btrfs_set_token_timespec_nsec(&token, &item->mtime, in fill_inode_item()
3504 inode->i_mtime.tv_nsec); in fill_inode_item()
3506 btrfs_set_token_timespec_sec(&token, &item->ctime, in fill_inode_item()
3507 inode->i_ctime.tv_sec); in fill_inode_item()
3508 btrfs_set_token_timespec_nsec(&token, &item->ctime, in fill_inode_item()
3509 inode->i_ctime.tv_nsec); in fill_inode_item()
3511 btrfs_set_token_timespec_sec(&token, &item->otime, in fill_inode_item()
3512 BTRFS_I(inode)->i_otime.tv_sec); in fill_inode_item()
3513 btrfs_set_token_timespec_nsec(&token, &item->otime, in fill_inode_item()
3514 BTRFS_I(inode)->i_otime.tv_nsec); in fill_inode_item()
3518 BTRFS_I(inode)->generation); in fill_inode_item()
3520 btrfs_set_token_inode_transid(&token, item, trans->transid); in fill_inode_item()
3521 btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); in fill_inode_item()
3522 btrfs_set_token_inode_flags(&token, item, BTRFS_I(inode)->flags); in fill_inode_item()
3527 * copy everything in the in-memory inode into the btree.
3539 return -ENOMEM; in btrfs_update_inode_item()
3541 path->leave_spinning = 1; in btrfs_update_inode_item()
3542 ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location, in btrfs_update_inode_item()
3546 ret = -ENOENT; in btrfs_update_inode_item()
3550 leaf = path->nodes[0]; in btrfs_update_inode_item()
3551 inode_item = btrfs_item_ptr(leaf, path->slots[0], in btrfs_update_inode_item()
3564 * copy everything in the in-memory inode into the btree.
3569 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_update_inode()
3577 * without delay in btrfs_update_inode()
3580 && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID in btrfs_update_inode()
3581 && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { in btrfs_update_inode()
3600 if (ret == -ENOSPC) in btrfs_update_inode_fallback()
3616 struct btrfs_fs_info *fs_info = root->fs_info; in __btrfs_unlink_inode()
3626 ret = -ENOMEM; in __btrfs_unlink_inode()
3630 path->leave_spinning = 1; in __btrfs_unlink_inode()
3632 name, name_len, -1); in __btrfs_unlink_inode()
3634 ret = di ? PTR_ERR(di) : -ENOENT; in __btrfs_unlink_inode()
3649 * that we delay to delete it, and just do this deletion when in __btrfs_unlink_inode()
3652 if (inode->dir_index) { in __btrfs_unlink_inode()
3655 index = inode->dir_index; in __btrfs_unlink_inode()
3678 if (ret != 0 && ret != -ENOENT) { in __btrfs_unlink_inode()
3685 if (ret == -ENOENT) in __btrfs_unlink_inode()
3692 * being run in btrfs-cleaner context. If we have enough of these built in __btrfs_unlink_inode()
3693 * up we can end up burning a lot of time in btrfs-cleaner without any in __btrfs_unlink_inode()
3705 btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2); in __btrfs_unlink_inode()
3706 inode_inc_iversion(&inode->vfs_inode); in __btrfs_unlink_inode()
3707 inode_inc_iversion(&dir->vfs_inode); in __btrfs_unlink_inode()
3708 inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime = in __btrfs_unlink_inode()
3709 dir->vfs_inode.i_ctime = current_time(&inode->vfs_inode); in __btrfs_unlink_inode()
3710 ret = btrfs_update_inode(trans, root, &dir->vfs_inode); in __btrfs_unlink_inode()
3723 drop_nlink(&inode->vfs_inode); in btrfs_unlink_inode()
3724 ret = btrfs_update_inode(trans, root, &inode->vfs_inode); in btrfs_unlink_inode()
3739 struct btrfs_root *root = BTRFS_I(dir)->root; in __unlink_start_trans()
3753 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_unlink()
3766 BTRFS_I(d_inode(dentry)), dentry->d_name.name, in btrfs_unlink()
3767 dentry->d_name.len); in btrfs_unlink()
3771 if (inode->i_nlink == 0) { in btrfs_unlink()
3779 btrfs_btree_balance_dirty(root->fs_info); in btrfs_unlink()
3786 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_unlink_subvol()
3792 const char *name = dentry->d_name.name; in btrfs_unlink_subvol()
3793 int name_len = dentry->d_name.len; in btrfs_unlink_subvol()
3800 objectid = inode->root->root_key.objectid; in btrfs_unlink_subvol()
3802 objectid = inode->location.objectid; in btrfs_unlink_subvol()
3805 return -EINVAL; in btrfs_unlink_subvol()
3810 return -ENOMEM; in btrfs_unlink_subvol()
3813 name, name_len, -1); in btrfs_unlink_subvol()
3815 ret = di ? PTR_ERR(di) : -ENOENT; in btrfs_unlink_subvol()
3819 leaf = path->nodes[0]; in btrfs_unlink_subvol()
3833 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect. in btrfs_unlink_subvol()
3843 ret = -ENOENT; in btrfs_unlink_subvol()
3850 leaf = path->nodes[0]; in btrfs_unlink_subvol()
3851 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in btrfs_unlink_subvol()
3856 root->root_key.objectid, dir_ino, in btrfs_unlink_subvol()
3870 btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2); in btrfs_unlink_subvol()
3872 dir->i_mtime = dir->i_ctime = current_time(dir); in btrfs_unlink_subvol()
3887 struct btrfs_fs_info *fs_info = root->fs_info; in may_destroy_subvol()
3896 return -ENOMEM; in may_destroy_subvol()
3899 dir_id = btrfs_super_root_dir(fs_info->super_copy); in may_destroy_subvol()
3900 di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path, in may_destroy_subvol()
3903 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); in may_destroy_subvol()
3904 if (key.objectid == root->root_key.objectid) { in may_destroy_subvol()
3905 ret = -EPERM; in may_destroy_subvol()
3914 key.objectid = root->root_key.objectid; in may_destroy_subvol()
3916 key.offset = (u64)-1; in may_destroy_subvol()
3918 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); in may_destroy_subvol()
3924 if (path->slots[0] > 0) { in may_destroy_subvol()
3925 path->slots[0]--; in may_destroy_subvol()
3926 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in may_destroy_subvol()
3927 if (key.objectid == root->root_key.objectid && in may_destroy_subvol()
3929 ret = -ENOTEMPTY; in may_destroy_subvol()
3939 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_prune_dentries()
3946 if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) in btrfs_prune_dentries()
3947 WARN_ON(btrfs_root_refs(&root->root_item) != 0); in btrfs_prune_dentries()
3949 spin_lock(&root->inode_lock); in btrfs_prune_dentries()
3951 node = root->inode_tree.rb_node; in btrfs_prune_dentries()
3958 node = node->rb_left; in btrfs_prune_dentries()
3960 node = node->rb_right; in btrfs_prune_dentries()
3977 inode = igrab(&entry->vfs_inode); in btrfs_prune_dentries()
3979 spin_unlock(&root->inode_lock); in btrfs_prune_dentries()
3980 if (atomic_read(&inode->i_count) > 1) in btrfs_prune_dentries()
3988 spin_lock(&root->inode_lock); in btrfs_prune_dentries()
3992 if (cond_resched_lock(&root->inode_lock)) in btrfs_prune_dentries()
3997 spin_unlock(&root->inode_lock); in btrfs_prune_dentries()
4002 struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb); in btrfs_delete_subvolume()
4003 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_delete_subvolume()
4005 struct btrfs_root *dest = BTRFS_I(inode)->root; in btrfs_delete_subvolume()
4011 down_write(&fs_info->subvol_sem); in btrfs_delete_subvolume()
4018 spin_lock(&dest->root_item_lock); in btrfs_delete_subvolume()
4019 if (dest->send_in_progress) { in btrfs_delete_subvolume()
4020 spin_unlock(&dest->root_item_lock); in btrfs_delete_subvolume()
4023 dest->root_key.objectid); in btrfs_delete_subvolume()
4024 ret = -EPERM; in btrfs_delete_subvolume()
4027 if (atomic_read(&dest->nr_swapfiles)) { in btrfs_delete_subvolume()
4028 spin_unlock(&dest->root_item_lock); in btrfs_delete_subvolume()
4031 root->root_key.objectid); in btrfs_delete_subvolume()
4032 ret = -EPERM; in btrfs_delete_subvolume()
4035 root_flags = btrfs_root_flags(&dest->root_item); in btrfs_delete_subvolume()
4036 btrfs_set_root_flags(&dest->root_item, in btrfs_delete_subvolume()
4038 spin_unlock(&dest->root_item_lock); in btrfs_delete_subvolume()
4059 trans->block_rsv = &block_rsv; in btrfs_delete_subvolume()
4060 trans->bytes_reserved = block_rsv.size; in btrfs_delete_subvolume()
4072 memset(&dest->root_item.drop_progress, 0, in btrfs_delete_subvolume()
4073 sizeof(dest->root_item.drop_progress)); in btrfs_delete_subvolume()
4074 dest->root_item.drop_level = 0; in btrfs_delete_subvolume()
4075 btrfs_set_root_refs(&dest->root_item, 0); in btrfs_delete_subvolume()
4077 if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) { in btrfs_delete_subvolume()
4079 fs_info->tree_root, in btrfs_delete_subvolume()
4080 dest->root_key.objectid); in btrfs_delete_subvolume()
4087 ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid, in btrfs_delete_subvolume()
4089 dest->root_key.objectid); in btrfs_delete_subvolume()
4090 if (ret && ret != -ENOENT) { in btrfs_delete_subvolume()
4094 if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) { in btrfs_delete_subvolume()
4096 dest->root_item.received_uuid, in btrfs_delete_subvolume()
4098 dest->root_key.objectid); in btrfs_delete_subvolume()
4099 if (ret && ret != -ENOENT) { in btrfs_delete_subvolume()
4105 free_anon_bdev(dest->anon_dev); in btrfs_delete_subvolume()
4106 dest->anon_dev = 0; in btrfs_delete_subvolume()
4108 trans->block_rsv = NULL; in btrfs_delete_subvolume()
4109 trans->bytes_reserved = 0; in btrfs_delete_subvolume()
4111 inode->i_flags |= S_DEAD; in btrfs_delete_subvolume()
4116 spin_lock(&dest->root_item_lock); in btrfs_delete_subvolume()
4117 root_flags = btrfs_root_flags(&dest->root_item); in btrfs_delete_subvolume()
4118 btrfs_set_root_flags(&dest->root_item, in btrfs_delete_subvolume()
4120 spin_unlock(&dest->root_item_lock); in btrfs_delete_subvolume()
4123 up_write(&fs_info->subvol_sem); in btrfs_delete_subvolume()
4127 ASSERT(dest->send_in_progress == 0); in btrfs_delete_subvolume()
4130 if (dest->ino_cache_inode) { in btrfs_delete_subvolume()
4131 iput(dest->ino_cache_inode); in btrfs_delete_subvolume()
4132 dest->ino_cache_inode = NULL; in btrfs_delete_subvolume()
4143 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_rmdir()
4147 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) in btrfs_rmdir()
4148 return -ENOTEMPTY; in btrfs_rmdir()
4165 last_unlink_trans = BTRFS_I(inode)->last_unlink_trans; in btrfs_rmdir()
4169 BTRFS_I(d_inode(dentry)), dentry->d_name.name, in btrfs_rmdir()
4170 dentry->d_name.len); in btrfs_rmdir()
4184 if (last_unlink_trans >= trans->transid) in btrfs_rmdir()
4185 BTRFS_I(dir)->last_unlink_trans = last_unlink_trans; in btrfs_rmdir()
4189 btrfs_btree_balance_dirty(root->fs_info); in btrfs_rmdir()
4216 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_truncate_inode_items()
4227 u32 found_type = (u8)-1; in btrfs_truncate_inode_items()
4232 int extent_type = -1; in btrfs_truncate_inode_items()
4238 const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize); in btrfs_truncate_inode_items()
4244 * For non-free space inodes and non-shareable roots, we want to back in btrfs_truncate_inode_items()
4249 test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) in btrfs_truncate_inode_items()
4254 return -ENOMEM; in btrfs_truncate_inode_items()
4255 path->reada = READA_BACK; in btrfs_truncate_inode_items()
4257 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { in btrfs_truncate_inode_items()
4258 lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, (u64)-1, in btrfs_truncate_inode_items()
4267 fs_info->sectorsize), in btrfs_truncate_inode_items()
4268 (u64)-1, 0); in btrfs_truncate_inode_items()
4273 * we relog the inode, so if root != BTRFS_I(inode)->root, it means in btrfs_truncate_inode_items()
4277 if (min_type == 0 && root == BTRFS_I(inode)->root) in btrfs_truncate_inode_items()
4281 key.offset = (u64)-1; in btrfs_truncate_inode_items()
4282 key.type = (u8)-1; in btrfs_truncate_inode_items()
4292 ret = -EAGAIN; in btrfs_truncate_inode_items()
4296 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); in btrfs_truncate_inode_items()
4305 if (path->slots[0] == 0) in btrfs_truncate_inode_items()
4307 path->slots[0]--; in btrfs_truncate_inode_items()
4314 leaf = path->nodes[0]; in btrfs_truncate_inode_items()
4315 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in btrfs_truncate_inode_items()
4326 fi = btrfs_item_ptr(leaf, path->slots[0], in btrfs_truncate_inode_items()
4341 BTRFS_I(inode), leaf, fi, path->slots[0], in btrfs_truncate_inode_items()
4344 item_end--; in btrfs_truncate_inode_items()
4369 extent_num_bytes = ALIGN(new_size - in btrfs_truncate_inode_items()
4371 fs_info->sectorsize); in btrfs_truncate_inode_items()
4372 clear_start = ALIGN(new_size, fs_info->sectorsize); in btrfs_truncate_inode_items()
4375 num_dec = (orig_num_bytes - in btrfs_truncate_inode_items()
4378 &root->state) && in btrfs_truncate_inode_items()
4386 extent_offset = found_key.offset - in btrfs_truncate_inode_items()
4394 &root->state)) in btrfs_truncate_inode_items()
4408 u32 size = (u32)(new_size - found_key.offset); in btrfs_truncate_inode_items()
4426 clear_len = fs_info->sectorsize; in btrfs_truncate_inode_items()
4429 if (test_bit(BTRFS_ROOT_SHAREABLE, &root->state)) in btrfs_truncate_inode_items()
4430 inode_sub_bytes(inode, item_end + 1 - new_size); in btrfs_truncate_inode_items()
4438 if (root == BTRFS_I(inode)->root) { in btrfs_truncate_inode_items()
4454 pending_del_slot = path->slots[0]; in btrfs_truncate_inode_items()
4457 path->slots[0] + 1 == pending_del_slot) { in btrfs_truncate_inode_items()
4460 pending_del_slot = path->slots[0]; in btrfs_truncate_inode_items()
4470 root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { in btrfs_truncate_inode_items()
4477 ref.real_root = root->root_key.objectid; in btrfs_truncate_inode_items()
4494 if (path->slots[0] == 0 || in btrfs_truncate_inode_items()
4495 path->slots[0] != pending_del_slot || in btrfs_truncate_inode_items()
4523 ret = -EAGAIN; in btrfs_truncate_inode_items()
4529 path->slots[0]--; in btrfs_truncate_inode_items()
4543 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { in btrfs_truncate_inode_items()
4548 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, in btrfs_truncate_inode_items()
4549 (u64)-1, &cached_state); in btrfs_truncate_inode_items()
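When btrfs_truncate_inode_items() above hits the extent that straddles new_size, the retained length is the sector-aligned remainder of that extent and num_dec is what gets handed back to the allocator. A standalone sketch of that arithmetic (userspace C; every value below is invented, and ALIGN_UP is a local stand-in for the kernel's ALIGN()):

#include <stdio.h>
#include <stdint.h>

/* local stand-in for the kernel's ALIGN(); 'a' must be a power of two */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
        uint64_t sectorsize = 4096;
        uint64_t extent_start = 1 << 20;          /* found_key.offset: extent begins at 1 MiB */
        uint64_t orig_num_bytes = 256 * 1024;     /* the extent originally covers 256 KiB */
        uint64_t new_size = extent_start + 10000; /* the truncate lands inside this extent */

        /* bytes of the extent kept after truncation, rounded up to a full sector */
        uint64_t extent_num_bytes = ALIGN_UP(new_size - extent_start, sectorsize);
        /* bytes released back to the allocator */
        uint64_t num_dec = orig_num_bytes - extent_num_bytes;

        printf("keep %llu bytes, release %llu bytes\n",
               (unsigned long long)extent_num_bytes,
               (unsigned long long)num_dec);
        return 0;
}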
4557 * btrfs_truncate_block - read, zero a chunk and write a block
4558 * @inode - inode that we're zeroing
4559 * @from - the offset to start zeroing
4560  * @len - the length to zero, 0 to zero the entire range respective to the offset in btrfs_truncate_block()
4562 * @front - zero up to the offset instead of from the offset on
4570 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_truncate_block()
4571 struct address_space *mapping = inode->i_mapping; in btrfs_truncate_block()
4572 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in btrfs_truncate_block()
4578 u32 blocksize = fs_info->sectorsize; in btrfs_truncate_block()
4580 unsigned offset = from & (blocksize - 1); in btrfs_truncate_block()
4593 block_end = block_start + blocksize - 1; in btrfs_truncate_block()
4619 ret = -ENOMEM; in btrfs_truncate_block()
4626 if (page->mapping != mapping) { in btrfs_truncate_block()
4632 ret = -EIO; in btrfs_truncate_block()
4652 clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end, in btrfs_truncate_block()
4666 len = blocksize - offset; in btrfs_truncate_block()
4669 memset(kaddr + (block_start - page_offset(page)), in btrfs_truncate_block()
4672 memset(kaddr + (block_start - page_offset(page)) + offset, in btrfs_truncate_block()
4682 set_extent_bit(&BTRFS_I(inode)->io_tree, block_start, in btrfs_truncate_block()
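btrfs_truncate_block() only touches the one block that contains the cut point; the offsets it zeroes come from simple power-of-two arithmetic on the "from" offset and the block size. A minimal userspace model of the common truncate-down case (front == 0, len == 0), with made-up inputs:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
        uint32_t blocksize = 4096;
        uint64_t from = 10000;                            /* e.g. the new i_size after a truncate */
        uint32_t offset = from & (blocksize - 1);         /* 1808: position inside its block */
        uint64_t block_start = from - offset;             /* 8192 */
        uint64_t block_end = block_start + blocksize - 1; /* 12287 */

        char block[4096];
        memset(block, 'D', sizeof(block));                /* pretend this is the block's data */

        /* front == 0, len == 0: zero from the cut point to the end of the block */
        memset(block + offset, 0, blocksize - offset);

        printf("block %llu-%llu, zeroed bytes %u..%u\n",
               (unsigned long long)block_start,
               (unsigned long long)block_end,
               offset, blocksize - 1);
        return 0;
}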
4708 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in maybe_insert_hole()
4717 BTRFS_I(inode)->last_trans = fs_info->generation; in maybe_insert_hole()
4718 BTRFS_I(inode)->last_sub_trans = root->log_transid; in maybe_insert_hole()
4719 BTRFS_I(inode)->last_log_commit = root->last_log_commit; in maybe_insert_hole()
4724 * 1 - for the one we're dropping in maybe_insert_hole()
4725 * 1 - for the one we're adding in maybe_insert_hole()
4726 * 1 - for updating the inode. in maybe_insert_hole()
4757 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_cont_expand()
4758 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_cont_expand()
4759 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in btrfs_cont_expand()
4762 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; in btrfs_cont_expand()
4763 u64 hole_start = ALIGN(oldsize, fs_info->sectorsize); in btrfs_cont_expand()
4764 u64 block_end = ALIGN(size, fs_info->sectorsize); in btrfs_cont_expand()
4783 block_end - 1, &cached_state); in btrfs_cont_expand()
4787 block_end - cur_offset); in btrfs_cont_expand()
4794 last_byte = ALIGN(last_byte, fs_info->sectorsize); in btrfs_cont_expand()
4795 hole_size = last_byte - cur_offset; in btrfs_cont_expand()
4797 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { in btrfs_cont_expand()
4811 cur_offset + hole_size - 1, 0); in btrfs_cont_expand()
4815 &BTRFS_I(inode)->runtime_flags); in btrfs_cont_expand()
4818 hole_em->start = cur_offset; in btrfs_cont_expand()
4819 hole_em->len = hole_size; in btrfs_cont_expand()
4820 hole_em->orig_start = cur_offset; in btrfs_cont_expand()
4822 hole_em->block_start = EXTENT_MAP_HOLE; in btrfs_cont_expand()
4823 hole_em->block_len = 0; in btrfs_cont_expand()
4824 hole_em->orig_block_len = 0; in btrfs_cont_expand()
4825 hole_em->ram_bytes = hole_size; in btrfs_cont_expand()
4826 hole_em->compress_type = BTRFS_COMPRESS_NONE; in btrfs_cont_expand()
4827 hole_em->generation = fs_info->generation; in btrfs_cont_expand()
4830 write_lock(&em_tree->lock); in btrfs_cont_expand()
4832 write_unlock(&em_tree->lock); in btrfs_cont_expand()
4833 if (err != -EEXIST) in btrfs_cont_expand()
4838 hole_size - 1, 0); in btrfs_cont_expand()
4855 unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state); in btrfs_cont_expand()
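On an expanding truncate, btrfs_cont_expand() describes everything between the old and the new end of file as holes (skipping prealloc extents). The range boundaries are just the old and new sizes rounded up to the sector size; a small illustration assuming a 4 KiB sector size and invented sizes:

#include <stdio.h>
#include <stdint.h>

static uint64_t align_up(uint64_t x, uint64_t a)
{
        return (x + a - 1) & ~(a - 1);
}

int main(void)
{
        uint64_t sectorsize = 4096;
        uint64_t oldsize = 10000;    /* previous i_size */
        uint64_t newsize = 1 << 20;  /* i_size after the expanding truncate */

        uint64_t hole_start = align_up(oldsize, sectorsize); /* 12288 */
        uint64_t block_end  = align_up(newsize, sectorsize); /* 1048576 */

        if (block_end > hole_start)
                printf("record a hole over [%llu, %llu): %llu bytes\n",
                       (unsigned long long)hole_start,
                       (unsigned long long)block_end,
                       (unsigned long long)(block_end - hole_start));
        else
                printf("old and new size share the last block, nothing to record\n");
        return 0;
}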
4861 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_setsize()
4864 loff_t newsize = attr->ia_size; in btrfs_setsize()
4865 int mask = attr->ia_valid; in btrfs_setsize()
4877 inode->i_ctime = inode->i_mtime = in btrfs_setsize()
4885  * state of this file - if the snapshot captures this expanding truncation, it must capture all writes that happened before this truncation. in btrfs_setsize()
4889 btrfs_drew_write_lock(&root->snapshot_lock); in btrfs_setsize()
4892 btrfs_drew_write_unlock(&root->snapshot_lock); in btrfs_setsize()
4898 btrfs_drew_write_unlock(&root->snapshot_lock); in btrfs_setsize()
4906 btrfs_drew_write_unlock(&root->snapshot_lock); in btrfs_setsize()
4917 &BTRFS_I(inode)->runtime_flags); in btrfs_setsize()
4924 if (ret && inode->i_nlink) { in btrfs_setsize()
4928 * Truncate failed, so fix up the in-memory size. We in btrfs_setsize()
4931 * in-memory size to match. in btrfs_setsize()
4933 err = btrfs_wait_ordered_range(inode, 0, (u64)-1); in btrfs_setsize()
4936 i_size_write(inode, BTRFS_I(inode)->disk_i_size); in btrfs_setsize()
4946 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_setattr()
4950 return -EROFS; in btrfs_setattr()
4956 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { in btrfs_setattr()
4962 if (attr->ia_valid) { in btrfs_setattr()
4967 if (!err && attr->ia_valid & ATTR_MODE) in btrfs_setattr()
4968 err = posix_acl_chmod(inode, inode->i_mode); in btrfs_setattr()
4988 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in evict_inode_truncate_pages()
4989 struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree; in evict_inode_truncate_pages()
4992 ASSERT(inode->i_state & I_FREEING); in evict_inode_truncate_pages()
4993 truncate_inode_pages_final(&inode->i_data); in evict_inode_truncate_pages()
4995 write_lock(&map_tree->lock); in evict_inode_truncate_pages()
4996 while (!RB_EMPTY_ROOT(&map_tree->map.rb_root)) { in evict_inode_truncate_pages()
4999 node = rb_first_cached(&map_tree->map); in evict_inode_truncate_pages()
5001 clear_bit(EXTENT_FLAG_PINNED, &em->flags); in evict_inode_truncate_pages()
5002 clear_bit(EXTENT_FLAG_LOGGING, &em->flags); in evict_inode_truncate_pages()
5006 write_unlock(&map_tree->lock); in evict_inode_truncate_pages()
5008 write_lock(&map_tree->lock); in evict_inode_truncate_pages()
5011 write_unlock(&map_tree->lock); in evict_inode_truncate_pages()
5021 * queue kthread), inode references (inode->i_count) were not taken in evict_inode_truncate_pages()
5025 * reference count - if we don't do it, when they access the inode's in evict_inode_truncate_pages()
5027 * use-after-free issue. in evict_inode_truncate_pages()
5029 spin_lock(&io_tree->lock); in evict_inode_truncate_pages()
5030 while (!RB_EMPTY_ROOT(&io_tree->state)) { in evict_inode_truncate_pages()
5037 node = rb_first(&io_tree->state); in evict_inode_truncate_pages()
5039 start = state->start; in evict_inode_truncate_pages()
5040 end = state->end; in evict_inode_truncate_pages()
5041 state_flags = state->state; in evict_inode_truncate_pages()
5042 spin_unlock(&io_tree->lock); in evict_inode_truncate_pages()
5056 end - start + 1); in evict_inode_truncate_pages()
5064 spin_lock(&io_tree->lock); in evict_inode_truncate_pages()
5066 spin_unlock(&io_tree->lock); in evict_inode_truncate_pages()
5072 struct btrfs_fs_info *fs_info = root->fs_info; in evict_refill_and_join()
5073 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; in evict_refill_and_join()
5085  * above. We reserve our extra bit here because we generate a ton of delayed refs activity. in evict_refill_and_join()
5091 ret = btrfs_block_rsv_refill(root, rsv, rsv->size + delayed_refs_extra, in evict_refill_and_join()
5099 btrfs_block_rsv_migrate(global_rsv, rsv, rsv->size, 0)) { in evict_refill_and_join()
5102 return ERR_PTR(-ENOSPC); in evict_refill_and_join()
5112 trans->block_rsv = &fs_info->trans_block_rsv; in evict_refill_and_join()
5113 trans->bytes_reserved = delayed_refs_extra; in evict_refill_and_join()
5114 btrfs_block_rsv_migrate(rsv, trans->block_rsv, in evict_refill_and_join()
5122 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_evict_inode()
5124 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_evict_inode()
5137 if (inode->i_nlink && in btrfs_evict_inode()
5138 ((btrfs_root_refs(&root->root_item) != 0 && in btrfs_evict_inode()
5139 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) || in btrfs_evict_inode()
5146 btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1); in btrfs_evict_inode()
5148 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) in btrfs_evict_inode()
5151 if (inode->i_nlink > 0) { in btrfs_evict_inode()
5152 BUG_ON(btrfs_root_refs(&root->root_item) != 0 && in btrfs_evict_inode()
5153 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID); in btrfs_evict_inode()
5164 rsv->size = btrfs_calc_metadata_size(fs_info, 1); in btrfs_evict_inode()
5165 rsv->failfast = 1; in btrfs_evict_inode()
5174 trans->block_rsv = rsv; in btrfs_evict_inode()
5177 trans->block_rsv = &fs_info->trans_block_rsv; in btrfs_evict_inode()
5180 if (ret && ret != -ENOSPC && ret != -EAGAIN) in btrfs_evict_inode()
5197 trans->block_rsv = rsv; in btrfs_evict_inode()
5199 trans->block_rsv = &fs_info->trans_block_rsv; in btrfs_evict_inode()
5203 if (!(root == fs_info->tree_root || in btrfs_evict_inode()
5204 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)) in btrfs_evict_inode()
5223 * If no dir entries were found, returns -ENOENT.
5224 * If found a corrupted location in dir entry, returns -EUCLEAN.
5229 const char *name = dentry->d_name.name; in btrfs_inode_by_name()
5230 int namelen = dentry->d_name.len; in btrfs_inode_by_name()
5233 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_inode_by_name()
5238 return -ENOMEM; in btrfs_inode_by_name()
5243 ret = di ? PTR_ERR(di) : -ENOENT; in btrfs_inode_by_name()
5247 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); in btrfs_inode_by_name()
5248 if (location->type != BTRFS_INODE_ITEM_KEY && in btrfs_inode_by_name()
5249 location->type != BTRFS_ROOT_ITEM_KEY) { in btrfs_inode_by_name()
5250 ret = -EUCLEAN; in btrfs_inode_by_name()
5251 btrfs_warn(root->fs_info, in btrfs_inode_by_name()
5254 location->objectid, location->type, location->offset); in btrfs_inode_by_name()
5257 *type = btrfs_dir_type(path->nodes[0], di); in btrfs_inode_by_name()
5284 err = -ENOMEM; in fixup_tree_root_location()
5288 err = -ENOENT; in fixup_tree_root_location()
5289 key.objectid = BTRFS_I(dir)->root->root_key.objectid; in fixup_tree_root_location()
5291 key.offset = location->objectid; in fixup_tree_root_location()
5293 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); in fixup_tree_root_location()
5300 leaf = path->nodes[0]; in fixup_tree_root_location()
5301 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); in fixup_tree_root_location()
5303 btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) in fixup_tree_root_location()
5306 ret = memcmp_extent_buffer(leaf, dentry->d_name.name, in fixup_tree_root_location()
5308 dentry->d_name.len); in fixup_tree_root_location()
5314 new_root = btrfs_get_fs_root(fs_info, location->objectid, true); in fixup_tree_root_location()
5321 location->objectid = btrfs_root_dirid(&new_root->root_item); in fixup_tree_root_location()
5322 location->type = BTRFS_INODE_ITEM_KEY; in fixup_tree_root_location()
5323 location->offset = 0; in fixup_tree_root_location()
5332 struct btrfs_root *root = BTRFS_I(inode)->root; in inode_tree_add()
5336 struct rb_node *new = &BTRFS_I(inode)->rb_node; in inode_tree_add()
5342 spin_lock(&root->inode_lock); in inode_tree_add()
5343 p = &root->inode_tree.rb_node; in inode_tree_add()
5349 p = &parent->rb_left; in inode_tree_add()
5351 p = &parent->rb_right; in inode_tree_add()
5353 WARN_ON(!(entry->vfs_inode.i_state & in inode_tree_add()
5355 rb_replace_node(parent, new, &root->inode_tree); in inode_tree_add()
5357 spin_unlock(&root->inode_lock); in inode_tree_add()
5362 rb_insert_color(new, &root->inode_tree); in inode_tree_add()
5363 spin_unlock(&root->inode_lock); in inode_tree_add()
5368 struct btrfs_root *root = inode->root; in inode_tree_del()
5371 spin_lock(&root->inode_lock); in inode_tree_del()
5372 if (!RB_EMPTY_NODE(&inode->rb_node)) { in inode_tree_del()
5373 rb_erase(&inode->rb_node, &root->inode_tree); in inode_tree_del()
5374 RB_CLEAR_NODE(&inode->rb_node); in inode_tree_del()
5375 empty = RB_EMPTY_ROOT(&root->inode_tree); in inode_tree_del()
5377 spin_unlock(&root->inode_lock); in inode_tree_del()
5379 if (empty && btrfs_root_refs(&root->root_item) == 0) { in inode_tree_del()
5380 spin_lock(&root->inode_lock); in inode_tree_del()
5381 empty = RB_EMPTY_ROOT(&root->inode_tree); in inode_tree_del()
5382 spin_unlock(&root->inode_lock); in inode_tree_del()
5393 inode->i_ino = args->ino; in btrfs_init_locked_inode()
5394 BTRFS_I(inode)->location.objectid = args->ino; in btrfs_init_locked_inode()
5395 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY; in btrfs_init_locked_inode()
5396 BTRFS_I(inode)->location.offset = 0; in btrfs_init_locked_inode()
5397 BTRFS_I(inode)->root = btrfs_grab_root(args->root); in btrfs_init_locked_inode()
5398 BUG_ON(args->root && !BTRFS_I(inode)->root); in btrfs_init_locked_inode()
5406 return args->ino == BTRFS_I(inode)->location.objectid && in btrfs_find_actor()
5407 args->root == BTRFS_I(inode)->root; in btrfs_find_actor()
5439 return ERR_PTR(-ENOMEM); in btrfs_iget_path()
5441 if (inode->i_state & I_NEW) { in btrfs_iget_path()
5456 ret = -ENOENT; in btrfs_iget_path()
5476 return ERR_PTR(-ENOMEM); in new_simple_dir()
5478 BTRFS_I(inode)->root = btrfs_grab_root(root); in new_simple_dir()
5479 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); in new_simple_dir()
5480 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); in new_simple_dir()
5482 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; in new_simple_dir()
5484  * We only need lookup, the rest is read-only and there's no inode associated with the dentry in new_simple_dir()
5487 inode->i_op = &simple_dir_inode_operations; in new_simple_dir()
5488 inode->i_opflags &= ~IOP_XATTR; in new_simple_dir()
5489 inode->i_fop = &simple_dir_operations; in new_simple_dir()
5490 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; in new_simple_dir()
5491 inode->i_mtime = current_time(inode); in new_simple_dir()
5492 inode->i_atime = inode->i_mtime; in new_simple_dir()
5493 inode->i_ctime = inode->i_mtime; in new_simple_dir()
5494 BTRFS_I(inode)->i_otime = inode->i_mtime; in new_simple_dir()
5502  * Compile-time asserts that generic FT_* types still match BTRFS_FT_* types in btrfs_inode_type()
5514 return fs_umode_to_ftype(inode->i_mode); in btrfs_inode_type()
5519 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); in btrfs_lookup_dentry()
5521 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_lookup_dentry()
5527 if (dentry->d_name.len > BTRFS_NAME_LEN) in btrfs_lookup_dentry()
5528 return ERR_PTR(-ENAMETOOLONG); in btrfs_lookup_dentry()
5535 inode = btrfs_iget(dir->i_sb, location.objectid, root); in btrfs_lookup_dentry()
5539 /* Do extra check against inode mode with di_type */ in btrfs_lookup_dentry()
5543 inode->i_mode, btrfs_inode_type(inode), in btrfs_lookup_dentry()
5546 return ERR_PTR(-EUCLEAN); in btrfs_lookup_dentry()
5554 if (ret != -ENOENT) in btrfs_lookup_dentry()
5557 inode = new_simple_dir(dir->i_sb, &location, sub_root); in btrfs_lookup_dentry()
5559 inode = btrfs_iget(dir->i_sb, location.objectid, sub_root); in btrfs_lookup_dentry()
5565 down_read(&fs_info->cleanup_work_sem); in btrfs_lookup_dentry()
5566 if (!sb_rdonly(inode->i_sb)) in btrfs_lookup_dentry()
5568 up_read(&fs_info->cleanup_work_sem); in btrfs_lookup_dentry()
5584 inode = d_inode(dentry->d_parent); in btrfs_dentry_delete()
5587 root = BTRFS_I(inode)->root; in btrfs_dentry_delete()
5588 if (btrfs_root_refs(&root->root_item) == 0) in btrfs_dentry_delete()
5602 if (inode == ERR_PTR(-ENOENT)) in btrfs_lookup()
5622 return -ENOMEM; in btrfs_opendir()
5623 private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); in btrfs_opendir()
5624 if (!private->filldir_buf) { in btrfs_opendir()
5626 return -ENOMEM; in btrfs_opendir()
5628 file->private_data = private; in btrfs_opendir()
5641 while (entries--) { in btrfs_filldir()
5645 ctx->pos = get_unaligned(&entry->offset); in btrfs_filldir()
5646 if (!dir_emit(ctx, name, get_unaligned(&entry->name_len), in btrfs_filldir()
5647 get_unaligned(&entry->ino), in btrfs_filldir()
5648 get_unaligned(&entry->type))) in btrfs_filldir()
5651 get_unaligned(&entry->name_len); in btrfs_filldir()
5652 ctx->pos++; in btrfs_filldir()
5660 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_real_readdir()
5661 struct btrfs_file_private *private = file->private_data; in btrfs_real_readdir()
5684 return -ENOMEM; in btrfs_real_readdir()
5686 addr = private->filldir_buf; in btrfs_real_readdir()
5687 path->reada = READA_FORWARD; in btrfs_real_readdir()
5695 key.offset = ctx->pos; in btrfs_real_readdir()
5705 leaf = path->nodes[0]; in btrfs_real_readdir()
5706 slot = path->slots[0]; in btrfs_real_readdir()
5722 if (found_key.offset < ctx->pos) in btrfs_real_readdir()
5731 ret = btrfs_filldir(private->filldir_buf, entries, ctx); in btrfs_real_readdir()
5734 addr = private->filldir_buf; in btrfs_real_readdir()
5741 put_unaligned(name_len, &entry->name_len); in btrfs_real_readdir()
5746 &entry->type); in btrfs_real_readdir()
5748 put_unaligned(location.objectid, &entry->ino); in btrfs_real_readdir()
5749 put_unaligned(found_key.offset, &entry->offset); in btrfs_real_readdir()
5754 path->slots[0]++; in btrfs_real_readdir()
5758 ret = btrfs_filldir(private->filldir_buf, entries, ctx); in btrfs_real_readdir()
5774  * they're returned by readdir. Until we re-use freed offsets we have this hack to stop new entries from being returned under the assumption that they'll never reach this huge offset. in btrfs_real_readdir()
5783 if (ctx->pos >= INT_MAX) in btrfs_real_readdir()
5784 ctx->pos = LLONG_MAX; in btrfs_real_readdir()
5786 ctx->pos = INT_MAX; in btrfs_real_readdir()
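The clamp just above parks the readdir position beyond any index a newly created entry could get, so a directory stream that already returned its last entry never reports entries created afterwards; it stops at INT_MAX first so 32-bit consumers of d_off still see a representable value. A toy model of only that decision (nothing else of readdir is modelled):

#include <stdio.h>
#include <limits.h>

/* park the directory position once the last real entry has been emitted */
static long long park_pos(long long pos)
{
        if (pos >= INT_MAX)
                return LLONG_MAX;   /* already past the 32-bit range, go all the way */
        return INT_MAX;             /* keep d_off representable for 32-bit callers */
}

int main(void)
{
        printf("%lld\n", park_pos(42));        /* -> 2147483647 */
        printf("%lld\n", park_pos(INT_MAX));   /* -> 9223372036854775807 */
        return 0;
}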
5804 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_dirty_inode()
5805 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_dirty_inode()
5809 if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags)) in btrfs_dirty_inode()
5817 if (ret && (ret == -ENOSPC || ret == -EDQUOT)) { in btrfs_dirty_inode()
5827 if (BTRFS_I(inode)->delayed_node) in btrfs_dirty_inode()
5840 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_update_time()
5844 return -EROFS; in btrfs_update_time()
5849 inode->i_ctime = *now; in btrfs_update_time()
5851 inode->i_mtime = *now; in btrfs_update_time()
5853 inode->i_atime = *now; in btrfs_update_time()
5859  * and then set the in-memory index_cnt variable to reflect free sequence numbers in btrfs_set_inode_index_count()
5864 struct btrfs_root *root = inode->root; in btrfs_set_inode_index_count()
5872 key.offset = (u64)-1; in btrfs_set_inode_index_count()
5876 return -ENOMEM; in btrfs_set_inode_index_count()
5892 if (path->slots[0] == 0) { in btrfs_set_inode_index_count()
5893 inode->index_cnt = 2; in btrfs_set_inode_index_count()
5897 path->slots[0]--; in btrfs_set_inode_index_count()
5899 leaf = path->nodes[0]; in btrfs_set_inode_index_count()
5900 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in btrfs_set_inode_index_count()
5904 inode->index_cnt = 2; in btrfs_set_inode_index_count()
5908 inode->index_cnt = found_key.offset + 1; in btrfs_set_inode_index_count()
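If no BTRFS_DIR_INDEX_KEY item exists yet, index_cnt starts at 2, leaving the first two readdir offsets for "." and ".."; otherwise the next free index is one past the highest one found. A compact model of that choice (the constant name below is illustrative, not a kernel define):

#include <stdio.h>
#include <stdint.h>

/* illustrative constant: readdir offsets 0 and 1 are used for "." and ".." */
#define FIRST_FREE_DIR_INDEX 2ULL

static uint64_t next_dir_index(int found_highest, uint64_t highest_offset)
{
        if (!found_highest)
                return FIRST_FREE_DIR_INDEX;
        return highest_offset + 1;
}

int main(void)
{
        printf("empty directory: next index %llu\n",
               (unsigned long long)next_dir_index(0, 0));
        printf("highest existing index 57: next index %llu\n",
               (unsigned long long)next_dir_index(1, 57));
        return 0;
}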
5922 if (dir->index_cnt == (u64)-1) { in btrfs_set_inode_index()
5931 *index = dir->index_cnt; in btrfs_set_inode_index()
5932 dir->index_cnt++; in btrfs_set_inode_index()
5941 args.ino = BTRFS_I(inode)->location.objectid; in btrfs_insert_inode_locked()
5942 args.root = BTRFS_I(inode)->root; in btrfs_insert_inode_locked()
5945 btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root), in btrfs_insert_inode_locked()
5961 flags = BTRFS_I(dir)->flags; in btrfs_inherit_iflags()
5964 BTRFS_I(inode)->flags &= ~BTRFS_INODE_COMPRESS; in btrfs_inherit_iflags()
5965 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS; in btrfs_inherit_iflags()
5967 BTRFS_I(inode)->flags &= ~BTRFS_INODE_NOCOMPRESS; in btrfs_inherit_iflags()
5968 BTRFS_I(inode)->flags |= BTRFS_INODE_COMPRESS; in btrfs_inherit_iflags()
5972 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW; in btrfs_inherit_iflags()
5973 if (S_ISREG(inode->i_mode)) in btrfs_inherit_iflags()
5974 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; in btrfs_inherit_iflags()
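btrfs_inherit_iflags() copies selected flags from the parent directory onto a freshly created inode: the two compression flags exclude each other, and NODATACOW additionally implies NODATASUM for regular files. A self-contained sketch of the same decisions with stand-in flag bits (the bit values are arbitrary, not the kernel's):

#include <stdio.h>

/* stand-in flag bits, arbitrary values chosen for this sketch only */
#define F_NODATACOW  0x01u
#define F_NODATASUM  0x02u
#define F_COMPRESS   0x04u
#define F_NOCOMPRESS 0x08u

static unsigned int inherit_iflags(unsigned int dir_flags, int is_regular_file)
{
        unsigned int flags = 0;

        if (dir_flags & F_NOCOMPRESS) {
                flags &= ~F_COMPRESS;
                flags |= F_NOCOMPRESS;
        } else if (dir_flags & F_COMPRESS) {
                flags &= ~F_NOCOMPRESS;
                flags |= F_COMPRESS;
        }

        if (dir_flags & F_NODATACOW) {
                flags |= F_NODATACOW;
                if (is_regular_file)
                        flags |= F_NODATASUM;
        }
        return flags;
}

int main(void)
{
        printf("dir NODATACOW, regular file -> 0x%x\n",
               inherit_iflags(F_NODATACOW, 1));   /* NODATACOW | NODATASUM */
        printf("dir COMPRESS, subdirectory  -> 0x%x\n",
               inherit_iflags(F_COMPRESS, 0));    /* COMPRESS only */
        return 0;
}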
5987 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_new_inode()
6002 return ERR_PTR(-ENOMEM); in btrfs_new_inode()
6005 inode = new_inode(fs_info->sb); in btrfs_new_inode()
6009 return ERR_PTR(-ENOMEM); in btrfs_new_inode()
6023 inode->i_ino = objectid; in btrfs_new_inode()
6042 BTRFS_I(inode)->index_cnt = 2; in btrfs_new_inode()
6043 BTRFS_I(inode)->dir_index = *index; in btrfs_new_inode()
6044 BTRFS_I(inode)->root = btrfs_grab_root(root); in btrfs_new_inode()
6045 BTRFS_I(inode)->generation = trans->transid; in btrfs_new_inode()
6046 inode->i_generation = BTRFS_I(inode)->generation; in btrfs_new_inode()
6054 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); in btrfs_new_inode()
6076 location = &BTRFS_I(inode)->location; in btrfs_new_inode()
6077 location->objectid = objectid; in btrfs_new_inode()
6078 location->offset = 0; in btrfs_new_inode()
6079 location->type = BTRFS_INODE_ITEM_KEY; in btrfs_new_inode()
6087 path->leave_spinning = 1; in btrfs_new_inode()
6095 inode->i_mtime = current_time(inode); in btrfs_new_inode()
6096 inode->i_atime = inode->i_mtime; in btrfs_new_inode()
6097 inode->i_ctime = inode->i_mtime; in btrfs_new_inode()
6098 BTRFS_I(inode)->i_otime = inode->i_mtime; in btrfs_new_inode()
6100 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], in btrfs_new_inode()
6102 memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item, in btrfs_new_inode()
6104 fill_inode_item(trans, path->nodes[0], inode_item, inode); in btrfs_new_inode()
6107 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, in btrfs_new_inode()
6109 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len); in btrfs_new_inode()
6110 btrfs_set_inode_ref_index(path->nodes[0], ref, *index); in btrfs_new_inode()
6112 write_extent_buffer(path->nodes[0], name, ptr, name_len); in btrfs_new_inode()
6115 btrfs_mark_buffer_dirty(path->nodes[0]); in btrfs_new_inode()
6122 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; in btrfs_new_inode()
6124 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW | in btrfs_new_inode()
6139 btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, ret); in btrfs_new_inode()
6147 BTRFS_I(dir)->index_cnt--; in btrfs_new_inode()
6164 struct btrfs_root *root = parent_inode->root; in btrfs_add_link()
6169 memcpy(&key, &inode->root->root_key, sizeof(key)); in btrfs_add_link()
6178 root->root_key.objectid, parent_ino, in btrfs_add_link()
6190 btrfs_inode_type(&inode->vfs_inode), index); in btrfs_add_link()
6191 if (ret == -EEXIST || ret == -EOVERFLOW) in btrfs_add_link()
6198 btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size + in btrfs_add_link()
6200 inode_inc_iversion(&parent_inode->vfs_inode); in btrfs_add_link()
6207 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) { in btrfs_add_link()
6208 struct timespec64 now = current_time(&parent_inode->vfs_inode); in btrfs_add_link()
6210 parent_inode->vfs_inode.i_mtime = now; in btrfs_add_link()
6211 parent_inode->vfs_inode.i_ctime = now; in btrfs_add_link()
6213 ret = btrfs_update_inode(trans, root, &parent_inode->vfs_inode); in btrfs_add_link()
6223 root->root_key.objectid, parent_ino, in btrfs_add_link()
6246 dentry->d_name.name, dentry->d_name.len, in btrfs_add_nondir()
6249 err = -EEXIST; in btrfs_add_nondir()
6256 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); in btrfs_mknod()
6258 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_mknod()
6277 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, in btrfs_mknod()
6278 dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid, in btrfs_mknod()
6292 inode->i_op = &btrfs_special_inode_operations; in btrfs_mknod()
6293 init_special_inode(inode, inode->i_mode, rdev); in btrfs_mknod()
6295 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); in btrfs_mknod()
6320 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); in btrfs_create()
6322 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_create()
6341 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, in btrfs_create()
6342 dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid, in btrfs_create()
6355 inode->i_fop = &btrfs_file_operations; in btrfs_create()
6356 inode->i_op = &btrfs_file_inode_operations; in btrfs_create()
6357 inode->i_mapping->a_ops = &btrfs_aops; in btrfs_create()
6359 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); in btrfs_create()
6388 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_link()
6390 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_link()
6396 if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid) in btrfs_link()
6397 return -EXDEV; in btrfs_link()
6399 if (inode->i_nlink >= BTRFS_LINK_MAX) in btrfs_link()
6400 return -EMLINK; in btrfs_link()
6412 trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6); in btrfs_link()
6420 BTRFS_I(inode)->dir_index = 0ULL; in btrfs_link()
6423 inode->i_ctime = current_time(inode); in btrfs_link()
6425 set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); in btrfs_link()
6433 struct dentry *parent = dentry->d_parent; in btrfs_link()
6438 if (inode->i_nlink == 1) { in btrfs_link()
6464 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); in btrfs_mkdir()
6467 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_mkdir()
6485 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, in btrfs_mkdir()
6486 dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid, in btrfs_mkdir()
6495 inode->i_op = &btrfs_dir_inode_operations; in btrfs_mkdir()
6496 inode->i_fop = &btrfs_dir_file_operations; in btrfs_mkdir()
6498 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); in btrfs_mkdir()
6508 dentry->d_name.name, in btrfs_mkdir()
6509 dentry->d_name.len, 0, index); in btrfs_mkdir()
6531 struct extent_buffer *leaf = path->nodes[0]; in uncompress_inline()
6542 btrfs_item_nr(path->slots[0])); in uncompress_inline()
6545 return -ENOMEM; in uncompress_inline()
6564 memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset); in uncompress_inline()
6572 * btrfs_get_extent - Lookup the first extent overlapping a range in a file.
6580 * range, reading it from the B-tree and caching it if necessary. Note that
6587 * Return: ERR_PTR on error, non-NULL extent_map on success.
6593 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_get_extent()
6598 int extent_type = -1; in btrfs_get_extent()
6600 struct btrfs_root *root = inode->root; in btrfs_get_extent()
6605 struct extent_map_tree *em_tree = &inode->extent_tree; in btrfs_get_extent()
6606 struct extent_io_tree *io_tree = &inode->io_tree; in btrfs_get_extent()
6608 read_lock(&em_tree->lock); in btrfs_get_extent()
6610 read_unlock(&em_tree->lock); in btrfs_get_extent()
6613 if (em->start > start || em->start + em->len <= start) in btrfs_get_extent()
6615 else if (em->block_start == EXTENT_MAP_INLINE && page) in btrfs_get_extent()
6622 ret = -ENOMEM; in btrfs_get_extent()
6625 em->start = EXTENT_MAP_HOLE; in btrfs_get_extent()
6626 em->orig_start = EXTENT_MAP_HOLE; in btrfs_get_extent()
6627 em->len = (u64)-1; in btrfs_get_extent()
6628 em->block_len = (u64)-1; in btrfs_get_extent()
6632 ret = -ENOMEM; in btrfs_get_extent()
6637 path->reada = READA_FORWARD; in btrfs_get_extent()
6643 path->leave_spinning = 1; in btrfs_get_extent()
6645 path->recurse = btrfs_is_free_space_inode(inode); in btrfs_get_extent()
6651 if (path->slots[0] == 0) in btrfs_get_extent()
6653 path->slots[0]--; in btrfs_get_extent()
6657 leaf = path->nodes[0]; in btrfs_get_extent()
6658 item = btrfs_item_ptr(leaf, path->slots[0], in btrfs_get_extent()
6660 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in btrfs_get_extent()
6679 if (!S_ISREG(inode->vfs_inode.i_mode)) { in btrfs_get_extent()
6680 ret = -EUCLEAN; in btrfs_get_extent()
6682 "regular/prealloc extent found for non-regular inode %llu", in btrfs_get_extent()
6690 path->slots[0], in btrfs_get_extent()
6695 path->slots[0]++; in btrfs_get_extent()
6696 if (path->slots[0] >= btrfs_header_nritems(leaf)) { in btrfs_get_extent()
6703 leaf = path->nodes[0]; in btrfs_get_extent()
6705 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in btrfs_get_extent()
6715 em->start = start; in btrfs_get_extent()
6716 em->orig_start = start; in btrfs_get_extent()
6717 em->len = found_key.offset - start; in btrfs_get_extent()
6718 em->block_start = EXTENT_MAP_HOLE; in btrfs_get_extent()
6738 extent_offset = page_offset(page) + pg_offset - extent_start; in btrfs_get_extent()
6739 copy_size = min_t(u64, PAGE_SIZE - pg_offset, in btrfs_get_extent()
6740 size - extent_offset); in btrfs_get_extent()
6741 em->start = extent_start + extent_offset; in btrfs_get_extent()
6742 em->len = ALIGN(copy_size, fs_info->sectorsize); in btrfs_get_extent()
6743 em->orig_block_len = em->len; in btrfs_get_extent()
6744 em->orig_start = em->start; in btrfs_get_extent()
6761 PAGE_SIZE - pg_offset - in btrfs_get_extent()
6768 set_extent_uptodate(io_tree, em->start, in btrfs_get_extent()
6769 extent_map_end(em) - 1, NULL, GFP_NOFS); in btrfs_get_extent()
6773 em->start = start; in btrfs_get_extent()
6774 em->orig_start = start; in btrfs_get_extent()
6775 em->len = len; in btrfs_get_extent()
6776 em->block_start = EXTENT_MAP_HOLE; in btrfs_get_extent()
6780 if (em->start > start || extent_map_end(em) <= start) { in btrfs_get_extent()
6783 em->start, em->len, start, len); in btrfs_get_extent()
6784 ret = -EIO; in btrfs_get_extent()
6788 write_lock(&em_tree->lock); in btrfs_get_extent()
6790 write_unlock(&em_tree->lock); in btrfs_get_extent()
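When the tree search in btrfs_get_extent() lands in a gap, the function does not fail; it synthesizes a hole mapping that runs from the requested start to the next file extent item (or to start + len if nothing follows) and caches that. A toy model of the gap computation using a plain sorted array in place of the B-tree (all data invented):

#include <stdio.h>
#include <stdint.h>

/* extents that do exist, sorted by start (all data invented) */
static const uint64_t extent_start[] = { 0,     65536,  262144 };
static const uint64_t extent_len[]   = { 65536, 131072, 65536  };
#define NR_EXTENTS (sizeof(extent_start) / sizeof(extent_start[0]))

/* length of the hole beginning at 'start', capped at 'len' bytes */
static uint64_t hole_len(uint64_t start, uint64_t len)
{
        for (unsigned int i = 0; i < NR_EXTENTS; i++) {
                if (start < extent_start[i]) {
                        uint64_t gap = extent_start[i] - start;
                        return gap < len ? gap : len;   /* hole up to the next extent */
                }
                if (start < extent_start[i] + extent_len[i])
                        return 0;                       /* covered, not a hole */
        }
        return len;                                     /* hole past the last extent */
}

int main(void)
{
        /* 196608..262143 is not covered above, so a 64 KiB hole is reported */
        printf("hole at 196608: %llu bytes\n",
               (unsigned long long)hole_len(196608, 1 << 20));
        return 0;
}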
6819 * - a hole or in btrfs_get_extent_fiemap()
6820 * - a pre-alloc extent, in btrfs_get_extent_fiemap()
6823 if (em->block_start != EXTENT_MAP_HOLE && in btrfs_get_extent_fiemap()
6824 !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) in btrfs_get_extent_fiemap()
6829 /* check to see if we've wrapped (len == -1 or similar) */ in btrfs_get_extent_fiemap()
6832 end = (u64)-1; in btrfs_get_extent_fiemap()
6834 end -= 1; in btrfs_get_extent_fiemap()
6839 delalloc_len = count_range_bits(&inode->io_tree, &delalloc_start, in btrfs_get_extent_fiemap()
6843 delalloc_end = (u64)-1; in btrfs_get_extent_fiemap()
6860 delalloc_len = delalloc_end - delalloc_start; in btrfs_get_extent_fiemap()
6869 err = -ENOMEM; in btrfs_get_extent_fiemap()
6881 if (hole_end <= start || hole_em->start > end) { in btrfs_get_extent_fiemap()
6885 hole_start = max(hole_em->start, start); in btrfs_get_extent_fiemap()
6886 hole_len = hole_end - hole_start; in btrfs_get_extent_fiemap()
6895 em->len = min(hole_len, delalloc_start - hole_start); in btrfs_get_extent_fiemap()
6896 em->start = hole_start; in btrfs_get_extent_fiemap()
6897 em->orig_start = hole_start; in btrfs_get_extent_fiemap()
6902 em->block_start = hole_em->block_start; in btrfs_get_extent_fiemap()
6903 em->block_len = hole_len; in btrfs_get_extent_fiemap()
6904 if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags)) in btrfs_get_extent_fiemap()
6905 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); in btrfs_get_extent_fiemap()
6911 em->start = delalloc_start; in btrfs_get_extent_fiemap()
6912 em->len = delalloc_len; in btrfs_get_extent_fiemap()
6913 em->orig_start = delalloc_start; in btrfs_get_extent_fiemap()
6914 em->block_start = EXTENT_MAP_DELALLOC; in btrfs_get_extent_fiemap()
6915 em->block_len = delalloc_len; in btrfs_get_extent_fiemap()
6956 btrfs_drop_extent_cache(inode, start, start + len - 1, 0); in btrfs_create_dio_extent()
6968 struct btrfs_root *root = inode->root; in btrfs_new_extent_direct()
6969 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_new_extent_direct()
6976 ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize, in btrfs_new_extent_direct()
6999 * @orig_len: (optional) Return the original on-disk length of the file extent
7019 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in can_nocow_extent()
7023 struct btrfs_root *root = BTRFS_I(inode)->root; in can_nocow_extent()
7024 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in can_nocow_extent()
7033 bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW); in can_nocow_extent()
7037 return -ENOMEM; in can_nocow_extent()
7044 slot = path->slots[0]; in can_nocow_extent()
7051 slot--; in can_nocow_extent()
7054 leaf = path->nodes[0]; in can_nocow_extent()
7097 btrfs_root_last_snapshot(&root->root_item))) in can_nocow_extent()
7103 *orig_start = key.offset - backref_offset; in can_nocow_extent()
7111 num_bytes = min(offset + *len, extent_end) - offset; in can_nocow_extent()
7116 root->fs_info->sectorsize) - 1; in can_nocow_extent()
7120 ret = -EAGAIN; in can_nocow_extent()
7133 key.offset - backref_offset, disk_bytenr, in can_nocow_extent()
7147 disk_bytenr += offset - key.offset; in can_nocow_extent()
7168 lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, in lock_extent_direct()
7176 lockend - lockstart + 1); in lock_extent_direct()
7186 (!writing || !filemap_range_has_page(inode->i_mapping, in lock_extent_direct()
7190 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, in lock_extent_direct()
7210 test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) in lock_extent_direct()
7213 ret = -ENOTBLK; in lock_extent_direct()
7229 ret = -ENOTBLK; in lock_extent_direct()
7257 em_tree = &inode->extent_tree; in create_io_em()
7260 return ERR_PTR(-ENOMEM); in create_io_em()
7262 em->start = start; in create_io_em()
7263 em->orig_start = orig_start; in create_io_em()
7264 em->len = len; in create_io_em()
7265 em->block_len = block_len; in create_io_em()
7266 em->block_start = block_start; in create_io_em()
7267 em->orig_block_len = orig_block_len; in create_io_em()
7268 em->ram_bytes = ram_bytes; in create_io_em()
7269 em->generation = -1; in create_io_em()
7270 set_bit(EXTENT_FLAG_PINNED, &em->flags); in create_io_em()
7272 set_bit(EXTENT_FLAG_FILLING, &em->flags); in create_io_em()
7274 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); in create_io_em()
7275 em->compress_type = compress_type; in create_io_em()
7279 btrfs_drop_extent_cache(inode, em->start, in create_io_em()
7280 em->start + em->len - 1, 0); in create_io_em()
7281 write_lock(&em_tree->lock); in create_io_em()
7283 write_unlock(&em_tree->lock); in create_io_em()
7288 } while (ret == -EEXIST); in create_io_em()
7305 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_get_blocks_direct_write()
7318 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || in btrfs_get_blocks_direct_write()
7319 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && in btrfs_get_blocks_direct_write()
7320 em->block_start != EXTENT_MAP_HOLE)) { in btrfs_get_blocks_direct_write()
7324 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) in btrfs_get_blocks_direct_write()
7328 len = min(len, em->len - (start - em->start)); in btrfs_get_blocks_direct_write()
7329 block_start = em->block_start + (start - em->start); in btrfs_get_blocks_direct_write()
7368 len = min(len, em->len - (start - em->start)); in btrfs_get_blocks_direct_write()
7378 dio_data->reserve -= len; in btrfs_get_blocks_direct_write()
7387 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_dio_iomap_begin()
7396 bool sync = (current->journal_info == BTRFS_DIO_SYNC_STUB); in btrfs_dio_iomap_begin()
7399 * We used current->journal_info here to see if we were sync, but in btrfs_dio_iomap_begin()
7401  * we have a journal_info set, so we need to clear this out and re-set it in iomap_end. in btrfs_dio_iomap_begin()
7404 ASSERT(current->journal_info == NULL || in btrfs_dio_iomap_begin()
7405 current->journal_info == BTRFS_DIO_SYNC_STUB); in btrfs_dio_iomap_begin()
7406 current->journal_info = NULL; in btrfs_dio_iomap_begin()
7409 len = min_t(u64, len, fs_info->sectorsize); in btrfs_dio_iomap_begin()
7412 lockend = start + len - 1; in btrfs_dio_iomap_begin()
7421 &BTRFS_I(inode)->runtime_flags)) { in btrfs_dio_iomap_begin()
7422 ret = filemap_fdatawrite_range(inode->i_mapping, start, in btrfs_dio_iomap_begin()
7423 start + length - 1); in btrfs_dio_iomap_begin()
7430 return -ENOMEM; in btrfs_dio_iomap_begin()
7432 dio_data->sync = sync; in btrfs_dio_iomap_begin()
7433 dio_data->length = length; in btrfs_dio_iomap_begin()
7435 dio_data->reserve = round_up(length, fs_info->sectorsize); in btrfs_dio_iomap_begin()
7437 &dio_data->data_reserved, in btrfs_dio_iomap_begin()
7438 start, dio_data->reserve); in btrfs_dio_iomap_begin()
7440 extent_changeset_free(dio_data->data_reserved); in btrfs_dio_iomap_begin()
7445 iomap->private = dio_data; in btrfs_dio_iomap_begin()
7453 ret = -ENOTBLK; in btrfs_dio_iomap_begin()
7473  * We return -ENOTBLK because that's what makes DIO go ahead and go back to buffered IO. in btrfs_dio_iomap_begin()
7477 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || in btrfs_dio_iomap_begin()
7478 em->block_start == EXTENT_MAP_INLINE) { in btrfs_dio_iomap_begin()
7481 * If we are in a NOWAIT context, return -EAGAIN in order to in btrfs_dio_iomap_begin()
7485 * space - this happens if we were able to read some data from in btrfs_dio_iomap_begin()
7486 * previous non-compressed extents and then when we fallback to in btrfs_dio_iomap_begin()
7490 * of bytes previously read is > 0, so it does not return -EFAULT). in btrfs_dio_iomap_begin()
7492 ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK; in btrfs_dio_iomap_begin()
7496 len = min(len, em->len - (start - em->start)); in btrfs_dio_iomap_begin()
7504 len = min(len, em->len - (start - em->start)); in btrfs_dio_iomap_begin()
7516 unlock_extent_cached(&BTRFS_I(inode)->io_tree, in btrfs_dio_iomap_begin()
7526 if ((em->block_start == EXTENT_MAP_HOLE) || in btrfs_dio_iomap_begin()
7527 (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) { in btrfs_dio_iomap_begin()
7528 iomap->addr = IOMAP_NULL_ADDR; in btrfs_dio_iomap_begin()
7529 iomap->type = IOMAP_HOLE; in btrfs_dio_iomap_begin()
7531 iomap->addr = em->block_start + (start - em->start); in btrfs_dio_iomap_begin()
7532 iomap->type = IOMAP_MAPPED; in btrfs_dio_iomap_begin()
7534 iomap->offset = start; in btrfs_dio_iomap_begin()
7535 iomap->bdev = fs_info->fs_devices->latest_bdev; in btrfs_dio_iomap_begin()
7536 iomap->length = len; in btrfs_dio_iomap_begin()
7543 unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, in btrfs_dio_iomap_begin()
7548 dio_data->data_reserved, start, in btrfs_dio_iomap_begin()
7549 dio_data->reserve, true); in btrfs_dio_iomap_begin()
7550 btrfs_delalloc_release_extents(BTRFS_I(inode), dio_data->reserve); in btrfs_dio_iomap_begin()
7551 extent_changeset_free(dio_data->data_reserved); in btrfs_dio_iomap_begin()
7561 struct btrfs_dio_data *dio_data = iomap->private; in btrfs_dio_iomap_end()
7562 size_t submitted = dio_data->submitted; in btrfs_dio_iomap_end()
7565 if (!write && (iomap->type == IOMAP_HOLE)) { in btrfs_dio_iomap_end()
7567 unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1); in btrfs_dio_iomap_end()
7573 length -= submitted; in btrfs_dio_iomap_end()
7578 unlock_extent(&BTRFS_I(inode)->io_tree, pos, in btrfs_dio_iomap_end()
7579 pos + length - 1); in btrfs_dio_iomap_end()
7580 ret = -ENOTBLK; in btrfs_dio_iomap_end()
7584 if (dio_data->reserve) in btrfs_dio_iomap_end()
7586 dio_data->data_reserved, pos, in btrfs_dio_iomap_end()
7587 dio_data->reserve, true); in btrfs_dio_iomap_end()
7588 btrfs_delalloc_release_extents(BTRFS_I(inode), dio_data->length); in btrfs_dio_iomap_end()
7589 extent_changeset_free(dio_data->data_reserved); in btrfs_dio_iomap_end()
7593 * We're all done, we can re-set the current->journal_info now safely in btrfs_dio_iomap_end()
7596 if (dio_data->sync) { in btrfs_dio_iomap_end()
7597 ASSERT(current->journal_info == NULL); in btrfs_dio_iomap_end()
7598 current->journal_info = BTRFS_DIO_SYNC_STUB; in btrfs_dio_iomap_end()
7601 iomap->private = NULL; in btrfs_dio_iomap_end()
7609 * This implies a barrier so that stores to dio_bio->bi_status before in btrfs_dio_private_put()
7610 * this and loads of dio_bio->bi_status after this are fully ordered. in btrfs_dio_private_put()
7612 if (!refcount_dec_and_test(&dip->refs)) in btrfs_dio_private_put()
7615 if (bio_op(dip->dio_bio) == REQ_OP_WRITE) { in btrfs_dio_private_put()
7616 __endio_write_update_ordered(BTRFS_I(dip->inode), in btrfs_dio_private_put()
7617 dip->logical_offset, in btrfs_dio_private_put()
7618 dip->bytes, in btrfs_dio_private_put()
7619 !dip->dio_bio->bi_status); in btrfs_dio_private_put()
7621 unlock_extent(&BTRFS_I(dip->inode)->io_tree, in btrfs_dio_private_put()
7622 dip->logical_offset, in btrfs_dio_private_put()
7623 dip->logical_offset + dip->bytes - 1); in btrfs_dio_private_put()
7626 bio_endio(dip->dio_bio); in btrfs_dio_private_put()
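The btrfs_dio_private is shared by every bio a direct I/O is split into: it starts with one reference, each submitted clone takes another, and whichever put drops the count to zero ends the parent dio_bio, as btrfs_dio_private_put() above does. A minimal single-threaded model of that lifetime (names and types here are illustrative only):

#include <stdio.h>

struct dio_private_model {
        int refs;        /* starts at 1: the reference held while splitting */
        int completed;   /* set when the last reference goes away */
};

static void dip_put(struct dio_private_model *dip)
{
        if (--dip->refs == 0) {
                dip->completed = 1;      /* this is where the parent bio would be ended */
                printf("parent dio completed\n");
        }
}

int main(void)
{
        struct dio_private_model dip = { .refs = 1, .completed = 0 };
        int clones = 3;

        for (int i = 0; i < clones; i++)
                dip.refs++;              /* one extra reference per submitted clone */

        for (int i = 0; i < clones; i++)
                dip_put(&dip);           /* each clone's completion drops one */

        dip_put(&dip);                   /* the splitter finally drops its own reference */
        return 0;
}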
7634 struct btrfs_dio_private *dip = bio->bi_private; in submit_dio_repair_bio()
7635 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in submit_dio_repair_bio()
7644 refcount_inc(&dip->refs); in submit_dio_repair_bio()
7647 refcount_dec(&dip->refs); in submit_dio_repair_bio()
7655 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; in btrfs_check_read_dio_bio()
7656 const u32 sectorsize = fs_info->sectorsize; in btrfs_check_read_dio_bio()
7657 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; in btrfs_check_read_dio_bio()
7658 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in btrfs_check_read_dio_bio()
7659 const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM); in btrfs_check_read_dio_bio()
7662 u64 start = io_bio->logical; in btrfs_check_read_dio_bio()
7666 __bio_for_each_segment(bvec, &io_bio->bio, iter, io_bio->iter) { in btrfs_check_read_dio_bio()
7685 &io_bio->bio, in btrfs_check_read_dio_bio()
7686 start - io_bio->logical, in btrfs_check_read_dio_bio()
7689 start + sectorsize - 1, in btrfs_check_read_dio_bio()
7690 io_bio->mirror_num, in btrfs_check_read_dio_bio()
7707 struct btrfs_fs_info *fs_info = inode->root->fs_info; in __endio_write_update_ordered()
7715 wq = fs_info->endio_freespace_worker; in __endio_write_update_ordered()
7717 wq = fs_info->endio_write_workers; in __endio_write_update_ordered()
7725 btrfs_init_work(&ordered->work, finish_ordered_fn, NULL, in __endio_write_update_ordered()
7727 btrfs_queue_work(wq, &ordered->work); in __endio_write_update_ordered()
7740 ordered_bytes = offset + bytes - ordered_offset; in __endio_write_update_ordered()
7756 struct btrfs_dio_private *dip = bio->bi_private; in btrfs_end_dio_bio()
7757 blk_status_t err = bio->bi_status; in btrfs_end_dio_bio()
7760 btrfs_warn(BTRFS_I(dip->inode)->root->fs_info, in btrfs_end_dio_bio()
7762 btrfs_ino(BTRFS_I(dip->inode)), bio_op(bio), in btrfs_end_dio_bio()
7763 bio->bi_opf, in btrfs_end_dio_bio()
7764 (unsigned long long)bio->bi_iter.bi_sector, in btrfs_end_dio_bio()
7765 bio->bi_iter.bi_size, err); in btrfs_end_dio_bio()
7768 err = btrfs_check_read_dio_bio(dip->inode, btrfs_io_bio(bio), in btrfs_end_dio_bio()
7773 dip->dio_bio->bi_status = err; in btrfs_end_dio_bio()
7782 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_submit_dio_bio()
7783 struct btrfs_dio_private *dip = bio->bi_private; in btrfs_submit_dio_bio()
7789 async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers); in btrfs_submit_dio_bio()
7797 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) in btrfs_submit_dio_bio()
7816 csum_offset = file_offset - dip->logical_offset; in btrfs_submit_dio_bio()
7817 csum_offset >>= inode->i_sb->s_blocksize_bits; in btrfs_submit_dio_bio()
7818 csum_offset *= btrfs_super_csum_size(fs_info->super_copy); in btrfs_submit_dio_bio()
7819 btrfs_io_bio(bio)->csum = dip->csums + csum_offset; in btrfs_submit_dio_bio()
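A split bio has to point into its own slice of the checksum array that was loaded for the whole direct read, and the slice starts (file_offset - logical_offset) / blocksize checksums in, scaled by the checksum size. A quick arithmetic sketch, assuming 4 KiB blocks and 4-byte crc32c checksums (both are assumptions here, not read from the super block as the kernel does):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t blocksize_bits = 12;             /* assume 4 KiB blocks */
        uint32_t csum_size = 4;                   /* assume crc32c: 4 bytes per block */
        uint64_t logical_offset = 1 << 20;        /* file offset where the whole DIO began */
        uint64_t file_offset = (1 << 20) + 65536; /* where this split bio begins */

        uint64_t csum_offset = (file_offset - logical_offset) >> blocksize_bits;
        csum_offset *= csum_size;

        /* this bio's checksums start csum_offset bytes into the preloaded array */
        printf("csum slice starts at byte %llu (block %llu of the DIO)\n",
               (unsigned long long)csum_offset,
               (unsigned long long)(csum_offset / csum_size));
        return 0;
}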
7836 const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM); in btrfs_create_dio_private()
7842 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_create_dio_private()
7843 const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy); in btrfs_create_dio_private()
7846 nblocks = dio_bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits; in btrfs_create_dio_private()
7854 dip->inode = inode; in btrfs_create_dio_private()
7855 dip->logical_offset = file_offset; in btrfs_create_dio_private()
7856 dip->bytes = dio_bio->bi_iter.bi_size; in btrfs_create_dio_private()
7857 dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9; in btrfs_create_dio_private()
7858 dip->dio_bio = dio_bio; in btrfs_create_dio_private()
7859 refcount_set(&dip->refs, 1); in btrfs_create_dio_private()
7867 const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM); in btrfs_submit_direct()
7868 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_submit_direct()
7881 struct btrfs_dio_data *dio_data = iomap->private; in btrfs_submit_direct()
7886 unlock_extent(&BTRFS_I(inode)->io_tree, file_offset, in btrfs_submit_direct()
7887 file_offset + dio_bio->bi_iter.bi_size - 1); in btrfs_submit_direct()
7889 dio_bio->bi_status = BLK_STS_RESOURCE; in btrfs_submit_direct()
7900 dip->csums); in btrfs_submit_direct()
7905 start_sector = dio_bio->bi_iter.bi_sector; in btrfs_submit_direct()
7906 submit_len = dio_bio->bi_iter.bi_size; in btrfs_submit_direct()
7925 bio->bi_private = dip; in btrfs_submit_direct()
7926 bio->bi_end_io = btrfs_end_dio_bio; in btrfs_submit_direct()
7927 btrfs_io_bio(bio)->logical = file_offset; in btrfs_submit_direct()
7930 submit_len -= clone_len; in btrfs_submit_direct()
7942 refcount_inc(&dip->refs); in btrfs_submit_direct()
7958 refcount_dec(&dip->refs); in btrfs_submit_direct()
7962 dio_data->submitted += clone_len; in btrfs_submit_direct()
7970 dip->dio_bio->bi_status = status; in btrfs_submit_direct()
7980 unsigned int blocksize_mask = fs_info->sectorsize - 1; in check_direct_IO()
7981 ssize_t retval = -EINVAL; in check_direct_IO()
7997 for (seg = 0; seg < iter->nr_segs; seg++) { in check_direct_IO()
7998 for (i = seg + 1; i < iter->nr_segs; i++) { in check_direct_IO()
7999 if (iter->iov[seg].iov_base == iter->iov[i].iov_base) in check_direct_IO()
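check_direct_IO() falls back to buffered I/O when the request is not aligned to the sector size; with a power-of-two sector size that is a single mask test. A simplified userspace version of the test (the kernel additionally checks the alignment of every iovec and, as shown above, duplicated iovec bases):

#include <stdio.h>
#include <stdint.h>

static int dio_aligned(uint64_t offset, uint64_t len, uint32_t sectorsize)
{
        uint32_t mask = sectorsize - 1;   /* sectorsize is a power of two */

        return ((offset | len) & mask) == 0;
}

int main(void)
{
        printf("offset 8192, len 4096 -> %s\n",
               dio_aligned(8192, 4096, 4096) ? "ok" : "-EINVAL, fall back to buffered");
        printf("offset 8192, len 1000 -> %s\n",
               dio_aligned(8192, 1000, 4096) ? "ok" : "-EINVAL, fall back to buffered");
        return 0;
}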
8016 if (current->journal_info == BTRFS_DIO_SYNC_STUB) { in btrfs_maybe_fsync_end_io()
8017 current->journal_info = NULL; in btrfs_maybe_fsync_end_io()
8025 iocb->ki_flags |= IOCB_DSYNC; in btrfs_maybe_fsync_end_io()
8048 struct file *file = iocb->ki_filp; in btrfs_direct_IO()
8049 struct inode *inode = file->f_mapping->host; in btrfs_direct_IO()
8050 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_direct_IO()
8052 loff_t offset = iocb->ki_pos; in btrfs_direct_IO()
8058 ASSERT(current->journal_info == NULL || in btrfs_direct_IO()
8059 current->journal_info == BTRFS_DIO_SYNC_STUB); in btrfs_direct_IO()
8060 current->journal_info = NULL; in btrfs_direct_IO()
8071 if (offset + count <= inode->i_size) { in btrfs_direct_IO()
8075 down_read(&BTRFS_I(inode)->dio_sem); in btrfs_direct_IO()
8082 if (current->journal_info) in btrfs_direct_IO()
8089 if (ret == -ENOTBLK) in btrfs_direct_IO()
8093 up_read(&BTRFS_I(inode)->dio_sem); in btrfs_direct_IO()
8116 struct btrfs_inode *inode = BTRFS_I(page->mapping->host); in btrfs_readpage()
8118 u64 end = start + PAGE_SIZE - 1; in btrfs_readpage()
8133 struct inode *inode = page->mapping->host; in btrfs_writepage()
8136 if (current->flags & PF_MEMALLOC) { in btrfs_writepage()
8212 struct btrfs_inode *inode = BTRFS_I(page->mapping->host); in btrfs_invalidatepage()
8213 struct extent_io_tree *tree = &inode->io_tree; in btrfs_invalidatepage()
8217 u64 page_end = page_start + PAGE_SIZE - 1; in btrfs_invalidatepage()
8220 int inode_evicting = inode->vfs_inode.i_state & I_FREEING; in btrfs_invalidatepage()
8236 * shouldn't clear page extent mapped, as page->private can still in btrfs_invalidatepage()
8253 ordered = btrfs_lookup_ordered_range(inode, start, page_end - start + 1); in btrfs_invalidatepage()
8256 ordered->file_offset + ordered->num_bytes - 1); in btrfs_invalidatepage()
8274 tree = &inode->ordered_tree; in btrfs_invalidatepage()
8276 spin_lock_irq(&tree->lock); in btrfs_invalidatepage()
8277 set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags); in btrfs_invalidatepage()
8278 new_len = start - ordered->file_offset; in btrfs_invalidatepage()
8279 if (new_len < ordered->truncated_len) in btrfs_invalidatepage()
8280 ordered->truncated_len = new_len; in btrfs_invalidatepage()
8281 spin_unlock_irq(&tree->lock); in btrfs_invalidatepage()
8285 end - start + 1, 1)) in btrfs_invalidatepage()
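If a page in the middle of a running ordered extent is invalidated, btrfs_invalidatepage() above marks the ordered extent truncated and shrinks its valid length back to the start of the invalidated range, never growing it. A small model of that clamp (structure and values invented):

#include <stdio.h>
#include <stdint.h>

struct ordered_model {
        uint64_t file_offset;     /* where the ordered extent starts in the file */
        uint64_t truncated_len;   /* how much of it is still considered valid */
};

static void truncate_ordered(struct ordered_model *o, uint64_t invalidate_start)
{
        uint64_t new_len = invalidate_start - o->file_offset;

        if (new_len < o->truncated_len)
                o->truncated_len = new_len;   /* only ever shrink */
}

int main(void)
{
        struct ordered_model o = { .file_offset = 4096, .truncated_len = 65536 };

        truncate_ordered(&o, 16384);
        printf("after invalidate at 16384: %llu valid bytes\n",
               (unsigned long long)o.truncated_len);    /* 12288 */

        truncate_ordered(&o, 32768);                     /* later start: no effect */
        printf("after invalidate at 32768: %llu valid bytes\n",
               (unsigned long long)o.truncated_len);     /* still 12288 */
        return 0;
}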
8344 struct page *page = vmf->page; in btrfs_page_mkwrite()
8345 struct inode *inode = file_inode(vmf->vma->vm_file); in btrfs_page_mkwrite()
8346 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_page_mkwrite()
8347 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in btrfs_page_mkwrite()
8364 sb_start_pagefault(inode->i_sb); in btrfs_page_mkwrite()
8366 page_end = page_start + PAGE_SIZE - 1; in btrfs_page_mkwrite()
8380 ret2 = file_update_time(vmf->vma->vm_file); in btrfs_page_mkwrite()
8395 if ((page->mapping != inode->i_mapping) || in btrfs_page_mkwrite()
8420 if (page->index == ((size - 1) >> PAGE_SHIFT)) { in btrfs_page_mkwrite()
8421 reserved_space = round_up(size - page_start, in btrfs_page_mkwrite()
8422 fs_info->sectorsize); in btrfs_page_mkwrite()
8424 end = page_start + reserved_space - 1; in btrfs_page_mkwrite()
8427 PAGE_SIZE - reserved_space, true); in btrfs_page_mkwrite()
8438 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end, in btrfs_page_mkwrite()
8459 memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start); in btrfs_page_mkwrite()
8472 sb_end_pagefault(inode->i_sb); in btrfs_page_mkwrite()
8483 sb_end_pagefault(inode->i_sb); in btrfs_page_mkwrite()
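For a write fault on the page that contains i_size, btrfs_page_mkwrite() reserves delalloc space only up to the end of file (rounded to a sector) and zeroes the rest of the page before dirtying it. The arithmetic, with invented numbers and assuming 64 KiB pages over 4 KiB sectors so the partial reservation is visible:

#include <stdio.h>
#include <stdint.h>

#define MODEL_PAGE_SIZE 65536ULL   /* assume 64 KiB pages so sectors subdivide a page */

static uint64_t round_up_u64(uint64_t x, uint64_t a)
{
        return (x + a - 1) & ~(a - 1);
}

int main(void)
{
        uint64_t sectorsize = 4096;
        uint64_t size = 3 * MODEL_PAGE_SIZE + 1000;  /* i_size: last page barely used */
        uint64_t page_start = (size / MODEL_PAGE_SIZE) * MODEL_PAGE_SIZE;

        uint64_t reserved_space = round_up_u64(size - page_start, sectorsize);
        uint64_t zero_start = size - page_start;     /* everything past EOF in the page */

        printf("reserve %llu bytes of delalloc, zero page bytes %llu..%llu\n",
               (unsigned long long)reserved_space,
               (unsigned long long)zero_start,
               (unsigned long long)(MODEL_PAGE_SIZE - 1));
        return 0;
}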
8490 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_truncate()
8491 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_truncate()
8495 u64 mask = fs_info->sectorsize - 1; in btrfs_truncate()
8499 ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask), in btrfs_truncate()
8500 (u64)-1); in btrfs_truncate()
8528  * 1) rsv - for the truncate reservation, which we will steal from the transaction reservation. in btrfs_truncate()
8530  * 2) fs_info->trans_block_rsv - this will have 1 items worth left for updating the inode. in btrfs_truncate()
8535 return -ENOMEM; in btrfs_truncate()
8536 rsv->size = min_size; in btrfs_truncate()
8537 rsv->failfast = 1; in btrfs_truncate()
8550 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv, in btrfs_truncate()
8561 set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); in btrfs_truncate()
8562 trans->block_rsv = rsv; in btrfs_truncate()
8566 inode->i_size, in btrfs_truncate()
8568 trans->block_rsv = &fs_info->trans_block_rsv; in btrfs_truncate()
8569 if (ret != -ENOSPC && ret != -EAGAIN) in btrfs_truncate()
8586 btrfs_block_rsv_release(fs_info, rsv, -1, NULL); in btrfs_truncate()
8587 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, in btrfs_truncate()
8590 trans->block_rsv = rsv; in btrfs_truncate()
8603 ret = btrfs_truncate_block(inode, inode->i_size, 0, 0); in btrfs_truncate()
8617 trans->block_rsv = &fs_info->trans_block_rsv; in btrfs_truncate()
8651 inode->i_op = &btrfs_dir_inode_operations; in btrfs_create_subvol_root()
8652 inode->i_fop = &btrfs_dir_file_operations; in btrfs_create_subvol_root()
8660 btrfs_err(new_root->fs_info, in btrfs_create_subvol_root()
8662 new_root->root_key.objectid, err); in btrfs_create_subvol_root()
8680 ei->root = NULL; in btrfs_alloc_inode()
8681 ei->generation = 0; in btrfs_alloc_inode()
8682 ei->last_trans = 0; in btrfs_alloc_inode()
8683 ei->last_sub_trans = 0; in btrfs_alloc_inode()
8684 ei->logged_trans = 0; in btrfs_alloc_inode()
8685 ei->delalloc_bytes = 0; in btrfs_alloc_inode()
8686 ei->new_delalloc_bytes = 0; in btrfs_alloc_inode()
8687 ei->defrag_bytes = 0; in btrfs_alloc_inode()
8688 ei->disk_i_size = 0; in btrfs_alloc_inode()
8689 ei->flags = 0; in btrfs_alloc_inode()
8690 ei->csum_bytes = 0; in btrfs_alloc_inode()
8691 ei->index_cnt = (u64)-1; in btrfs_alloc_inode()
8692 ei->dir_index = 0; in btrfs_alloc_inode()
8693 ei->last_unlink_trans = 0; in btrfs_alloc_inode()
8694 ei->last_reflink_trans = 0; in btrfs_alloc_inode()
8695 ei->last_log_commit = 0; in btrfs_alloc_inode()
8697 spin_lock_init(&ei->lock); in btrfs_alloc_inode()
8698 ei->outstanding_extents = 0; in btrfs_alloc_inode()
8699 if (sb->s_magic != BTRFS_TEST_MAGIC) in btrfs_alloc_inode()
8700 btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv, in btrfs_alloc_inode()
8702 ei->runtime_flags = 0; in btrfs_alloc_inode()
8703 ei->prop_compress = BTRFS_COMPRESS_NONE; in btrfs_alloc_inode()
8704 ei->defrag_compress = BTRFS_COMPRESS_NONE; in btrfs_alloc_inode()
8706 ei->delayed_node = NULL; in btrfs_alloc_inode()
8708 ei->i_otime.tv_sec = 0; in btrfs_alloc_inode()
8709 ei->i_otime.tv_nsec = 0; in btrfs_alloc_inode()
8711 inode = &ei->vfs_inode; in btrfs_alloc_inode()
8712 extent_map_tree_init(&ei->extent_tree); in btrfs_alloc_inode()
8713 extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO, inode); in btrfs_alloc_inode()
8714 extent_io_tree_init(fs_info, &ei->io_failure_tree, in btrfs_alloc_inode()
8716 extent_io_tree_init(fs_info, &ei->file_extent_tree, in btrfs_alloc_inode()
8718 ei->io_tree.track_uptodate = true; in btrfs_alloc_inode()
8719 ei->io_failure_tree.track_uptodate = true; in btrfs_alloc_inode()
8720 atomic_set(&ei->sync_writers, 0); in btrfs_alloc_inode()
8721 mutex_init(&ei->log_mutex); in btrfs_alloc_inode()
8722 btrfs_ordered_inode_tree_init(&ei->ordered_tree); in btrfs_alloc_inode()
8723 INIT_LIST_HEAD(&ei->delalloc_inodes); in btrfs_alloc_inode()
8724 INIT_LIST_HEAD(&ei->delayed_iput); in btrfs_alloc_inode()
8725 RB_CLEAR_NODE(&ei->rb_node); in btrfs_alloc_inode()
8726 init_rwsem(&ei->dio_sem); in btrfs_alloc_inode()
8734 btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0); in btrfs_test_destroy_inode()
8748 struct btrfs_root *root = inode->root; in btrfs_destroy_inode()
8750 WARN_ON(!hlist_empty(&vfs_inode->i_dentry)); in btrfs_destroy_inode()
8751 WARN_ON(vfs_inode->i_data.nrpages); in btrfs_destroy_inode()
8752 WARN_ON(inode->block_rsv.reserved); in btrfs_destroy_inode()
8753 WARN_ON(inode->block_rsv.size); in btrfs_destroy_inode()
8754 WARN_ON(inode->outstanding_extents); in btrfs_destroy_inode()
8755 WARN_ON(inode->delalloc_bytes); in btrfs_destroy_inode()
8756 WARN_ON(inode->new_delalloc_bytes); in btrfs_destroy_inode()
8757 WARN_ON(inode->csum_bytes); in btrfs_destroy_inode()
8758 WARN_ON(inode->defrag_bytes); in btrfs_destroy_inode()
8769 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); in btrfs_destroy_inode()
8773 btrfs_err(root->fs_info, in btrfs_destroy_inode()
8775 ordered->file_offset, ordered->num_bytes); in btrfs_destroy_inode()
8783 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); in btrfs_destroy_inode()
8784 btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1); in btrfs_destroy_inode()
8785 btrfs_put_root(inode->root); in btrfs_destroy_inode()
8790 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_drop_inode()
8796 if (btrfs_root_refs(&root->root_item) == 0) in btrfs_drop_inode()
8806 inode_init_once(&ei->vfs_inode); in init_once()
8859 return -ENOMEM; in btrfs_init_cachep()
8866 struct inode *inode = d_inode(path->dentry); in btrfs_getattr()
8867 u32 blocksize = inode->i_sb->s_blocksize; in btrfs_getattr()
8868 u32 bi_flags = BTRFS_I(inode)->flags; in btrfs_getattr()
8870 stat->result_mask |= STATX_BTIME; in btrfs_getattr()
8871 stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec; in btrfs_getattr()
8872 stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec; in btrfs_getattr()
8874 stat->attributes |= STATX_ATTR_APPEND; in btrfs_getattr()
8876 stat->attributes |= STATX_ATTR_COMPRESSED; in btrfs_getattr()
8878 stat->attributes |= STATX_ATTR_IMMUTABLE; in btrfs_getattr()
8880 stat->attributes |= STATX_ATTR_NODUMP; in btrfs_getattr()
8882 stat->attributes_mask |= (STATX_ATTR_APPEND | in btrfs_getattr()
8888 stat->dev = BTRFS_I(inode)->root->anon_dev; in btrfs_getattr()
8890 spin_lock(&BTRFS_I(inode)->lock); in btrfs_getattr()
8891 delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes; in btrfs_getattr()
8892 spin_unlock(&BTRFS_I(inode)->lock); in btrfs_getattr()
8893 stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) + in btrfs_getattr()
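Note: btrfs_getattr() fills in STATX_BTIME from the stored otime, translates inode flags into STATX_ATTR_* bits, and computes stat->blocks from the bytes already accounted on disk plus the delalloc bytes that are dirty but not yet written, each rounded up to the filesystem block size and reported in 512-byte units. A small arithmetic sketch of that last expression; align_up and the sample numbers are mine, not from the kernel:

#include <stdint.h>
#include <stdio.h>

/* Round x up to the next multiple of a (a must be a power of two). */
static uint64_t align_up(uint64_t x, uint64_t a)
{
	return (x + a - 1) & ~(a - 1);
}

/* stat->blocks counts 512-byte sectors, so align both byte counts to the
 * fs block size first and then shift by 9 -- the same shape as the
 * calculation in btrfs_getattr(). */
static uint64_t stat_blocks(uint64_t on_disk_bytes, uint64_t delalloc_bytes,
			    uint32_t blocksize)
{
	return (align_up(on_disk_bytes, blocksize) +
		align_up(delalloc_bytes, blocksize)) >> 9;
}

int main(void)
{
	/* 10000 bytes on disk + 5000 dirty delalloc bytes, 4K blocks:
	 * 12288 + 8192 = 20480 bytes -> 40 sectors. */
	printf("%llu\n", (unsigned long long)stat_blocks(10000, 5000, 4096));
	return 0;
}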
8903 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); in btrfs_rename_exchange()
8905 struct btrfs_root *root = BTRFS_I(old_dir)->root; in btrfs_rename_exchange()
8906 struct btrfs_root *dest = BTRFS_I(new_dir)->root; in btrfs_rename_exchange()
8907 struct inode *new_inode = new_dentry->d_inode; in btrfs_rename_exchange()
8908 struct inode *old_inode = old_dentry->d_inode; in btrfs_rename_exchange()
8921 * For non-subvolumes allow exchange only within one subvolume, in the in btrfs_rename_exchange()
8928 return -EXDEV; in btrfs_rename_exchange()
8933 down_read(&fs_info->subvol_sem); in btrfs_rename_exchange()
8963 BTRFS_I(old_inode)->dir_index = 0ULL; in btrfs_rename_exchange()
8964 BTRFS_I(new_inode)->dir_index = 0ULL; in btrfs_rename_exchange()
8974 new_dentry->d_name.name, in btrfs_rename_exchange()
8975 new_dentry->d_name.len, in btrfs_rename_exchange()
8992 old_dentry->d_name.name, in btrfs_rename_exchange()
8993 old_dentry->d_name.len, in btrfs_rename_exchange()
9009 old_dir->i_ctime = old_dir->i_mtime = ctime; in btrfs_rename_exchange()
9010 new_dir->i_ctime = new_dir->i_mtime = ctime; in btrfs_rename_exchange()
9011 old_inode->i_ctime = ctime; in btrfs_rename_exchange()
9012 new_inode->i_ctime = ctime; in btrfs_rename_exchange()
9014 if (old_dentry->d_parent != new_dentry->d_parent) { in btrfs_rename_exchange()
9026 BTRFS_I(old_dentry->d_inode), in btrfs_rename_exchange()
9027 old_dentry->d_name.name, in btrfs_rename_exchange()
9028 old_dentry->d_name.len); in btrfs_rename_exchange()
9042 BTRFS_I(new_dentry->d_inode), in btrfs_rename_exchange()
9043 new_dentry->d_name.name, in btrfs_rename_exchange()
9044 new_dentry->d_name.len); in btrfs_rename_exchange()
9054 new_dentry->d_name.name, in btrfs_rename_exchange()
9055 new_dentry->d_name.len, 0, old_idx); in btrfs_rename_exchange()
9062 old_dentry->d_name.name, in btrfs_rename_exchange()
9063 old_dentry->d_name.len, 0, new_idx); in btrfs_rename_exchange()
9069 if (old_inode->i_nlink == 1) in btrfs_rename_exchange()
9070 BTRFS_I(old_inode)->dir_index = old_idx; in btrfs_rename_exchange()
9071 if (new_inode->i_nlink == 1) in btrfs_rename_exchange()
9072 BTRFS_I(new_inode)->dir_index = new_idx; in btrfs_rename_exchange()
9076 new_dentry->d_parent); in btrfs_rename_exchange()
9082 old_dentry->d_parent); in btrfs_rename_exchange()
9099 if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) || in btrfs_rename_exchange()
9100 btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) || in btrfs_rename_exchange()
9101 btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) || in btrfs_rename_exchange()
9103 btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation))) in btrfs_rename_exchange()
9120 up_read(&fs_info->subvol_sem); in btrfs_rename_exchange()
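Note: the RENAME_EXCHANGE path above only allows crossing subvolume boundaries when both entries are themselves subvolume roots, clears the cached dir_index on both inodes, stamps the same ctime on both directories and both inodes, and skips pinning the log when any of the four inodes is already being logged. A compact sketch of just the cross-subvolume check; the demo_inode fields are illustrative, not the kernel's:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

struct demo_inode {
	uint64_t root_id;	/* subvolume the inode lives in */
	bool is_subvol_root;	/* directory that represents a subvolume */
};

/* Exchanging entries across two different subvolumes is only allowed
 * when both ends are subvolume roots (they are just logical links);
 * anything else would have to move items between trees, so fail with
 * -EXDEV as the code above does. */
static int demo_exchange_check(const struct demo_inode *a,
			       const struct demo_inode *b)
{
	if (a->root_id != b->root_id &&
	    (!a->is_subvol_root || !b->is_subvol_root))
		return -EXDEV;
	return 0;
}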
9140 dentry->d_name.name, in btrfs_whiteout_for_rename()
9141 dentry->d_name.len, in btrfs_whiteout_for_rename()
9152 inode->i_op = &btrfs_special_inode_operations; in btrfs_whiteout_for_rename()
9153 init_special_inode(inode, inode->i_mode, in btrfs_whiteout_for_rename()
9157 &dentry->d_name); in btrfs_whiteout_for_rename()
9180 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); in btrfs_rename()
9183 struct btrfs_root *root = BTRFS_I(old_dir)->root; in btrfs_rename()
9184 struct btrfs_root *dest = BTRFS_I(new_dir)->root; in btrfs_rename()
9194 return -EPERM; in btrfs_rename()
9198 return -EXDEV; in btrfs_rename()
9202 return -ENOTEMPTY; in btrfs_rename()
9204 if (S_ISDIR(old_inode->i_mode) && new_inode && in btrfs_rename()
9205 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) in btrfs_rename()
9206 return -ENOTEMPTY; in btrfs_rename()
9210 ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, in btrfs_rename()
9211 new_dentry->d_name.name, in btrfs_rename()
9212 new_dentry->d_name.len); in btrfs_rename()
9215 if (ret == -EEXIST) { in btrfs_rename()
9222 /* maybe -EOVERFLOW */ in btrfs_rename()
9232 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size) in btrfs_rename()
9233 filemap_flush(old_inode->i_mapping); in btrfs_rename()
9237 down_read(&fs_info->subvol_sem); in btrfs_rename()
9265 BTRFS_I(old_inode)->dir_index = 0ULL; in btrfs_rename()
9273 new_dentry->d_name.name, in btrfs_rename()
9274 new_dentry->d_name.len, in btrfs_rename()
9284 old_dir->i_ctime = old_dir->i_mtime = in btrfs_rename()
9285 new_dir->i_ctime = new_dir->i_mtime = in btrfs_rename()
9286 old_inode->i_ctime = current_time(old_dir); in btrfs_rename()
9288 if (old_dentry->d_parent != new_dentry->d_parent) in btrfs_rename()
9297 old_dentry->d_name.name, in btrfs_rename()
9298 old_dentry->d_name.len); in btrfs_rename()
9309 new_inode->i_ctime = current_time(new_inode); in btrfs_rename()
9313 BUG_ON(new_inode->i_nlink == 0); in btrfs_rename()
9317 new_dentry->d_name.name, in btrfs_rename()
9318 new_dentry->d_name.len); in btrfs_rename()
9320 if (!ret && new_inode->i_nlink == 0) in btrfs_rename()
9330 new_dentry->d_name.name, in btrfs_rename()
9331 new_dentry->d_name.len, 0, index); in btrfs_rename()
9337 if (old_inode->i_nlink == 1) in btrfs_rename()
9338 BTRFS_I(old_inode)->dir_index = index; in btrfs_rename()
9342 new_dentry->d_parent); in btrfs_rename()
9369 if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) || in btrfs_rename()
9370 btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) || in btrfs_rename()
9371 btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) || in btrfs_rename()
9373 btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation))) in btrfs_rename()
9383 up_read(&fs_info->subvol_sem); in btrfs_rename()
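Note: btrfs_rename() has the same overall shape: a set of early rejections (-EPERM, -EXDEV for moving an ordinary inode into another subvolume, -ENOTEMPTY when the target is a directory that still has entries), a filemap_flush() of the source when it is about to replace a non-empty regular file, then unlink of the old entry, drop of the victim's link count, and insertion of the new entry at a fresh directory index. A sketch of two of those prechecks; the names and the DEMO_EMPTY_DIR_SIZE value are stand-ins, not btrfs definitions:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#define DEMO_EMPTY_DIR_SIZE 6	/* stand-in for the "empty dir" size constant */

struct demo_inode {
	uint64_t size;
	bool is_dir;
	bool is_subvol_root;
};

/* Two of the early exits above: ordinary inodes may not change
 * subvolume (-EXDEV), and a rename may not replace a directory that
 * still has entries (-ENOTEMPTY). */
static int demo_rename_precheck(const struct demo_inode *old_inode,
				uint64_t old_dir_root, uint64_t new_dir_root,
				const struct demo_inode *new_inode)
{
	if (old_dir_root != new_dir_root && !old_inode->is_subvol_root)
		return -EXDEV;
	if (new_inode && new_inode->is_dir &&
	    new_inode->size > DEMO_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;
	return 0;
}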
9393 return -EINVAL; in btrfs_rename2()
9416 inode = delalloc_work->inode; in btrfs_run_delalloc_work()
9417 filemap_flush(inode->i_mapping); in btrfs_run_delalloc_work()
9419 &BTRFS_I(inode)->runtime_flags)) in btrfs_run_delalloc_work()
9420 filemap_flush(inode->i_mapping); in btrfs_run_delalloc_work()
9423 complete(&delalloc_work->completion); in btrfs_run_delalloc_work()
9434 init_completion(&work->completion); in btrfs_alloc_delalloc_work()
9435 INIT_LIST_HEAD(&work->list); in btrfs_alloc_delalloc_work()
9436 work->inode = inode; in btrfs_alloc_delalloc_work()
9437 btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL); in btrfs_alloc_delalloc_work()
9456 bool full_flush = wbc->nr_to_write == LONG_MAX; in start_delalloc_inodes()
9461 mutex_lock(&root->delalloc_mutex); in start_delalloc_inodes()
9462 spin_lock(&root->delalloc_lock); in start_delalloc_inodes()
9463 list_splice_init(&root->delalloc_inodes, &splice); in start_delalloc_inodes()
9468 list_move_tail(&binode->delalloc_inodes, in start_delalloc_inodes()
9469 &root->delalloc_inodes); in start_delalloc_inodes()
9472 test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags)) in start_delalloc_inodes()
9475 inode = igrab(&binode->vfs_inode); in start_delalloc_inodes()
9477 cond_resched_lock(&root->delalloc_lock); in start_delalloc_inodes()
9480 spin_unlock(&root->delalloc_lock); in start_delalloc_inodes()
9484 &binode->runtime_flags); in start_delalloc_inodes()
9489 ret = -ENOMEM; in start_delalloc_inodes()
9492 list_add_tail(&work->list, &works); in start_delalloc_inodes()
9493 btrfs_queue_work(root->fs_info->flush_workers, in start_delalloc_inodes()
9494 &work->work); in start_delalloc_inodes()
9499 &BTRFS_I(inode)->runtime_flags)) in start_delalloc_inodes()
9502 if (ret || wbc->nr_to_write <= 0) in start_delalloc_inodes()
9506 spin_lock(&root->delalloc_lock); in start_delalloc_inodes()
9508 spin_unlock(&root->delalloc_lock); in start_delalloc_inodes()
9512 list_del_init(&work->list); in start_delalloc_inodes()
9513 wait_for_completion(&work->completion); in start_delalloc_inodes()
9518 spin_lock(&root->delalloc_lock); in start_delalloc_inodes()
9519 list_splice_tail(&splice, &root->delalloc_inodes); in start_delalloc_inodes()
9520 spin_unlock(&root->delalloc_lock); in start_delalloc_inodes()
9522 mutex_unlock(&root->delalloc_mutex); in start_delalloc_inodes()
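Note: start_delalloc_inodes() splices the root's delalloc list onto a private list under the spinlock, takes a reference on each inode, roughly speaking queues one delalloc work item per inode on the flush workqueue for a full flush (or starts writeback directly for a budgeted one), waits for all queued work, and splices whatever it did not get to back onto the shared list. Below is a userspace model of that splice/work/splice-back pattern, using a singly linked list and a simple budget in place of the kernel lists, workqueue and wbc->nr_to_write:

#include <pthread.h>
#include <stdio.h>

struct demo_node { struct demo_node *next; int id; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_node *delalloc_list;	/* shared, lock-protected */

/* Detach the whole shared list while holding the lock, process the
 * private copy without the lock, and push anything unprocessed back --
 * the same idea as the splice dance in start_delalloc_inodes(). */
static void demo_flush_delalloc(int budget)
{
	struct demo_node *splice, *n;

	pthread_mutex_lock(&list_lock);
	splice = delalloc_list;
	delalloc_list = NULL;
	pthread_mutex_unlock(&list_lock);

	while ((n = splice) != NULL && budget-- > 0) {
		splice = n->next;
		printf("flushing inode %d\n", n->id);	/* stands in for queueing work */
	}

	/* Budget exhausted: hand the remainder back to the shared list. */
	pthread_mutex_lock(&list_lock);
	while ((n = splice) != NULL) {
		splice = n->next;
		n->next = delalloc_list;
		delalloc_list = n;
	}
	pthread_mutex_unlock(&list_lock);
}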
9534 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_start_delalloc_snapshot()
9536 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) in btrfs_start_delalloc_snapshot()
9537 return -EROFS; in btrfs_start_delalloc_snapshot()
9555 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) in btrfs_start_delalloc_roots()
9556 return -EROFS; in btrfs_start_delalloc_roots()
9560 mutex_lock(&fs_info->delalloc_root_mutex); in btrfs_start_delalloc_roots()
9561 spin_lock(&fs_info->delalloc_root_lock); in btrfs_start_delalloc_roots()
9562 list_splice_init(&fs_info->delalloc_roots, &splice); in btrfs_start_delalloc_roots()
9575 list_move_tail(&root->delalloc_root, in btrfs_start_delalloc_roots()
9576 &fs_info->delalloc_roots); in btrfs_start_delalloc_roots()
9577 spin_unlock(&fs_info->delalloc_root_lock); in btrfs_start_delalloc_roots()
9583 spin_lock(&fs_info->delalloc_root_lock); in btrfs_start_delalloc_roots()
9585 spin_unlock(&fs_info->delalloc_root_lock); in btrfs_start_delalloc_roots()
9590 spin_lock(&fs_info->delalloc_root_lock); in btrfs_start_delalloc_roots()
9591 list_splice_tail(&splice, &fs_info->delalloc_roots); in btrfs_start_delalloc_roots()
9592 spin_unlock(&fs_info->delalloc_root_lock); in btrfs_start_delalloc_roots()
9594 mutex_unlock(&fs_info->delalloc_root_mutex); in btrfs_start_delalloc_roots()
9601 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); in btrfs_symlink()
9603 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_symlink()
9618 return -ENAMETOOLONG; in btrfs_symlink()
9635 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, in btrfs_symlink()
9636 dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), in btrfs_symlink()
9650 inode->i_fop = &btrfs_file_operations; in btrfs_symlink()
9651 inode->i_op = &btrfs_file_inode_operations; in btrfs_symlink()
9652 inode->i_mapping->a_ops = &btrfs_aops; in btrfs_symlink()
9654 err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); in btrfs_symlink()
9660 err = -ENOMEM; in btrfs_symlink()
9673 leaf = path->nodes[0]; in btrfs_symlink()
9674 ei = btrfs_item_ptr(leaf, path->slots[0], in btrfs_symlink()
9676 btrfs_set_file_extent_generation(leaf, ei, trans->transid); in btrfs_symlink()
9689 inode->i_op = &btrfs_symlink_inode_operations; in btrfs_symlink()
9696 * last step to avoid extra cleanup of these indexes if an error happens in btrfs_symlink()
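Note: btrfs_symlink() rejects targets too long to store inline (-ENAMETOOLONG), creates what starts out as an ordinary file inode, writes the target string as an inline file extent item, and only then switches i_op to the symlink operations; the directory index is inserted last so a failure needs no extra index cleanup. A small sketch of the "short payload stored inline next to the metadata" idea; the size cap and item layout here are illustrative only:

#include <errno.h>
#include <stdint.h>
#include <string.h>

/* Illustrative cap; the real limit depends on the sector size and the
 * room available in a btree leaf. */
#define DEMO_MAX_INLINE_TARGET 4095

struct demo_inline_item {
	uint32_t len;
	char data[DEMO_MAX_INLINE_TARGET + 1];
};

/* Same shape as the symlink path above: refuse oversized targets, then
 * copy the target string into an inline item that lives with the inode
 * metadata instead of in a separate data extent. */
static int demo_store_symlink_target(struct demo_inline_item *item,
				     const char *target)
{
	size_t len = strlen(target);

	if (len > DEMO_MAX_INLINE_TARGET)
		return -ENAMETOOLONG;
	item->len = (uint32_t)len;
	memcpy(item->data, target, len + 1);
	return 0;
}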
9726 u64 start = ins->objectid; in insert_prealloc_file_extent()
9727 u64 len = ins->offset; in insert_prealloc_file_extent()
9764 return ERR_PTR(-ENOMEM); in insert_prealloc_file_extent()
9767 file_offset + len - 1, &extent_info, in insert_prealloc_file_extent()
9781 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in __btrfs_prealloc_file_range()
9782 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; in __btrfs_prealloc_file_range()
9784 struct btrfs_root *root = BTRFS_I(inode)->root; in __btrfs_prealloc_file_range()
9790 u64 last_alloc = (u64)-1; in __btrfs_prealloc_file_range()
9793 u64 end = start + num_bytes - 1; in __btrfs_prealloc_file_range()
9814 * ->bytes_may_use to ->bytes_reserved. Any error that happens in __btrfs_prealloc_file_range()
9838 				    cur_offset + ins.offset - 1, 0); in __btrfs_prealloc_file_range()
9843 &BTRFS_I(inode)->runtime_flags); in __btrfs_prealloc_file_range()
9847 em->start = cur_offset; in __btrfs_prealloc_file_range()
9848 em->orig_start = cur_offset; in __btrfs_prealloc_file_range()
9849 em->len = ins.offset; in __btrfs_prealloc_file_range()
9850 em->block_start = ins.objectid; in __btrfs_prealloc_file_range()
9851 em->block_len = ins.offset; in __btrfs_prealloc_file_range()
9852 em->orig_block_len = ins.offset; in __btrfs_prealloc_file_range()
9853 em->ram_bytes = ins.offset; in __btrfs_prealloc_file_range()
9854 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); in __btrfs_prealloc_file_range()
9855 em->generation = trans->transid; in __btrfs_prealloc_file_range()
9858 write_lock(&em_tree->lock); in __btrfs_prealloc_file_range()
9860 write_unlock(&em_tree->lock); in __btrfs_prealloc_file_range()
9861 if (ret != -EEXIST) in __btrfs_prealloc_file_range()
9864 cur_offset + ins.offset - 1, in __btrfs_prealloc_file_range()
9869 num_bytes -= ins.offset; in __btrfs_prealloc_file_range()
9874 inode->i_ctime = current_time(inode); in __btrfs_prealloc_file_range()
9875 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; in __btrfs_prealloc_file_range()
9877 (actual_len > inode->i_size) && in __btrfs_prealloc_file_range()
9878 (cur_offset > inode->i_size)) { in __btrfs_prealloc_file_range()
9903 end - clear_offset + 1); in __btrfs_prealloc_file_range()
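Note: __btrfs_prealloc_file_range() walks the requested range in a loop: reserve an extent, insert a PREALLOC file extent item plus a matching extent map, subtract what was allocated from num_bytes and advance cur_offset, repeating until the range is covered; on error the still-unused part of the data reservation is released (the end - clear_offset + 1 above). A sketch of that loop skeleton with the allocation step stubbed out; the 128 MiB cap is an arbitrary stand-in, not the kernel's clamp:

#include <stdint.h>
#include <stdio.h>

/* Stub allocator: pretend we always get up to 128 MiB per call.  In the
 * real code this is btrfs_reserve_extent(), which may hand back less
 * than requested. */
static uint64_t demo_reserve_extent(uint64_t want)
{
	const uint64_t max_chunk = 128ULL << 20;

	return want < max_chunk ? want : max_chunk;
}

/* Loop shape of the preallocation path: carve the range into extents
 * and record each one until num_bytes reaches zero. */
static void demo_prealloc_range(uint64_t start, uint64_t num_bytes)
{
	uint64_t cur_offset = start;

	while (num_bytes > 0) {
		uint64_t got = demo_reserve_extent(num_bytes);

		/* Here the kernel inserts a PREALLOC file extent item and an
		 * extent map covering [cur_offset, cur_offset + got). */
		printf("prealloc extent at %llu, %llu bytes\n",
		       (unsigned long long)cur_offset,
		       (unsigned long long)got);
		num_bytes -= got;
		cur_offset += got;
	}
}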
9932 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_permission()
9933 umode_t mode = inode->i_mode; in btrfs_permission()
9938 return -EROFS; in btrfs_permission()
9939 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) in btrfs_permission()
9940 return -EACCES; in btrfs_permission()
9947 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); in btrfs_tmpfile()
9949 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_tmpfile()
9974 inode->i_fop = &btrfs_file_operations; in btrfs_tmpfile()
9975 inode->i_op = &btrfs_file_inode_operations; in btrfs_tmpfile()
9977 inode->i_mapping->a_ops = &btrfs_aops; in btrfs_tmpfile()
9995 * d_tmpfile() -> inode_dec_link_count() -> drop_nlink() in btrfs_tmpfile()
10011 struct inode *inode = tree->private_data; in btrfs_set_range_writeback()
10017 page = find_get_page(inode->i_mapping, index); in btrfs_set_range_writeback()
10034 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; in btrfs_add_swapfile_pin()
10041 return -ENOMEM; in btrfs_add_swapfile_pin()
10042 sp->ptr = ptr; in btrfs_add_swapfile_pin()
10043 sp->inode = inode; in btrfs_add_swapfile_pin()
10044 sp->is_block_group = is_block_group; in btrfs_add_swapfile_pin()
10045 sp->bg_extent_count = 1; in btrfs_add_swapfile_pin()
10047 spin_lock(&fs_info->swapfile_pins_lock); in btrfs_add_swapfile_pin()
10048 p = &fs_info->swapfile_pins.rb_node; in btrfs_add_swapfile_pin()
10052 if (sp->ptr < entry->ptr || in btrfs_add_swapfile_pin()
10053 (sp->ptr == entry->ptr && sp->inode < entry->inode)) { in btrfs_add_swapfile_pin()
10054 p = &(*p)->rb_left; in btrfs_add_swapfile_pin()
10055 } else if (sp->ptr > entry->ptr || in btrfs_add_swapfile_pin()
10056 (sp->ptr == entry->ptr && sp->inode > entry->inode)) { in btrfs_add_swapfile_pin()
10057 p = &(*p)->rb_right; in btrfs_add_swapfile_pin()
10060 entry->bg_extent_count++; in btrfs_add_swapfile_pin()
10061 spin_unlock(&fs_info->swapfile_pins_lock); in btrfs_add_swapfile_pin()
10066 rb_link_node(&sp->node, parent, p); in btrfs_add_swapfile_pin()
10067 rb_insert_color(&sp->node, &fs_info->swapfile_pins); in btrfs_add_swapfile_pin()
10068 spin_unlock(&fs_info->swapfile_pins_lock); in btrfs_add_swapfile_pin()
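Note: btrfs_add_swapfile_pin() keeps pinned block groups and device extents in an rb-tree ordered by (ptr, inode); when an insert finds an existing entry for the same block group and swapfile it just bumps bg_extent_count instead of adding a duplicate node. The same insert-or-bump logic, modelled in userspace with a sorted singly linked list standing in for the rb-tree:

#include <stdint.h>
#include <stdlib.h>

struct demo_pin {
	struct demo_pin *next;
	uintptr_t ptr;		/* block group / device being pinned */
	uintptr_t inode;	/* swapfile inode doing the pinning */
	unsigned long extent_count;
};

/* Keep the list ordered by (ptr, inode).  If an entry with the same key
 * already exists, bump its count -- the same outcome as the rb-tree
 * insert above, minus the rebalancing. */
static int demo_add_pin(struct demo_pin **head, uintptr_t ptr, uintptr_t inode)
{
	struct demo_pin **p = head, *sp;

	while (*p && ((*p)->ptr < ptr ||
		      ((*p)->ptr == ptr && (*p)->inode < inode)))
		p = &(*p)->next;

	if (*p && (*p)->ptr == ptr && (*p)->inode == inode) {
		(*p)->extent_count++;
		return 1;	/* existing pin reused */
	}

	sp = calloc(1, sizeof(*sp));
	if (!sp)
		return -1;
	sp->ptr = ptr;
	sp->inode = inode;
	sp->extent_count = 1;
	sp->next = *p;
	*p = sp;
	return 0;
}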
10075 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; in btrfs_free_swapfile_pins()
10079 spin_lock(&fs_info->swapfile_pins_lock); in btrfs_free_swapfile_pins()
10080 node = rb_first(&fs_info->swapfile_pins); in btrfs_free_swapfile_pins()
10084 if (sp->inode == inode) { in btrfs_free_swapfile_pins()
10085 rb_erase(&sp->node, &fs_info->swapfile_pins); in btrfs_free_swapfile_pins()
10086 if (sp->is_block_group) { in btrfs_free_swapfile_pins()
10087 btrfs_dec_block_group_swap_extents(sp->ptr, in btrfs_free_swapfile_pins()
10088 sp->bg_extent_count); in btrfs_free_swapfile_pins()
10089 btrfs_put_block_group(sp->ptr); in btrfs_free_swapfile_pins()
10095 spin_unlock(&fs_info->swapfile_pins_lock); in btrfs_free_swapfile_pins()
10121 if (bsi->nr_pages >= sis->max) in btrfs_add_swap_extent()
10124 max_pages = sis->max - bsi->nr_pages; in btrfs_add_swap_extent()
10125 first_ppage = ALIGN(bsi->block_start, PAGE_SIZE) >> PAGE_SHIFT; in btrfs_add_swap_extent()
10126 next_ppage = ALIGN_DOWN(bsi->block_start + bsi->block_len, in btrfs_add_swap_extent()
10131 nr_pages = next_ppage - first_ppage; in btrfs_add_swap_extent()
10135 if (bsi->start == 0) in btrfs_add_swap_extent()
10137 if (bsi->lowest_ppage > first_ppage_reported) in btrfs_add_swap_extent()
10138 bsi->lowest_ppage = first_ppage_reported; in btrfs_add_swap_extent()
10139 if (bsi->highest_ppage < (next_ppage - 1)) in btrfs_add_swap_extent()
10140 bsi->highest_ppage = next_ppage - 1; in btrfs_add_swap_extent()
10142 ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage); in btrfs_add_swap_extent()
10145 bsi->nr_extents += ret; in btrfs_add_swap_extent()
10146 bsi->nr_pages += nr_pages; in btrfs_add_swap_extent()
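Note: btrfs_add_swap_extent() converts a physical byte range into whole swap pages: round the start up to a page boundary, round the end down, skip the extent if no full page remains, and cap nr_pages at what the swap header (sis->max) still accepts. A small arithmetic sketch of that rounding, assuming 4 KiB pages; the helpers and sample numbers are mine:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1ULL << DEMO_PAGE_SHIFT)

static uint64_t align_up(uint64_t x, uint64_t a)   { return (x + a - 1) & ~(a - 1); }
static uint64_t align_down(uint64_t x, uint64_t a) { return x & ~(a - 1); }

/* Only whole pages can be handed to the swap layer, so the partial head
 * and tail of the extent are dropped; a short or misaligned extent may
 * contribute zero pages. */
static uint64_t demo_swap_pages_in_extent(uint64_t block_start, uint64_t block_len)
{
	uint64_t first = align_up(block_start, DEMO_PAGE_SIZE) >> DEMO_PAGE_SHIFT;
	uint64_t next = align_down(block_start + block_len, DEMO_PAGE_SIZE) >> DEMO_PAGE_SHIFT;

	return first < next ? next - first : 0;
}

int main(void)
{
	/* A 1 MiB extent starting 512 bytes into a page loses its partial
	 * head and tail pages, leaving 255 usable swap pages. */
	printf("%llu\n",
	       (unsigned long long)demo_swap_pages_in_extent(512, 1 << 20));
	return 0;
}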
10155 atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles); in btrfs_swap_deactivate()
10162 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_swap_activate()
10163 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_swap_activate()
10164 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in btrfs_swap_activate()
10169 .lowest_ppage = (sector_t)-1ULL, in btrfs_swap_activate()
10180 ret = btrfs_wait_ordered_range(inode, 0, (u64)-1); in btrfs_swap_activate()
10187 if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) { in btrfs_swap_activate()
10189 return -EINVAL; in btrfs_swap_activate()
10191 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) { in btrfs_swap_activate()
10192 btrfs_warn(fs_info, "swapfile must not be copy-on-write"); in btrfs_swap_activate()
10193 return -EINVAL; in btrfs_swap_activate()
10195 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { in btrfs_swap_activate()
10197 return -EINVAL; in btrfs_swap_activate()
10204 * fs_info->swapfile_pins prevents them from running while the swap in btrfs_swap_activate()
10212 return -EBUSY; in btrfs_swap_activate()
10222 if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) { in btrfs_swap_activate()
10226 return -EINVAL; in btrfs_swap_activate()
10238 spin_lock(&root->root_item_lock); in btrfs_swap_activate()
10240 spin_unlock(&root->root_item_lock); in btrfs_swap_activate()
10245 root->root_key.objectid); in btrfs_swap_activate()
10246 return -EPERM; in btrfs_swap_activate()
10248 atomic_inc(&root->nr_swapfiles); in btrfs_swap_activate()
10249 spin_unlock(&root->root_item_lock); in btrfs_swap_activate()
10251 isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize); in btrfs_swap_activate()
10253 lock_extent_bits(io_tree, 0, isize - 1, &cached_state); in btrfs_swap_activate()
10258 u64 len = isize - start; in btrfs_swap_activate()
10266 if (em->block_start == EXTENT_MAP_HOLE) { in btrfs_swap_activate()
10268 ret = -EINVAL; in btrfs_swap_activate()
10271 if (em->block_start == EXTENT_MAP_INLINE) { in btrfs_swap_activate()
10280 ret = -EINVAL; in btrfs_swap_activate()
10283 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { in btrfs_swap_activate()
10285 ret = -EINVAL; in btrfs_swap_activate()
10289 logical_block_start = em->block_start + (start - em->start); in btrfs_swap_activate()
10290 len = min(len, em->len - (start - em->start)); in btrfs_swap_activate()
10301 "swapfile must not be copy-on-write"); in btrfs_swap_activate()
10302 ret = -EINVAL; in btrfs_swap_activate()
10312 if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { in btrfs_swap_activate()
10315 ret = -EINVAL; in btrfs_swap_activate()
10320 device = em->map_lookup->stripes[0].dev; in btrfs_swap_activate()
10326 } else if (device != em->map_lookup->stripes[0].dev) { in btrfs_swap_activate()
10328 ret = -EINVAL; in btrfs_swap_activate()
10332 physical_block_start = (em->map_lookup->stripes[0].physical + in btrfs_swap_activate()
10333 (logical_block_start - em->start)); in btrfs_swap_activate()
10334 len = min(len, em->len - (logical_block_start - em->start)); in btrfs_swap_activate()
10342 ret = -EINVAL; in btrfs_swap_activate()
10348 "block group for swapfile at %llu is read-only%s", in btrfs_swap_activate()
10349 bg->start, in btrfs_swap_activate()
10350 atomic_read(&fs_info->scrubs_running) ? in btrfs_swap_activate()
10353 ret = -EINVAL; in btrfs_swap_activate()
10390 unlock_extent_cached(io_tree, 0, isize - 1, &cached_state); in btrfs_swap_activate()
10395 btrfs_drew_write_unlock(&root->snapshot_lock); in btrfs_swap_activate()
10403 sis->bdev = device->bdev; in btrfs_swap_activate()
10404 *span = bsi.highest_ppage - bsi.lowest_ppage + 1; in btrfs_swap_activate()
10405 sis->max = bsi.nr_pages; in btrfs_swap_activate()
10406 sis->pages = bsi.nr_pages - 1; in btrfs_swap_activate()
10407 sis->highest_bit = bsi.nr_pages - 1; in btrfs_swap_activate()
10418 return -EOPNOTSUPP; in btrfs_swap_activate()
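Note: btrfs_swap_activate() walks every extent of the file under a locked io_tree and refuses anything that would make swapping unsafe: compression or missing NODATACOW/NODATASUM flags on the inode, holes, inline or compressed extents, extents still shared with a snapshot or reflink, striped/RAID chunks, extents spanning more than one device, and read-only block groups; each accepted block group and device extent is pinned so balance and device removal stay away, and the surviving ranges are fed to add_swap_extent(). A compact sketch of that per-extent validation, with every check reduced to a pre-computed boolean (all field names are stand-ins):

#include <errno.h>
#include <stdbool.h>

/* Properties of one file extent as the activation loop would see them;
 * in the real code these come from the extent map and chunk map. */
struct demo_extent {
	bool is_hole;
	bool is_inline;
	bool is_compressed;
	bool is_shared;		/* still referenced by a snapshot/reflink */
	bool striped_profile;	/* RAID/DUP chunk rather than a single stripe */
	bool bg_read_only;
};

/* Every extent either passes all of the tests or the whole swapon fails
 * with -EINVAL, mirroring the chain of checks in btrfs_swap_activate(). */
static int demo_check_swap_extent(const struct demo_extent *em)
{
	if (em->is_hole || em->is_inline)
		return -EINVAL;	/* swapfile must not have holes or inline data */
	if (em->is_compressed)
		return -EINVAL;	/* swapfile must not be compressed */
	if (em->is_shared)
		return -EINVAL;	/* swapfile must not be copy-on-write */
	if (em->striped_profile)
		return -EINVAL;	/* must resolve to exactly one device */
	if (em->bg_read_only)
		return -EINVAL;	/* block group must remain writable */
	return 0;
}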