Lines Matching +full:oe +full:- +full:extra +full:- +full:delay

1 // SPDX-License-Identifier: GPL-2.0
9 #include <linux/blk-cgroup.h>
17 #include <linux/backing-dev.h>
39 #include "disk-io.h"
42 #include "print-tree.h"
43 #include "ordered-data.h"
45 #include "tree-log.h"
49 #include "free-space-cache.h"
52 #include "delalloc-space.h"
53 #include "block-group.h"
54 #include "space-info.h"
57 #include "inode-item.h"
60 #include "extent-tree.h"
61 #include "root-tree.h"
63 #include "dir-item.h"
64 #include "file-item.h"
65 #include "uuid-tree.h"
142 struct btrfs_fs_info *fs_info = warn->fs_info; in data_reloc_print_warning_inode()
163 ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0); in data_reloc_print_warning_inode()
166 btrfs_release_path(&warn->path); in data_reloc_print_warning_inode()
170 eb = warn->path.nodes[0]; in data_reloc_print_warning_inode()
171 inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item); in data_reloc_print_warning_inode()
173 btrfs_release_path(&warn->path); in data_reloc_print_warning_inode()
176 ipath = init_ipath(4096, local_root, &warn->path); in data_reloc_print_warning_inode()
183 * -ENOMEM, not a critical error, just output a generic error in data_reloc_print_warning_inode()
188 warn->logical, warn->mirror_num, root, inum, offset); in data_reloc_print_warning_inode()
199 for (int i = 0; i < ipath->fspath->elem_cnt; i++) { in data_reloc_print_warning_inode()
202 warn->logical, warn->mirror_num, root, inum, offset, in data_reloc_print_warning_inode()
203 fs_info->sectorsize, nlink, in data_reloc_print_warning_inode()
204 (char *)(unsigned long)ipath->fspath->val[i]); in data_reloc_print_warning_inode()
214 warn->logical, warn->mirror_num, root, inum, offset, ret); in data_reloc_print_warning_inode()
221 * Do extra user-friendly error output (e.g. lookup all the affected files).
230 struct btrfs_fs_info *fs_info = inode->root->fs_info; in print_data_reloc_error()
235 const u32 csum_size = fs_info->csum_size; in print_data_reloc_error()
241 mutex_lock(&fs_info->reloc_mutex); in print_data_reloc_error()
243 mutex_unlock(&fs_info->reloc_mutex); in print_data_reloc_error()
249 inode->root->root_key.objectid, btrfs_ino(inode), file_off, in print_data_reloc_error()
259 inode->root->root_key.objectid, in print_data_reloc_error()
306 ctx.extent_item_pos = logical - found_key.objectid; in print_data_reloc_error()
322 struct btrfs_root *root = inode->root; in btrfs_print_data_csum_error()
323 const u32 csum_size = root->fs_info->csum_size; in btrfs_print_data_csum_error()
326 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) in btrfs_print_data_csum_error()
331 if (root->root_key.objectid >= BTRFS_LAST_FREE_OBJECTID) { in btrfs_print_data_csum_error()
332 btrfs_warn_rl(root->fs_info, in btrfs_print_data_csum_error()
334 root->root_key.objectid, btrfs_ino(inode), in btrfs_print_data_csum_error()
340 btrfs_warn_rl(root->fs_info, in btrfs_print_data_csum_error()
342 root->root_key.objectid, btrfs_ino(inode), in btrfs_print_data_csum_error()
351 * btrfs_inode_lock - lock inode i_rwsem based on arguments passed
355 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
356 * BTRFS_ILOCK_TRY - try to acquire the lock, if fails on first attempt
357 * return -EAGAIN
358 * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
364 if (!inode_trylock_shared(&inode->vfs_inode)) in btrfs_inode_lock()
365 return -EAGAIN; in btrfs_inode_lock()
369 inode_lock_shared(&inode->vfs_inode); in btrfs_inode_lock()
372 if (!inode_trylock(&inode->vfs_inode)) in btrfs_inode_lock()
373 return -EAGAIN; in btrfs_inode_lock()
377 inode_lock(&inode->vfs_inode); in btrfs_inode_lock()
380 down_write(&inode->i_mmap_lock); in btrfs_inode_lock()
385 * btrfs_inode_unlock - unlock inode i_rwsem
393 up_write(&inode->i_mmap_lock); in btrfs_inode_unlock()
395 inode_unlock_shared(&inode->vfs_inode); in btrfs_inode_unlock()
397 inode_unlock(&inode->vfs_inode); in btrfs_inode_unlock()
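
The two helpers above pair up around the flags documented in the comment above: a caller picks shared vs. exclusive, trylock vs. blocking, and optionally the extra i_mmap_lock. Below is a minimal sketch of that calling pattern, assuming only the signatures visible in these fragments; the wrapper function itself is illustrative and not part of inode.c.

/*
 * Hedged sketch: take the inode lock shared and non-blocking, do some
 * read-only work, then release it with matching flags.  Illustrative only.
 */
static int example_try_shared_work(struct btrfs_inode *inode)
{
	int ret;

	/* BTRFS_ILOCK_TRY makes the helper return -EAGAIN instead of sleeping. */
	ret = btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_TRY);
	if (ret)
		return ret;	/* -EAGAIN: the lock was contended */

	/* ... read-only work under the shared i_rwsem ... */

	/* Unlock flags must match the ones used to lock. */
	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
	return 0;
}
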
415 unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT; in btrfs_cleanup_ordered_extents()
421 page_end = page_start + PAGE_SIZE - 1; in btrfs_cleanup_ordered_extents()
439 page = find_get_page(inode->vfs_inode.i_mapping, index); in btrfs_cleanup_ordered_extents()
449 btrfs_page_clamp_clear_ordered(inode->root->fs_info, page, in btrfs_cleanup_ordered_extents()
464 if (page_start >= offset && page_end <= (offset + bytes - 1)) { in btrfs_cleanup_ordered_extents()
465 bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE; in btrfs_cleanup_ordered_extents()
480 if (args->default_acl) { in btrfs_init_inode_security()
481 err = __btrfs_set_acl(trans, args->inode, args->default_acl, in btrfs_init_inode_security()
486 if (args->acl) { in btrfs_init_inode_security()
487 err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS); in btrfs_init_inode_security()
491 if (!args->default_acl && !args->acl) in btrfs_init_inode_security()
492 cache_no_acl(args->inode); in btrfs_init_inode_security()
493 return btrfs_xattr_security_init(trans, args->inode, args->dir, in btrfs_init_inode_security()
494 &args->dentry->d_name); in btrfs_init_inode_security()
510 struct btrfs_root *root = inode->root; in insert_inline_extent()
540 leaf = path->nodes[0]; in insert_inline_extent()
541 ei = btrfs_item_ptr(leaf, path->slots[0], in insert_inline_extent()
543 btrfs_set_file_extent_generation(leaf, ei, trans->transid); in insert_inline_extent()
564 compressed_size -= cur_size; in insert_inline_extent()
569 page = find_get_page(inode->vfs_inode.i_mapping, 0); in insert_inline_extent()
584 ALIGN(size, root->fs_info->sectorsize)); in insert_inline_extent()
595 i_size = i_size_read(&inode->vfs_inode); in insert_inline_extent()
597 i_size_write(&inode->vfs_inode, size); in insert_inline_extent()
600 inode->disk_i_size = i_size; in insert_inline_extent()
619 struct btrfs_root *root = inode->root; in cow_file_range_inline()
620 struct btrfs_fs_info *fs_info = root->fs_info; in cow_file_range_inline()
632 if (size < i_size_read(&inode->vfs_inode) || in cow_file_range_inline()
633 size > fs_info->sectorsize || in cow_file_range_inline()
635 data_len > fs_info->max_inline) in cow_file_range_inline()
640 return -ENOMEM; in cow_file_range_inline()
647 trans->block_rsv = &inode->block_rsv; in cow_file_range_inline()
651 drop_args.end = fs_info->sectorsize; in cow_file_range_inline()
664 if (ret && ret != -ENOSPC) { in cow_file_range_inline()
667 } else if (ret == -ENOSPC) { in cow_file_range_inline()
674 if (ret && ret != -ENOSPC) { in cow_file_range_inline()
677 } else if (ret == -ENOSPC) { in cow_file_range_inline()
733 BUG_ON(!async_extent); /* -ENOMEM */ in add_async_extent()
734 async_extent->start = start; in add_async_extent()
735 async_extent->ram_size = ram_size; in add_async_extent()
736 async_extent->compressed_size = compressed_size; in add_async_extent()
737 async_extent->pages = pages; in add_async_extent()
738 async_extent->nr_pages = nr_pages; in add_async_extent()
739 async_extent->compress_type = compress_type; in add_async_extent()
740 list_add_tail(&async_extent->list, &cow->extents); in add_async_extent()
751 struct btrfs_fs_info *fs_info = inode->root->fs_info; in inode_need_compress()
767 * \- A \- B in inode_need_compress()
785 if (fs_info->sectorsize < PAGE_SIZE) { in inode_need_compress()
795 if (inode->defrag_compress) in inode_need_compress()
798 if (inode->flags & BTRFS_INODE_NOCOMPRESS) in inode_need_compress()
801 inode->flags & BTRFS_INODE_COMPRESS || in inode_need_compress()
802 inode->prop_compress) in inode_need_compress()
803 return btrfs_compress_heuristic(&inode->vfs_inode, start, end); in inode_need_compress()
812 (start > 0 || end + 1 < inode->disk_i_size)) in inode_should_defrag()
833 struct btrfs_inode *inode = async_chunk->inode; in compress_file_range()
834 struct btrfs_fs_info *fs_info = inode->root->fs_info; in compress_file_range()
835 struct address_space *mapping = inode->vfs_inode.i_mapping; in compress_file_range()
836 u64 blocksize = fs_info->sectorsize; in compress_file_range()
837 u64 start = async_chunk->start; in compress_file_range()
838 u64 end = async_chunk->end; in compress_file_range()
848 int compress_type = fs_info->compress_type; in compress_file_range()
850 inode_should_defrag(inode, start, end, end - start + 1, SZ_16K); in compress_file_range()
857 extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end); in compress_file_range()
869 i_size = i_size_read(&inode->vfs_inode); in compress_file_range()
874 nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1; in compress_file_range()
890 total_compressed = actual_end - start; in compress_file_range()
897 (start > 0 || end + 1 < inode->disk_i_size)) in compress_file_range()
917 * We do compression for mount -o compress and when the inode has not in compress_file_range()
933 if (inode->defrag_compress) in compress_file_range()
934 compress_type = inode->defrag_compress; in compress_file_range()
935 else if (inode->prop_compress) in compress_file_range()
936 compress_type = inode->prop_compress; in compress_file_range()
939 ret = btrfs_compress_pages(compress_type | (fs_info->compress_level << 4), in compress_file_range()
951 memzero_page(pages[nr_pages - 1], poff, PAGE_SIZE - poff); in compress_file_range()
962 if (start == 0 && fs_info->sectorsize == PAGE_SIZE) { in compress_file_range()
979 mapping_set_error(mapping, -EIO); in compress_file_range()
1012 total_in = round_up(total_in, fs_info->sectorsize); in compress_file_range()
1030 if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress) in compress_file_range()
1031 inode->flags |= BTRFS_INODE_NOCOMPRESS; in compress_file_range()
1033 add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0, in compress_file_range()
1038 WARN_ON(pages[i]->mapping); in compress_file_range()
1049 if (!async_extent->pages) in free_async_extent_pages()
1052 for (i = 0; i < async_extent->nr_pages; i++) { in free_async_extent_pages()
1053 WARN_ON(async_extent->pages[i]->mapping); in free_async_extent_pages()
1054 put_page(async_extent->pages[i]); in free_async_extent_pages()
1056 kfree(async_extent->pages); in free_async_extent_pages()
1057 async_extent->nr_pages = 0; in free_async_extent_pages()
1058 async_extent->pages = NULL; in free_async_extent_pages()
1065 u64 start = async_extent->start; in submit_uncompressed_range()
1066 u64 end = async_extent->start + async_extent->ram_size - 1; in submit_uncompressed_range()
1075 wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode); in submit_uncompressed_range()
1079 btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1); in submit_uncompressed_range()
1088 mapping_set_error(locked_page->mapping, ret); in submit_uncompressed_range()
1098 struct btrfs_inode *inode = async_chunk->inode; in submit_one_async_extent()
1099 struct extent_io_tree *io_tree = &inode->io_tree; in submit_one_async_extent()
1100 struct btrfs_root *root = inode->root; in submit_one_async_extent()
1101 struct btrfs_fs_info *fs_info = root->fs_info; in submit_one_async_extent()
1107 u64 start = async_extent->start; in submit_one_async_extent()
1108 u64 end = async_extent->start + async_extent->ram_size - 1; in submit_one_async_extent()
1110 if (async_chunk->blkcg_css) in submit_one_async_extent()
1111 kthread_associate_blkcg(async_chunk->blkcg_css); in submit_one_async_extent()
1114 * If async_chunk->locked_page is in the async_extent range, we need to in submit_one_async_extent()
1117 if (async_chunk->locked_page) { in submit_one_async_extent()
1118 u64 locked_page_start = page_offset(async_chunk->locked_page); in submit_one_async_extent()
1119 u64 locked_page_end = locked_page_start + PAGE_SIZE - 1; in submit_one_async_extent()
1122 locked_page = async_chunk->locked_page; in submit_one_async_extent()
1126 if (async_extent->compress_type == BTRFS_COMPRESS_NONE) { in submit_one_async_extent()
1131 ret = btrfs_reserve_extent(root, async_extent->ram_size, in submit_one_async_extent()
1132 async_extent->compressed_size, in submit_one_async_extent()
1133 async_extent->compressed_size, in submit_one_async_extent()
1137 * Here we used to try again by going back to non-compressed in submit_one_async_extent()
1148 async_extent->ram_size, /* len */ in submit_one_async_extent()
1153 async_extent->ram_size, /* ram_bytes */ in submit_one_async_extent()
1154 async_extent->compress_type, in submit_one_async_extent()
1163 async_extent->ram_size, /* num_bytes */ in submit_one_async_extent()
1164 async_extent->ram_size, /* ram_bytes */ in submit_one_async_extent()
1169 async_extent->compress_type); in submit_one_async_extent()
1182 async_extent->pages, /* compressed_pages */ in submit_one_async_extent()
1183 async_extent->nr_pages, in submit_one_async_extent()
1184 async_chunk->write_flags, true); in submit_one_async_extent()
1187 if (async_chunk->blkcg_css) in submit_one_async_extent()
1196 mapping_set_error(inode->vfs_inode.i_mapping, -EIO); in submit_one_async_extent()
1204 if (async_chunk->blkcg_css) in submit_one_async_extent()
1208 root->root_key.objectid, btrfs_ino(inode), start, in submit_one_async_extent()
1209 async_extent->ram_size, ret); in submit_one_async_extent()
1216 struct extent_map_tree *em_tree = &inode->extent_tree; in get_extent_allocation_hint()
1220 read_lock(&em_tree->lock); in get_extent_allocation_hint()
1228 if (em->block_start >= EXTENT_MAP_LAST_BYTE) { in get_extent_allocation_hint()
1231 if (em && em->block_start < EXTENT_MAP_LAST_BYTE) in get_extent_allocation_hint()
1232 alloc_hint = em->block_start; in get_extent_allocation_hint()
1236 alloc_hint = em->block_start; in get_extent_allocation_hint()
1240 read_unlock(&em_tree->lock); in get_extent_allocation_hint()
1252 * it to make sure we don't do extra locks or unlocks.
1264 * - If @keep_locked is set, all pages are kept locked.
1265 * - Else all pages except for @locked_page are unlocked.
1268 * while-loop, the ordered extents created in previous iterations are kept
1278 struct btrfs_root *root = inode->root; in cow_file_range()
1279 struct btrfs_fs_info *fs_info = root->fs_info; in cow_file_range()
1286 u64 blocksize = fs_info->sectorsize; in cow_file_range()
1295 ret = -EINVAL; in cow_file_range()
1299 num_bytes = ALIGN(end - start + 1, blocksize); in cow_file_range()
1301 ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy)); in cow_file_range()
1315 if (start == 0 && fs_info->sectorsize == PAGE_SIZE && !no_inline) { in cow_file_range()
1316 u64 actual_end = min_t(u64, i_size_read(&inode->vfs_inode), in cow_file_range()
1371 min_alloc_size = fs_info->sectorsize; in cow_file_range()
1380 if (ret == -EAGAIN) { in cow_file_range()
1382 * btrfs_reserve_extent only returns -EAGAIN for zoned in cow_file_range()
1390 * us, or return -ENOSPC if it can't handle retries. in cow_file_range()
1394 wait_on_bit_io(&inode->root->fs_info->flags, in cow_file_range()
1400 *done_offset = start - 1; in cow_file_range()
1403 ret = -ENOSPC; in cow_file_range()
1450 start + ram_size - 1, in cow_file_range()
1468 extent_clear_unlock_delalloc(inode, start, start + ram_size - 1, in cow_file_range()
1475 num_bytes -= cur_alloc_size; in cow_file_range()
1494 btrfs_drop_extent_map_range(inode, start, start + ram_size - 1, false); in cow_file_range()
1502 * |-------(1)----|---(2)---|-------------(3)----------| in cow_file_range()
1503 * `- orig_start `- start `- start + cur_alloc_size `- end in cow_file_range()
1526 mapping_set_error(inode->vfs_inode.i_mapping, ret); in cow_file_range()
1527 extent_clear_unlock_delalloc(inode, orig_start, start - 1, in cow_file_range()
1543 start + cur_alloc_size - 1, in cow_file_range()
1578 nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >> in submit_compressed_extents()
1581 while (!list_empty(&async_chunk->extents)) { in submit_compressed_extents()
1582 async_extent = list_entry(async_chunk->extents.next, in submit_compressed_extents()
1584 list_del(&async_extent->list); in submit_compressed_extents()
1589 if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) < in submit_compressed_extents()
1591 cond_wake_up_nomb(&fs_info->async_submit_wait); in submit_compressed_extents()
1600 btrfs_add_delayed_iput(async_chunk->inode); in async_cow_free()
1601 if (async_chunk->blkcg_css) in async_cow_free()
1602 css_put(async_chunk->blkcg_css); in async_cow_free()
1604 async_cow = async_chunk->async_cow; in async_cow_free()
1605 if (atomic_dec_and_test(&async_cow->num_chunks)) in async_cow_free()
1613 struct btrfs_fs_info *fs_info = inode->root->fs_info; in run_delalloc_compressed()
1618 u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K); in run_delalloc_compressed()
1629 unlock_extent(&inode->io_tree, start, end, NULL); in run_delalloc_compressed()
1630 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags); in run_delalloc_compressed()
1632 async_chunk = ctx->chunks; in run_delalloc_compressed()
1633 atomic_set(&ctx->num_chunks, num_chunks); in run_delalloc_compressed()
1636 u64 cur_end = min(end, start + SZ_512K - 1); in run_delalloc_compressed()
1642 ihold(&inode->vfs_inode); in run_delalloc_compressed()
1670 cur_end - start); in run_delalloc_compressed()
1688 nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE); in run_delalloc_compressed()
1689 atomic_add(nr_pages, &fs_info->async_delalloc_pages); in run_delalloc_compressed()
1691 btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work); in run_delalloc_compressed()
1715 extent_write_locked_range(&inode->vfs_inode, locked_page, start, in run_delalloc_cow()
1731 ret = btrfs_lookup_csums_list(csum_root, bytenr, bytenr + num_bytes - 1, in csum_exist_in_range()
1738 list_del(&sums->list); in csum_exist_in_range()
1750 const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root); in fallback_to_cow()
1751 const u64 range_bytes = end + 1 - start; in fallback_to_cow()
1752 struct extent_io_tree *io_tree = &inode->io_tree; in fallback_to_cow()
1793 struct btrfs_fs_info *fs_info = inode->root->fs_info; in fallback_to_cow()
1794 struct btrfs_space_info *sinfo = fs_info->data_sinfo; in fallback_to_cow()
1799 spin_lock(&sinfo->lock); in fallback_to_cow()
1801 spin_unlock(&sinfo->lock); in fallback_to_cow()
1845 * if path->nodes[0] is NULL or not if it needs to use the path afterwards.
1857 struct extent_buffer *leaf = path->nodes[0]; in can_nocow_file_extent()
1858 struct btrfs_root *root = inode->root; in can_nocow_file_extent()
1864 bool nowait = path->nowait; in can_nocow_file_extent()
1866 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); in can_nocow_file_extent()
1873 args->disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); in can_nocow_file_extent()
1874 args->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); in can_nocow_file_extent()
1875 args->extent_offset = btrfs_file_extent_offset(leaf, fi); in can_nocow_file_extent()
1877 if (!(inode->flags & BTRFS_INODE_NODATACOW) && in can_nocow_file_extent()
1886 if (!args->strict && in can_nocow_file_extent()
1888 btrfs_root_last_snapshot(&root->root_item)) in can_nocow_file_extent()
1892 if (args->disk_bytenr == 0) in can_nocow_file_extent()
1911 key->offset - args->extent_offset, in can_nocow_file_extent()
1912 args->disk_bytenr, args->strict, path); in can_nocow_file_extent()
1917 if (args->free_path) { in can_nocow_file_extent()
1921 * another path. So free the path to avoid unnecessary extra in can_nocow_file_extent()
1929 if (args->writeback_path && !is_freespace_inode && in can_nocow_file_extent()
1930 atomic_read(&root->snapshot_force_cow)) in can_nocow_file_extent()
1933 args->disk_bytenr += args->extent_offset; in can_nocow_file_extent()
1934 args->disk_bytenr += args->start - key->offset; in can_nocow_file_extent()
1935 args->num_bytes = min(args->end + 1, extent_end) - args->start; in can_nocow_file_extent()
1941 ret = csum_exist_in_range(root->fs_info, args->disk_bytenr, args->num_bytes, in can_nocow_file_extent()
1949 if (args->free_path && path) in can_nocow_file_extent()
1966 struct btrfs_fs_info *fs_info = inode->root->fs_info; in run_delalloc_nocow()
1967 struct btrfs_root *root = inode->root; in run_delalloc_nocow()
1969 u64 cow_start = (u64)-1; in run_delalloc_nocow()
1985 ret = -ENOMEM; in run_delalloc_nocow()
2014 if (ret > 0 && path->slots[0] > 0 && check_prev) { in run_delalloc_nocow()
2015 leaf = path->nodes[0]; in run_delalloc_nocow()
2017 path->slots[0] - 1); in run_delalloc_nocow()
2020 path->slots[0]--; in run_delalloc_nocow()
2025 leaf = path->nodes[0]; in run_delalloc_nocow()
2026 if (path->slots[0] >= btrfs_header_nritems(leaf)) { in run_delalloc_nocow()
2032 leaf = path->nodes[0]; in run_delalloc_nocow()
2035 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in run_delalloc_nocow()
2046 path->slots[0]++; in run_delalloc_nocow()
2069 fi = btrfs_item_ptr(leaf, path->slots[0], in run_delalloc_nocow()
2075 ret = -EUCLEAN; in run_delalloc_nocow()
2086 path->slots[0]++; in run_delalloc_nocow()
2108 if (cow_start == (u64)-1) in run_delalloc_nocow()
2113 if (!path->nodes[0]) in run_delalloc_nocow()
2115 path->slots[0]++; in run_delalloc_nocow()
2120 * COW range from cow_start to found_key.offset - 1. As the key in run_delalloc_nocow()
2124 if (cow_start != (u64)-1) { in run_delalloc_nocow()
2126 cow_start, found_key.offset - 1); in run_delalloc_nocow()
2127 cow_start = (u64)-1; in run_delalloc_nocow()
2134 nocow_end = cur_offset + nocow_args.num_bytes - 1; in run_delalloc_nocow()
2137 u64 orig_start = found_key.offset - nocow_args.extent_offset; in run_delalloc_nocow()
2201 if (cur_offset <= end && cow_start == (u64)-1) in run_delalloc_nocow()
2204 if (cow_start != (u64)-1) { in run_delalloc_nocow()
2207 cow_start = (u64)-1; in run_delalloc_nocow()
2221 if (cow_start != (u64)-1) in run_delalloc_nocow()
2236 if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) { in should_nocow()
2237 if (inode->defrag_bytes && in should_nocow()
2238 test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG, in should_nocow()
2253 const bool zoned = btrfs_is_zoned(inode->root->fs_info); in btrfs_run_delalloc_range()
2283 end - start + 1); in btrfs_run_delalloc_range()
2290 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_split_delalloc_extent()
2294 if (!(orig->state & EXTENT_DELALLOC)) in btrfs_split_delalloc_extent()
2297 size = orig->end - orig->start + 1; in btrfs_split_delalloc_extent()
2298 if (size > fs_info->max_extent_size) { in btrfs_split_delalloc_extent()
2306 new_size = orig->end - split + 1; in btrfs_split_delalloc_extent()
2308 new_size = split - orig->start; in btrfs_split_delalloc_extent()
2314 spin_lock(&inode->lock); in btrfs_split_delalloc_extent()
2316 spin_unlock(&inode->lock); in btrfs_split_delalloc_extent()
2327 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_merge_delalloc_extent()
2332 if (!(other->state & EXTENT_DELALLOC)) in btrfs_merge_delalloc_extent()
2335 if (new->start > other->start) in btrfs_merge_delalloc_extent()
2336 new_size = new->end - other->start + 1; in btrfs_merge_delalloc_extent()
2338 new_size = other->end - new->start + 1; in btrfs_merge_delalloc_extent()
2341 if (new_size <= fs_info->max_extent_size) { in btrfs_merge_delalloc_extent()
2342 spin_lock(&inode->lock); in btrfs_merge_delalloc_extent()
2343 btrfs_mod_outstanding_extents(inode, -1); in btrfs_merge_delalloc_extent()
2344 spin_unlock(&inode->lock); in btrfs_merge_delalloc_extent()
2366 old_size = other->end - other->start + 1; in btrfs_merge_delalloc_extent()
2368 old_size = new->end - new->start + 1; in btrfs_merge_delalloc_extent()
2373 spin_lock(&inode->lock); in btrfs_merge_delalloc_extent()
2374 btrfs_mod_outstanding_extents(inode, -1); in btrfs_merge_delalloc_extent()
2375 spin_unlock(&inode->lock); in btrfs_merge_delalloc_extent()
2381 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_add_delalloc_inodes()
2383 spin_lock(&root->delalloc_lock); in btrfs_add_delalloc_inodes()
2384 if (list_empty(&inode->delalloc_inodes)) { in btrfs_add_delalloc_inodes()
2385 list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes); in btrfs_add_delalloc_inodes()
2386 set_bit(BTRFS_INODE_IN_DELALLOC_LIST, &inode->runtime_flags); in btrfs_add_delalloc_inodes()
2387 root->nr_delalloc_inodes++; in btrfs_add_delalloc_inodes()
2388 if (root->nr_delalloc_inodes == 1) { in btrfs_add_delalloc_inodes()
2389 spin_lock(&fs_info->delalloc_root_lock); in btrfs_add_delalloc_inodes()
2390 BUG_ON(!list_empty(&root->delalloc_root)); in btrfs_add_delalloc_inodes()
2391 list_add_tail(&root->delalloc_root, in btrfs_add_delalloc_inodes()
2392 &fs_info->delalloc_roots); in btrfs_add_delalloc_inodes()
2393 spin_unlock(&fs_info->delalloc_root_lock); in btrfs_add_delalloc_inodes()
2396 spin_unlock(&root->delalloc_lock); in btrfs_add_delalloc_inodes()
2402 struct btrfs_fs_info *fs_info = root->fs_info; in __btrfs_del_delalloc_inode()
2404 if (!list_empty(&inode->delalloc_inodes)) { in __btrfs_del_delalloc_inode()
2405 list_del_init(&inode->delalloc_inodes); in __btrfs_del_delalloc_inode()
2407 &inode->runtime_flags); in __btrfs_del_delalloc_inode()
2408 root->nr_delalloc_inodes--; in __btrfs_del_delalloc_inode()
2409 if (!root->nr_delalloc_inodes) { in __btrfs_del_delalloc_inode()
2410 ASSERT(list_empty(&root->delalloc_inodes)); in __btrfs_del_delalloc_inode()
2411 spin_lock(&fs_info->delalloc_root_lock); in __btrfs_del_delalloc_inode()
2412 BUG_ON(list_empty(&root->delalloc_root)); in __btrfs_del_delalloc_inode()
2413 list_del_init(&root->delalloc_root); in __btrfs_del_delalloc_inode()
2414 spin_unlock(&fs_info->delalloc_root_lock); in __btrfs_del_delalloc_inode()
2422 spin_lock(&root->delalloc_lock); in btrfs_del_delalloc_inode()
2424 spin_unlock(&root->delalloc_lock); in btrfs_del_delalloc_inode()
2434 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_set_delalloc_extent()
2443 if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { in btrfs_set_delalloc_extent()
2444 struct btrfs_root *root = inode->root; in btrfs_set_delalloc_extent()
2445 u64 len = state->end + 1 - state->start; in btrfs_set_delalloc_extent()
2449 spin_lock(&inode->lock); in btrfs_set_delalloc_extent()
2451 spin_unlock(&inode->lock); in btrfs_set_delalloc_extent()
2457 percpu_counter_add_batch(&fs_info->delalloc_bytes, len, in btrfs_set_delalloc_extent()
2458 fs_info->delalloc_batch); in btrfs_set_delalloc_extent()
2459 spin_lock(&inode->lock); in btrfs_set_delalloc_extent()
2460 inode->delalloc_bytes += len; in btrfs_set_delalloc_extent()
2462 inode->defrag_bytes += len; in btrfs_set_delalloc_extent()
2464 &inode->runtime_flags)) in btrfs_set_delalloc_extent()
2466 spin_unlock(&inode->lock); in btrfs_set_delalloc_extent()
2469 if (!(state->state & EXTENT_DELALLOC_NEW) && in btrfs_set_delalloc_extent()
2471 spin_lock(&inode->lock); in btrfs_set_delalloc_extent()
2472 inode->new_delalloc_bytes += state->end + 1 - state->start; in btrfs_set_delalloc_extent()
2473 spin_unlock(&inode->lock); in btrfs_set_delalloc_extent()
2484 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_clear_delalloc_extent()
2485 u64 len = state->end + 1 - state->start; in btrfs_clear_delalloc_extent()
2488 if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) { in btrfs_clear_delalloc_extent()
2489 spin_lock(&inode->lock); in btrfs_clear_delalloc_extent()
2490 inode->defrag_bytes -= len; in btrfs_clear_delalloc_extent()
2491 spin_unlock(&inode->lock); in btrfs_clear_delalloc_extent()
2499 if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { in btrfs_clear_delalloc_extent()
2500 struct btrfs_root *root = inode->root; in btrfs_clear_delalloc_extent()
2503 spin_lock(&inode->lock); in btrfs_clear_delalloc_extent()
2504 btrfs_mod_outstanding_extents(inode, -num_extents); in btrfs_clear_delalloc_extent()
2505 spin_unlock(&inode->lock); in btrfs_clear_delalloc_extent()
2513 root != fs_info->tree_root) in btrfs_clear_delalloc_extent()
2521 do_list && !(state->state & EXTENT_NORESERVE) && in btrfs_clear_delalloc_extent()
2525 percpu_counter_add_batch(&fs_info->delalloc_bytes, -len, in btrfs_clear_delalloc_extent()
2526 fs_info->delalloc_batch); in btrfs_clear_delalloc_extent()
2527 spin_lock(&inode->lock); in btrfs_clear_delalloc_extent()
2528 inode->delalloc_bytes -= len; in btrfs_clear_delalloc_extent()
2529 if (do_list && inode->delalloc_bytes == 0 && in btrfs_clear_delalloc_extent()
2531 &inode->runtime_flags)) in btrfs_clear_delalloc_extent()
2533 spin_unlock(&inode->lock); in btrfs_clear_delalloc_extent()
2536 if ((state->state & EXTENT_DELALLOC_NEW) && in btrfs_clear_delalloc_extent()
2538 spin_lock(&inode->lock); in btrfs_clear_delalloc_extent()
2539 ASSERT(inode->new_delalloc_bytes >= len); in btrfs_clear_delalloc_extent()
2540 inode->new_delalloc_bytes -= len; in btrfs_clear_delalloc_extent()
2542 inode_add_bytes(&inode->vfs_inode, len); in btrfs_clear_delalloc_extent()
2543 spin_unlock(&inode->lock); in btrfs_clear_delalloc_extent()
2550 u64 start = (u64)bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT; in btrfs_extract_ordered_extent()
2551 u64 len = bbio->bio.bi_iter.bi_size; in btrfs_extract_ordered_extent()
2556 if (WARN_ON_ONCE(start != ordered->disk_bytenr)) in btrfs_extract_ordered_extent()
2557 return -EINVAL; in btrfs_extract_ordered_extent()
2560 if (ordered->disk_num_bytes == len) { in btrfs_extract_ordered_extent()
2561 refcount_inc(&ordered->refs); in btrfs_extract_ordered_extent()
2562 bbio->ordered = ordered; in btrfs_extract_ordered_extent()
2568 * a pre-existing one. in btrfs_extract_ordered_extent()
2570 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) { in btrfs_extract_ordered_extent()
2571 ret = split_extent_map(bbio->inode, bbio->file_offset, in btrfs_extract_ordered_extent()
2572 ordered->num_bytes, len, in btrfs_extract_ordered_extent()
2573 ordered->disk_bytenr); in btrfs_extract_ordered_extent()
2581 bbio->ordered = new; in btrfs_extract_ordered_extent()
2597 trans->adding_csums = true; in add_pending_csums()
2599 csum_root = btrfs_csum_root(trans->fs_info, in add_pending_csums()
2600 sum->logical); in add_pending_csums()
2602 trans->adding_csums = false; in add_pending_csums()
2615 const u64 end = start + len - 1; in btrfs_find_new_delalloc_bytes()
2618 const u64 search_len = end - search_start + 1; in btrfs_find_new_delalloc_bytes()
2627 if (em->block_start != EXTENT_MAP_HOLE) in btrfs_find_new_delalloc_bytes()
2630 em_len = em->len; in btrfs_find_new_delalloc_bytes()
2631 if (em->start < search_start) in btrfs_find_new_delalloc_bytes()
2632 em_len -= search_start - em->start; in btrfs_find_new_delalloc_bytes()
2636 ret = set_extent_bit(&inode->io_tree, search_start, in btrfs_find_new_delalloc_bytes()
2637 search_start + em_len - 1, in btrfs_find_new_delalloc_bytes()
2654 if (start >= i_size_read(&inode->vfs_inode) && in btrfs_set_extent_delalloc()
2655 !(inode->flags & BTRFS_INODE_PREALLOC)) { in btrfs_set_extent_delalloc()
2665 end + 1 - start, in btrfs_set_extent_delalloc()
2671 return set_extent_bit(&inode->io_tree, start, end, in btrfs_set_extent_delalloc()
2689 struct page *page = fixup->page; in btrfs_writepage_fixup_worker()
2690 struct btrfs_inode *inode = fixup->inode; in btrfs_writepage_fixup_worker()
2691 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_writepage_fixup_worker()
2693 u64 page_end = page_offset(page) + PAGE_SIZE - 1; in btrfs_writepage_fixup_worker()
2708 * page->mapping may go NULL, but it shouldn't be moved to a different in btrfs_writepage_fixup_worker()
2711 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) { in btrfs_writepage_fixup_worker()
2746 lock_extent(&inode->io_tree, page_start, page_end, &cached_state); in btrfs_writepage_fixup_worker()
2754 unlock_extent(&inode->io_tree, page_start, page_end, in btrfs_writepage_fixup_worker()
2781 unlock_extent(&inode->io_tree, page_start, page_end, &cached_state); in btrfs_writepage_fixup_worker()
2788 mapping_set_error(page->mapping, ret); in btrfs_writepage_fixup_worker()
2819 struct inode *inode = page->mapping->host; in btrfs_writepage_cow_fixup()
2820 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_writepage_cow_fixup()
2835 return -EAGAIN; in btrfs_writepage_cow_fixup()
2839 return -EAGAIN; in btrfs_writepage_cow_fixup()
2845 * page->mapping outside of the page lock. in btrfs_writepage_cow_fixup()
2850 btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL); in btrfs_writepage_cow_fixup()
2851 fixup->page = page; in btrfs_writepage_cow_fixup()
2852 fixup->inode = BTRFS_I(inode); in btrfs_writepage_cow_fixup()
2853 btrfs_queue_work(fs_info->fixup_workers, &fixup->work); in btrfs_writepage_cow_fixup()
2855 return -EAGAIN; in btrfs_writepage_cow_fixup()
2864 struct btrfs_root *root = inode->root; in insert_reserved_file_extent()
2865 const u64 sectorsize = root->fs_info->sectorsize; in insert_reserved_file_extent()
2879 return -ENOMEM; in insert_reserved_file_extent()
2909 leaf = path->nodes[0]; in insert_reserved_file_extent()
2910 btrfs_set_stack_file_extent_generation(stack_fi, trans->transid); in insert_reserved_file_extent()
2912 btrfs_item_ptr_offset(leaf, path->slots[0]), in insert_reserved_file_extent()
2928 inline_size = drop_args.bytes_found - inline_size; in insert_reserved_file_extent()
2930 drop_args.bytes_found -= inline_size; in insert_reserved_file_extent()
2931 num_bytes -= sectorsize; in insert_reserved_file_extent()
2946 file_pos - offset, in insert_reserved_file_extent()
2962 spin_lock(&cache->lock); in btrfs_release_delalloc_bytes()
2963 cache->delalloc_bytes -= len; in btrfs_release_delalloc_bytes()
2964 spin_unlock(&cache->lock); in btrfs_release_delalloc_bytes()
2970 struct btrfs_ordered_extent *oe) in insert_ordered_extent_file_extent() argument
2974 u64 num_bytes = oe->num_bytes; in insert_ordered_extent_file_extent()
2975 u64 ram_bytes = oe->ram_bytes; in insert_ordered_extent_file_extent()
2979 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr); in insert_ordered_extent_file_extent()
2981 oe->disk_num_bytes); in insert_ordered_extent_file_extent()
2982 btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset); in insert_ordered_extent_file_extent()
2983 if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags)) { in insert_ordered_extent_file_extent()
2984 num_bytes = oe->truncated_len; in insert_ordered_extent_file_extent()
2989 btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type); in insert_ordered_extent_file_extent()
2998 update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) || in insert_ordered_extent_file_extent()
2999 test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) || in insert_ordered_extent_file_extent()
3000 test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags); in insert_ordered_extent_file_extent()
3002 return insert_reserved_file_extent(trans, BTRFS_I(oe->inode), in insert_ordered_extent_file_extent()
3003 oe->file_offset, &stack_fi, in insert_ordered_extent_file_extent()
3004 update_inode_bytes, oe->qgroup_rsv); in insert_ordered_extent_file_extent()
3014 struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode); in btrfs_finish_one_ordered()
3015 struct btrfs_root *root = inode->root; in btrfs_finish_one_ordered()
3016 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_finish_one_ordered()
3018 struct extent_io_tree *io_tree = &inode->io_tree; in btrfs_finish_one_ordered()
3023 u64 logical_len = ordered_extent->num_bytes; in btrfs_finish_one_ordered()
3029 start = ordered_extent->file_offset; in btrfs_finish_one_ordered()
3030 end = start + ordered_extent->num_bytes - 1; in btrfs_finish_one_ordered()
3032 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && in btrfs_finish_one_ordered()
3033 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) && in btrfs_finish_one_ordered()
3034 !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) && in btrfs_finish_one_ordered()
3035 !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags)) in btrfs_finish_one_ordered()
3042 if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { in btrfs_finish_one_ordered()
3043 ret = -EIO; in btrfs_finish_one_ordered()
3048 btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr, in btrfs_finish_one_ordered()
3049 ordered_extent->disk_num_bytes); in btrfs_finish_one_ordered()
3051 if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { in btrfs_finish_one_ordered()
3053 logical_len = ordered_extent->truncated_len; in btrfs_finish_one_ordered()
3059 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { in btrfs_finish_one_ordered()
3060 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */ in btrfs_finish_one_ordered()
3072 trans->block_rsv = &inode->block_rsv; in btrfs_finish_one_ordered()
3074 if (ret) /* -ENOMEM or corruption */ in btrfs_finish_one_ordered()
3092 trans->block_rsv = &inode->block_rsv; in btrfs_finish_one_ordered()
3094 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) in btrfs_finish_one_ordered()
3095 compress_type = ordered_extent->compress_type; in btrfs_finish_one_ordered()
3096 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { in btrfs_finish_one_ordered()
3099 ordered_extent->file_offset, in btrfs_finish_one_ordered()
3100 ordered_extent->file_offset + in btrfs_finish_one_ordered()
3102 btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr, in btrfs_finish_one_ordered()
3103 ordered_extent->disk_num_bytes); in btrfs_finish_one_ordered()
3105 BUG_ON(root == fs_info->tree_root); in btrfs_finish_one_ordered()
3110 ordered_extent->disk_bytenr, in btrfs_finish_one_ordered()
3111 ordered_extent->disk_num_bytes); in btrfs_finish_one_ordered()
3114 unpin_extent_cache(&inode->extent_tree, ordered_extent->file_offset, in btrfs_finish_one_ordered()
3115 ordered_extent->num_bytes, trans->transid); in btrfs_finish_one_ordered()
3121 ret = add_pending_csums(trans, &ordered_extent->list); in btrfs_finish_one_ordered()
3133 !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) in btrfs_finish_one_ordered()
3134 clear_extent_bit(&inode->io_tree, start, end, in btrfs_finish_one_ordered()
3140 if (ret) { /* -ENOMEM or corruption */ in btrfs_finish_one_ordered()
3146 clear_extent_bit(&inode->io_tree, start, end, clear_bits, in btrfs_finish_one_ordered()
3164 &ordered_extent->flags)) in btrfs_finish_one_ordered()
3165 mapping_set_error(ordered_extent->inode->i_mapping, -EIO); in btrfs_finish_one_ordered()
3178 * writepage where we do ASSERT(em->block_start != in btrfs_finish_one_ordered()
3201 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && in btrfs_finish_one_ordered()
3202 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { in btrfs_finish_one_ordered()
3209 ordered_extent->disk_bytenr, in btrfs_finish_one_ordered()
3210 ordered_extent->disk_num_bytes, in btrfs_finish_one_ordered()
3213 ordered_extent->disk_bytenr, in btrfs_finish_one_ordered()
3214 ordered_extent->disk_num_bytes, 1); in btrfs_finish_one_ordered()
3219 btrfs_qgroup_free_refroot(fs_info, inode->root->root_key.objectid, in btrfs_finish_one_ordered()
3220 ordered_extent->qgroup_rsv, in btrfs_finish_one_ordered()
3241 if (btrfs_is_zoned(btrfs_sb(ordered->inode->i_sb)) && in btrfs_finish_ordered_io()
3242 !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) in btrfs_finish_ordered_io()
3248 * Verify the checksum for a single sector without any extra actions that depend
3254 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); in btrfs_check_sector_csum()
3257 ASSERT(pgoff + fs_info->sectorsize <= PAGE_SIZE); in btrfs_check_sector_csum()
3259 shash->tfm = fs_info->csum_shash; in btrfs_check_sector_csum()
3262 crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum); in btrfs_check_sector_csum()
3265 if (memcmp(csum, csum_expected, fs_info->csum_size)) in btrfs_check_sector_csum()
3266 return -EIO; in btrfs_check_sector_csum()
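
btrfs_check_sector_csum() above computes the checksum of one sector into @csum and compares it against @csum_expected, returning 0 on a match and -EIO otherwise. A hedged sketch of a caller, using only the five-argument call shape visible in the btrfs_data_csum_ok() fragment below; the wrapper name and its arguments are illustrative.

/*
 * Hedged sketch: verify one sector of a data page.  Returns true when the
 * computed checksum matches the expected one.  Illustrative only.
 */
static bool example_sector_csum_ok(struct btrfs_fs_info *fs_info,
				   struct page *page, u32 pgoff,
				   const u8 *csum_expected)
{
	u8 csum[BTRFS_CSUM_SIZE];

	/* 0 on match, -EIO on mismatch; @csum receives the computed digest. */
	return btrfs_check_sector_csum(fs_info, page, pgoff, csum,
				       csum_expected) == 0;
}
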
3286 struct btrfs_inode *inode = bbio->inode; in btrfs_data_csum_ok()
3287 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_data_csum_ok()
3288 u64 file_offset = bbio->file_offset + bio_offset; in btrfs_data_csum_ok()
3289 u64 end = file_offset + bv->bv_len - 1; in btrfs_data_csum_ok()
3293 ASSERT(bv->bv_len == fs_info->sectorsize); in btrfs_data_csum_ok()
3295 if (!bbio->csum) in btrfs_data_csum_ok()
3298 if (btrfs_is_data_reloc_root(inode->root) && in btrfs_data_csum_ok()
3299 test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM, in btrfs_data_csum_ok()
3302 clear_extent_bits(&inode->io_tree, file_offset, end, in btrfs_data_csum_ok()
3307 csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) * in btrfs_data_csum_ok()
3308 fs_info->csum_size; in btrfs_data_csum_ok()
3309 if (btrfs_check_sector_csum(fs_info, bv->bv_page, bv->bv_offset, csum, in btrfs_data_csum_ok()
3316 bbio->mirror_num); in btrfs_data_csum_ok()
3324 * btrfs_add_delayed_iput - perform a delayed iput on @inode
3335 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_add_delayed_iput()
3338 if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1)) in btrfs_add_delayed_iput()
3341 atomic_inc(&fs_info->nr_delayed_iputs); in btrfs_add_delayed_iput()
3344 * context (see bio.c and btrfs_put_ordered_extent()) or a non-irq in btrfs_add_delayed_iput()
3347 spin_lock_irqsave(&fs_info->delayed_iput_lock, flags); in btrfs_add_delayed_iput()
3348 ASSERT(list_empty(&inode->delayed_iput)); in btrfs_add_delayed_iput()
3349 list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs); in btrfs_add_delayed_iput()
3350 spin_unlock_irqrestore(&fs_info->delayed_iput_lock, flags); in btrfs_add_delayed_iput()
3351 if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags)) in btrfs_add_delayed_iput()
3352 wake_up_process(fs_info->cleaner_kthread); in btrfs_add_delayed_iput()
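
As the fragments above show, btrfs_add_delayed_iput() drops the reference immediately when it is not the last one, and otherwise queues the inode on fs_info->delayed_iputs for the cleaner thread to iput later. A hedged sketch of the calling pattern from a context that must not perform the final iput itself; the function name below is illustrative.

/*
 * Hedged sketch: release an inode reference from a bio-completion-like
 * context, where running the final iput() directly would be unsafe.
 * Illustrative only.
 */
static void example_put_inode_ref(struct btrfs_inode *inode)
{
	/*
	 * Defer the (possibly final) iput to the cleaner thread; it drops
	 * the queued references via btrfs_run_delayed_iputs().
	 */
	btrfs_add_delayed_iput(inode);
}
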
3358 list_del_init(&inode->delayed_iput); in run_delayed_iput_locked()
3359 spin_unlock_irq(&fs_info->delayed_iput_lock); in run_delayed_iput_locked()
3360 iput(&inode->vfs_inode); in run_delayed_iput_locked()
3361 if (atomic_dec_and_test(&fs_info->nr_delayed_iputs)) in run_delayed_iput_locked()
3362 wake_up(&fs_info->delayed_iputs_wait); in run_delayed_iput_locked()
3363 spin_lock_irq(&fs_info->delayed_iput_lock); in run_delayed_iput_locked()
3369 if (!list_empty(&inode->delayed_iput)) { in btrfs_run_delayed_iput()
3370 spin_lock_irq(&fs_info->delayed_iput_lock); in btrfs_run_delayed_iput()
3371 if (!list_empty(&inode->delayed_iput)) in btrfs_run_delayed_iput()
3373 spin_unlock_irq(&fs_info->delayed_iput_lock); in btrfs_run_delayed_iput()
3382 * fs_info->delayed_iput_lock. So we need to disable irqs here to in btrfs_run_delayed_iputs()
3385 spin_lock_irq(&fs_info->delayed_iput_lock); in btrfs_run_delayed_iputs()
3386 while (!list_empty(&fs_info->delayed_iputs)) { in btrfs_run_delayed_iputs()
3389 inode = list_first_entry(&fs_info->delayed_iputs, in btrfs_run_delayed_iputs()
3393 spin_unlock_irq(&fs_info->delayed_iput_lock); in btrfs_run_delayed_iputs()
3395 spin_lock_irq(&fs_info->delayed_iput_lock); in btrfs_run_delayed_iputs()
3398 spin_unlock_irq(&fs_info->delayed_iput_lock); in btrfs_run_delayed_iputs()
3415 int ret = wait_event_killable(fs_info->delayed_iputs_wait, in btrfs_wait_on_delayed_iputs()
3416 atomic_read(&fs_info->nr_delayed_iputs) == 0); in btrfs_wait_on_delayed_iputs()
3418 return -EINTR; in btrfs_wait_on_delayed_iputs()
3431 ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode)); in btrfs_orphan_add()
3432 if (ret && ret != -EEXIST) { in btrfs_orphan_add()
3447 return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode)); in btrfs_orphan_del()
3456 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_orphan_cleanup()
3465 if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state)) in btrfs_orphan_cleanup()
3470 ret = -ENOMEM; in btrfs_orphan_cleanup()
3473 path->reada = READA_BACK; in btrfs_orphan_cleanup()
3477 key.offset = (u64)-1; in btrfs_orphan_cleanup()
3491 if (path->slots[0] == 0) in btrfs_orphan_cleanup()
3493 path->slots[0]--; in btrfs_orphan_cleanup()
3497 leaf = path->nodes[0]; in btrfs_orphan_cleanup()
3498 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in btrfs_orphan_cleanup()
3520 * due to -ENOSPC for example, so try to grab the error in btrfs_orphan_cleanup()
3525 ret = BTRFS_FS_ERROR(fs_info) ?: -EINVAL; in btrfs_orphan_cleanup()
3534 inode = btrfs_iget(fs_info->sb, last_objectid, root); in btrfs_orphan_cleanup()
3538 if (ret != -ENOENT) in btrfs_orphan_cleanup()
3542 if (!inode && root == fs_info->tree_root) { in btrfs_orphan_cleanup()
3557 * fs_info->fs_roots_radix. So here we can find if an in btrfs_orphan_cleanup()
3562 spin_lock(&fs_info->fs_roots_radix_lock); in btrfs_orphan_cleanup()
3563 dead_root = radix_tree_lookup(&fs_info->fs_roots_radix, in btrfs_orphan_cleanup()
3565 if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0) in btrfs_orphan_cleanup()
3567 spin_unlock(&fs_info->fs_roots_radix_lock); in btrfs_orphan_cleanup()
3571 key.offset = found_key.objectid - 1; in btrfs_orphan_cleanup()
3594 * only if this filesystem was last used on a pre-v3.12 kernel in btrfs_orphan_cleanup()
3603 if (!inode || inode->i_nlink) { in btrfs_orphan_cleanup()
3634 if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) { in btrfs_orphan_cleanup()
3674 *first_xattr_slot = -1; in acls_after_inode_item()
3684 if (*first_xattr_slot == -1) in acls_after_inode_item()
3714 if (*first_xattr_slot == -1) in acls_after_inode_item()
3720 * read an inode from the btree into the in-memory inode
3725 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_read_locked_inode()
3729 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_read_locked_inode()
3745 return -ENOMEM; in btrfs_read_locked_inode()
3748 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); in btrfs_read_locked_inode()
3757 leaf = path->nodes[0]; in btrfs_read_locked_inode()
3762 inode_item = btrfs_item_ptr(leaf, path->slots[0], in btrfs_read_locked_inode()
3764 inode->i_mode = btrfs_inode_mode(leaf, inode_item); in btrfs_read_locked_inode()
3770 round_up(i_size_read(inode), fs_info->sectorsize)); in btrfs_read_locked_inode()
3772 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime); in btrfs_read_locked_inode()
3773 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime); in btrfs_read_locked_inode()
3775 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime); in btrfs_read_locked_inode()
3776 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime); in btrfs_read_locked_inode()
3778 inode_set_ctime(inode, btrfs_timespec_sec(leaf, &inode_item->ctime), in btrfs_read_locked_inode()
3779 btrfs_timespec_nsec(leaf, &inode_item->ctime)); in btrfs_read_locked_inode()
3781 BTRFS_I(inode)->i_otime.tv_sec = in btrfs_read_locked_inode()
3782 btrfs_timespec_sec(leaf, &inode_item->otime); in btrfs_read_locked_inode()
3783 BTRFS_I(inode)->i_otime.tv_nsec = in btrfs_read_locked_inode()
3784 btrfs_timespec_nsec(leaf, &inode_item->otime); in btrfs_read_locked_inode()
3787 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); in btrfs_read_locked_inode()
3788 BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); in btrfs_read_locked_inode()
3792 inode->i_generation = BTRFS_I(inode)->generation; in btrfs_read_locked_inode()
3793 inode->i_rdev = 0; in btrfs_read_locked_inode()
3796 BTRFS_I(inode)->index_cnt = (u64)-1; in btrfs_read_locked_inode()
3798 &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags); in btrfs_read_locked_inode()
3803 * and then re-read we need to do a full sync since we don't have any in btrfs_read_locked_inode()
3807 * This is required for both inode re-read from disk and delayed inode in btrfs_read_locked_inode()
3810 if (BTRFS_I(inode)->last_trans == fs_info->generation) in btrfs_read_locked_inode()
3812 &BTRFS_I(inode)->runtime_flags); in btrfs_read_locked_inode()
3827 * xfs_io -c fsync mydir/foo in btrfs_read_locked_inode()
3841 BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans; in btrfs_read_locked_inode()
3849 BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans; in btrfs_read_locked_inode()
3851 path->slots[0]++; in btrfs_read_locked_inode()
3852 if (inode->i_nlink != 1 || in btrfs_read_locked_inode()
3853 path->slots[0] >= btrfs_header_nritems(leaf)) in btrfs_read_locked_inode()
3856 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]); in btrfs_read_locked_inode()
3860 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); in btrfs_read_locked_inode()
3865 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref); in btrfs_read_locked_inode()
3870 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf, in btrfs_read_locked_inode()
3878 maybe_acls = acls_after_inode_item(leaf, path->slots[0], in btrfs_read_locked_inode()
3880 if (first_xattr_slot != -1) { in btrfs_read_locked_inode()
3881 path->slots[0] = first_xattr_slot; in btrfs_read_locked_inode()
3887 root->root_key.objectid, ret); in btrfs_read_locked_inode()
3895 switch (inode->i_mode & S_IFMT) { in btrfs_read_locked_inode()
3897 inode->i_mapping->a_ops = &btrfs_aops; in btrfs_read_locked_inode()
3898 inode->i_fop = &btrfs_file_operations; in btrfs_read_locked_inode()
3899 inode->i_op = &btrfs_file_inode_operations; in btrfs_read_locked_inode()
3902 inode->i_fop = &btrfs_dir_file_operations; in btrfs_read_locked_inode()
3903 inode->i_op = &btrfs_dir_inode_operations; in btrfs_read_locked_inode()
3906 inode->i_op = &btrfs_symlink_inode_operations; in btrfs_read_locked_inode()
3908 inode->i_mapping->a_ops = &btrfs_aops; in btrfs_read_locked_inode()
3911 inode->i_op = &btrfs_special_inode_operations; in btrfs_read_locked_inode()
3912 init_special_inode(inode, inode->i_mode, rdev); in btrfs_read_locked_inode()
3935 btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size); in fill_inode_item()
3936 btrfs_set_token_inode_mode(&token, item, inode->i_mode); in fill_inode_item()
3937 btrfs_set_token_inode_nlink(&token, item, inode->i_nlink); in fill_inode_item()
3939 btrfs_set_token_timespec_sec(&token, &item->atime, in fill_inode_item()
3940 inode->i_atime.tv_sec); in fill_inode_item()
3941 btrfs_set_token_timespec_nsec(&token, &item->atime, in fill_inode_item()
3942 inode->i_atime.tv_nsec); in fill_inode_item()
3944 btrfs_set_token_timespec_sec(&token, &item->mtime, in fill_inode_item()
3945 inode->i_mtime.tv_sec); in fill_inode_item()
3946 btrfs_set_token_timespec_nsec(&token, &item->mtime, in fill_inode_item()
3947 inode->i_mtime.tv_nsec); in fill_inode_item()
3949 btrfs_set_token_timespec_sec(&token, &item->ctime, in fill_inode_item()
3951 btrfs_set_token_timespec_nsec(&token, &item->ctime, in fill_inode_item()
3954 btrfs_set_token_timespec_sec(&token, &item->otime, in fill_inode_item()
3955 BTRFS_I(inode)->i_otime.tv_sec); in fill_inode_item()
3956 btrfs_set_token_timespec_nsec(&token, &item->otime, in fill_inode_item()
3957 BTRFS_I(inode)->i_otime.tv_nsec); in fill_inode_item()
3961 BTRFS_I(inode)->generation); in fill_inode_item()
3963 btrfs_set_token_inode_transid(&token, item, trans->transid); in fill_inode_item()
3964 btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); in fill_inode_item()
3965 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags, in fill_inode_item()
3966 BTRFS_I(inode)->ro_flags); in fill_inode_item()
3972 * copy everything in the in-memory inode into the btree.
3985 return -ENOMEM; in btrfs_update_inode_item()
3987 ret = btrfs_lookup_inode(trans, root, path, &inode->location, 1); in btrfs_update_inode_item()
3990 ret = -ENOENT; in btrfs_update_inode_item()
3994 leaf = path->nodes[0]; in btrfs_update_inode_item()
3995 inode_item = btrfs_item_ptr(leaf, path->slots[0], in btrfs_update_inode_item()
3998 fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode); in btrfs_update_inode_item()
4008 * copy everything in the in-memory inode into the btree.
4014 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_update_inode()
4022 * without delay in btrfs_update_inode()
4026 && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { in btrfs_update_inode()
4044 if (ret == -ENOSPC) in btrfs_update_inode_fallback()
4060 struct btrfs_root *root = dir->root; in __btrfs_unlink_inode()
4061 struct btrfs_fs_info *fs_info = root->fs_info; in __btrfs_unlink_inode()
4071 ret = -ENOMEM; in __btrfs_unlink_inode()
4075 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1); in __btrfs_unlink_inode()
4077 ret = di ? PTR_ERR(di) : -ENOENT; in __btrfs_unlink_inode()
4092 * that we delay to delete it, and just do this deletion when in __btrfs_unlink_inode()
4095 if (inode->dir_index) { in __btrfs_unlink_inode()
4098 index = inode->dir_index; in __btrfs_unlink_inode()
4107 name->len, name->name, ino, dir_ino); in __btrfs_unlink_inode()
4113 rename_ctx->index = index; in __btrfs_unlink_inode()
4124 * Besides that, doing it here would only cause extra unnecessary btree in __btrfs_unlink_inode()
4134 * being run in btrfs-cleaner context. If we have enough of these built in __btrfs_unlink_inode()
4135 * up we can end up burning a lot of time in btrfs-cleaner without any in __btrfs_unlink_inode()
4147 btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2); in __btrfs_unlink_inode()
4148 inode_inc_iversion(&inode->vfs_inode); in __btrfs_unlink_inode()
4149 inode_inc_iversion(&dir->vfs_inode); in __btrfs_unlink_inode()
4150 inode_set_ctime_current(&inode->vfs_inode); in __btrfs_unlink_inode()
4151 dir->vfs_inode.i_mtime = inode_set_ctime_current(&dir->vfs_inode); in __btrfs_unlink_inode()
4165 drop_nlink(&inode->vfs_inode); in btrfs_unlink_inode()
4166 ret = btrfs_update_inode(trans, inode->root, inode); in btrfs_unlink_inode()
4181 struct btrfs_root *root = dir->root; in __unlink_start_trans()
4194 ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); in btrfs_unlink()
4198 /* This needs to handle no-key deletions later on */ in btrfs_unlink()
4214 if (inode->i_nlink == 0) { in btrfs_unlink()
4222 btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info); in btrfs_unlink()
4231 struct btrfs_root *root = dir->root; in btrfs_unlink_subvol()
4243 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname); in btrfs_unlink_subvol()
4247 /* This needs to handle no-key deletions later on */ in btrfs_unlink_subvol()
4250 objectid = inode->root->root_key.objectid; in btrfs_unlink_subvol()
4252 objectid = inode->location.objectid; in btrfs_unlink_subvol()
4256 return -EINVAL; in btrfs_unlink_subvol()
4261 ret = -ENOMEM; in btrfs_unlink_subvol()
4266 &fname.disk_name, -1); in btrfs_unlink_subvol()
4268 ret = di ? PTR_ERR(di) : -ENOENT; in btrfs_unlink_subvol()
4272 leaf = path->nodes[0]; in btrfs_unlink_subvol()
4286 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect. in btrfs_unlink_subvol()
4295 ret = -ENOENT; in btrfs_unlink_subvol()
4302 leaf = path->nodes[0]; in btrfs_unlink_subvol()
4303 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in btrfs_unlink_subvol()
4308 root->root_key.objectid, dir_ino, in btrfs_unlink_subvol()
4322 btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2); in btrfs_unlink_subvol()
4323 inode_inc_iversion(&dir->vfs_inode); in btrfs_unlink_subvol()
4324 dir->vfs_inode.i_mtime = inode_set_ctime_current(&dir->vfs_inode); in btrfs_unlink_subvol()
4340 struct btrfs_fs_info *fs_info = root->fs_info; in may_destroy_subvol()
4350 return -ENOMEM; in may_destroy_subvol()
4353 dir_id = btrfs_super_root_dir(fs_info->super_copy); in may_destroy_subvol()
4354 di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path, in may_destroy_subvol()
4357 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); in may_destroy_subvol()
4358 if (key.objectid == root->root_key.objectid) { in may_destroy_subvol()
4359 ret = -EPERM; in may_destroy_subvol()
4368 key.objectid = root->root_key.objectid; in may_destroy_subvol()
4370 key.offset = (u64)-1; in may_destroy_subvol()
4372 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); in may_destroy_subvol()
4378 if (path->slots[0] > 0) { in may_destroy_subvol()
4379 path->slots[0]--; in may_destroy_subvol()
4380 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in may_destroy_subvol()
4381 if (key.objectid == root->root_key.objectid && in may_destroy_subvol()
4383 ret = -ENOTEMPTY; in may_destroy_subvol()
4393 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_prune_dentries()
4401 WARN_ON(btrfs_root_refs(&root->root_item) != 0); in btrfs_prune_dentries()
4403 spin_lock(&root->inode_lock); in btrfs_prune_dentries()
4405 node = root->inode_tree.rb_node; in btrfs_prune_dentries()
4412 node = node->rb_left; in btrfs_prune_dentries()
4414 node = node->rb_right; in btrfs_prune_dentries()
4431 inode = igrab(&entry->vfs_inode); in btrfs_prune_dentries()
4433 spin_unlock(&root->inode_lock); in btrfs_prune_dentries()
4434 if (atomic_read(&inode->i_count) > 1) in btrfs_prune_dentries()
4442 spin_lock(&root->inode_lock); in btrfs_prune_dentries()
4446 if (cond_resched_lock(&root->inode_lock)) in btrfs_prune_dentries()
4451 spin_unlock(&root->inode_lock); in btrfs_prune_dentries()
4456 struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb); in btrfs_delete_subvolume()
4457 struct btrfs_root *root = dir->root; in btrfs_delete_subvolume()
4459 struct btrfs_root *dest = BTRFS_I(inode)->root; in btrfs_delete_subvolume()
4465 down_write(&fs_info->subvol_sem); in btrfs_delete_subvolume()
4472 spin_lock(&dest->root_item_lock); in btrfs_delete_subvolume()
4473 if (dest->send_in_progress) { in btrfs_delete_subvolume()
4474 spin_unlock(&dest->root_item_lock); in btrfs_delete_subvolume()
4477 dest->root_key.objectid); in btrfs_delete_subvolume()
4478 ret = -EPERM; in btrfs_delete_subvolume()
4481 if (atomic_read(&dest->nr_swapfiles)) { in btrfs_delete_subvolume()
4482 spin_unlock(&dest->root_item_lock); in btrfs_delete_subvolume()
4485 root->root_key.objectid); in btrfs_delete_subvolume()
4486 ret = -EPERM; in btrfs_delete_subvolume()
4489 root_flags = btrfs_root_flags(&dest->root_item); in btrfs_delete_subvolume()
4490 btrfs_set_root_flags(&dest->root_item, in btrfs_delete_subvolume()
4492 spin_unlock(&dest->root_item_lock); in btrfs_delete_subvolume()
4513 trans->block_rsv = &block_rsv; in btrfs_delete_subvolume()
4514 trans->bytes_reserved = block_rsv.size; in btrfs_delete_subvolume()
4530 memset(&dest->root_item.drop_progress, 0, in btrfs_delete_subvolume()
4531 sizeof(dest->root_item.drop_progress)); in btrfs_delete_subvolume()
4532 btrfs_set_root_drop_level(&dest->root_item, 0); in btrfs_delete_subvolume()
4533 btrfs_set_root_refs(&dest->root_item, 0); in btrfs_delete_subvolume()
4535 if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) { in btrfs_delete_subvolume()
4537 fs_info->tree_root, in btrfs_delete_subvolume()
4538 dest->root_key.objectid); in btrfs_delete_subvolume()
4545 ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid, in btrfs_delete_subvolume()
4547 dest->root_key.objectid); in btrfs_delete_subvolume()
4548 if (ret && ret != -ENOENT) { in btrfs_delete_subvolume()
4552 if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) { in btrfs_delete_subvolume()
4554 dest->root_item.received_uuid, in btrfs_delete_subvolume()
4556 dest->root_key.objectid); in btrfs_delete_subvolume()
4557 if (ret && ret != -ENOENT) { in btrfs_delete_subvolume()
4563 free_anon_bdev(dest->anon_dev); in btrfs_delete_subvolume()
4564 dest->anon_dev = 0; in btrfs_delete_subvolume()
4566 trans->block_rsv = NULL; in btrfs_delete_subvolume()
4567 trans->bytes_reserved = 0; in btrfs_delete_subvolume()
4569 inode->i_flags |= S_DEAD; in btrfs_delete_subvolume()
4574 spin_lock(&dest->root_item_lock); in btrfs_delete_subvolume()
4575 root_flags = btrfs_root_flags(&dest->root_item); in btrfs_delete_subvolume()
4576 btrfs_set_root_flags(&dest->root_item, in btrfs_delete_subvolume()
4578 spin_unlock(&dest->root_item_lock); in btrfs_delete_subvolume()
4581 up_write(&fs_info->subvol_sem); in btrfs_delete_subvolume()
4585 ASSERT(dest->send_in_progress == 0); in btrfs_delete_subvolume()
4594 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; in btrfs_rmdir()
4600 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) in btrfs_rmdir()
4601 return -ENOTEMPTY; in btrfs_rmdir()
4606 return -EOPNOTSUPP; in btrfs_rmdir()
4611 err = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); in btrfs_rmdir()
4615 /* This needs to handle no-key deletions later on */ in btrfs_rmdir()
4632 last_unlink_trans = BTRFS_I(inode)->last_unlink_trans; in btrfs_rmdir()
4650 if (last_unlink_trans >= trans->transid) in btrfs_rmdir()
4651 BTRFS_I(dir)->last_unlink_trans = last_unlink_trans; in btrfs_rmdir()
4663 * btrfs_truncate_block - read, zero a chunk and write a block
4664 * @inode - inode that we're zeroing
4665 * @from - the offset to start zeroing
4666 * @len - the length to zero, 0 to zero the entire range respective to the
4668 * @front - zero up to the offset instead of from the offset on
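 /*
  * Illustrative sketch, not lifted from a specific caller (error handling
  * trimmed): a range punch zeroes the partial block at each end of the range
  * by calling this helper with both values of @front:
  *
  *	- zero from @offset to the end of its block:
  *		ret = btrfs_truncate_block(inode, offset, 0, 0);
  *	- zero the start of the block containing @offset + @len:
  *		ret = btrfs_truncate_block(inode, offset + len, 0, 1);
  */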
4676 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_truncate_block()
4677 struct address_space *mapping = inode->vfs_inode.i_mapping; in btrfs_truncate_block()
4678 struct extent_io_tree *io_tree = &inode->io_tree; in btrfs_truncate_block()
4683 u32 blocksize = fs_info->sectorsize; in btrfs_truncate_block()
4685 unsigned offset = from & (blocksize - 1); in btrfs_truncate_block()
4698 block_end = block_start + blocksize - 1; in btrfs_truncate_block()
4723 ret = -ENOMEM; in btrfs_truncate_block()
4730 if (page->mapping != mapping) { in btrfs_truncate_block()
4736 ret = -EIO; in btrfs_truncate_block()
4742 * We unlock the page after the io is completed and then re-lock it in btrfs_truncate_block()
4765 clear_extent_bit(&inode->io_tree, block_start, block_end, in btrfs_truncate_block()
4778 len = blocksize - offset; in btrfs_truncate_block()
4780 memzero_page(page, (block_start - page_offset(page)), in btrfs_truncate_block()
4783 memzero_page(page, (block_start - page_offset(page)) + offset, in btrfs_truncate_block()
4787 block_end + 1 - block_start); in btrfs_truncate_block()
4788 btrfs_page_set_dirty(fs_info, page, block_start, block_end + 1 - block_start); in btrfs_truncate_block()
4792 set_extent_bit(&inode->io_tree, block_start, block_end, in btrfs_truncate_block()
4816 struct btrfs_fs_info *fs_info = root->fs_info; in maybe_insert_hole()
4831 * 1 - for the one we're dropping in maybe_insert_hole()
4832 * 1 - for the one we're adding in maybe_insert_hole()
4833 * 1 - for updating the inode. in maybe_insert_hole()
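 /*
  * Sketch of what the accounting above amounts to when the transaction is
  * started (three metadata items, no data reservation); illustrative only:
  *
  *	trans = btrfs_start_transaction(root, 3);
  *	if (IS_ERR(trans))
  *		return PTR_ERR(trans);
  */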
4869 struct btrfs_root *root = inode->root; in btrfs_cont_expand()
4870 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_cont_expand()
4871 struct extent_io_tree *io_tree = &inode->io_tree; in btrfs_cont_expand()
4874 u64 hole_start = ALIGN(oldsize, fs_info->sectorsize); in btrfs_cont_expand()
4875 u64 block_end = ALIGN(size, fs_info->sectorsize); in btrfs_cont_expand()
4893 btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1, in btrfs_cont_expand()
4898 block_end - cur_offset); in btrfs_cont_expand()
4905 last_byte = ALIGN(last_byte, fs_info->sectorsize); in btrfs_cont_expand()
4906 hole_size = last_byte - cur_offset; in btrfs_cont_expand()
4908 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { in btrfs_cont_expand()
4924 cur_offset + hole_size - 1, in btrfs_cont_expand()
4929 hole_em->start = cur_offset; in btrfs_cont_expand()
4930 hole_em->len = hole_size; in btrfs_cont_expand()
4931 hole_em->orig_start = cur_offset; in btrfs_cont_expand()
4933 hole_em->block_start = EXTENT_MAP_HOLE; in btrfs_cont_expand()
4934 hole_em->block_len = 0; in btrfs_cont_expand()
4935 hole_em->orig_block_len = 0; in btrfs_cont_expand()
4936 hole_em->ram_bytes = hole_size; in btrfs_cont_expand()
4937 hole_em->compress_type = BTRFS_COMPRESS_NONE; in btrfs_cont_expand()
4938 hole_em->generation = fs_info->generation; in btrfs_cont_expand()
4956 unlock_extent(io_tree, hole_start, block_end - 1, &cached_state); in btrfs_cont_expand()
4962 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_setsize()
4965 loff_t newsize = attr->ia_size; in btrfs_setsize()
4966 int mask = attr->ia_valid; in btrfs_setsize()
4978 inode->i_mtime = inode_set_ctime_current(inode); in btrfs_setsize()
4986 * state of this file - if the snapshot captures this expanding in btrfs_setsize()
4990 btrfs_drew_write_lock(&root->snapshot_lock); in btrfs_setsize()
4993 btrfs_drew_write_unlock(&root->snapshot_lock); in btrfs_setsize()
4999 btrfs_drew_write_unlock(&root->snapshot_lock); in btrfs_setsize()
5007 btrfs_drew_write_unlock(&root->snapshot_lock); in btrfs_setsize()
5010 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_setsize()
5014 ALIGN(newsize, fs_info->sectorsize), in btrfs_setsize()
5015 (u64)-1); in btrfs_setsize()
5027 &BTRFS_I(inode)->runtime_flags); in btrfs_setsize()
5034 if (ret && inode->i_nlink) { in btrfs_setsize()
5038 * Truncate failed, so fix up the in-memory size. We in btrfs_setsize()
5041 * in-memory size to match. in btrfs_setsize()
5043 err = btrfs_wait_ordered_range(inode, 0, (u64)-1); in btrfs_setsize()
5046 i_size_write(inode, BTRFS_I(inode)->disk_i_size); in btrfs_setsize()
5057 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_setattr()
5061 return -EROFS; in btrfs_setattr()
5067 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { in btrfs_setattr()
5073 if (attr->ia_valid) { in btrfs_setattr()
5078 if (!err && attr->ia_valid & ATTR_MODE) in btrfs_setattr()
5079 err = posix_acl_chmod(idmap, dentry, inode->i_mode); in btrfs_setattr()
5100 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in evict_inode_truncate_pages()
5103 ASSERT(inode->i_state & I_FREEING); in evict_inode_truncate_pages()
5104 truncate_inode_pages_final(&inode->i_data); in evict_inode_truncate_pages()
5106 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false); in evict_inode_truncate_pages()
5116 * queue kthread), inode references (inode->i_count) were not taken in evict_inode_truncate_pages()
5120 * reference count - if we don't do it, when they access the inode's in evict_inode_truncate_pages()
5122 * use-after-free issue. in evict_inode_truncate_pages()
5124 spin_lock(&io_tree->lock); in evict_inode_truncate_pages()
5125 while (!RB_EMPTY_ROOT(&io_tree->state)) { in evict_inode_truncate_pages()
5132 node = rb_first(&io_tree->state); in evict_inode_truncate_pages()
5134 start = state->start; in evict_inode_truncate_pages()
5135 end = state->end; in evict_inode_truncate_pages()
5136 state_flags = state->state; in evict_inode_truncate_pages()
5137 spin_unlock(&io_tree->lock); in evict_inode_truncate_pages()
5151 end - start + 1, NULL); in evict_inode_truncate_pages()
5158 spin_lock(&io_tree->lock); in evict_inode_truncate_pages()
5160 spin_unlock(&io_tree->lock); in evict_inode_truncate_pages()
5166 struct btrfs_fs_info *fs_info = root->fs_info; in evict_refill_and_join()
5178 * above. We reserve our extra bit here because we generate a ton of in evict_refill_and_join()
5182 * if we fail to make this reservation we can re-try without the in evict_refill_and_join()
5185 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra, in evict_refill_and_join()
5188 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size, in evict_refill_and_join()
5193 return ERR_PTR(-ENOSPC); in evict_refill_and_join()
5203 trans->block_rsv = &fs_info->trans_block_rsv; in evict_refill_and_join()
5204 trans->bytes_reserved = delayed_refs_extra; in evict_refill_and_join()
5205 btrfs_block_rsv_migrate(rsv, trans->block_rsv, in evict_refill_and_join()
5213 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_evict_inode()
5215 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_evict_inode()
5229 if (inode->i_nlink && in btrfs_evict_inode()
5230 ((btrfs_root_refs(&root->root_item) != 0 && in btrfs_evict_inode()
5231 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) || in btrfs_evict_inode()
5238 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) in btrfs_evict_inode()
5241 if (inode->i_nlink > 0) { in btrfs_evict_inode()
5242 BUG_ON(btrfs_root_refs(&root->root_item) != 0 && in btrfs_evict_inode()
5243 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID); in btrfs_evict_inode()
5266 rsv->size = btrfs_calc_metadata_size(fs_info, 1); in btrfs_evict_inode()
5267 rsv->failfast = true; in btrfs_evict_inode()
5283 trans->block_rsv = rsv; in btrfs_evict_inode()
5286 trans->block_rsv = &fs_info->trans_block_rsv; in btrfs_evict_inode()
5294 if (ret && ret != -ENOSPC && ret != -EAGAIN) in btrfs_evict_inode()
5311 trans->block_rsv = rsv; in btrfs_evict_inode()
5313 trans->block_rsv = &fs_info->trans_block_rsv; in btrfs_evict_inode()
5333 * If no dir entries were found, returns -ENOENT.
5334 * If found a corrupted location in dir entry, returns -EUCLEAN.
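 /*
  * Illustrative calling pattern, simplified from btrfs_lookup_dentry() further
  * below (the subvolume/BTRFS_ROOT_ITEM_KEY case is omitted here):
  *
  *	ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type);
  *	if (ret < 0)
  *		return ERR_PTR(ret);
  *	if (location.type == BTRFS_INODE_ITEM_KEY)
  *		inode = btrfs_iget(dir->i_sb, location.objectid, root);
  */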
5341 struct btrfs_root *root = dir->root; in btrfs_inode_by_name()
5347 return -ENOMEM; in btrfs_inode_by_name()
5349 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname); in btrfs_inode_by_name()
5358 /* This needs to handle no-key deletions later on */ in btrfs_inode_by_name()
5363 ret = di ? PTR_ERR(di) : -ENOENT; in btrfs_inode_by_name()
5367 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); in btrfs_inode_by_name()
5368 if (location->type != BTRFS_INODE_ITEM_KEY && in btrfs_inode_by_name()
5369 location->type != BTRFS_ROOT_ITEM_KEY) { in btrfs_inode_by_name()
5370 ret = -EUCLEAN; in btrfs_inode_by_name()
5371 btrfs_warn(root->fs_info, in btrfs_inode_by_name()
5374 location->objectid, location->type, location->offset); in btrfs_inode_by_name()
5377 *type = btrfs_dir_ftype(path->nodes[0], di); in btrfs_inode_by_name()
5404 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname); in fixup_tree_root_location()
5410 err = -ENOMEM; in fixup_tree_root_location()
5414 err = -ENOENT; in fixup_tree_root_location()
5415 key.objectid = dir->root->root_key.objectid; in fixup_tree_root_location()
5417 key.offset = location->objectid; in fixup_tree_root_location()
5419 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); in fixup_tree_root_location()
5426 leaf = path->nodes[0]; in fixup_tree_root_location()
5427 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); in fixup_tree_root_location()
5439 new_root = btrfs_get_fs_root(fs_info, location->objectid, true); in fixup_tree_root_location()
5446 location->objectid = btrfs_root_dirid(&new_root->root_item); in fixup_tree_root_location()
5447 location->type = BTRFS_INODE_ITEM_KEY; in fixup_tree_root_location()
5448 location->offset = 0; in fixup_tree_root_location()
5458 struct btrfs_root *root = inode->root; in inode_tree_add()
5462 struct rb_node *new = &inode->rb_node; in inode_tree_add()
5465 if (inode_unhashed(&inode->vfs_inode)) in inode_tree_add()
5468 spin_lock(&root->inode_lock); in inode_tree_add()
5469 p = &root->inode_tree.rb_node; in inode_tree_add()
5475 p = &parent->rb_left; in inode_tree_add()
5477 p = &parent->rb_right; in inode_tree_add()
5479 WARN_ON(!(entry->vfs_inode.i_state & in inode_tree_add()
5481 rb_replace_node(parent, new, &root->inode_tree); in inode_tree_add()
5483 spin_unlock(&root->inode_lock); in inode_tree_add()
5488 rb_insert_color(new, &root->inode_tree); in inode_tree_add()
5489 spin_unlock(&root->inode_lock); in inode_tree_add()
5494 struct btrfs_root *root = inode->root; in inode_tree_del()
5497 spin_lock(&root->inode_lock); in inode_tree_del()
5498 if (!RB_EMPTY_NODE(&inode->rb_node)) { in inode_tree_del()
5499 rb_erase(&inode->rb_node, &root->inode_tree); in inode_tree_del()
5500 RB_CLEAR_NODE(&inode->rb_node); in inode_tree_del()
5501 empty = RB_EMPTY_ROOT(&root->inode_tree); in inode_tree_del()
5503 spin_unlock(&root->inode_lock); in inode_tree_del()
5505 if (empty && btrfs_root_refs(&root->root_item) == 0) { in inode_tree_del()
5506 spin_lock(&root->inode_lock); in inode_tree_del()
5507 empty = RB_EMPTY_ROOT(&root->inode_tree); in inode_tree_del()
5508 spin_unlock(&root->inode_lock); in inode_tree_del()
5519 inode->i_ino = args->ino; in btrfs_init_locked_inode()
5520 BTRFS_I(inode)->location.objectid = args->ino; in btrfs_init_locked_inode()
5521 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY; in btrfs_init_locked_inode()
5522 BTRFS_I(inode)->location.offset = 0; in btrfs_init_locked_inode()
5523 BTRFS_I(inode)->root = btrfs_grab_root(args->root); in btrfs_init_locked_inode()
5524 BUG_ON(args->root && !BTRFS_I(inode)->root); in btrfs_init_locked_inode()
5526 if (args->root && args->root == args->root->fs_info->tree_root && in btrfs_init_locked_inode()
5527 args->ino != BTRFS_BTREE_INODE_OBJECTID) in btrfs_init_locked_inode()
5529 &BTRFS_I(inode)->runtime_flags); in btrfs_init_locked_inode()
5537 return args->ino == BTRFS_I(inode)->location.objectid && in btrfs_find_actor()
5538 args->root == BTRFS_I(inode)->root; in btrfs_find_actor()
5570 return ERR_PTR(-ENOMEM); in btrfs_iget_path()
5572 if (inode->i_state & I_NEW) { in btrfs_iget_path()
5587 ret = -ENOENT; in btrfs_iget_path()
5604 struct inode *inode = new_inode(dir->i_sb); in new_simple_dir()
5607 return ERR_PTR(-ENOMEM); in new_simple_dir()
5609 BTRFS_I(inode)->root = btrfs_grab_root(root); in new_simple_dir()
5610 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); in new_simple_dir()
5611 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); in new_simple_dir()
5613 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; in new_simple_dir()
5615	 * We only need lookup; the rest is read-only and there's no inode in new_simple_dir()
5618 inode->i_op = &simple_dir_inode_operations; in new_simple_dir()
5619 inode->i_opflags &= ~IOP_XATTR; in new_simple_dir()
5620 inode->i_fop = &simple_dir_operations; in new_simple_dir()
5621 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; in new_simple_dir()
5622 inode->i_mtime = inode_set_ctime_current(inode); in new_simple_dir()
5623 inode->i_atime = dir->i_atime; in new_simple_dir()
5624 BTRFS_I(inode)->i_otime = inode->i_mtime; in new_simple_dir()
5625 inode->i_uid = dir->i_uid; in new_simple_dir()
5626 inode->i_gid = dir->i_gid; in new_simple_dir()
5642 return fs_umode_to_ftype(inode->i_mode); in btrfs_inode_type()
5647 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); in btrfs_lookup_dentry()
5649 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_lookup_dentry()
5655 if (dentry->d_name.len > BTRFS_NAME_LEN) in btrfs_lookup_dentry()
5656 return ERR_PTR(-ENAMETOOLONG); in btrfs_lookup_dentry()
5663 inode = btrfs_iget(dir->i_sb, location.objectid, root); in btrfs_lookup_dentry()
5667 /* Do extra check against inode mode with di_type */ in btrfs_lookup_dentry()
5671 inode->i_mode, btrfs_inode_type(inode), in btrfs_lookup_dentry()
5674 return ERR_PTR(-EUCLEAN); in btrfs_lookup_dentry()
5682 if (ret != -ENOENT) in btrfs_lookup_dentry()
5687 inode = btrfs_iget(dir->i_sb, location.objectid, sub_root); in btrfs_lookup_dentry()
5693 down_read(&fs_info->cleanup_work_sem); in btrfs_lookup_dentry()
5694 if (!sb_rdonly(inode->i_sb)) in btrfs_lookup_dentry()
5696 up_read(&fs_info->cleanup_work_sem); in btrfs_lookup_dentry()
5712 inode = d_inode(dentry->d_parent); in btrfs_dentry_delete()
5715 root = BTRFS_I(inode)->root; in btrfs_dentry_delete()
5716 if (btrfs_root_refs(&root->root_item) == 0) in btrfs_dentry_delete()
5730 if (inode == ERR_PTR(-ENOENT)) in btrfs_lookup()
5737 * in-memory index_cnt variable to the first free sequence number.
5741 struct btrfs_root *root = inode->root; in btrfs_set_inode_index_count()
5749 key.offset = (u64)-1; in btrfs_set_inode_index_count()
5753 return -ENOMEM; in btrfs_set_inode_index_count()
5763 if (path->slots[0] == 0) { in btrfs_set_inode_index_count()
5764 inode->index_cnt = BTRFS_DIR_START_INDEX; in btrfs_set_inode_index_count()
5768 path->slots[0]--; in btrfs_set_inode_index_count()
5770 leaf = path->nodes[0]; in btrfs_set_inode_index_count()
5771 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in btrfs_set_inode_index_count()
5775 inode->index_cnt = BTRFS_DIR_START_INDEX; in btrfs_set_inode_index_count()
5779 inode->index_cnt = found_key.offset + 1; in btrfs_set_inode_index_count()
5790 if (dir->index_cnt == (u64)-1) { in btrfs_get_dir_last_index()
5800 *index = dir->index_cnt - 1; in btrfs_get_dir_last_index()
5828 return -ENOMEM; in btrfs_opendir()
5829 private->last_index = last_index; in btrfs_opendir()
5830 private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); in btrfs_opendir()
5831 if (!private->filldir_buf) { in btrfs_opendir()
5833 return -ENOMEM; in btrfs_opendir()
5835 file->private_data = private; in btrfs_opendir()
5841 struct btrfs_file_private *private = file->private_data; in btrfs_dir_llseek()
5845 &private->last_index); in btrfs_dir_llseek()
5861 while (entries--) { in btrfs_filldir()
5865 ctx->pos = get_unaligned(&entry->offset); in btrfs_filldir()
5866 if (!dir_emit(ctx, name, get_unaligned(&entry->name_len), in btrfs_filldir()
5867 get_unaligned(&entry->ino), in btrfs_filldir()
5868 get_unaligned(&entry->type))) in btrfs_filldir()
5871 get_unaligned(&entry->name_len); in btrfs_filldir()
5872 ctx->pos++; in btrfs_filldir()
5880 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_real_readdir()
5881 struct btrfs_file_private *private = file->private_data; in btrfs_real_readdir()
5902 return -ENOMEM; in btrfs_real_readdir()
5904 addr = private->filldir_buf; in btrfs_real_readdir()
5905 path->reada = READA_FORWARD; in btrfs_real_readdir()
5907 put = btrfs_readdir_get_delayed_items(inode, private->last_index, in btrfs_real_readdir()
5912 key.offset = ctx->pos; in btrfs_real_readdir()
5917 struct extent_buffer *leaf = path->nodes[0]; in btrfs_real_readdir()
5924 if (found_key.offset < ctx->pos) in btrfs_real_readdir()
5926 if (found_key.offset > private->last_index) in btrfs_real_readdir()
5930 di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); in btrfs_real_readdir()
5935 ret = btrfs_filldir(private->filldir_buf, entries, ctx); in btrfs_real_readdir()
5938 addr = private->filldir_buf; in btrfs_real_readdir()
5949 put_unaligned(name_len, &entry->name_len); in btrfs_real_readdir()
5950 put_unaligned(fs_ftype_to_dtype(ftype), &entry->type); in btrfs_real_readdir()
5952 put_unaligned(location.objectid, &entry->ino); in btrfs_real_readdir()
5953 put_unaligned(found_key.offset, &entry->offset); in btrfs_real_readdir()
5964 ret = btrfs_filldir(private->filldir_buf, entries, ctx); in btrfs_real_readdir()
5980 * they're returned by readdir. Until we re-use freed offsets in btrfs_real_readdir()
5989 if (ctx->pos >= INT_MAX) in btrfs_real_readdir()
5990 ctx->pos = LLONG_MAX; in btrfs_real_readdir()
5992 ctx->pos = INT_MAX; in btrfs_real_readdir()
6010 struct btrfs_root *root = inode->root; in btrfs_dirty_inode()
6011 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_dirty_inode()
6015 if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags)) in btrfs_dirty_inode()
6023 if (ret && (ret == -ENOSPC || ret == -EDQUOT)) { in btrfs_dirty_inode()
6033 if (inode->delayed_node) in btrfs_dirty_inode()
6045 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_update_time()
6049 return -EROFS; in btrfs_update_time()
6063 if (dir->index_cnt == (u64)-1) { in btrfs_set_inode_index()
6072 *index = dir->index_cnt; in btrfs_set_inode_index()
6073 dir->index_cnt++; in btrfs_set_inode_index()
6082 args.ino = BTRFS_I(inode)->location.objectid; in btrfs_insert_inode_locked()
6083 args.root = BTRFS_I(inode)->root; in btrfs_insert_inode_locked()
6086 btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root), in btrfs_insert_inode_locked()
6093 struct inode *dir = args->dir; in btrfs_new_inode_prepare()
6094 struct inode *inode = args->inode; in btrfs_new_inode_prepare()
6097 if (!args->orphan) { in btrfs_new_inode_prepare()
6098 ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0, in btrfs_new_inode_prepare()
6099 &args->fname); in btrfs_new_inode_prepare()
6104 ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl); in btrfs_new_inode_prepare()
6106 fscrypt_free_filename(&args->fname); in btrfs_new_inode_prepare()
6113 if (BTRFS_I(dir)->prop_compress) in btrfs_new_inode_prepare()
6116 if (args->default_acl) in btrfs_new_inode_prepare()
6119 if (args->acl) in btrfs_new_inode_prepare()
6123 if (dir->i_security) in btrfs_new_inode_prepare()
6126 if (args->orphan) { in btrfs_new_inode_prepare()
6146 posix_acl_release(args->acl); in btrfs_new_inode_args_destroy()
6147 posix_acl_release(args->default_acl); in btrfs_new_inode_args_destroy()
6148 fscrypt_free_filename(&args->fname); in btrfs_new_inode_args_destroy()
6160 flags = dir->flags; in btrfs_inherit_iflags()
6163 inode->flags &= ~BTRFS_INODE_COMPRESS; in btrfs_inherit_iflags()
6164 inode->flags |= BTRFS_INODE_NOCOMPRESS; in btrfs_inherit_iflags()
6166 inode->flags &= ~BTRFS_INODE_NOCOMPRESS; in btrfs_inherit_iflags()
6167 inode->flags |= BTRFS_INODE_COMPRESS; in btrfs_inherit_iflags()
6171 inode->flags |= BTRFS_INODE_NODATACOW; in btrfs_inherit_iflags()
6172 if (S_ISREG(inode->vfs_inode.i_mode)) in btrfs_inherit_iflags()
6173 inode->flags |= BTRFS_INODE_NODATASUM; in btrfs_inherit_iflags()
6176 btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode); in btrfs_inherit_iflags()
6182 struct inode *dir = args->dir; in btrfs_create_new_inode()
6183 struct inode *inode = args->inode; in btrfs_create_new_inode()
6184 const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name; in btrfs_create_new_inode()
6185 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); in btrfs_create_new_inode()
6200 return -ENOMEM; in btrfs_create_new_inode()
6202 if (!args->subvol) in btrfs_create_new_inode()
6203 BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root); in btrfs_create_new_inode()
6204 root = BTRFS_I(inode)->root; in btrfs_create_new_inode()
6209 inode->i_ino = objectid; in btrfs_create_new_inode()
6211 if (args->orphan) { in btrfs_create_new_inode()
6220 ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index); in btrfs_create_new_inode()
6225 BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX; in btrfs_create_new_inode()
6226 BTRFS_I(inode)->generation = trans->transid; in btrfs_create_new_inode()
6227 inode->i_generation = BTRFS_I(inode)->generation; in btrfs_create_new_inode()
6234 if (!args->subvol) in btrfs_create_new_inode()
6237 if (S_ISREG(inode->i_mode)) { in btrfs_create_new_inode()
6239 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; in btrfs_create_new_inode()
6241 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW | in btrfs_create_new_inode()
6245 location = &BTRFS_I(inode)->location; in btrfs_create_new_inode()
6246 location->objectid = objectid; in btrfs_create_new_inode()
6247 location->offset = 0; in btrfs_create_new_inode()
6248 location->type = BTRFS_INODE_ITEM_KEY; in btrfs_create_new_inode()
6252 if (!args->orphan) in btrfs_create_new_inode()
6253 BTRFS_I(dir)->index_cnt--; in btrfs_create_new_inode()
6271 if (!args->orphan) { in btrfs_create_new_inode()
6280 if (args->subvol) { in btrfs_create_new_inode()
6285 sizes[1] = name->len + sizeof(*ref); in btrfs_create_new_inode()
6291 batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]); in btrfs_create_new_inode()
6292 batch.nr = args->orphan ? 1 : 2; in btrfs_create_new_inode()
6299 inode->i_mtime = inode_set_ctime_current(inode); in btrfs_create_new_inode()
6300 inode->i_atime = inode->i_mtime; in btrfs_create_new_inode()
6301 BTRFS_I(inode)->i_otime = inode->i_mtime; in btrfs_create_new_inode()
6308 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], in btrfs_create_new_inode()
6310 memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item, in btrfs_create_new_inode()
6312 fill_inode_item(trans, path->nodes[0], inode_item, inode); in btrfs_create_new_inode()
6314 if (!args->orphan) { in btrfs_create_new_inode()
6315 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, in btrfs_create_new_inode()
6318 if (args->subvol) { in btrfs_create_new_inode()
6319 btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2); in btrfs_create_new_inode()
6320 btrfs_set_inode_ref_index(path->nodes[0], ref, 0); in btrfs_create_new_inode()
6321 write_extent_buffer(path->nodes[0], "..", ptr, 2); in btrfs_create_new_inode()
6323 btrfs_set_inode_ref_name_len(path->nodes[0], ref, in btrfs_create_new_inode()
6324 name->len); in btrfs_create_new_inode()
6325 btrfs_set_inode_ref_index(path->nodes[0], ref, in btrfs_create_new_inode()
6326 BTRFS_I(inode)->dir_index); in btrfs_create_new_inode()
6327 write_extent_buffer(path->nodes[0], name->name, ptr, in btrfs_create_new_inode()
6328 name->len); in btrfs_create_new_inode()
6332 btrfs_mark_buffer_dirty(trans, path->nodes[0]); in btrfs_create_new_inode()
6341 if (args->subvol) { in btrfs_create_new_inode()
6348 parent = btrfs_iget(fs_info->sb, BTRFS_FIRST_FREE_OBJECTID, in btrfs_create_new_inode()
6349 BTRFS_I(dir)->root); in btrfs_create_new_inode()
6362 btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, in btrfs_create_new_inode()
6370 if (!args->subvol) { in btrfs_create_new_inode()
6385 if (args->orphan) { in btrfs_create_new_inode()
6389 0, BTRFS_I(inode)->dir_index); in btrfs_create_new_inode()
6422 struct btrfs_root *root = parent_inode->root; in btrfs_add_link()
6427 memcpy(&key, &inode->root->root_key, sizeof(key)); in btrfs_add_link()
6436 root->root_key.objectid, parent_ino, in btrfs_add_link()
6448 btrfs_inode_type(&inode->vfs_inode), index); in btrfs_add_link()
6449 if (ret == -EEXIST || ret == -EOVERFLOW) in btrfs_add_link()
6456 btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size + in btrfs_add_link()
6457 name->len * 2); in btrfs_add_link()
6458 inode_inc_iversion(&parent_inode->vfs_inode); in btrfs_add_link()
6465 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) in btrfs_add_link()
6466 parent_inode->vfs_inode.i_mtime = in btrfs_add_link()
6467 inode_set_ctime_current(&parent_inode->vfs_inode); in btrfs_add_link()
6479 root->root_key.objectid, parent_ino, in btrfs_add_link()
6500 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); in btrfs_create_common()
6501 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_create_common()
6540 inode = new_inode(dir->i_sb); in btrfs_mknod()
6542 return -ENOMEM; in btrfs_mknod()
6544 inode->i_op = &btrfs_special_inode_operations; in btrfs_mknod()
6545 init_special_inode(inode, inode->i_mode, rdev); in btrfs_mknod()
6554 inode = new_inode(dir->i_sb); in btrfs_create()
6556 return -ENOMEM; in btrfs_create()
6558 inode->i_fop = &btrfs_file_operations; in btrfs_create()
6559 inode->i_op = &btrfs_file_inode_operations; in btrfs_create()
6560 inode->i_mapping->a_ops = &btrfs_aops; in btrfs_create()
6568 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_link()
6570 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_link()
6577 if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid) in btrfs_link()
6578 return -EXDEV; in btrfs_link()
6580 if (inode->i_nlink >= BTRFS_LINK_MAX) in btrfs_link()
6581 return -EMLINK; in btrfs_link()
6583 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname); in btrfs_link()
6597 trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6); in btrfs_link()
6605 BTRFS_I(inode)->dir_index = 0ULL; in btrfs_link()
6610 set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); in btrfs_link()
6618 struct dentry *parent = dentry->d_parent; in btrfs_link()
6623 if (inode->i_nlink == 1) { in btrfs_link()
6653 inode = new_inode(dir->i_sb); in btrfs_mkdir()
6655 return -ENOMEM; in btrfs_mkdir()
6657 inode->i_op = &btrfs_dir_inode_operations; in btrfs_mkdir()
6658 inode->i_fop = &btrfs_dir_file_operations; in btrfs_mkdir()
6667 struct extent_buffer *leaf = path->nodes[0]; in uncompress_inline()
6676 inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]); in uncompress_inline()
6679 return -ENOMEM; in uncompress_inline()
6696 memzero_page(page, max_size, PAGE_SIZE - max_size); in uncompress_inline()
6713 fi = btrfs_item_ptr(path->nodes[0], path->slots[0], in read_inline_extent()
6715 if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE) in read_inline_extent()
6719 btrfs_file_extent_ram_bytes(path->nodes[0], fi)); in read_inline_extent()
6721 read_extent_buffer(path->nodes[0], kaddr, in read_inline_extent()
6725 memzero_page(page, copy_size, PAGE_SIZE - copy_size); in read_inline_extent()
6739 * it from the B-tree and caching it if necessary. Note that there may be more
6745 * Return: ERR_PTR on error, non-NULL extent_map on success.
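 /*
  * Illustrative use, with locking of the file range and most error handling
  * left out; "hole" is just a local flag for the sketch.  The caller owns a
  * reference on the returned map and must drop it with free_extent_map():
  *
  *	em = btrfs_get_extent(inode, NULL, 0, start, len);
  *	if (IS_ERR(em))
  *		return PTR_ERR(em);
  *	hole = (em->block_start == EXTENT_MAP_HOLE);
  *	free_extent_map(em);
  */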
6751 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_get_extent()
6756 int extent_type = -1; in btrfs_get_extent()
6758 struct btrfs_root *root = inode->root; in btrfs_get_extent()
6763 struct extent_map_tree *em_tree = &inode->extent_tree; in btrfs_get_extent()
6765 read_lock(&em_tree->lock); in btrfs_get_extent()
6767 read_unlock(&em_tree->lock); in btrfs_get_extent()
6770 if (em->start > start || em->start + em->len <= start) in btrfs_get_extent()
6772 else if (em->block_start == EXTENT_MAP_INLINE && page) in btrfs_get_extent()
6779 ret = -ENOMEM; in btrfs_get_extent()
6782 em->start = EXTENT_MAP_HOLE; in btrfs_get_extent()
6783 em->orig_start = EXTENT_MAP_HOLE; in btrfs_get_extent()
6784 em->len = (u64)-1; in btrfs_get_extent()
6785 em->block_len = (u64)-1; in btrfs_get_extent()
6789 ret = -ENOMEM; in btrfs_get_extent()
6794 path->reada = READA_FORWARD; in btrfs_get_extent()
6802 path->search_commit_root = 1; in btrfs_get_extent()
6803 path->skip_locking = 1; in btrfs_get_extent()
6810 if (path->slots[0] == 0) in btrfs_get_extent()
6812 path->slots[0]--; in btrfs_get_extent()
6816 leaf = path->nodes[0]; in btrfs_get_extent()
6817 item = btrfs_item_ptr(leaf, path->slots[0], in btrfs_get_extent()
6819 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in btrfs_get_extent()
6838 if (!S_ISREG(inode->vfs_inode.i_mode)) { in btrfs_get_extent()
6839 ret = -EUCLEAN; in btrfs_get_extent()
6841 "regular/prealloc extent found for non-regular inode %llu", in btrfs_get_extent()
6849 path->slots[0], in btrfs_get_extent()
6854 path->slots[0]++; in btrfs_get_extent()
6855 if (path->slots[0] >= btrfs_header_nritems(leaf)) { in btrfs_get_extent()
6862 leaf = path->nodes[0]; in btrfs_get_extent()
6864 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in btrfs_get_extent()
6874 em->start = start; in btrfs_get_extent()
6875 em->orig_start = start; in btrfs_get_extent()
6876 em->len = found_key.offset - start; in btrfs_get_extent()
6877 em->block_start = EXTENT_MAP_HOLE; in btrfs_get_extent()
6889 * ensured by tree-checker and inline extent creation path. in btrfs_get_extent()
6894 ASSERT(em->start == 0); in btrfs_get_extent()
6902 ASSERT(em->block_start == EXTENT_MAP_INLINE); in btrfs_get_extent()
6903 ASSERT(em->len == fs_info->sectorsize); in btrfs_get_extent()
6911 em->start = start; in btrfs_get_extent()
6912 em->orig_start = start; in btrfs_get_extent()
6913 em->len = len; in btrfs_get_extent()
6914 em->block_start = EXTENT_MAP_HOLE; in btrfs_get_extent()
6918 if (em->start > start || extent_map_end(em) <= start) { in btrfs_get_extent()
6921 em->start, em->len, start, len); in btrfs_get_extent()
6922 ret = -EIO; in btrfs_get_extent()
6926 write_lock(&em_tree->lock); in btrfs_get_extent()
6928 write_unlock(&em_tree->lock); in btrfs_get_extent()
6972 start + len - 1, false); in btrfs_create_dio_extent()
6976 ASSERT(!dio_data->ordered); in btrfs_create_dio_extent()
6977 dio_data->ordered = ordered; in btrfs_create_dio_extent()
6988 struct btrfs_root *root = inode->root; in btrfs_new_extent_direct()
6989 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_new_extent_direct()
6997 ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize, in btrfs_new_extent_direct()
6999 if (ret == -EAGAIN) { in btrfs_new_extent_direct()
7001 wait_on_bit_io(&inode->root->fs_info->flags, BTRFS_FS_NEED_ZONE_FINISH, in btrfs_new_extent_direct()
7025 if (!block_group || block_group->ro) in btrfs_extent_readonly()
7039 * @orig_len: (optional) Return the original on-disk length of the file extent
7056 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in can_nocow_extent()
7061 struct btrfs_root *root = BTRFS_I(inode)->root; in can_nocow_extent()
7062 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in can_nocow_extent()
7069 return -ENOMEM; in can_nocow_extent()
7070 path->nowait = nowait; in can_nocow_extent()
7078 if (path->slots[0] == 0) { in can_nocow_extent()
7083 path->slots[0]--; in can_nocow_extent()
7086 leaf = path->nodes[0]; in can_nocow_extent()
7087 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in can_nocow_extent()
7102 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); in can_nocow_extent()
7108 nocow_args.end = offset + *len - 1; in can_nocow_extent()
7126 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && in can_nocow_extent()
7131 root->fs_info->sectorsize) - 1; in can_nocow_extent()
7135 ret = -EAGAIN; in can_nocow_extent()
7141 *orig_start = key.offset - nocow_args.extent_offset; in can_nocow_extent()
7158 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in lock_extent_direct()
7166 return -EAGAIN; in lock_extent_direct()
7176 lockend - lockstart + 1); in lock_extent_direct()
7186 (!writing || !filemap_range_has_page(inode->i_mapping, in lock_extent_direct()
7195 ret = -EAGAIN; in lock_extent_direct()
7214 test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) in lock_extent_direct()
7217 ret = nowait ? -EAGAIN : -ENOTBLK; in lock_extent_direct()
7233 ret = nowait ? -EAGAIN : -ENOTBLK; in lock_extent_direct()
7262 return ERR_PTR(-ENOMEM); in create_io_em()
7264 em->start = start; in create_io_em()
7265 em->orig_start = orig_start; in create_io_em()
7266 em->len = len; in create_io_em()
7267 em->block_len = block_len; in create_io_em()
7268 em->block_start = block_start; in create_io_em()
7269 em->orig_block_len = orig_block_len; in create_io_em()
7270 em->ram_bytes = ram_bytes; in create_io_em()
7271 em->generation = -1; in create_io_em()
7272 set_bit(EXTENT_FLAG_PINNED, &em->flags); in create_io_em()
7274 set_bit(EXTENT_FLAG_FILLING, &em->flags); in create_io_em()
7276 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); in create_io_em()
7277 em->compress_type = compress_type; in create_io_em()
7298 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_get_blocks_direct_write()
7318 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || in btrfs_get_blocks_direct_write()
7319 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && in btrfs_get_blocks_direct_write()
7320 em->block_start != EXTENT_MAP_HOLE)) { in btrfs_get_blocks_direct_write()
7321 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) in btrfs_get_blocks_direct_write()
7325 len = min(len, em->len - (start - em->start)); in btrfs_get_blocks_direct_write()
7326 block_start = em->block_start + (start - em->start); in btrfs_get_blocks_direct_write()
7348 if (nowait && (ret == -ENOSPC || ret == -EDQUOT)) in btrfs_get_blocks_direct_write()
7349 ret = -EAGAIN; in btrfs_get_blocks_direct_write()
7370 dio_data->nocow_done = true; in btrfs_get_blocks_direct_write()
7377 ret = -EAGAIN; in btrfs_get_blocks_direct_write()
7385 if (!dio_data->data_space_reserved) { in btrfs_get_blocks_direct_write()
7386 ret = -ENOSPC; in btrfs_get_blocks_direct_write()
7406 len = min(len, em->len - (start - em->start)); in btrfs_get_blocks_direct_write()
7409 prev_len - len, true); in btrfs_get_blocks_direct_write()
7438 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_dio_iomap_begin()
7441 struct btrfs_dio_data *dio_data = iter->private; in btrfs_dio_iomap_begin()
7458 * -EAGAIN at this point so that the normal path is used. in btrfs_dio_iomap_begin()
7461 return -EAGAIN; in btrfs_dio_iomap_begin()
7468 len = min_t(u64, len, fs_info->sectorsize * BTRFS_MAX_BIO_SECTORS); in btrfs_dio_iomap_begin()
7471 lockend = start + len - 1; in btrfs_dio_iomap_begin()
7477 * outstanding dirty pages are on disk - the first flush only starts in btrfs_dio_iomap_begin()
7485 * dirty or under writeback (same as for the non-compression case). in btrfs_dio_iomap_begin()
7492 &BTRFS_I(inode)->runtime_flags)) { in btrfs_dio_iomap_begin()
7494 if (filemap_range_needs_writeback(inode->i_mapping, in btrfs_dio_iomap_begin()
7496 return -EAGAIN; in btrfs_dio_iomap_begin()
7498 ret = filemap_fdatawrite_range(inode->i_mapping, start, in btrfs_dio_iomap_begin()
7499 start + length - 1); in btrfs_dio_iomap_begin()
7518 &dio_data->data_reserved, in btrfs_dio_iomap_begin()
7521 dio_data->data_space_reserved = true; in btrfs_dio_iomap_begin()
7522 else if (ret && !(BTRFS_I(inode)->flags & in btrfs_dio_iomap_begin()
7552 * We return -ENOTBLK because that's what makes DIO go ahead and go back in btrfs_dio_iomap_begin()
7556 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || in btrfs_dio_iomap_begin()
7557 em->block_start == EXTENT_MAP_INLINE) { in btrfs_dio_iomap_begin()
7560 * If we are in a NOWAIT context, return -EAGAIN in order to in btrfs_dio_iomap_begin()
7564 * space - this happens if we were able to read some data from in btrfs_dio_iomap_begin()
7565	 * previous non-compressed extents and then when we fall back to in btrfs_dio_iomap_begin()
7569 * of bytes previously read is > 0, so it does not return -EFAULT). in btrfs_dio_iomap_begin()
7571 ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK; in btrfs_dio_iomap_begin()
7575 len = min(len, em->len - (start - em->start)); in btrfs_dio_iomap_begin()
7579 * (or a mix of extents and holes), then we return -EAGAIN to make the in btrfs_dio_iomap_begin()
7595	 * which we return to our caller - we should only return EIOCBQUEUED in btrfs_dio_iomap_begin()
7600 ret = -EAGAIN; in btrfs_dio_iomap_begin()
7611 len = min(len, em->len - (start - em->start)); in btrfs_dio_iomap_begin()
7612 if (dio_data->data_space_reserved) { in btrfs_dio_iomap_begin()
7616 if (dio_data->nocow_done) { in btrfs_dio_iomap_begin()
7621 release_len = data_alloc_len - len; in btrfs_dio_iomap_begin()
7626 dio_data->data_reserved, in btrfs_dio_iomap_begin()
7641 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, in btrfs_dio_iomap_begin()
7651 if ((em->block_start == EXTENT_MAP_HOLE) || in btrfs_dio_iomap_begin()
7652 (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) { in btrfs_dio_iomap_begin()
7653 iomap->addr = IOMAP_NULL_ADDR; in btrfs_dio_iomap_begin()
7654 iomap->type = IOMAP_HOLE; in btrfs_dio_iomap_begin()
7656 iomap->addr = em->block_start + (start - em->start); in btrfs_dio_iomap_begin()
7657 iomap->type = IOMAP_MAPPED; in btrfs_dio_iomap_begin()
7659 iomap->offset = start; in btrfs_dio_iomap_begin()
7660 iomap->bdev = fs_info->fs_devices->latest_dev->bdev; in btrfs_dio_iomap_begin()
7661 iomap->length = len; in btrfs_dio_iomap_begin()
7667 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, in btrfs_dio_iomap_begin()
7670 if (dio_data->data_space_reserved) { in btrfs_dio_iomap_begin()
7672 dio_data->data_reserved, in btrfs_dio_iomap_begin()
7674 extent_changeset_free(dio_data->data_reserved); in btrfs_dio_iomap_begin()
7684 struct btrfs_dio_data *dio_data = iter->private; in btrfs_dio_iomap_end()
7685 size_t submitted = dio_data->submitted; in btrfs_dio_iomap_end()
7689 if (!write && (iomap->type == IOMAP_HOLE)) { in btrfs_dio_iomap_end()
7691 unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1, in btrfs_dio_iomap_end()
7698 length -= submitted; in btrfs_dio_iomap_end()
7700 btrfs_finish_ordered_extent(dio_data->ordered, NULL, in btrfs_dio_iomap_end()
7703 unlock_extent(&BTRFS_I(inode)->io_tree, pos, in btrfs_dio_iomap_end()
7704 pos + length - 1, NULL); in btrfs_dio_iomap_end()
7705 ret = -ENOTBLK; in btrfs_dio_iomap_end()
7708 btrfs_put_ordered_extent(dio_data->ordered); in btrfs_dio_iomap_end()
7709 dio_data->ordered = NULL; in btrfs_dio_iomap_end()
7713 extent_changeset_free(dio_data->data_reserved); in btrfs_dio_iomap_end()
7721 struct btrfs_inode *inode = bbio->inode; in btrfs_dio_end_io()
7722 struct bio *bio = &bbio->bio; in btrfs_dio_end_io()
7724 if (bio->bi_status) { in btrfs_dio_end_io()
7725 btrfs_warn(inode->root->fs_info, in btrfs_dio_end_io()
7727 btrfs_ino(inode), bio->bi_opf, in btrfs_dio_end_io()
7728 dip->file_offset, dip->bytes, bio->bi_status); in btrfs_dio_end_io()
7732 btrfs_finish_ordered_extent(bbio->ordered, NULL, in btrfs_dio_end_io()
7733 dip->file_offset, dip->bytes, in btrfs_dio_end_io()
7734 !bio->bi_status); in btrfs_dio_end_io()
7736 unlock_extent(&inode->io_tree, dip->file_offset, in btrfs_dio_end_io()
7737 dip->file_offset + dip->bytes - 1, NULL); in btrfs_dio_end_io()
7740 bbio->bio.bi_private = bbio->private; in btrfs_dio_end_io()
7750 struct btrfs_dio_data *dio_data = iter->private; in btrfs_dio_submit_io()
7752 btrfs_bio_init(bbio, BTRFS_I(iter->inode)->root->fs_info, in btrfs_dio_submit_io()
7753 btrfs_dio_end_io, bio->bi_private); in btrfs_dio_submit_io()
7754 bbio->inode = BTRFS_I(iter->inode); in btrfs_dio_submit_io()
7755 bbio->file_offset = file_offset; in btrfs_dio_submit_io()
7757 dip->file_offset = file_offset; in btrfs_dio_submit_io()
7758 dip->bytes = bio->bi_iter.bi_size; in btrfs_dio_submit_io()
7760 dio_data->submitted += bio->bi_iter.bi_size; in btrfs_dio_submit_io()
7769 if (iter->flags & IOMAP_WRITE) { in btrfs_dio_submit_io()
7772 ret = btrfs_extract_ordered_extent(bbio, dio_data->ordered); in btrfs_dio_submit_io()
7774 btrfs_finish_ordered_extent(dio_data->ordered, NULL, in btrfs_dio_submit_io()
7775 file_offset, dip->bytes, in btrfs_dio_submit_io()
7777 bio->bi_status = errno_to_blk_status(ret); in btrfs_dio_submit_io()
7835 if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) { in btrfs_fiemap()
7858 * If we continue to release/invalidate the page, we could cause use-after-free
7864 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb); in wait_subpage_spinlock()
7870 ASSERT(PagePrivate(page) && page->private); in wait_subpage_spinlock()
7871 subpage = (struct btrfs_subpage *)page->private; in wait_subpage_spinlock()
7884 spin_lock_irq(&subpage->lock); in wait_subpage_spinlock()
7885 spin_unlock_irq(&subpage->lock); in wait_subpage_spinlock()
7890 int ret = try_release_extent_mapping(&folio->page, gfp_flags); in __btrfs_release_folio()
7893 wait_subpage_spinlock(&folio->page); in __btrfs_release_folio()
7894 clear_page_extent_mapped(&folio->page); in __btrfs_release_folio()
7930 struct btrfs_inode *inode = BTRFS_I(folio->mapping->host); in btrfs_invalidate_folio()
7931 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_invalidate_folio()
7932 struct extent_io_tree *tree = &inode->io_tree; in btrfs_invalidate_folio()
7935 u64 page_end = page_start + folio_size(folio) - 1; in btrfs_invalidate_folio()
7937 int inode_evicting = inode->vfs_inode.i_state & I_FREEING; in btrfs_invalidate_folio()
7953 wait_subpage_spinlock(&folio->page); in btrfs_invalidate_folio()
7960 * shouldn't clear page extent mapped, as folio->private can still in btrfs_invalidate_folio()
7983 page_end + 1 - cur); in btrfs_invalidate_folio()
7993 if (ordered->file_offset > cur) { in btrfs_invalidate_folio()
7995 * There is a range between [cur, oe->file_offset) not in btrfs_invalidate_folio()
8000 range_end = ordered->file_offset - 1; in btrfs_invalidate_folio()
8005 range_end = min(ordered->file_offset + ordered->num_bytes - 1, in btrfs_invalidate_folio()
8007 ASSERT(range_end + 1 - cur < U32_MAX); in btrfs_invalidate_folio()
8008 range_len = range_end + 1 - cur; in btrfs_invalidate_folio()
8009 if (!btrfs_page_test_ordered(fs_info, &folio->page, cur, range_len)) { in btrfs_invalidate_folio()
8018 btrfs_page_clear_ordered(fs_info, &folio->page, cur, range_len); in btrfs_invalidate_folio()
8034 spin_lock_irq(&inode->ordered_tree.lock); in btrfs_invalidate_folio()
8035 set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags); in btrfs_invalidate_folio()
8036 ordered->truncated_len = min(ordered->truncated_len, in btrfs_invalidate_folio()
8037 cur - ordered->file_offset); in btrfs_invalidate_folio()
8038 spin_unlock_irq(&inode->ordered_tree.lock); in btrfs_invalidate_folio()
8047 cur, range_end + 1 - cur)) { in btrfs_invalidate_folio()
8073 btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL); in btrfs_invalidate_folio()
8088 btrfs_page_clear_checked(fs_info, &folio->page, folio_pos(folio), folio_size(folio)); in btrfs_invalidate_folio()
8091 clear_page_extent_mapped(&folio->page); in btrfs_invalidate_folio()
8111 struct page *page = vmf->page; in btrfs_page_mkwrite()
8112 struct inode *inode = file_inode(vmf->vma->vm_file); in btrfs_page_mkwrite()
8113 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_page_mkwrite()
8114 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in btrfs_page_mkwrite()
8130 sb_start_pagefault(inode->i_sb); in btrfs_page_mkwrite()
8132 page_end = page_start + PAGE_SIZE - 1; in btrfs_page_mkwrite()
8146 ret2 = file_update_time(vmf->vma->vm_file); in btrfs_page_mkwrite()
8158 down_read(&BTRFS_I(inode)->i_mmap_lock); in btrfs_page_mkwrite()
8162 if ((page->mapping != inode->i_mapping) || in btrfs_page_mkwrite()
8186 up_read(&BTRFS_I(inode)->i_mmap_lock); in btrfs_page_mkwrite()
8192 if (page->index == ((size - 1) >> PAGE_SHIFT)) { in btrfs_page_mkwrite()
8193 reserved_space = round_up(size - page_start, in btrfs_page_mkwrite()
8194 fs_info->sectorsize); in btrfs_page_mkwrite()
8196 end = page_start + reserved_space - 1; in btrfs_page_mkwrite()
8199 PAGE_SIZE - reserved_space, true); in btrfs_page_mkwrite()
8210 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end, in btrfs_page_mkwrite()
8229 memzero_page(page, zero_start, PAGE_SIZE - zero_start); in btrfs_page_mkwrite()
8232 btrfs_page_set_dirty(fs_info, page, page_start, end + 1 - page_start); in btrfs_page_mkwrite()
8233 btrfs_page_set_uptodate(fs_info, page, page_start, end + 1 - page_start); in btrfs_page_mkwrite()
8238 up_read(&BTRFS_I(inode)->i_mmap_lock); in btrfs_page_mkwrite()
8241 sb_end_pagefault(inode->i_sb); in btrfs_page_mkwrite()
8247 up_read(&BTRFS_I(inode)->i_mmap_lock); in btrfs_page_mkwrite()
8253 sb_end_pagefault(inode->i_sb); in btrfs_page_mkwrite()
8266 struct btrfs_root *root = inode->root; in btrfs_truncate()
8267 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_truncate()
8271 u64 mask = fs_info->sectorsize - 1; in btrfs_truncate()
8275 ret = btrfs_wait_ordered_range(&inode->vfs_inode, in btrfs_truncate()
8276 inode->vfs_inode.i_size & (~mask), in btrfs_truncate()
8277 (u64)-1); in btrfs_truncate()
8305 * 1) rsv - for the truncate reservation, which we will steal from the in btrfs_truncate()
8307 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for in btrfs_truncate()
8312 return -ENOMEM; in btrfs_truncate()
8313 rsv->size = min_size; in btrfs_truncate()
8314 rsv->failfast = true; in btrfs_truncate()
8327 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv, in btrfs_truncate()
8339 trans->block_rsv = rsv; in btrfs_truncate()
8343 const u64 new_size = inode->vfs_inode.i_size; in btrfs_truncate()
8344 const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize); in btrfs_truncate()
8347 lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state); in btrfs_truncate()
8354 ALIGN(new_size, fs_info->sectorsize), in btrfs_truncate()
8355 (u64)-1, false); in btrfs_truncate()
8359 inode_sub_bytes(&inode->vfs_inode, control.sub_bytes); in btrfs_truncate()
8362 unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state); in btrfs_truncate()
8364 trans->block_rsv = &fs_info->trans_block_rsv; in btrfs_truncate()
8365 if (ret != -ENOSPC && ret != -EAGAIN) in btrfs_truncate()
8382 btrfs_block_rsv_release(fs_info, rsv, -1, NULL); in btrfs_truncate()
8383 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, in btrfs_truncate()
8393 trans->block_rsv = rsv; in btrfs_truncate()
8406 ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size, 0, 0); in btrfs_truncate()
8420 trans->block_rsv = &fs_info->trans_block_rsv; in btrfs_truncate()
8440 * fsync to truncate all the inode's items from the log and re-log them in btrfs_truncate()
8457 inode = new_inode(dir->i_sb); in btrfs_new_subvol_inode()
8465 inode->i_op = &btrfs_dir_inode_operations; in btrfs_new_subvol_inode()
8466 inode->i_fop = &btrfs_dir_file_operations; in btrfs_new_subvol_inode()
8481 ei->root = NULL; in btrfs_alloc_inode()
8482 ei->generation = 0; in btrfs_alloc_inode()
8483 ei->last_trans = 0; in btrfs_alloc_inode()
8484 ei->last_sub_trans = 0; in btrfs_alloc_inode()
8485 ei->logged_trans = 0; in btrfs_alloc_inode()
8486 ei->delalloc_bytes = 0; in btrfs_alloc_inode()
8487 ei->new_delalloc_bytes = 0; in btrfs_alloc_inode()
8488 ei->defrag_bytes = 0; in btrfs_alloc_inode()
8489 ei->disk_i_size = 0; in btrfs_alloc_inode()
8490 ei->flags = 0; in btrfs_alloc_inode()
8491 ei->ro_flags = 0; in btrfs_alloc_inode()
8492 ei->csum_bytes = 0; in btrfs_alloc_inode()
8493 ei->index_cnt = (u64)-1; in btrfs_alloc_inode()
8494 ei->dir_index = 0; in btrfs_alloc_inode()
8495 ei->last_unlink_trans = 0; in btrfs_alloc_inode()
8496 ei->last_reflink_trans = 0; in btrfs_alloc_inode()
8497 ei->last_log_commit = 0; in btrfs_alloc_inode()
8499 spin_lock_init(&ei->lock); in btrfs_alloc_inode()
8500 ei->outstanding_extents = 0; in btrfs_alloc_inode()
8501 if (sb->s_magic != BTRFS_TEST_MAGIC) in btrfs_alloc_inode()
8502 btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv, in btrfs_alloc_inode()
8504 ei->runtime_flags = 0; in btrfs_alloc_inode()
8505 ei->prop_compress = BTRFS_COMPRESS_NONE; in btrfs_alloc_inode()
8506 ei->defrag_compress = BTRFS_COMPRESS_NONE; in btrfs_alloc_inode()
8508 ei->delayed_node = NULL; in btrfs_alloc_inode()
8510 ei->i_otime.tv_sec = 0; in btrfs_alloc_inode()
8511 ei->i_otime.tv_nsec = 0; in btrfs_alloc_inode()
8513 inode = &ei->vfs_inode; in btrfs_alloc_inode()
8514 extent_map_tree_init(&ei->extent_tree); in btrfs_alloc_inode()
8515 extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO); in btrfs_alloc_inode()
8516 ei->io_tree.inode = ei; in btrfs_alloc_inode()
8517 extent_io_tree_init(fs_info, &ei->file_extent_tree, in btrfs_alloc_inode()
8519 mutex_init(&ei->log_mutex); in btrfs_alloc_inode()
8520 btrfs_ordered_inode_tree_init(&ei->ordered_tree); in btrfs_alloc_inode()
8521 INIT_LIST_HEAD(&ei->delalloc_inodes); in btrfs_alloc_inode()
8522 INIT_LIST_HEAD(&ei->delayed_iput); in btrfs_alloc_inode()
8523 RB_CLEAR_NODE(&ei->rb_node); in btrfs_alloc_inode()
8524 init_rwsem(&ei->i_mmap_lock); in btrfs_alloc_inode()
8532 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false); in btrfs_test_destroy_inode()
8546 struct btrfs_root *root = inode->root; in btrfs_destroy_inode()
8549 WARN_ON(!hlist_empty(&vfs_inode->i_dentry)); in btrfs_destroy_inode()
8550 WARN_ON(vfs_inode->i_data.nrpages); in btrfs_destroy_inode()
8551 WARN_ON(inode->block_rsv.reserved); in btrfs_destroy_inode()
8552 WARN_ON(inode->block_rsv.size); in btrfs_destroy_inode()
8553 WARN_ON(inode->outstanding_extents); in btrfs_destroy_inode()
8554 if (!S_ISDIR(vfs_inode->i_mode)) { in btrfs_destroy_inode()
8555 WARN_ON(inode->delalloc_bytes); in btrfs_destroy_inode()
8556 WARN_ON(inode->new_delalloc_bytes); in btrfs_destroy_inode()
8558 WARN_ON(inode->csum_bytes); in btrfs_destroy_inode()
8559 WARN_ON(inode->defrag_bytes); in btrfs_destroy_inode()
8576 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); in btrfs_destroy_inode()
8580 btrfs_err(root->fs_info, in btrfs_destroy_inode()
8582 ordered->file_offset, ordered->num_bytes); in btrfs_destroy_inode()
8585 btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent); in btrfs_destroy_inode()
8594 btrfs_drop_extent_map_range(inode, 0, (u64)-1, false); in btrfs_destroy_inode()
8595 btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1); in btrfs_destroy_inode()
8596 btrfs_put_root(inode->root); in btrfs_destroy_inode()
8601 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_drop_inode()
8607 if (btrfs_root_refs(&root->root_item) == 0) in btrfs_drop_inode()
8617 inode_init_once(&ei->vfs_inode); in init_once()
8648 return -ENOMEM; in btrfs_init_cachep()
8657 struct inode *inode = d_inode(path->dentry); in btrfs_getattr()
8658 u32 blocksize = inode->i_sb->s_blocksize; in btrfs_getattr()
8659 u32 bi_flags = BTRFS_I(inode)->flags; in btrfs_getattr()
8660 u32 bi_ro_flags = BTRFS_I(inode)->ro_flags; in btrfs_getattr()
8662 stat->result_mask |= STATX_BTIME; in btrfs_getattr()
8663 stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec; in btrfs_getattr()
8664 stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec; in btrfs_getattr()
8666 stat->attributes |= STATX_ATTR_APPEND; in btrfs_getattr()
8668 stat->attributes |= STATX_ATTR_COMPRESSED; in btrfs_getattr()
8670 stat->attributes |= STATX_ATTR_IMMUTABLE; in btrfs_getattr()
8672 stat->attributes |= STATX_ATTR_NODUMP; in btrfs_getattr()
8674 stat->attributes |= STATX_ATTR_VERITY; in btrfs_getattr()
8676 stat->attributes_mask |= (STATX_ATTR_APPEND | in btrfs_getattr()
8682 stat->dev = BTRFS_I(inode)->root->anon_dev; in btrfs_getattr()
8684 spin_lock(&BTRFS_I(inode)->lock); in btrfs_getattr()
8685 delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes; in btrfs_getattr()
8687 spin_unlock(&BTRFS_I(inode)->lock); in btrfs_getattr()
8688 stat->blocks = (ALIGN(inode_bytes, blocksize) + in btrfs_getattr()
8698 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); in btrfs_rename_exchange()
8701 struct btrfs_root *root = BTRFS_I(old_dir)->root; in btrfs_rename_exchange()
8702 struct btrfs_root *dest = BTRFS_I(new_dir)->root; in btrfs_rename_exchange()
8703 struct inode *new_inode = new_dentry->d_inode; in btrfs_rename_exchange()
8704 struct inode *old_inode = old_dentry->d_inode; in btrfs_rename_exchange()
8718 * For non-subvolumes allow exchange only within one subvolume, in the in btrfs_rename_exchange()
8725 return -EXDEV; in btrfs_rename_exchange()
8727 ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname); in btrfs_rename_exchange()
8731 ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname); in btrfs_rename_exchange()
8743 down_read(&fs_info->subvol_sem); in btrfs_rename_exchange()
8799 BTRFS_I(old_inode)->dir_index = 0ULL; in btrfs_rename_exchange()
8800 BTRFS_I(new_inode)->dir_index = 0ULL; in btrfs_rename_exchange()
8837 if (old_dentry->d_parent != new_dentry->d_parent) { in btrfs_rename_exchange()
8849 BTRFS_I(old_dentry->d_inode), in btrfs_rename_exchange()
8864 BTRFS_I(new_dentry->d_inode), in btrfs_rename_exchange()
8888 if (old_inode->i_nlink == 1) in btrfs_rename_exchange()
8889 BTRFS_I(old_inode)->dir_index = old_idx; in btrfs_rename_exchange()
8890 if (new_inode->i_nlink == 1) in btrfs_rename_exchange()
8891 BTRFS_I(new_inode)->dir_index = new_idx; in btrfs_rename_exchange()
8907 old_rename_ctx.index, new_dentry->d_parent); in btrfs_rename_exchange()
8910 new_rename_ctx.index, old_dentry->d_parent); in btrfs_rename_exchange()
8923 up_read(&fs_info->subvol_sem); in btrfs_rename_exchange()
8935 inode = new_inode(dir->i_sb); in new_whiteout_inode()
8939 inode->i_op = &btrfs_special_inode_operations; in new_whiteout_inode()
8940 init_special_inode(inode, inode->i_mode, WHITEOUT_DEV); in new_whiteout_inode()
8950 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); in btrfs_rename()
8957 struct btrfs_root *root = BTRFS_I(old_dir)->root; in btrfs_rename()
8958 struct btrfs_root *dest = BTRFS_I(new_dir)->root; in btrfs_rename()
8969 return -EPERM; in btrfs_rename()
8973 return -EXDEV; in btrfs_rename()
8977 return -ENOTEMPTY; in btrfs_rename()
8979 if (S_ISDIR(old_inode->i_mode) && new_inode && in btrfs_rename()
8980 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) in btrfs_rename()
8981 return -ENOTEMPTY; in btrfs_rename()
8983 ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname); in btrfs_rename()
8987 ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname); in btrfs_rename()
8994 ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name); in btrfs_rename()
8996 if (ret == -EEXIST) { in btrfs_rename()
9003 /* maybe -EOVERFLOW */ in btrfs_rename()
9013 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size) in btrfs_rename()
9014 filemap_flush(old_inode->i_mapping); in btrfs_rename()
9019 ret = -ENOMEM; in btrfs_rename()
9032 down_read(&fs_info->subvol_sem); in btrfs_rename()
9084 BTRFS_I(old_inode)->dir_index = 0ULL; in btrfs_rename()
9101 if (old_dentry->d_parent != new_dentry->d_parent) in btrfs_rename()
9124 BUG_ON(new_inode->i_nlink == 0); in btrfs_rename()
9130 if (!ret && new_inode->i_nlink == 0) in btrfs_rename()
9146 if (old_inode->i_nlink == 1) in btrfs_rename()
9147 BTRFS_I(old_inode)->dir_index = index; in btrfs_rename()
9151 rename_ctx.index, new_dentry->d_parent); in btrfs_rename()
9169 up_read(&fs_info->subvol_sem); in btrfs_rename()
9188 return -EINVAL; in btrfs_rename2()
9197 btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info); in btrfs_rename2()
9216 inode = delalloc_work->inode; in btrfs_run_delalloc_work()
9217 filemap_flush(inode->i_mapping); in btrfs_run_delalloc_work()
9219 &BTRFS_I(inode)->runtime_flags)) in btrfs_run_delalloc_work()
9220 filemap_flush(inode->i_mapping); in btrfs_run_delalloc_work()
9223 complete(&delalloc_work->completion); in btrfs_run_delalloc_work()
9234 init_completion(&work->completion); in btrfs_alloc_delalloc_work()
9235 INIT_LIST_HEAD(&work->list); in btrfs_alloc_delalloc_work()
9236 work->inode = inode; in btrfs_alloc_delalloc_work()
9237 btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL); in btrfs_alloc_delalloc_work()
9256 bool full_flush = wbc->nr_to_write == LONG_MAX; in start_delalloc_inodes()
9258 mutex_lock(&root->delalloc_mutex); in start_delalloc_inodes()
9259 spin_lock(&root->delalloc_lock); in start_delalloc_inodes()
9260 list_splice_init(&root->delalloc_inodes, &splice); in start_delalloc_inodes()
9265 list_move_tail(&binode->delalloc_inodes, in start_delalloc_inodes()
9266 &root->delalloc_inodes); in start_delalloc_inodes()
9269 test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags)) in start_delalloc_inodes()
9272 inode = igrab(&binode->vfs_inode); in start_delalloc_inodes()
9274 cond_resched_lock(&root->delalloc_lock); in start_delalloc_inodes()
9277 spin_unlock(&root->delalloc_lock); in start_delalloc_inodes()
9281 &binode->runtime_flags); in start_delalloc_inodes()
9286 ret = -ENOMEM; in start_delalloc_inodes()
9289 list_add_tail(&work->list, &works); in start_delalloc_inodes()
9290 btrfs_queue_work(root->fs_info->flush_workers, in start_delalloc_inodes()
9291 &work->work); in start_delalloc_inodes()
9293 ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc); in start_delalloc_inodes()
9295 if (ret || wbc->nr_to_write <= 0) in start_delalloc_inodes()
9299 spin_lock(&root->delalloc_lock); in start_delalloc_inodes()
9301 spin_unlock(&root->delalloc_lock); in start_delalloc_inodes()
9305 list_del_init(&work->list); in start_delalloc_inodes()
9306 wait_for_completion(&work->completion); in start_delalloc_inodes()
9311 spin_lock(&root->delalloc_lock); in start_delalloc_inodes()
9312 list_splice_tail(&splice, &root->delalloc_inodes); in start_delalloc_inodes()
9313 spin_unlock(&root->delalloc_lock); in start_delalloc_inodes()
9315 mutex_unlock(&root->delalloc_mutex); in start_delalloc_inodes()
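The start_delalloc_inodes() fragments above follow a common kernel idiom: splice the shared per-root delalloc list onto a private list while holding the spinlock, hand each inode to the flush workers (or write it back directly for a targeted flush) with the lock dropped, and splice any unprocessed entries back if something fails. The userspace sketch below shows only the splice-and-drain shape of that idiom; the node type, the mutex and the flush_one() helper are stand-ins invented for the example.

/*
 * Illustrative userspace sketch, not the kernel code: move the whole
 * shared list to a private head under the lock, then do the slow work
 * with the lock dropped so producers are never blocked behind it.
 */
#include <pthread.h>
#include <stdio.h>

struct node {
        int id;
        struct node *next;
};

static pthread_mutex_t delalloc_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *delalloc_list;      /* shared, protected by the lock */

static void flush_one(struct node *n)
{
        /* stands in for queueing the per-inode writeback work */
        printf("flushing inode %d\n", n->id);
}

static void flush_all(void)
{
        struct node *splice;

        /* Take the whole list in one step while holding the lock. */
        pthread_mutex_lock(&delalloc_lock);
        splice = delalloc_list;
        delalloc_list = NULL;
        pthread_mutex_unlock(&delalloc_lock);

        /* Walk the private copy unlocked; new inodes can keep arriving. */
        while (splice) {
                struct node *n = splice;

                splice = n->next;
                flush_one(n);
        }
}

int main(void)
{
        static struct node nodes[3] = { { .id = 1 }, { .id = 2 }, { .id = 3 } };

        nodes[0].next = &nodes[1];
        nodes[1].next = &nodes[2];
        delalloc_list = &nodes[0];

        flush_all();
        return 0;
}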
9327 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_start_delalloc_snapshot()
9330 return -EROFS; in btrfs_start_delalloc_snapshot()
9349 return -EROFS; in btrfs_start_delalloc_roots()
9351 mutex_lock(&fs_info->delalloc_root_mutex); in btrfs_start_delalloc_roots()
9352 spin_lock(&fs_info->delalloc_root_lock); in btrfs_start_delalloc_roots()
9353 list_splice_init(&fs_info->delalloc_roots, &splice); in btrfs_start_delalloc_roots()
9366 list_move_tail(&root->delalloc_root, in btrfs_start_delalloc_roots()
9367 &fs_info->delalloc_roots); in btrfs_start_delalloc_roots()
9368 spin_unlock(&fs_info->delalloc_root_lock); in btrfs_start_delalloc_roots()
9374 spin_lock(&fs_info->delalloc_root_lock); in btrfs_start_delalloc_roots()
9376 spin_unlock(&fs_info->delalloc_root_lock); in btrfs_start_delalloc_roots()
9381 spin_lock(&fs_info->delalloc_root_lock); in btrfs_start_delalloc_roots()
9382 list_splice_tail(&splice, &fs_info->delalloc_roots); in btrfs_start_delalloc_roots()
9383 spin_unlock(&fs_info->delalloc_root_lock); in btrfs_start_delalloc_roots()
9385 mutex_unlock(&fs_info->delalloc_root_mutex); in btrfs_start_delalloc_roots()
9392 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); in btrfs_symlink()
9394 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_symlink()
9412 return -ENAMETOOLONG; in btrfs_symlink()
9414 inode = new_inode(dir->i_sb); in btrfs_symlink()
9416 return -ENOMEM; in btrfs_symlink()
9418 inode->i_op = &btrfs_symlink_inode_operations; in btrfs_symlink()
9420 inode->i_mapping->a_ops = &btrfs_aops; in btrfs_symlink()
9443 err = -ENOMEM; in btrfs_symlink()
9462 leaf = path->nodes[0]; in btrfs_symlink()
9463 ei = btrfs_item_ptr(leaf, path->slots[0], in btrfs_symlink()
9465 btrfs_set_file_extent_generation(leaf, ei, trans->transid); in btrfs_symlink()
9501 u64 start = ins->objectid; in insert_prealloc_file_extent()
9502 u64 len = ins->offset; in insert_prealloc_file_extent()
9542 ret = -ENOMEM; in insert_prealloc_file_extent()
9547 file_offset + len - 1, &extent_info, in insert_prealloc_file_extent()
9562 btrfs_qgroup_free_refroot(inode->root->fs_info, in insert_prealloc_file_extent()
9563 inode->root->root_key.objectid, qgroup_released, in insert_prealloc_file_extent()
9573 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in __btrfs_prealloc_file_range()
9575 struct btrfs_root *root = BTRFS_I(inode)->root; in __btrfs_prealloc_file_range()
9581 u64 last_alloc = (u64)-1; in __btrfs_prealloc_file_range()
9584 u64 end = start + num_bytes - 1; in __btrfs_prealloc_file_range()
9605 * ->bytes_may_use to ->bytes_reserved. Any error that happens in __btrfs_prealloc_file_range()
9632 cur_offset + ins.offset - 1, false); in __btrfs_prealloc_file_range()
9637 em->start = cur_offset; in __btrfs_prealloc_file_range()
9638 em->orig_start = cur_offset; in __btrfs_prealloc_file_range()
9639 em->len = ins.offset; in __btrfs_prealloc_file_range()
9640 em->block_start = ins.objectid; in __btrfs_prealloc_file_range()
9641 em->block_len = ins.offset; in __btrfs_prealloc_file_range()
9642 em->orig_block_len = ins.offset; in __btrfs_prealloc_file_range()
9643 em->ram_bytes = ins.offset; in __btrfs_prealloc_file_range()
9644 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); in __btrfs_prealloc_file_range()
9645 em->generation = trans->transid; in __btrfs_prealloc_file_range()
9650 num_bytes -= ins.offset; in __btrfs_prealloc_file_range()
9656 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; in __btrfs_prealloc_file_range()
9658 (actual_len > inode->i_size) && in __btrfs_prealloc_file_range()
9659 (cur_offset > inode->i_size)) { in __btrfs_prealloc_file_range()
9684 end - clear_offset + 1); in __btrfs_prealloc_file_range()
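The __btrfs_prealloc_file_range() fragments above come from a loop that reserves one extent at a time, fills in an extent map for it, subtracts the reserved size from the remaining byte count (num_bytes -= ins.offset) and advances cur_offset until the whole range is covered; the last_alloc variable visible near the top suggests later requests are capped by the size of the previous successful reservation. The sketch below is an illustrative userspace loop with the same overall shape; the 256M per-iteration cap, the alloc_chunk() helper and its behaviour are assumptions made for the example, not the kernel's allocator.

/*
 * Illustrative userspace sketch, not the kernel code: chunked
 * preallocation where each retry never asks for more than the last
 * successful allocation returned.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_CHUNK (256ULL * 1024 * 1024)        /* assumed per-pass cap */

/* pretend allocator: can never hand out more than 96M at once */
static uint64_t alloc_chunk(uint64_t want)
{
        const uint64_t limit = 96ULL * 1024 * 1024;

        return want > limit ? limit : want;
}

int main(void)
{
        uint64_t num_bytes = 300ULL * 1024 * 1024;      /* range to cover */
        uint64_t cur_offset = 0;
        uint64_t last_alloc = UINT64_MAX;               /* no cap yet */

        while (num_bytes > 0) {
                uint64_t want = num_bytes < MAX_CHUNK ? num_bytes : MAX_CHUNK;
                uint64_t got;

                if (want > last_alloc)
                        want = last_alloc;
                got = alloc_chunk(want);
                last_alloc = got;

                printf("extent at %llu, %llu bytes\n",
                       (unsigned long long)cur_offset,
                       (unsigned long long)got);
                cur_offset += got;
                num_bytes -= got;
        }
        return 0;
}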
9709 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_permission()
9710 umode_t mode = inode->i_mode; in btrfs_permission()
9715 return -EROFS; in btrfs_permission()
9716 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) in btrfs_permission()
9717 return -EACCES; in btrfs_permission()
9725 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); in btrfs_tmpfile()
9727 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_tmpfile()
9731 .dentry = file->f_path.dentry, in btrfs_tmpfile()
9737 inode = new_inode(dir->i_sb); in btrfs_tmpfile()
9739 return -ENOMEM; in btrfs_tmpfile()
9741 inode->i_fop = &btrfs_file_operations; in btrfs_tmpfile()
9742 inode->i_op = &btrfs_file_inode_operations; in btrfs_tmpfile()
9743 inode->i_mapping->a_ops = &btrfs_aops; in btrfs_tmpfile()
9763 * d_tmpfile() -> inode_dec_link_count() -> drop_nlink() in btrfs_tmpfile()
9785 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_set_range_writeback()
9791 ASSERT(end + 1 - start <= U32_MAX); in btrfs_set_range_writeback()
9792 len = end + 1 - start; in btrfs_set_range_writeback()
9794 page = find_get_page(inode->vfs_inode.i_mapping, index); in btrfs_set_range_writeback()
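The btrfs_set_range_writeback() fragments above turn an inclusive byte range into page indices and look each page up in the mapping to flag it. A quick worked example of that index arithmetic, assuming 4K pages; the numbers are invented for the illustration.

/*
 * Worked example, userspace only: which page indices the inclusive byte
 * range [start, end] touches, and the length passed along with it.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* assumed 4K pages */

int main(void)
{
        uint64_t start = 5000, end = 13000;     /* inclusive byte range */
        uint64_t index = start >> PAGE_SHIFT;
        uint64_t end_index = end >> PAGE_SHIFT;

        printf("len = %llu bytes\n", (unsigned long long)(end + 1 - start));
        while (index <= end_index) {
                printf("touch page index %llu\n", (unsigned long long)index);
                index++;
        }
        return 0;
}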
9816 if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K) in btrfs_encoded_io_compression_from_extent()
9817 return -EINVAL; in btrfs_encoded_io_compression_from_extent()
9819 (fs_info->sectorsize_bits - 12); in btrfs_encoded_io_compression_from_extent()
9823 return -EUCLEAN; in btrfs_encoded_io_compression_from_extent()
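The fragments above belong to the helper that maps an on-disk compression type to an encoded-I/O compression constant. For LZO the constant depends on the sector size, which is why the code rejects sector sizes outside 4K..64K and then computes sectorsize_bits - 12: 4K..64K sectors map to offsets 0..4 from the 4K variant. The bounds come straight from the fragment; the LZO_4K..LZO_64K naming is taken from the btrfs UAPI header and is not shown in this listing. A small worked example of just that arithmetic:

/*
 * Worked example, userspace only: sector size to LZO encoded-I/O
 * variant offset, i.e. log2(sectorsize) - 12.
 */
#include <stdio.h>

int main(void)
{
        for (unsigned int bits = 12; bits <= 16; bits++)
                printf("sectorsize %6u -> LZO variant offset %u\n",
                       1u << bits, bits - 12);
        return 0;
}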
9836 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); in btrfs_encoded_read_inline()
9837 struct btrfs_root *root = inode->root; in btrfs_encoded_read_inline()
9838 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_encoded_read_inline()
9839 struct extent_io_tree *io_tree = &inode->io_tree; in btrfs_encoded_read_inline()
9850 ret = -ENOMEM; in btrfs_encoded_read_inline()
9858 ret = -EIO; in btrfs_encoded_read_inline()
9862 leaf = path->nodes[0]; in btrfs_encoded_read_inline()
9863 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); in btrfs_encoded_read_inline()
9868 encoded->len = min_t(u64, extent_start + ram_bytes, in btrfs_encoded_read_inline()
9869 inode->vfs_inode.i_size) - iocb->ki_pos; in btrfs_encoded_read_inline()
9874 encoded->compression = ret; in btrfs_encoded_read_inline()
9875 if (encoded->compression) { in btrfs_encoded_read_inline()
9879 path->slots[0]); in btrfs_encoded_read_inline()
9881 ret = -ENOBUFS; in btrfs_encoded_read_inline()
9885 encoded->unencoded_len = ram_bytes; in btrfs_encoded_read_inline()
9886 encoded->unencoded_offset = iocb->ki_pos - extent_start; in btrfs_encoded_read_inline()
9888 count = min_t(u64, count, encoded->len); in btrfs_encoded_read_inline()
9889 encoded->len = count; in btrfs_encoded_read_inline()
9890 encoded->unencoded_len = count; in btrfs_encoded_read_inline()
9891 ptr += iocb->ki_pos - extent_start; in btrfs_encoded_read_inline()
9896 ret = -ENOMEM; in btrfs_encoded_read_inline()
9907 ret = -EFAULT; in btrfs_encoded_read_inline()
9922 struct btrfs_encoded_read_private *priv = bbio->private; in btrfs_encoded_read_endio()
9924 if (bbio->bio.bi_status) { in btrfs_encoded_read_endio()
9933 WRITE_ONCE(priv->status, bbio->bio.bi_status); in btrfs_encoded_read_endio()
9935 if (!atomic_dec_return(&priv->pending)) in btrfs_encoded_read_endio()
9936 wake_up(&priv->wait); in btrfs_encoded_read_endio()
9937 bio_put(&bbio->bio); in btrfs_encoded_read_endio()
9944 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_encoded_read_regular_fill_pages()
9955 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; in btrfs_encoded_read_regular_fill_pages()
9956 bbio->inode = inode; in btrfs_encoded_read_regular_fill_pages()
9961 if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) { in btrfs_encoded_read_regular_fill_pages()
9967 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; in btrfs_encoded_read_regular_fill_pages()
9968 bbio->inode = inode; in btrfs_encoded_read_regular_fill_pages()
9974 disk_io_size -= bytes; in btrfs_encoded_read_regular_fill_pages()
9994 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); in btrfs_encoded_read_regular()
9995 struct extent_io_tree *io_tree = &inode->io_tree; in btrfs_encoded_read_regular()
10005 return -ENOMEM; in btrfs_encoded_read_regular()
10008 ret = -ENOMEM; in btrfs_encoded_read_regular()
10025 i = (iocb->ki_pos - start) >> PAGE_SHIFT; in btrfs_encoded_read_regular()
10026 page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1); in btrfs_encoded_read_regular()
10030 size_t bytes = min_t(size_t, count - cur, in btrfs_encoded_read_regular()
10031 PAGE_SIZE - page_offset); in btrfs_encoded_read_regular()
10035 ret = -EFAULT; in btrfs_encoded_read_regular()
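The btrfs_encoded_read_regular() fragments above copy the caller's bytes out of an array of freshly filled pages, starting part-way into the first page: the page index and offset are derived from (ki_pos - start), and each chunk is limited by both the bytes left and the space remaining in the current page. The sketch below replays that arithmetic in userspace on invented numbers, with 4K pages assumed.

/*
 * Worked example, userspace only: chunking a copy that begins in the
 * middle of the first page of a page array.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define PAGE_SHIFT 12

int main(void)
{
        uint64_t start = 4096;          /* sector-aligned start of the read */
        uint64_t ki_pos = 5000;         /* where the caller wants data from */
        size_t count = 10000;           /* bytes to hand back */

        size_t i = (ki_pos - start) >> PAGE_SHIFT;
        size_t page_offset = (ki_pos - start) & (PAGE_SIZE - 1);
        size_t cur = 0;

        while (cur < count) {
                size_t room = PAGE_SIZE - page_offset;
                size_t bytes = count - cur < room ? count - cur : room;

                printf("page %zu: copy %zu bytes from offset %zu\n",
                       i, bytes, page_offset);
                cur += bytes;
                page_offset = 0;        /* later pages start at offset 0 */
                i++;
        }
        return 0;
}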
10055 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); in btrfs_encoded_read()
10056 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_encoded_read()
10057 struct extent_io_tree *io_tree = &inode->io_tree; in btrfs_encoded_read()
10065 file_accessed(iocb->ki_filp); in btrfs_encoded_read()
10069 if (iocb->ki_pos >= inode->vfs_inode.i_size) { in btrfs_encoded_read()
10073 start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize); in btrfs_encoded_read()
10075 * We don't know how long the extent containing iocb->ki_pos is, but if in btrfs_encoded_read()
10078 lockend = start + BTRFS_MAX_UNCOMPRESSED - 1; in btrfs_encoded_read()
10083 ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, in btrfs_encoded_read()
10084 lockend - start + 1); in btrfs_encoded_read()
10089 lockend - start + 1); in btrfs_encoded_read()
10097 em = btrfs_get_extent(inode, NULL, 0, start, lockend - start + 1); in btrfs_encoded_read()
10103 if (em->block_start == EXTENT_MAP_INLINE) { in btrfs_encoded_read()
10104 u64 extent_start = em->start; in btrfs_encoded_read()
10122 encoded->len = min_t(u64, extent_map_end(em), in btrfs_encoded_read()
10123 inode->vfs_inode.i_size) - iocb->ki_pos; in btrfs_encoded_read()
10124 if (em->block_start == EXTENT_MAP_HOLE || in btrfs_encoded_read()
10125 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { in btrfs_encoded_read()
10127 count = min_t(u64, count, encoded->len); in btrfs_encoded_read()
10128 encoded->len = count; in btrfs_encoded_read()
10129 encoded->unencoded_len = count; in btrfs_encoded_read()
10130 } else if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { in btrfs_encoded_read()
10131 disk_bytenr = em->block_start; in btrfs_encoded_read()
10136 if (em->block_len > count) { in btrfs_encoded_read()
10137 ret = -ENOBUFS; in btrfs_encoded_read()
10140 disk_io_size = em->block_len; in btrfs_encoded_read()
10141 count = em->block_len; in btrfs_encoded_read()
10142 encoded->unencoded_len = em->ram_bytes; in btrfs_encoded_read()
10143 encoded->unencoded_offset = iocb->ki_pos - em->orig_start; in btrfs_encoded_read()
10145 em->compress_type); in btrfs_encoded_read()
10148 encoded->compression = ret; in btrfs_encoded_read()
10150 disk_bytenr = em->block_start + (start - em->start); in btrfs_encoded_read()
10151 if (encoded->len > count) in btrfs_encoded_read()
10152 encoded->len = count; in btrfs_encoded_read()
10157 disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start; in btrfs_encoded_read()
10158 count = start + disk_io_size - iocb->ki_pos; in btrfs_encoded_read()
10159 encoded->len = count; in btrfs_encoded_read()
10160 encoded->unencoded_len = count; in btrfs_encoded_read()
10161 disk_io_size = ALIGN(disk_io_size, fs_info->sectorsize); in btrfs_encoded_read()
10172 ret = -EFAULT; in btrfs_encoded_read()
10177 encoded->compression, in btrfs_encoded_read()
10183 iocb->ki_pos += encoded->len; in btrfs_encoded_read()
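The btrfs_encoded_read() fragments above round the read position down to a sector boundary, lock at most one maximum-sized compressed extent (128K, BTRFS_MAX_UNCOMPRESSED) past that point, and then clamp both the byte count returned to the caller and the sector-aligned amount read from disk to that window. The worked example below redoes the clamping in userspace; the 4K sector size and the input values are assumptions made for the illustration.

/*
 * Worked example, userspace only: clamping an encoded read to the
 * locked window and re-aligning the on-disk I/O size.
 */
#include <stdint.h>
#include <stdio.h>

#define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))
#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
        uint64_t sectorsize = 4096;             /* assumed */
        uint64_t max_uncompressed = 128 * 1024; /* BTRFS_MAX_UNCOMPRESSED */
        uint64_t ki_pos = 5000;                 /* unaligned read position */
        uint64_t encoded_len = 200000;          /* bytes available to EOF */

        uint64_t start = ALIGN_DOWN(ki_pos, sectorsize);
        uint64_t lockend = start + max_uncompressed - 1;
        uint64_t want_end = ki_pos + encoded_len;
        uint64_t disk_io_size =
                (lockend + 1 < want_end ? lockend + 1 : want_end) - start;
        uint64_t count = start + disk_io_size - ki_pos;

        disk_io_size = ALIGN_UP(disk_io_size, sectorsize);
        printf("caller gets %llu bytes, disk read is %llu bytes from %llu\n",
               (unsigned long long)count,
               (unsigned long long)disk_io_size,
               (unsigned long long)start);
        return 0;
}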
10198 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); in btrfs_do_encoded_write()
10199 struct btrfs_root *root = inode->root; in btrfs_do_encoded_write()
10200 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_do_encoded_write()
10201 struct extent_io_tree *io_tree = &inode->io_tree; in btrfs_do_encoded_write()
10216 switch (encoded->compression) { in btrfs_do_encoded_write()
10229 if (encoded->compression - in btrfs_do_encoded_write()
10231 fs_info->sectorsize_bits) in btrfs_do_encoded_write()
10232 return -EINVAL; in btrfs_do_encoded_write()
10236 return -EINVAL; in btrfs_do_encoded_write()
10238 if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE) in btrfs_do_encoded_write()
10239 return -EINVAL; in btrfs_do_encoded_write()
10245 if (inode->flags & BTRFS_INODE_NODATASUM) in btrfs_do_encoded_write()
10246 return -EINVAL; in btrfs_do_encoded_write()
10251 if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED || in btrfs_do_encoded_write()
10253 return -EINVAL; in btrfs_do_encoded_write()
10268 if (orig_count >= encoded->unencoded_len) in btrfs_do_encoded_write()
10269 return -EINVAL; in btrfs_do_encoded_write()
10272 start = iocb->ki_pos; in btrfs_do_encoded_write()
10273 if (!IS_ALIGNED(start, fs_info->sectorsize)) in btrfs_do_encoded_write()
10274 return -EINVAL; in btrfs_do_encoded_write()
10281 if (start + encoded->len < inode->vfs_inode.i_size && in btrfs_do_encoded_write()
10282 !IS_ALIGNED(start + encoded->len, fs_info->sectorsize)) in btrfs_do_encoded_write()
10283 return -EINVAL; in btrfs_do_encoded_write()
10285 /* Finally, the offset in the unencoded data must be sector-aligned. */ in btrfs_do_encoded_write()
10286 if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize)) in btrfs_do_encoded_write()
10287 return -EINVAL; in btrfs_do_encoded_write()
10289 num_bytes = ALIGN(encoded->len, fs_info->sectorsize); in btrfs_do_encoded_write()
10290 ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize); in btrfs_do_encoded_write()
10291 end = start + num_bytes - 1; in btrfs_do_encoded_write()
10295 * sector-aligned. For convenience, we extend it with zeroes if it in btrfs_do_encoded_write()
10298 disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize); in btrfs_do_encoded_write()
10302 return -ENOMEM; in btrfs_do_encoded_write()
10309 ret = -ENOMEM; in btrfs_do_encoded_write()
10315 ret = -EFAULT; in btrfs_do_encoded_write()
10319 memset(kaddr + bytes, 0, PAGE_SIZE - bytes); in btrfs_do_encoded_write()
10326 ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, num_bytes); in btrfs_do_encoded_write()
10329 ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping, in btrfs_do_encoded_write()
10337 !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end)) in btrfs_do_encoded_write()
10346 * We don't use the higher-level delalloc space functions because our in btrfs_do_encoded_write()
10361 if (start == 0 && encoded->unencoded_len == encoded->len && in btrfs_do_encoded_write()
10362 encoded->unencoded_offset == 0) { in btrfs_do_encoded_write()
10363 ret = cow_file_range_inline(inode, encoded->len, orig_count, in btrfs_do_encoded_write()
10379 start - encoded->unencoded_offset, ins.objectid, in btrfs_do_encoded_write()
10390 encoded->unencoded_offset, in btrfs_do_encoded_write()
10401 if (start + encoded->len > inode->vfs_inode.i_size) in btrfs_do_encoded_write()
10402 i_size_write(&inode->vfs_inode, start + encoded->len); in btrfs_do_encoded_write()
10438 iocb->ki_pos += encoded->len; in btrfs_do_encoded_write()
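The early returns in the btrfs_do_encoded_write() fragments above reject encoded writes whose geometry the extent format cannot represent: the payload must be strictly smaller than the decoded data, the start offset and the offset into the unencoded data must be sector-aligned, and the end of the write may be unaligned only when it reaches or passes i_size. The sketch below gathers those checks into one userspace predicate; the struct layout, field names and the values used in main() are stand-ins invented for the example.

/*
 * Illustrative userspace sketch, not the kernel code: geometry checks
 * for an encoded (pre-compressed) write.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) & ((uint64_t)(a) - 1)) == 0)

struct encoded_write {
        uint64_t start;                 /* file offset of the write */
        uint64_t len;                   /* decoded bytes visible to readers */
        uint64_t unencoded_len;         /* full decoded size of the extent */
        uint64_t unencoded_offset;      /* where 'start' falls in that extent */
        uint64_t compressed_len;        /* bytes the caller actually supplies */
};

static bool encoded_write_ok(const struct encoded_write *w,
                             uint64_t sectorsize, uint64_t i_size)
{
        /* the payload must really be compressed */
        if (w->compressed_len >= w->unencoded_len)
                return false;
        if (!IS_ALIGNED(w->start, sectorsize))
                return false;
        /* an unaligned tail is allowed only at (or past) EOF */
        if (w->start + w->len < i_size &&
            !IS_ALIGNED(w->start + w->len, sectorsize))
                return false;
        if (!IS_ALIGNED(w->unencoded_offset, sectorsize))
                return false;
        return true;
}

int main(void)
{
        struct encoded_write w = {
                .start = 0, .len = 10000, .unencoded_len = 16384,
                .unencoded_offset = 0, .compressed_len = 6000,
        };

        /* unaligned tail is fine here because it lands exactly at EOF */
        printf("accepted: %d\n", encoded_write_ok(&w, 4096, 10000));
        return 0;
}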
10451 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; in btrfs_add_swapfile_pin()
10458 return -ENOMEM; in btrfs_add_swapfile_pin()
10459 sp->ptr = ptr; in btrfs_add_swapfile_pin()
10460 sp->inode = inode; in btrfs_add_swapfile_pin()
10461 sp->is_block_group = is_block_group; in btrfs_add_swapfile_pin()
10462 sp->bg_extent_count = 1; in btrfs_add_swapfile_pin()
10464 spin_lock(&fs_info->swapfile_pins_lock); in btrfs_add_swapfile_pin()
10465 p = &fs_info->swapfile_pins.rb_node; in btrfs_add_swapfile_pin()
10469 if (sp->ptr < entry->ptr || in btrfs_add_swapfile_pin()
10470 (sp->ptr == entry->ptr && sp->inode < entry->inode)) { in btrfs_add_swapfile_pin()
10471 p = &(*p)->rb_left; in btrfs_add_swapfile_pin()
10472 } else if (sp->ptr > entry->ptr || in btrfs_add_swapfile_pin()
10473 (sp->ptr == entry->ptr && sp->inode > entry->inode)) { in btrfs_add_swapfile_pin()
10474 p = &(*p)->rb_right; in btrfs_add_swapfile_pin()
10477 entry->bg_extent_count++; in btrfs_add_swapfile_pin()
10478 spin_unlock(&fs_info->swapfile_pins_lock); in btrfs_add_swapfile_pin()
10483 rb_link_node(&sp->node, parent, p); in btrfs_add_swapfile_pin()
10484 rb_insert_color(&sp->node, &fs_info->swapfile_pins); in btrfs_add_swapfile_pin()
10485 spin_unlock(&fs_info->swapfile_pins_lock); in btrfs_add_swapfile_pin()
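The btrfs_add_swapfile_pin() fragments above insert into an rb-tree ordered first by the pinned pointer and then by the owning inode; if the same (ptr, inode) pair is already present, the existing entry's extent count is bumped instead of inserting a duplicate. The comparison below is an illustrative userspace reduction of just that two-level ordering; the struct and its field types are stand-ins for the example.

/*
 * Illustrative userspace sketch, not the kernel code: the ordering used
 * to place swapfile pins in a search tree.
 */
#include <stdio.h>

struct pin {
        unsigned long ptr;      /* block group or device, used as a key */
        unsigned long inode;    /* tie-breaker */
};

/* < 0: a sorts left of b, > 0: right of b, 0: same node, bump its count */
static int pin_cmp(const struct pin *a, const struct pin *b)
{
        if (a->ptr != b->ptr)
                return a->ptr < b->ptr ? -1 : 1;
        if (a->inode != b->inode)
                return a->inode < b->inode ? -1 : 1;
        return 0;
}

int main(void)
{
        struct pin x = { .ptr = 100, .inode = 7 };
        struct pin y = { .ptr = 100, .inode = 9 };

        printf("cmp = %d\n", pin_cmp(&x, &y));  /* -1: x goes to the left */
        return 0;
}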
10492 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; in btrfs_free_swapfile_pins()
10496 spin_lock(&fs_info->swapfile_pins_lock); in btrfs_free_swapfile_pins()
10497 node = rb_first(&fs_info->swapfile_pins); in btrfs_free_swapfile_pins()
10501 if (sp->inode == inode) { in btrfs_free_swapfile_pins()
10502 rb_erase(&sp->node, &fs_info->swapfile_pins); in btrfs_free_swapfile_pins()
10503 if (sp->is_block_group) { in btrfs_free_swapfile_pins()
10504 btrfs_dec_block_group_swap_extents(sp->ptr, in btrfs_free_swapfile_pins()
10505 sp->bg_extent_count); in btrfs_free_swapfile_pins()
10506 btrfs_put_block_group(sp->ptr); in btrfs_free_swapfile_pins()
10512 spin_unlock(&fs_info->swapfile_pins_lock); in btrfs_free_swapfile_pins()
10538 if (bsi->nr_pages >= sis->max) in btrfs_add_swap_extent()
10541 max_pages = sis->max - bsi->nr_pages; in btrfs_add_swap_extent()
10542 first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT; in btrfs_add_swap_extent()
10543 next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT; in btrfs_add_swap_extent()
10547 nr_pages = next_ppage - first_ppage; in btrfs_add_swap_extent()
10551 if (bsi->start == 0) in btrfs_add_swap_extent()
10553 if (bsi->lowest_ppage > first_ppage_reported) in btrfs_add_swap_extent()
10554 bsi->lowest_ppage = first_ppage_reported; in btrfs_add_swap_extent()
10555 if (bsi->highest_ppage < (next_ppage - 1)) in btrfs_add_swap_extent()
10556 bsi->highest_ppage = next_ppage - 1; in btrfs_add_swap_extent()
10558 ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage); in btrfs_add_swap_extent()
10561 bsi->nr_extents += ret; in btrfs_add_swap_extent()
10562 bsi->nr_pages += nr_pages; in btrfs_add_swap_extent()
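The btrfs_add_swap_extent() fragments above hand the swap layer only pages that are fully covered by the physical extent: the extent's start is rounded up to a page boundary and its end rounded down before the page count is taken, and the running totals feed the lowest/highest page bookkeeping. A worked example of that rounding, with 4K pages and invented extent numbers:

/*
 * Worked example, userspace only: trimming a physical byte extent to
 * whole swap pages.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_SHIFT 12
#define PAGE_ALIGN(x)      (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define PAGE_ALIGN_DOWN(x) ((x) & ~(PAGE_SIZE - 1))

int main(void)
{
        uint64_t block_start = 6144;    /* physical start, not page aligned */
        uint64_t block_len = 20480;     /* 20K extent */

        uint64_t first_ppage = PAGE_ALIGN(block_start) >> PAGE_SHIFT;
        uint64_t next_ppage =
                PAGE_ALIGN_DOWN(block_start + block_len) >> PAGE_SHIFT;

        /* only fully covered pages become swap slots: pages [2, 6) here */
        printf("pages [%llu, %llu) -> %llu usable\n",
               (unsigned long long)first_ppage,
               (unsigned long long)next_ppage,
               (unsigned long long)(next_ppage - first_ppage));
        return 0;
}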
10571 atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles); in btrfs_swap_deactivate()
10578 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_swap_activate()
10579 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_swap_activate()
10580 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in btrfs_swap_activate()
10585 .lowest_ppage = (sector_t)-1ULL, in btrfs_swap_activate()
10596 ret = btrfs_wait_ordered_range(inode, 0, (u64)-1); in btrfs_swap_activate()
10603 if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) { in btrfs_swap_activate()
10605 return -EINVAL; in btrfs_swap_activate()
10607 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) { in btrfs_swap_activate()
10608 btrfs_warn(fs_info, "swapfile must not be copy-on-write"); in btrfs_swap_activate()
10609 return -EINVAL; in btrfs_swap_activate()
10611 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { in btrfs_swap_activate()
10613 return -EINVAL; in btrfs_swap_activate()
10620 * fs_info->swapfile_pins prevents them from running while the swap in btrfs_swap_activate()
10628 return -EBUSY; in btrfs_swap_activate()
10638 if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) { in btrfs_swap_activate()
10642 return -EINVAL; in btrfs_swap_activate()
10654 spin_lock(&root->root_item_lock); in btrfs_swap_activate()
10656 spin_unlock(&root->root_item_lock); in btrfs_swap_activate()
10661 root->root_key.objectid); in btrfs_swap_activate()
10662 return -EPERM; in btrfs_swap_activate()
10664 atomic_inc(&root->nr_swapfiles); in btrfs_swap_activate()
10665 spin_unlock(&root->root_item_lock); in btrfs_swap_activate()
10667 isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize); in btrfs_swap_activate()
10669 lock_extent(io_tree, 0, isize - 1, &cached_state); in btrfs_swap_activate()
10674 u64 len = isize - start; in btrfs_swap_activate()
10682 if (em->block_start == EXTENT_MAP_HOLE) { in btrfs_swap_activate()
10684 ret = -EINVAL; in btrfs_swap_activate()
10687 if (em->block_start == EXTENT_MAP_INLINE) { in btrfs_swap_activate()
10696 ret = -EINVAL; in btrfs_swap_activate()
10699 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { in btrfs_swap_activate()
10701 ret = -EINVAL; in btrfs_swap_activate()
10705 logical_block_start = em->block_start + (start - em->start); in btrfs_swap_activate()
10706 len = min(len, em->len - (start - em->start)); in btrfs_swap_activate()
10717 "swapfile must not be copy-on-write"); in btrfs_swap_activate()
10718 ret = -EINVAL; in btrfs_swap_activate()
10728 if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { in btrfs_swap_activate()
10731 ret = -EINVAL; in btrfs_swap_activate()
10736 device = em->map_lookup->stripes[0].dev; in btrfs_swap_activate()
10742 } else if (device != em->map_lookup->stripes[0].dev) { in btrfs_swap_activate()
10744 ret = -EINVAL; in btrfs_swap_activate()
10748 physical_block_start = (em->map_lookup->stripes[0].physical + in btrfs_swap_activate()
10749 (logical_block_start - em->start)); in btrfs_swap_activate()
10750 len = min(len, em->len - (logical_block_start - em->start)); in btrfs_swap_activate()
10758 ret = -EINVAL; in btrfs_swap_activate()
10764 "block group for swapfile at %llu is read-only%s", in btrfs_swap_activate()
10765 bg->start, in btrfs_swap_activate()
10766 atomic_read(&fs_info->scrubs_running) ? in btrfs_swap_activate()
10769 ret = -EINVAL; in btrfs_swap_activate()
10806 unlock_extent(io_tree, 0, isize - 1, &cached_state); in btrfs_swap_activate()
10811 btrfs_drew_write_unlock(&root->snapshot_lock); in btrfs_swap_activate()
10819 sis->bdev = device->bdev; in btrfs_swap_activate()
10820 *span = bsi.highest_ppage - bsi.lowest_ppage + 1; in btrfs_swap_activate()
10821 sis->max = bsi.nr_pages; in btrfs_swap_activate()
10822 sis->pages = bsi.nr_pages - 1; in btrfs_swap_activate()
10823 sis->highest_bit = bsi.nr_pages - 1; in btrfs_swap_activate()
10834 return -EOPNOTSUPP; in btrfs_swap_activate()
10851 spin_lock(&inode->lock); in btrfs_update_inode_bytes()
10853 inode_sub_bytes(&inode->vfs_inode, del_bytes); in btrfs_update_inode_bytes()
10855 inode_add_bytes(&inode->vfs_inode, add_bytes); in btrfs_update_inode_bytes()
10856 spin_unlock(&inode->lock); in btrfs_update_inode_bytes()
10875 struct btrfs_root *root = inode->root; in btrfs_assert_inode_range_clean()
10881 ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start); in btrfs_assert_inode_range_clean()
10883 btrfs_err(root->fs_info, in btrfs_assert_inode_range_clean()
10885 start, end, btrfs_ino(inode), root->root_key.objectid, in btrfs_assert_inode_range_clean()
10886 ordered->file_offset, in btrfs_assert_inode_range_clean()
10887 ordered->file_offset + ordered->num_bytes - 1); in btrfs_assert_inode_range_clean()