Lines Matching +full:oe +full:- +full:extra +full:- +full:delay
1 // SPDX-License-Identifier: GPL-2.0
9 #include <linux/blk-cgroup.h>
17 #include <linux/backing-dev.h>
39 #include "disk-io.h"
42 #include "print-tree.h"
43 #include "ordered-data.h"
45 #include "tree-log.h"
49 #include "free-space-cache.h"
52 #include "delalloc-space.h"
53 #include "block-group.h"
54 #include "space-info.h"
57 #include "inode-item.h"
60 #include "extent-tree.h"
61 #include "root-tree.h"
63 #include "dir-item.h"
64 #include "file-item.h"
65 #include "uuid-tree.h"
142 struct btrfs_fs_info *fs_info = warn->fs_info; in data_reloc_print_warning_inode()
163 ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0); in data_reloc_print_warning_inode()
166 btrfs_release_path(&warn->path); in data_reloc_print_warning_inode()
170 eb = warn->path.nodes[0]; in data_reloc_print_warning_inode()
171 inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item); in data_reloc_print_warning_inode()
173 btrfs_release_path(&warn->path); in data_reloc_print_warning_inode()
176 ipath = init_ipath(4096, local_root, &warn->path); in data_reloc_print_warning_inode()
183 * -ENOMEM, not a critical error, just output a generic error in data_reloc_print_warning_inode()
188 warn->logical, warn->mirror_num, root, inum, offset); in data_reloc_print_warning_inode()
199 for (int i = 0; i < ipath->fspath->elem_cnt; i++) { in data_reloc_print_warning_inode()
202 warn->logical, warn->mirror_num, root, inum, offset, in data_reloc_print_warning_inode()
203 fs_info->sectorsize, nlink, in data_reloc_print_warning_inode()
204 (char *)(unsigned long)ipath->fspath->val[i]); in data_reloc_print_warning_inode()
214 warn->logical, warn->mirror_num, root, inum, offset, ret); in data_reloc_print_warning_inode()
221 * Do extra user-friendly error output (e.g. lookup all the affected files).
230 struct btrfs_fs_info *fs_info = inode->root->fs_info; in print_data_reloc_error()
235 const u32 csum_size = fs_info->csum_size; in print_data_reloc_error()
241 mutex_lock(&fs_info->reloc_mutex); in print_data_reloc_error()
243 mutex_unlock(&fs_info->reloc_mutex); in print_data_reloc_error()
249 inode->root->root_key.objectid, btrfs_ino(inode), file_off, in print_data_reloc_error()
259 inode->root->root_key.objectid, in print_data_reloc_error()
306 ctx.extent_item_pos = logical - found_key.objectid; in print_data_reloc_error()
322 struct btrfs_root *root = inode->root; in btrfs_print_data_csum_error()
323 const u32 csum_size = root->fs_info->csum_size; in btrfs_print_data_csum_error()
326 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID) in btrfs_print_data_csum_error()
331 if (root->root_key.objectid >= BTRFS_LAST_FREE_OBJECTID) { in btrfs_print_data_csum_error()
332 btrfs_warn_rl(root->fs_info, in btrfs_print_data_csum_error()
334 root->root_key.objectid, btrfs_ino(inode), in btrfs_print_data_csum_error()
340 btrfs_warn_rl(root->fs_info, in btrfs_print_data_csum_error()
342 root->root_key.objectid, btrfs_ino(inode), in btrfs_print_data_csum_error()
351 * btrfs_inode_lock - lock inode i_rwsem based on arguments passed
355 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
356 * BTRFS_ILOCK_TRY - try to acquire the lock; if it fails on the first attempt,
357 * return -EAGAIN
358 * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
364 if (!inode_trylock_shared(&inode->vfs_inode)) in btrfs_inode_lock()
365 return -EAGAIN; in btrfs_inode_lock()
369 inode_lock_shared(&inode->vfs_inode); in btrfs_inode_lock()
372 if (!inode_trylock(&inode->vfs_inode)) in btrfs_inode_lock()
373 return -EAGAIN; in btrfs_inode_lock()
377 inode_lock(&inode->vfs_inode); in btrfs_inode_lock()
380 down_write(&inode->i_mmap_lock); in btrfs_inode_lock()
385 * btrfs_inode_unlock - unlock inode i_rwsem
393 up_write(&inode->i_mmap_lock); in btrfs_inode_unlock()
395 inode_unlock_shared(&inode->vfs_inode); in btrfs_inode_unlock()
397 inode_unlock(&inode->vfs_inode); in btrfs_inode_unlock()
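A minimal caller sketch, added for illustration and not part of the matched file: it assumes the struct btrfs_inode * based btrfs_inode_lock()/btrfs_inode_unlock() interface and the BTRFS_ILOCK_* flags described at lines 355-358 (defined in btrfs_inode.h); the function name is hypothetical.

	/* Take the inode's i_rwsem shared without blocking; -EAGAIN means it was contended. */
	static int example_read_op_nonblocking(struct btrfs_inode *inode)
	{
		int ret;

		ret = btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_TRY);
		if (ret)
			return ret;

		/* ... read-only work under the shared i_rwsem ... */

		/* Unlock with the same mode so the shared variant is released. */
		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
		return 0;
	}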
415 unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT; in btrfs_cleanup_ordered_extents()
421 page_end = page_start + PAGE_SIZE - 1; in btrfs_cleanup_ordered_extents()
439 page = find_get_page(inode->vfs_inode.i_mapping, index); in btrfs_cleanup_ordered_extents()
449 btrfs_page_clamp_clear_ordered(inode->root->fs_info, page, in btrfs_cleanup_ordered_extents()
464 if (page_start >= offset && page_end <= (offset + bytes - 1)) { in btrfs_cleanup_ordered_extents()
465 bytes = offset + bytes - page_offset(locked_page) - PAGE_SIZE; in btrfs_cleanup_ordered_extents()
480 if (args->default_acl) { in btrfs_init_inode_security()
481 err = __btrfs_set_acl(trans, args->inode, args->default_acl, in btrfs_init_inode_security()
486 if (args->acl) { in btrfs_init_inode_security()
487 err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS); in btrfs_init_inode_security()
491 if (!args->default_acl && !args->acl) in btrfs_init_inode_security()
492 cache_no_acl(args->inode); in btrfs_init_inode_security()
493 return btrfs_xattr_security_init(trans, args->inode, args->dir, in btrfs_init_inode_security()
494 &args->dentry->d_name); in btrfs_init_inode_security()
510 struct btrfs_root *root = inode->root; in insert_inline_extent()
540 leaf = path->nodes[0]; in insert_inline_extent()
541 ei = btrfs_item_ptr(leaf, path->slots[0], in insert_inline_extent()
543 btrfs_set_file_extent_generation(leaf, ei, trans->transid); in insert_inline_extent()
564 compressed_size -= cur_size; in insert_inline_extent()
569 page = find_get_page(inode->vfs_inode.i_mapping, 0); in insert_inline_extent()
584 ALIGN(size, root->fs_info->sectorsize)); in insert_inline_extent()
595 i_size = i_size_read(&inode->vfs_inode); in insert_inline_extent()
597 i_size_write(&inode->vfs_inode, size); in insert_inline_extent()
600 inode->disk_i_size = i_size; in insert_inline_extent()
619 struct btrfs_root *root = inode->root; in cow_file_range_inline()
620 struct btrfs_fs_info *fs_info = root->fs_info; in cow_file_range_inline()
632 if (size < i_size_read(&inode->vfs_inode) || in cow_file_range_inline()
633 size > fs_info->sectorsize || in cow_file_range_inline()
635 data_len > fs_info->max_inline) in cow_file_range_inline()
640 return -ENOMEM; in cow_file_range_inline()
647 trans->block_rsv = &inode->block_rsv; in cow_file_range_inline()
651 drop_args.end = fs_info->sectorsize; in cow_file_range_inline()
664 if (ret && ret != -ENOSPC) { in cow_file_range_inline()
667 } else if (ret == -ENOSPC) { in cow_file_range_inline()
674 if (ret && ret != -ENOSPC) { in cow_file_range_inline()
677 } else if (ret == -ENOSPC) { in cow_file_range_inline()
734 return -ENOMEM; in add_async_extent()
735 async_extent->start = start; in add_async_extent()
736 async_extent->ram_size = ram_size; in add_async_extent()
737 async_extent->compressed_size = compressed_size; in add_async_extent()
738 async_extent->pages = pages; in add_async_extent()
739 async_extent->nr_pages = nr_pages; in add_async_extent()
740 async_extent->compress_type = compress_type; in add_async_extent()
741 list_add_tail(&async_extent->list, &cow->extents); in add_async_extent()
752 struct btrfs_fs_info *fs_info = inode->root->fs_info; in inode_need_compress()
768 * \- A \- B in inode_need_compress()
786 if (fs_info->sectorsize < PAGE_SIZE) { in inode_need_compress()
796 if (inode->defrag_compress) in inode_need_compress()
799 if (inode->flags & BTRFS_INODE_NOCOMPRESS) in inode_need_compress()
802 inode->flags & BTRFS_INODE_COMPRESS || in inode_need_compress()
803 inode->prop_compress) in inode_need_compress()
804 return btrfs_compress_heuristic(&inode->vfs_inode, start, end); in inode_need_compress()
813 (start > 0 || end + 1 < inode->disk_i_size)) in inode_should_defrag()
834 struct btrfs_inode *inode = async_chunk->inode; in compress_file_range()
835 struct btrfs_fs_info *fs_info = inode->root->fs_info; in compress_file_range()
836 struct address_space *mapping = inode->vfs_inode.i_mapping; in compress_file_range()
837 u64 blocksize = fs_info->sectorsize; in compress_file_range()
838 u64 start = async_chunk->start; in compress_file_range()
839 u64 end = async_chunk->end; in compress_file_range()
849 int compress_type = fs_info->compress_type; in compress_file_range()
851 inode_should_defrag(inode, start, end, end - start + 1, SZ_16K); in compress_file_range()
858 extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end); in compress_file_range()
870 i_size = i_size_read(&inode->vfs_inode); in compress_file_range()
875 nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1; in compress_file_range()
891 total_compressed = actual_end - start; in compress_file_range()
898 (start > 0 || end + 1 < inode->disk_i_size)) in compress_file_range()
918 * We do compression for mount -o compress and when the inode has not in compress_file_range()
934 if (inode->defrag_compress) in compress_file_range()
935 compress_type = inode->defrag_compress; in compress_file_range()
936 else if (inode->prop_compress) in compress_file_range()
937 compress_type = inode->prop_compress; in compress_file_range()
940 ret = btrfs_compress_pages(compress_type | (fs_info->compress_level << 4), in compress_file_range()
952 memzero_page(pages[nr_pages - 1], poff, PAGE_SIZE - poff); in compress_file_range()
963 if (start == 0 && fs_info->sectorsize == PAGE_SIZE) { in compress_file_range()
980 mapping_set_error(mapping, -EIO); in compress_file_range()
1013 total_in = round_up(total_in, fs_info->sectorsize); in compress_file_range()
1032 if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress) in compress_file_range()
1033 inode->flags |= BTRFS_INODE_NOCOMPRESS; in compress_file_range()
1035 ret = add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0, in compress_file_range()
1041 WARN_ON(pages[i]->mapping); in compress_file_range()
1052 if (!async_extent->pages) in free_async_extent_pages()
1055 for (i = 0; i < async_extent->nr_pages; i++) { in free_async_extent_pages()
1056 WARN_ON(async_extent->pages[i]->mapping); in free_async_extent_pages()
1057 put_page(async_extent->pages[i]); in free_async_extent_pages()
1059 kfree(async_extent->pages); in free_async_extent_pages()
1060 async_extent->nr_pages = 0; in free_async_extent_pages()
1061 async_extent->pages = NULL; in free_async_extent_pages()
1068 u64 start = async_extent->start; in submit_uncompressed_range()
1069 u64 end = async_extent->start + async_extent->ram_size - 1; in submit_uncompressed_range()
1078 wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode); in submit_uncompressed_range()
1082 btrfs_cleanup_ordered_extents(inode, locked_page, start, end - start + 1); in submit_uncompressed_range()
1091 mapping_set_error(locked_page->mapping, ret); in submit_uncompressed_range()
1101 struct btrfs_inode *inode = async_chunk->inode; in submit_one_async_extent()
1102 struct extent_io_tree *io_tree = &inode->io_tree; in submit_one_async_extent()
1103 struct btrfs_root *root = inode->root; in submit_one_async_extent()
1104 struct btrfs_fs_info *fs_info = root->fs_info; in submit_one_async_extent()
1110 u64 start = async_extent->start; in submit_one_async_extent()
1111 u64 end = async_extent->start + async_extent->ram_size - 1; in submit_one_async_extent()
1113 if (async_chunk->blkcg_css) in submit_one_async_extent()
1114 kthread_associate_blkcg(async_chunk->blkcg_css); in submit_one_async_extent()
1117 * If async_chunk->locked_page is in the async_extent range, we need to in submit_one_async_extent()
1120 if (async_chunk->locked_page) { in submit_one_async_extent()
1121 u64 locked_page_start = page_offset(async_chunk->locked_page); in submit_one_async_extent()
1122 u64 locked_page_end = locked_page_start + PAGE_SIZE - 1; in submit_one_async_extent()
1125 locked_page = async_chunk->locked_page; in submit_one_async_extent()
1129 if (async_extent->compress_type == BTRFS_COMPRESS_NONE) { in submit_one_async_extent()
1134 ret = btrfs_reserve_extent(root, async_extent->ram_size, in submit_one_async_extent()
1135 async_extent->compressed_size, in submit_one_async_extent()
1136 async_extent->compressed_size, in submit_one_async_extent()
1142 * non-contiguous space for the uncompressed size instead. So in submit_one_async_extent()
1151 async_extent->ram_size, /* len */ in submit_one_async_extent()
1156 async_extent->ram_size, /* ram_bytes */ in submit_one_async_extent()
1157 async_extent->compress_type, in submit_one_async_extent()
1166 async_extent->ram_size, /* num_bytes */ in submit_one_async_extent()
1167 async_extent->ram_size, /* ram_bytes */ in submit_one_async_extent()
1172 async_extent->compress_type); in submit_one_async_extent()
1185 async_extent->pages, /* compressed_pages */ in submit_one_async_extent()
1186 async_extent->nr_pages, in submit_one_async_extent()
1187 async_chunk->write_flags, true); in submit_one_async_extent()
1190 if (async_chunk->blkcg_css) in submit_one_async_extent()
1198 mapping_set_error(inode->vfs_inode.i_mapping, -EIO); in submit_one_async_extent()
1206 if (async_chunk->blkcg_css) in submit_one_async_extent()
1210 root->root_key.objectid, btrfs_ino(inode), start, in submit_one_async_extent()
1211 async_extent->ram_size, ret); in submit_one_async_extent()
1218 struct extent_map_tree *em_tree = &inode->extent_tree; in get_extent_allocation_hint()
1222 read_lock(&em_tree->lock); in get_extent_allocation_hint()
1230 if (em->block_start >= EXTENT_MAP_LAST_BYTE) { in get_extent_allocation_hint()
1233 if (em && em->block_start < EXTENT_MAP_LAST_BYTE) in get_extent_allocation_hint()
1234 alloc_hint = em->block_start; in get_extent_allocation_hint()
1238 alloc_hint = em->block_start; in get_extent_allocation_hint()
1242 read_unlock(&em_tree->lock); in get_extent_allocation_hint()
1254 * it to make sure we don't do extra locks or unlocks.
1266 * - If @keep_locked is set, all pages are kept locked.
1267 * - Else all pages except for @locked_page are unlocked.
1270 * while-loop, the ordered extents created in previous iterations are kept
1280 struct btrfs_root *root = inode->root; in cow_file_range()
1281 struct btrfs_fs_info *fs_info = root->fs_info; in cow_file_range()
1288 u64 blocksize = fs_info->sectorsize; in cow_file_range()
1297 ret = -EINVAL; in cow_file_range()
1301 num_bytes = ALIGN(end - start + 1, blocksize); in cow_file_range()
1303 ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy)); in cow_file_range()
1317 if (start == 0 && fs_info->sectorsize == PAGE_SIZE && !no_inline) { in cow_file_range()
1318 u64 actual_end = min_t(u64, i_size_read(&inode->vfs_inode), in cow_file_range()
1373 min_alloc_size = fs_info->sectorsize; in cow_file_range()
1382 if (ret == -EAGAIN) { in cow_file_range()
1384 * btrfs_reserve_extent only returns -EAGAIN for zoned in cow_file_range()
1392 * us, or return -ENOSPC if it can't handle retries. in cow_file_range()
1396 wait_on_bit_io(&inode->root->fs_info->flags, in cow_file_range()
1402 *done_offset = start - 1; in cow_file_range()
1405 ret = -ENOSPC; in cow_file_range()
1452 start + ram_size - 1, in cow_file_range()
1470 extent_clear_unlock_delalloc(inode, start, start + ram_size - 1, in cow_file_range()
1477 num_bytes -= cur_alloc_size; in cow_file_range()
1496 btrfs_drop_extent_map_range(inode, start, start + ram_size - 1, false); in cow_file_range()
1504 * |-------(1)----|---(2)---|-------------(3)----------| in cow_file_range()
1505 * `- orig_start `- start `- start + cur_alloc_size `- end in cow_file_range()
1528 mapping_set_error(inode->vfs_inode.i_mapping, ret); in cow_file_range()
1529 extent_clear_unlock_delalloc(inode, orig_start, start - 1, in cow_file_range()
1545 start + cur_alloc_size - 1, in cow_file_range()
1563 btrfs_qgroup_free_data(inode, NULL, start, end - start + 1, NULL); in cow_file_range()
1582 nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >> in submit_compressed_extents()
1585 while (!list_empty(&async_chunk->extents)) { in submit_compressed_extents()
1586 async_extent = list_entry(async_chunk->extents.next, in submit_compressed_extents()
1588 list_del(&async_extent->list); in submit_compressed_extents()
1593 if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) < in submit_compressed_extents()
1595 cond_wake_up_nomb(&fs_info->async_submit_wait); in submit_compressed_extents()
1604 btrfs_add_delayed_iput(async_chunk->inode); in async_cow_free()
1605 if (async_chunk->blkcg_css) in async_cow_free()
1606 css_put(async_chunk->blkcg_css); in async_cow_free()
1608 async_cow = async_chunk->async_cow; in async_cow_free()
1609 if (atomic_dec_and_test(&async_cow->num_chunks)) in async_cow_free()
1617 struct btrfs_fs_info *fs_info = inode->root->fs_info; in run_delalloc_compressed()
1622 u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K); in run_delalloc_compressed()
1633 unlock_extent(&inode->io_tree, start, end, NULL); in run_delalloc_compressed()
1634 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags); in run_delalloc_compressed()
1636 async_chunk = ctx->chunks; in run_delalloc_compressed()
1637 atomic_set(&ctx->num_chunks, num_chunks); in run_delalloc_compressed()
1640 u64 cur_end = min(end, start + SZ_512K - 1); in run_delalloc_compressed()
1646 ihold(&inode->vfs_inode); in run_delalloc_compressed()
1674 cur_end - start); in run_delalloc_compressed()
1692 nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE); in run_delalloc_compressed()
1693 atomic_add(nr_pages, &fs_info->async_delalloc_pages); in run_delalloc_compressed()
1695 btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work); in run_delalloc_compressed()
1719 extent_write_locked_range(&inode->vfs_inode, locked_page, start, in run_delalloc_cow()
1735 ret = btrfs_lookup_csums_list(csum_root, bytenr, bytenr + num_bytes - 1, in csum_exist_in_range()
1742 list_del(&sums->list); in csum_exist_in_range()
1754 const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root); in fallback_to_cow()
1755 const u64 range_bytes = end + 1 - start; in fallback_to_cow()
1756 struct extent_io_tree *io_tree = &inode->io_tree; in fallback_to_cow()
1797 struct btrfs_fs_info *fs_info = inode->root->fs_info; in fallback_to_cow()
1798 struct btrfs_space_info *sinfo = fs_info->data_sinfo; in fallback_to_cow()
1803 spin_lock(&sinfo->lock); in fallback_to_cow()
1805 spin_unlock(&sinfo->lock); in fallback_to_cow()
1849 * if path->nodes[0] is NULL or not if it needs to use the path afterwards.
1861 struct extent_buffer *leaf = path->nodes[0]; in can_nocow_file_extent()
1862 struct btrfs_root *root = inode->root; in can_nocow_file_extent()
1868 bool nowait = path->nowait; in can_nocow_file_extent()
1870 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); in can_nocow_file_extent()
1877 args->disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); in can_nocow_file_extent()
1878 args->disk_num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); in can_nocow_file_extent()
1879 args->extent_offset = btrfs_file_extent_offset(leaf, fi); in can_nocow_file_extent()
1881 if (!(inode->flags & BTRFS_INODE_NODATACOW) && in can_nocow_file_extent()
1890 if (!args->strict && in can_nocow_file_extent()
1892 btrfs_root_last_snapshot(&root->root_item)) in can_nocow_file_extent()
1896 if (args->disk_bytenr == 0) in can_nocow_file_extent()
1915 key->offset - args->extent_offset, in can_nocow_file_extent()
1916 args->disk_bytenr, args->strict, path); in can_nocow_file_extent()
1921 if (args->free_path) { in can_nocow_file_extent()
1925 * another path. So free the path to avoid unnecessary extra in can_nocow_file_extent()
1933 if (args->writeback_path && !is_freespace_inode && in can_nocow_file_extent()
1934 atomic_read(&root->snapshot_force_cow)) in can_nocow_file_extent()
1937 args->disk_bytenr += args->extent_offset; in can_nocow_file_extent()
1938 args->disk_bytenr += args->start - key->offset; in can_nocow_file_extent()
1939 args->num_bytes = min(args->end + 1, extent_end) - args->start; in can_nocow_file_extent()
1945 ret = csum_exist_in_range(root->fs_info, args->disk_bytenr, args->num_bytes, in can_nocow_file_extent()
1953 if (args->free_path && path) in can_nocow_file_extent()
1970 struct btrfs_fs_info *fs_info = inode->root->fs_info; in run_delalloc_nocow()
1971 struct btrfs_root *root = inode->root; in run_delalloc_nocow()
1973 u64 cow_start = (u64)-1; in run_delalloc_nocow()
1989 ret = -ENOMEM; in run_delalloc_nocow()
2018 if (ret > 0 && path->slots[0] > 0 && check_prev) { in run_delalloc_nocow()
2019 leaf = path->nodes[0]; in run_delalloc_nocow()
2021 path->slots[0] - 1); in run_delalloc_nocow()
2024 path->slots[0]--; in run_delalloc_nocow()
2029 leaf = path->nodes[0]; in run_delalloc_nocow()
2030 if (path->slots[0] >= btrfs_header_nritems(leaf)) { in run_delalloc_nocow()
2036 leaf = path->nodes[0]; in run_delalloc_nocow()
2039 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in run_delalloc_nocow()
2050 path->slots[0]++; in run_delalloc_nocow()
2064 if (cow_start == (u64)-1) in run_delalloc_nocow()
2074 fi = btrfs_item_ptr(leaf, path->slots[0], in run_delalloc_nocow()
2080 ret = -EUCLEAN; in run_delalloc_nocow()
2091 path->slots[0]++; in run_delalloc_nocow()
2113 if (cow_start == (u64)-1) in run_delalloc_nocow()
2118 if (!path->nodes[0]) in run_delalloc_nocow()
2120 path->slots[0]++; in run_delalloc_nocow()
2125 * COW range from cow_start to found_key.offset - 1. As the key in run_delalloc_nocow()
2129 if (cow_start != (u64)-1) { in run_delalloc_nocow()
2131 cow_start, found_key.offset - 1); in run_delalloc_nocow()
2132 cow_start = (u64)-1; in run_delalloc_nocow()
2139 nocow_end = cur_offset + nocow_args.num_bytes - 1; in run_delalloc_nocow()
2142 u64 orig_start = found_key.offset - nocow_args.extent_offset; in run_delalloc_nocow()
2206 if (cur_offset <= end && cow_start == (u64)-1) in run_delalloc_nocow()
2209 if (cow_start != (u64)-1) { in run_delalloc_nocow()
2212 cow_start = (u64)-1; in run_delalloc_nocow()
2226 if (cow_start != (u64)-1) in run_delalloc_nocow()
2235 btrfs_qgroup_free_data(inode, NULL, cur_offset, end - cur_offset + 1, NULL); in run_delalloc_nocow()
2243 if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) { in should_nocow()
2244 if (inode->defrag_bytes && in should_nocow()
2245 test_range_bit(&inode->io_tree, start, end, EXTENT_DEFRAG, in should_nocow()
2260 const bool zoned = btrfs_is_zoned(inode->root->fs_info); in btrfs_run_delalloc_range()
2290 end - start + 1); in btrfs_run_delalloc_range()
2297 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_split_delalloc_extent()
2301 if (!(orig->state & EXTENT_DELALLOC)) in btrfs_split_delalloc_extent()
2304 size = orig->end - orig->start + 1; in btrfs_split_delalloc_extent()
2305 if (size > fs_info->max_extent_size) { in btrfs_split_delalloc_extent()
2313 new_size = orig->end - split + 1; in btrfs_split_delalloc_extent()
2315 new_size = split - orig->start; in btrfs_split_delalloc_extent()
2321 spin_lock(&inode->lock); in btrfs_split_delalloc_extent()
2323 spin_unlock(&inode->lock); in btrfs_split_delalloc_extent()
2334 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_merge_delalloc_extent()
2339 if (!(other->state & EXTENT_DELALLOC)) in btrfs_merge_delalloc_extent()
2342 if (new->start > other->start) in btrfs_merge_delalloc_extent()
2343 new_size = new->end - other->start + 1; in btrfs_merge_delalloc_extent()
2345 new_size = other->end - new->start + 1; in btrfs_merge_delalloc_extent()
2348 if (new_size <= fs_info->max_extent_size) { in btrfs_merge_delalloc_extent()
2349 spin_lock(&inode->lock); in btrfs_merge_delalloc_extent()
2350 btrfs_mod_outstanding_extents(inode, -1); in btrfs_merge_delalloc_extent()
2351 spin_unlock(&inode->lock); in btrfs_merge_delalloc_extent()
2373 old_size = other->end - other->start + 1; in btrfs_merge_delalloc_extent()
2375 old_size = new->end - new->start + 1; in btrfs_merge_delalloc_extent()
2380 spin_lock(&inode->lock); in btrfs_merge_delalloc_extent()
2381 btrfs_mod_outstanding_extents(inode, -1); in btrfs_merge_delalloc_extent()
2382 spin_unlock(&inode->lock); in btrfs_merge_delalloc_extent()
2388 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_add_delalloc_inodes()
2390 spin_lock(&root->delalloc_lock); in btrfs_add_delalloc_inodes()
2391 if (list_empty(&inode->delalloc_inodes)) { in btrfs_add_delalloc_inodes()
2392 list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes); in btrfs_add_delalloc_inodes()
2393 set_bit(BTRFS_INODE_IN_DELALLOC_LIST, &inode->runtime_flags); in btrfs_add_delalloc_inodes()
2394 root->nr_delalloc_inodes++; in btrfs_add_delalloc_inodes()
2395 if (root->nr_delalloc_inodes == 1) { in btrfs_add_delalloc_inodes()
2396 spin_lock(&fs_info->delalloc_root_lock); in btrfs_add_delalloc_inodes()
2397 BUG_ON(!list_empty(&root->delalloc_root)); in btrfs_add_delalloc_inodes()
2398 list_add_tail(&root->delalloc_root, in btrfs_add_delalloc_inodes()
2399 &fs_info->delalloc_roots); in btrfs_add_delalloc_inodes()
2400 spin_unlock(&fs_info->delalloc_root_lock); in btrfs_add_delalloc_inodes()
2403 spin_unlock(&root->delalloc_lock); in btrfs_add_delalloc_inodes()
2409 struct btrfs_fs_info *fs_info = root->fs_info; in __btrfs_del_delalloc_inode()
2411 if (!list_empty(&inode->delalloc_inodes)) { in __btrfs_del_delalloc_inode()
2412 list_del_init(&inode->delalloc_inodes); in __btrfs_del_delalloc_inode()
2414 &inode->runtime_flags); in __btrfs_del_delalloc_inode()
2415 root->nr_delalloc_inodes--; in __btrfs_del_delalloc_inode()
2416 if (!root->nr_delalloc_inodes) { in __btrfs_del_delalloc_inode()
2417 ASSERT(list_empty(&root->delalloc_inodes)); in __btrfs_del_delalloc_inode()
2418 spin_lock(&fs_info->delalloc_root_lock); in __btrfs_del_delalloc_inode()
2419 BUG_ON(list_empty(&root->delalloc_root)); in __btrfs_del_delalloc_inode()
2420 list_del_init(&root->delalloc_root); in __btrfs_del_delalloc_inode()
2421 spin_unlock(&fs_info->delalloc_root_lock); in __btrfs_del_delalloc_inode()
2429 spin_lock(&root->delalloc_lock); in btrfs_del_delalloc_inode()
2431 spin_unlock(&root->delalloc_lock); in btrfs_del_delalloc_inode()
2441 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_set_delalloc_extent()
2450 if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { in btrfs_set_delalloc_extent()
2451 struct btrfs_root *root = inode->root; in btrfs_set_delalloc_extent()
2452 u64 len = state->end + 1 - state->start; in btrfs_set_delalloc_extent()
2456 spin_lock(&inode->lock); in btrfs_set_delalloc_extent()
2458 spin_unlock(&inode->lock); in btrfs_set_delalloc_extent()
2464 percpu_counter_add_batch(&fs_info->delalloc_bytes, len, in btrfs_set_delalloc_extent()
2465 fs_info->delalloc_batch); in btrfs_set_delalloc_extent()
2466 spin_lock(&inode->lock); in btrfs_set_delalloc_extent()
2467 inode->delalloc_bytes += len; in btrfs_set_delalloc_extent()
2469 inode->defrag_bytes += len; in btrfs_set_delalloc_extent()
2471 &inode->runtime_flags)) in btrfs_set_delalloc_extent()
2473 spin_unlock(&inode->lock); in btrfs_set_delalloc_extent()
2476 if (!(state->state & EXTENT_DELALLOC_NEW) && in btrfs_set_delalloc_extent()
2478 spin_lock(&inode->lock); in btrfs_set_delalloc_extent()
2479 inode->new_delalloc_bytes += state->end + 1 - state->start; in btrfs_set_delalloc_extent()
2480 spin_unlock(&inode->lock); in btrfs_set_delalloc_extent()
2491 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_clear_delalloc_extent()
2492 u64 len = state->end + 1 - state->start; in btrfs_clear_delalloc_extent()
2495 if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) { in btrfs_clear_delalloc_extent()
2496 spin_lock(&inode->lock); in btrfs_clear_delalloc_extent()
2497 inode->defrag_bytes -= len; in btrfs_clear_delalloc_extent()
2498 spin_unlock(&inode->lock); in btrfs_clear_delalloc_extent()
2506 if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) { in btrfs_clear_delalloc_extent()
2507 struct btrfs_root *root = inode->root; in btrfs_clear_delalloc_extent()
2510 spin_lock(&inode->lock); in btrfs_clear_delalloc_extent()
2511 btrfs_mod_outstanding_extents(inode, -num_extents); in btrfs_clear_delalloc_extent()
2512 spin_unlock(&inode->lock); in btrfs_clear_delalloc_extent()
2520 root != fs_info->tree_root) in btrfs_clear_delalloc_extent()
2528 do_list && !(state->state & EXTENT_NORESERVE) && in btrfs_clear_delalloc_extent()
2532 percpu_counter_add_batch(&fs_info->delalloc_bytes, -len, in btrfs_clear_delalloc_extent()
2533 fs_info->delalloc_batch); in btrfs_clear_delalloc_extent()
2534 spin_lock(&inode->lock); in btrfs_clear_delalloc_extent()
2535 inode->delalloc_bytes -= len; in btrfs_clear_delalloc_extent()
2536 if (do_list && inode->delalloc_bytes == 0 && in btrfs_clear_delalloc_extent()
2538 &inode->runtime_flags)) in btrfs_clear_delalloc_extent()
2540 spin_unlock(&inode->lock); in btrfs_clear_delalloc_extent()
2543 if ((state->state & EXTENT_DELALLOC_NEW) && in btrfs_clear_delalloc_extent()
2545 spin_lock(&inode->lock); in btrfs_clear_delalloc_extent()
2546 ASSERT(inode->new_delalloc_bytes >= len); in btrfs_clear_delalloc_extent()
2547 inode->new_delalloc_bytes -= len; in btrfs_clear_delalloc_extent()
2549 inode_add_bytes(&inode->vfs_inode, len); in btrfs_clear_delalloc_extent()
2550 spin_unlock(&inode->lock); in btrfs_clear_delalloc_extent()
2557 u64 start = (u64)bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT; in btrfs_extract_ordered_extent()
2558 u64 len = bbio->bio.bi_iter.bi_size; in btrfs_extract_ordered_extent()
2563 if (WARN_ON_ONCE(start != ordered->disk_bytenr)) in btrfs_extract_ordered_extent()
2564 return -EINVAL; in btrfs_extract_ordered_extent()
2567 if (ordered->disk_num_bytes == len) { in btrfs_extract_ordered_extent()
2568 refcount_inc(&ordered->refs); in btrfs_extract_ordered_extent()
2569 bbio->ordered = ordered; in btrfs_extract_ordered_extent()
2575 * a pre-existing one. in btrfs_extract_ordered_extent()
2577 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) { in btrfs_extract_ordered_extent()
2578 ret = split_extent_map(bbio->inode, bbio->file_offset, in btrfs_extract_ordered_extent()
2579 ordered->num_bytes, len, in btrfs_extract_ordered_extent()
2580 ordered->disk_bytenr); in btrfs_extract_ordered_extent()
2588 bbio->ordered = new; in btrfs_extract_ordered_extent()
2604 trans->adding_csums = true; in add_pending_csums()
2606 csum_root = btrfs_csum_root(trans->fs_info, in add_pending_csums()
2607 sum->logical); in add_pending_csums()
2609 trans->adding_csums = false; in add_pending_csums()
2622 const u64 end = start + len - 1; in btrfs_find_new_delalloc_bytes()
2625 const u64 search_len = end - search_start + 1; in btrfs_find_new_delalloc_bytes()
2634 if (em->block_start != EXTENT_MAP_HOLE) in btrfs_find_new_delalloc_bytes()
2637 em_len = em->len; in btrfs_find_new_delalloc_bytes()
2638 if (em->start < search_start) in btrfs_find_new_delalloc_bytes()
2639 em_len -= search_start - em->start; in btrfs_find_new_delalloc_bytes()
2643 ret = set_extent_bit(&inode->io_tree, search_start, in btrfs_find_new_delalloc_bytes()
2644 search_start + em_len - 1, in btrfs_find_new_delalloc_bytes()
2661 if (start >= i_size_read(&inode->vfs_inode) && in btrfs_set_extent_delalloc()
2662 !(inode->flags & BTRFS_INODE_PREALLOC)) { in btrfs_set_extent_delalloc()
2672 end + 1 - start, in btrfs_set_extent_delalloc()
2678 return set_extent_bit(&inode->io_tree, start, end, in btrfs_set_extent_delalloc()
2696 struct page *page = fixup->page; in btrfs_writepage_fixup_worker()
2697 struct btrfs_inode *inode = fixup->inode; in btrfs_writepage_fixup_worker()
2698 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_writepage_fixup_worker()
2700 u64 page_end = page_offset(page) + PAGE_SIZE - 1; in btrfs_writepage_fixup_worker()
2715 * page->mapping may go NULL, but it shouldn't be moved to a different in btrfs_writepage_fixup_worker()
2718 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) { in btrfs_writepage_fixup_worker()
2753 lock_extent(&inode->io_tree, page_start, page_end, &cached_state); in btrfs_writepage_fixup_worker()
2761 unlock_extent(&inode->io_tree, page_start, page_end, in btrfs_writepage_fixup_worker()
2788 unlock_extent(&inode->io_tree, page_start, page_end, &cached_state); in btrfs_writepage_fixup_worker()
2795 mapping_set_error(page->mapping, ret); in btrfs_writepage_fixup_worker()
2826 struct inode *inode = page->mapping->host; in btrfs_writepage_cow_fixup()
2827 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_writepage_cow_fixup()
2842 return -EAGAIN; in btrfs_writepage_cow_fixup()
2846 return -EAGAIN; in btrfs_writepage_cow_fixup()
2852 * page->mapping outside of the page lock. in btrfs_writepage_cow_fixup()
2857 btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL); in btrfs_writepage_cow_fixup()
2858 fixup->page = page; in btrfs_writepage_cow_fixup()
2859 fixup->inode = BTRFS_I(inode); in btrfs_writepage_cow_fixup()
2860 btrfs_queue_work(fs_info->fixup_workers, &fixup->work); in btrfs_writepage_cow_fixup()
2862 return -EAGAIN; in btrfs_writepage_cow_fixup()
2871 struct btrfs_root *root = inode->root; in insert_reserved_file_extent()
2872 const u64 sectorsize = root->fs_info->sectorsize; in insert_reserved_file_extent()
2886 return -ENOMEM; in insert_reserved_file_extent()
2916 leaf = path->nodes[0]; in insert_reserved_file_extent()
2917 btrfs_set_stack_file_extent_generation(stack_fi, trans->transid); in insert_reserved_file_extent()
2919 btrfs_item_ptr_offset(leaf, path->slots[0]), in insert_reserved_file_extent()
2935 inline_size = drop_args.bytes_found - inline_size; in insert_reserved_file_extent()
2937 drop_args.bytes_found -= inline_size; in insert_reserved_file_extent()
2938 num_bytes -= sectorsize; in insert_reserved_file_extent()
2953 file_pos - offset, in insert_reserved_file_extent()
2969 spin_lock(&cache->lock); in btrfs_release_delalloc_bytes()
2970 cache->delalloc_bytes -= len; in btrfs_release_delalloc_bytes()
2971 spin_unlock(&cache->lock); in btrfs_release_delalloc_bytes()
2977 struct btrfs_ordered_extent *oe) in insert_ordered_extent_file_extent() argument
2981 u64 num_bytes = oe->num_bytes; in insert_ordered_extent_file_extent()
2982 u64 ram_bytes = oe->ram_bytes; in insert_ordered_extent_file_extent()
2986 btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr); in insert_ordered_extent_file_extent()
2988 oe->disk_num_bytes); in insert_ordered_extent_file_extent()
2989 btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset); in insert_ordered_extent_file_extent()
2990 if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags)) { in insert_ordered_extent_file_extent()
2991 num_bytes = oe->truncated_len; in insert_ordered_extent_file_extent()
2996 btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type); in insert_ordered_extent_file_extent()
3005 update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) || in insert_ordered_extent_file_extent()
3006 test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) || in insert_ordered_extent_file_extent()
3007 test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags); in insert_ordered_extent_file_extent()
3009 return insert_reserved_file_extent(trans, BTRFS_I(oe->inode), in insert_ordered_extent_file_extent()
3010 oe->file_offset, &stack_fi, in insert_ordered_extent_file_extent()
3011 update_inode_bytes, oe->qgroup_rsv); in insert_ordered_extent_file_extent()
3021 struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode); in btrfs_finish_one_ordered()
3022 struct btrfs_root *root = inode->root; in btrfs_finish_one_ordered()
3023 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_finish_one_ordered()
3025 struct extent_io_tree *io_tree = &inode->io_tree; in btrfs_finish_one_ordered()
3030 u64 logical_len = ordered_extent->num_bytes; in btrfs_finish_one_ordered()
3036 start = ordered_extent->file_offset; in btrfs_finish_one_ordered()
3037 end = start + ordered_extent->num_bytes - 1; in btrfs_finish_one_ordered()
3039 if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && in btrfs_finish_one_ordered()
3040 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) && in btrfs_finish_one_ordered()
3041 !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) && in btrfs_finish_one_ordered()
3042 !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags)) in btrfs_finish_one_ordered()
3049 if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { in btrfs_finish_one_ordered()
3050 ret = -EIO; in btrfs_finish_one_ordered()
3055 btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr, in btrfs_finish_one_ordered()
3056 ordered_extent->disk_num_bytes); in btrfs_finish_one_ordered()
3058 if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { in btrfs_finish_one_ordered()
3060 logical_len = ordered_extent->truncated_len; in btrfs_finish_one_ordered()
3066 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { in btrfs_finish_one_ordered()
3067 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */ in btrfs_finish_one_ordered()
3079 trans->block_rsv = &inode->block_rsv; in btrfs_finish_one_ordered()
3081 if (ret) /* -ENOMEM or corruption */ in btrfs_finish_one_ordered()
3099 trans->block_rsv = &inode->block_rsv; in btrfs_finish_one_ordered()
3101 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) in btrfs_finish_one_ordered()
3102 compress_type = ordered_extent->compress_type; in btrfs_finish_one_ordered()
3103 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { in btrfs_finish_one_ordered()
3106 ordered_extent->file_offset, in btrfs_finish_one_ordered()
3107 ordered_extent->file_offset + in btrfs_finish_one_ordered()
3109 btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr, in btrfs_finish_one_ordered()
3110 ordered_extent->disk_num_bytes); in btrfs_finish_one_ordered()
3112 BUG_ON(root == fs_info->tree_root); in btrfs_finish_one_ordered()
3117 ordered_extent->disk_bytenr, in btrfs_finish_one_ordered()
3118 ordered_extent->disk_num_bytes); in btrfs_finish_one_ordered()
3121 unpin_extent_cache(&inode->extent_tree, ordered_extent->file_offset, in btrfs_finish_one_ordered()
3122 ordered_extent->num_bytes, trans->transid); in btrfs_finish_one_ordered()
3128 ret = add_pending_csums(trans, &ordered_extent->list); in btrfs_finish_one_ordered()
3140 !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) in btrfs_finish_one_ordered()
3141 clear_extent_bit(&inode->io_tree, start, end, in btrfs_finish_one_ordered()
3147 if (ret) { /* -ENOMEM or corruption */ in btrfs_finish_one_ordered()
3153 clear_extent_bit(&inode->io_tree, start, end, clear_bits, in btrfs_finish_one_ordered()
3171 &ordered_extent->flags)) in btrfs_finish_one_ordered()
3172 mapping_set_error(ordered_extent->inode->i_mapping, -EIO); in btrfs_finish_one_ordered()
3185 * writepage where we do ASSERT(em->block_start != in btrfs_finish_one_ordered()
3208 !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) && in btrfs_finish_one_ordered()
3209 !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { in btrfs_finish_one_ordered()
3216 ordered_extent->disk_bytenr, in btrfs_finish_one_ordered()
3217 ordered_extent->disk_num_bytes, in btrfs_finish_one_ordered()
3220 ordered_extent->disk_bytenr, in btrfs_finish_one_ordered()
3221 ordered_extent->disk_num_bytes, 1); in btrfs_finish_one_ordered()
3226 btrfs_qgroup_free_refroot(fs_info, inode->root->root_key.objectid, in btrfs_finish_one_ordered()
3227 ordered_extent->qgroup_rsv, in btrfs_finish_one_ordered()
3248 if (btrfs_is_zoned(btrfs_sb(ordered->inode->i_sb)) && in btrfs_finish_ordered_io()
3249 !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) in btrfs_finish_ordered_io()
3255 * Verify the checksum for a single sector without any extra action that depends
3261 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); in btrfs_check_sector_csum()
3264 ASSERT(pgoff + fs_info->sectorsize <= PAGE_SIZE); in btrfs_check_sector_csum()
3266 shash->tfm = fs_info->csum_shash; in btrfs_check_sector_csum()
3269 crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum); in btrfs_check_sector_csum()
3272 if (memcmp(csum, csum_expected, fs_info->csum_size)) in btrfs_check_sector_csum()
3273 return -EIO; in btrfs_check_sector_csum()
3293 struct btrfs_inode *inode = bbio->inode; in btrfs_data_csum_ok()
3294 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_data_csum_ok()
3295 u64 file_offset = bbio->file_offset + bio_offset; in btrfs_data_csum_ok()
3296 u64 end = file_offset + bv->bv_len - 1; in btrfs_data_csum_ok()
3300 ASSERT(bv->bv_len == fs_info->sectorsize); in btrfs_data_csum_ok()
3302 if (!bbio->csum) in btrfs_data_csum_ok()
3305 if (btrfs_is_data_reloc_root(inode->root) && in btrfs_data_csum_ok()
3306 test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM, in btrfs_data_csum_ok()
3309 clear_extent_bits(&inode->io_tree, file_offset, end, in btrfs_data_csum_ok()
3314 csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) * in btrfs_data_csum_ok()
3315 fs_info->csum_size; in btrfs_data_csum_ok()
3316 if (btrfs_check_sector_csum(fs_info, bv->bv_page, bv->bv_offset, csum, in btrfs_data_csum_ok()
3323 bbio->mirror_num); in btrfs_data_csum_ok()
3331 * btrfs_add_delayed_iput - perform a delayed iput on @inode
3342 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_add_delayed_iput()
3345 if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1)) in btrfs_add_delayed_iput()
3348 atomic_inc(&fs_info->nr_delayed_iputs); in btrfs_add_delayed_iput()
3351 * context (see bio.c and btrfs_put_ordered_extent()) or a non-irq in btrfs_add_delayed_iput()
3354 spin_lock_irqsave(&fs_info->delayed_iput_lock, flags); in btrfs_add_delayed_iput()
3355 ASSERT(list_empty(&inode->delayed_iput)); in btrfs_add_delayed_iput()
3356 list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs); in btrfs_add_delayed_iput()
3357 spin_unlock_irqrestore(&fs_info->delayed_iput_lock, flags); in btrfs_add_delayed_iput()
3358 if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags)) in btrfs_add_delayed_iput()
3359 wake_up_process(fs_info->cleaner_kthread); in btrfs_add_delayed_iput()
3365 list_del_init(&inode->delayed_iput); in run_delayed_iput_locked()
3366 spin_unlock_irq(&fs_info->delayed_iput_lock); in run_delayed_iput_locked()
3367 iput(&inode->vfs_inode); in run_delayed_iput_locked()
3368 if (atomic_dec_and_test(&fs_info->nr_delayed_iputs)) in run_delayed_iput_locked()
3369 wake_up(&fs_info->delayed_iputs_wait); in run_delayed_iput_locked()
3370 spin_lock_irq(&fs_info->delayed_iput_lock); in run_delayed_iput_locked()
3376 if (!list_empty(&inode->delayed_iput)) { in btrfs_run_delayed_iput()
3377 spin_lock_irq(&fs_info->delayed_iput_lock); in btrfs_run_delayed_iput()
3378 if (!list_empty(&inode->delayed_iput)) in btrfs_run_delayed_iput()
3380 spin_unlock_irq(&fs_info->delayed_iput_lock); in btrfs_run_delayed_iput()
3389 * fs_info->delayed_iput_lock. So we need to disable irqs here to in btrfs_run_delayed_iputs()
3392 spin_lock_irq(&fs_info->delayed_iput_lock); in btrfs_run_delayed_iputs()
3393 while (!list_empty(&fs_info->delayed_iputs)) { in btrfs_run_delayed_iputs()
3396 inode = list_first_entry(&fs_info->delayed_iputs, in btrfs_run_delayed_iputs()
3400 spin_unlock_irq(&fs_info->delayed_iput_lock); in btrfs_run_delayed_iputs()
3402 spin_lock_irq(&fs_info->delayed_iput_lock); in btrfs_run_delayed_iputs()
3405 spin_unlock_irq(&fs_info->delayed_iput_lock); in btrfs_run_delayed_iputs()
3422 int ret = wait_event_killable(fs_info->delayed_iputs_wait, in btrfs_wait_on_delayed_iputs()
3423 atomic_read(&fs_info->nr_delayed_iputs) == 0); in btrfs_wait_on_delayed_iputs()
3425 return -EINTR; in btrfs_wait_on_delayed_iputs()
3438 ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode)); in btrfs_orphan_add()
3439 if (ret && ret != -EEXIST) { in btrfs_orphan_add()
3454 return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode)); in btrfs_orphan_del()
3463 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_orphan_cleanup()
3472 if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state)) in btrfs_orphan_cleanup()
3477 ret = -ENOMEM; in btrfs_orphan_cleanup()
3480 path->reada = READA_BACK; in btrfs_orphan_cleanup()
3484 key.offset = (u64)-1; in btrfs_orphan_cleanup()
3498 if (path->slots[0] == 0) in btrfs_orphan_cleanup()
3500 path->slots[0]--; in btrfs_orphan_cleanup()
3504 leaf = path->nodes[0]; in btrfs_orphan_cleanup()
3505 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in btrfs_orphan_cleanup()
3527 * due to -ENOSPC for example, so try to grab the error in btrfs_orphan_cleanup()
3532 ret = BTRFS_FS_ERROR(fs_info) ?: -EINVAL; in btrfs_orphan_cleanup()
3541 inode = btrfs_iget(fs_info->sb, last_objectid, root); in btrfs_orphan_cleanup()
3545 if (ret != -ENOENT) in btrfs_orphan_cleanup()
3549 if (!inode && root == fs_info->tree_root) { in btrfs_orphan_cleanup()
3564 * fs_info->fs_roots_radix. So here we can find if an in btrfs_orphan_cleanup()
3569 spin_lock(&fs_info->fs_roots_radix_lock); in btrfs_orphan_cleanup()
3570 dead_root = radix_tree_lookup(&fs_info->fs_roots_radix, in btrfs_orphan_cleanup()
3572 if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0) in btrfs_orphan_cleanup()
3574 spin_unlock(&fs_info->fs_roots_radix_lock); in btrfs_orphan_cleanup()
3578 key.offset = found_key.objectid - 1; in btrfs_orphan_cleanup()
3601 * only if this filesystem was last used on a pre-v3.12 kernel in btrfs_orphan_cleanup()
3610 if (!inode || inode->i_nlink) { in btrfs_orphan_cleanup()
3641 if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) { in btrfs_orphan_cleanup()
3681 *first_xattr_slot = -1; in acls_after_inode_item()
3691 if (*first_xattr_slot == -1) in acls_after_inode_item()
3721 if (*first_xattr_slot == -1) in acls_after_inode_item()
3727 * read an inode from the btree into the in-memory inode
3732 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_read_locked_inode()
3736 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_read_locked_inode()
3752 return -ENOMEM; in btrfs_read_locked_inode()
3755 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); in btrfs_read_locked_inode()
3764 leaf = path->nodes[0]; in btrfs_read_locked_inode()
3769 inode_item = btrfs_item_ptr(leaf, path->slots[0], in btrfs_read_locked_inode()
3771 inode->i_mode = btrfs_inode_mode(leaf, inode_item); in btrfs_read_locked_inode()
3777 round_up(i_size_read(inode), fs_info->sectorsize)); in btrfs_read_locked_inode()
3779 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime); in btrfs_read_locked_inode()
3780 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime); in btrfs_read_locked_inode()
3782 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime); in btrfs_read_locked_inode()
3783 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime); in btrfs_read_locked_inode()
3785 inode_set_ctime(inode, btrfs_timespec_sec(leaf, &inode_item->ctime), in btrfs_read_locked_inode()
3786 btrfs_timespec_nsec(leaf, &inode_item->ctime)); in btrfs_read_locked_inode()
3788 BTRFS_I(inode)->i_otime.tv_sec = in btrfs_read_locked_inode()
3789 btrfs_timespec_sec(leaf, &inode_item->otime); in btrfs_read_locked_inode()
3790 BTRFS_I(inode)->i_otime.tv_nsec = in btrfs_read_locked_inode()
3791 btrfs_timespec_nsec(leaf, &inode_item->otime); in btrfs_read_locked_inode()
3794 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); in btrfs_read_locked_inode()
3795 BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); in btrfs_read_locked_inode()
3799 inode->i_generation = BTRFS_I(inode)->generation; in btrfs_read_locked_inode()
3800 inode->i_rdev = 0; in btrfs_read_locked_inode()
3803 BTRFS_I(inode)->index_cnt = (u64)-1; in btrfs_read_locked_inode()
3805 &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags); in btrfs_read_locked_inode()
3810 * and then re-read we need to do a full sync since we don't have any in btrfs_read_locked_inode()
3814 * This is required for both inode re-read from disk and delayed inode in btrfs_read_locked_inode()
3817 if (BTRFS_I(inode)->last_trans == fs_info->generation) in btrfs_read_locked_inode()
3819 &BTRFS_I(inode)->runtime_flags); in btrfs_read_locked_inode()
3834 * xfs_io -c fsync mydir/foo in btrfs_read_locked_inode()
3848 BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans; in btrfs_read_locked_inode()
3856 BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans; in btrfs_read_locked_inode()
3858 path->slots[0]++; in btrfs_read_locked_inode()
3859 if (inode->i_nlink != 1 || in btrfs_read_locked_inode()
3860 path->slots[0] >= btrfs_header_nritems(leaf)) in btrfs_read_locked_inode()
3863 btrfs_item_key_to_cpu(leaf, &location, path->slots[0]); in btrfs_read_locked_inode()
3867 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); in btrfs_read_locked_inode()
3872 BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref); in btrfs_read_locked_inode()
3877 BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf, in btrfs_read_locked_inode()
3885 maybe_acls = acls_after_inode_item(leaf, path->slots[0], in btrfs_read_locked_inode()
3887 if (first_xattr_slot != -1) { in btrfs_read_locked_inode()
3888 path->slots[0] = first_xattr_slot; in btrfs_read_locked_inode()
3894 root->root_key.objectid, ret); in btrfs_read_locked_inode()
3902 switch (inode->i_mode & S_IFMT) { in btrfs_read_locked_inode()
3904 inode->i_mapping->a_ops = &btrfs_aops; in btrfs_read_locked_inode()
3905 inode->i_fop = &btrfs_file_operations; in btrfs_read_locked_inode()
3906 inode->i_op = &btrfs_file_inode_operations; in btrfs_read_locked_inode()
3909 inode->i_fop = &btrfs_dir_file_operations; in btrfs_read_locked_inode()
3910 inode->i_op = &btrfs_dir_inode_operations; in btrfs_read_locked_inode()
3913 inode->i_op = &btrfs_symlink_inode_operations; in btrfs_read_locked_inode()
3915 inode->i_mapping->a_ops = &btrfs_aops; in btrfs_read_locked_inode()
3918 inode->i_op = &btrfs_special_inode_operations; in btrfs_read_locked_inode()
3919 init_special_inode(inode, inode->i_mode, rdev); in btrfs_read_locked_inode()
3942 btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size); in fill_inode_item()
3943 btrfs_set_token_inode_mode(&token, item, inode->i_mode); in fill_inode_item()
3944 btrfs_set_token_inode_nlink(&token, item, inode->i_nlink); in fill_inode_item()
3946 btrfs_set_token_timespec_sec(&token, &item->atime, in fill_inode_item()
3947 inode->i_atime.tv_sec); in fill_inode_item()
3948 btrfs_set_token_timespec_nsec(&token, &item->atime, in fill_inode_item()
3949 inode->i_atime.tv_nsec); in fill_inode_item()
3951 btrfs_set_token_timespec_sec(&token, &item->mtime, in fill_inode_item()
3952 inode->i_mtime.tv_sec); in fill_inode_item()
3953 btrfs_set_token_timespec_nsec(&token, &item->mtime, in fill_inode_item()
3954 inode->i_mtime.tv_nsec); in fill_inode_item()
3956 btrfs_set_token_timespec_sec(&token, &item->ctime, in fill_inode_item()
3958 btrfs_set_token_timespec_nsec(&token, &item->ctime, in fill_inode_item()
3961 btrfs_set_token_timespec_sec(&token, &item->otime, in fill_inode_item()
3962 BTRFS_I(inode)->i_otime.tv_sec); in fill_inode_item()
3963 btrfs_set_token_timespec_nsec(&token, &item->otime, in fill_inode_item()
3964 BTRFS_I(inode)->i_otime.tv_nsec); in fill_inode_item()
3968 BTRFS_I(inode)->generation); in fill_inode_item()
3970 btrfs_set_token_inode_transid(&token, item, trans->transid); in fill_inode_item()
3971 btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); in fill_inode_item()
3972 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags, in fill_inode_item()
3973 BTRFS_I(inode)->ro_flags); in fill_inode_item()
3979 * copy everything in the in-memory inode into the btree.
3992 return -ENOMEM; in btrfs_update_inode_item()
3994 ret = btrfs_lookup_inode(trans, root, path, &inode->location, 1); in btrfs_update_inode_item()
3997 ret = -ENOENT; in btrfs_update_inode_item()
4001 leaf = path->nodes[0]; in btrfs_update_inode_item()
4002 inode_item = btrfs_item_ptr(leaf, path->slots[0], in btrfs_update_inode_item()
4005 fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode); in btrfs_update_inode_item()
4015 * copy everything in the in-memory inode into the btree.
4021 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_update_inode()
4029 * without delay in btrfs_update_inode()
4033 && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { in btrfs_update_inode()
4050 ret = btrfs_update_inode(trans, inode->root, inode); in btrfs_update_inode_fallback()
4051 if (ret == -ENOSPC) in btrfs_update_inode_fallback()
4052 return btrfs_update_inode_item(trans, inode->root, inode); in btrfs_update_inode_fallback()
4067 struct btrfs_root *root = dir->root; in __btrfs_unlink_inode()
4068 struct btrfs_fs_info *fs_info = root->fs_info; in __btrfs_unlink_inode()
4078 ret = -ENOMEM; in __btrfs_unlink_inode()
4082 di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1); in __btrfs_unlink_inode()
4084 ret = di ? PTR_ERR(di) : -ENOENT; in __btrfs_unlink_inode()
4099 * that we delay to delete it, and just do this deletion when in __btrfs_unlink_inode()
4102 if (inode->dir_index) { in __btrfs_unlink_inode()
4105 index = inode->dir_index; in __btrfs_unlink_inode()
4114 name->len, name->name, ino, dir_ino); in __btrfs_unlink_inode()
4120 rename_ctx->index = index; in __btrfs_unlink_inode()
4131 * Besides that, doing it here would only cause extra unnecessary btree in __btrfs_unlink_inode()
4141 * being run in btrfs-cleaner context. If we have enough of these built in __btrfs_unlink_inode()
4142 * up we can end up burning a lot of time in btrfs-cleaner without any in __btrfs_unlink_inode()
4154 btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2); in __btrfs_unlink_inode()
4155 inode_inc_iversion(&inode->vfs_inode); in __btrfs_unlink_inode()
4156 inode_set_ctime_current(&inode->vfs_inode); in __btrfs_unlink_inode()
4157 inode_inc_iversion(&dir->vfs_inode); in __btrfs_unlink_inode()
4158 inode_set_ctime_current(&inode->vfs_inode); in __btrfs_unlink_inode()
4159 dir->vfs_inode.i_mtime = inode_set_ctime_current(&dir->vfs_inode); in __btrfs_unlink_inode()
4173 drop_nlink(&inode->vfs_inode); in btrfs_unlink_inode()
4174 ret = btrfs_update_inode(trans, inode->root, inode); in btrfs_unlink_inode()
4189 struct btrfs_root *root = dir->root; in __unlink_start_trans()
4202 ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); in btrfs_unlink()
4206 /* This needs to handle no-key deletions later on */ in btrfs_unlink()
4222 if (inode->i_nlink == 0) { in btrfs_unlink()
4230 btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info); in btrfs_unlink()
4239 struct btrfs_root *root = dir->root; in btrfs_unlink_subvol()
4251 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname); in btrfs_unlink_subvol()
4255 /* This needs to handle no-key deletions later on */ in btrfs_unlink_subvol()
4258 objectid = inode->root->root_key.objectid; in btrfs_unlink_subvol()
4260 objectid = inode->location.objectid; in btrfs_unlink_subvol()
4264 return -EINVAL; in btrfs_unlink_subvol()
4269 ret = -ENOMEM; in btrfs_unlink_subvol()
4274 &fname.disk_name, -1); in btrfs_unlink_subvol()
4276 ret = di ? PTR_ERR(di) : -ENOENT; in btrfs_unlink_subvol()
4280 leaf = path->nodes[0]; in btrfs_unlink_subvol()
4294 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect. in btrfs_unlink_subvol()
4307 leaf = path->nodes[0]; in btrfs_unlink_subvol()
4308 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in btrfs_unlink_subvol()
4313 root->root_key.objectid, dir_ino, in btrfs_unlink_subvol()
4327 btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2); in btrfs_unlink_subvol()
4328 inode_inc_iversion(&dir->vfs_inode); in btrfs_unlink_subvol()
4329 dir->vfs_inode.i_mtime = inode_set_ctime_current(&dir->vfs_inode); in btrfs_unlink_subvol()
4345 struct btrfs_fs_info *fs_info = root->fs_info; in may_destroy_subvol()
4355 return -ENOMEM; in may_destroy_subvol()
4358 dir_id = btrfs_super_root_dir(fs_info->super_copy); in may_destroy_subvol()
4359 di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path, in may_destroy_subvol()
4362 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); in may_destroy_subvol()
4363 if (key.objectid == root->root_key.objectid) { in may_destroy_subvol()
4364 ret = -EPERM; in may_destroy_subvol()
4373 key.objectid = root->root_key.objectid; in may_destroy_subvol()
4375 key.offset = (u64)-1; in may_destroy_subvol()
4377 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); in may_destroy_subvol()
4382 * Key with offset -1 found, there would have to exist a root in may_destroy_subvol()
4385 ret = -EUCLEAN; in may_destroy_subvol()
4390 if (path->slots[0] > 0) { in may_destroy_subvol()
4391 path->slots[0]--; in may_destroy_subvol()
4392 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); in may_destroy_subvol()
4393 if (key.objectid == root->root_key.objectid && in may_destroy_subvol()
4395 ret = -ENOTEMPTY; in may_destroy_subvol()
4405 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_prune_dentries()
4413 WARN_ON(btrfs_root_refs(&root->root_item) != 0); in btrfs_prune_dentries()
4415 spin_lock(&root->inode_lock); in btrfs_prune_dentries()
4417 node = root->inode_tree.rb_node; in btrfs_prune_dentries()
4424 node = node->rb_left; in btrfs_prune_dentries()
4426 node = node->rb_right; in btrfs_prune_dentries()
4443 inode = igrab(&entry->vfs_inode); in btrfs_prune_dentries()
4445 spin_unlock(&root->inode_lock); in btrfs_prune_dentries()
4446 if (atomic_read(&inode->i_count) > 1) in btrfs_prune_dentries()
4454 spin_lock(&root->inode_lock); in btrfs_prune_dentries()
4458 if (cond_resched_lock(&root->inode_lock)) in btrfs_prune_dentries()
4463 spin_unlock(&root->inode_lock); in btrfs_prune_dentries()
4468 struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb); in btrfs_delete_subvolume()
4469 struct btrfs_root *root = dir->root; in btrfs_delete_subvolume()
4471 struct btrfs_root *dest = BTRFS_I(inode)->root; in btrfs_delete_subvolume()
4478 down_write(&fs_info->subvol_sem); in btrfs_delete_subvolume()
4485 spin_lock(&dest->root_item_lock); in btrfs_delete_subvolume()
4486 if (dest->send_in_progress) { in btrfs_delete_subvolume()
4487 spin_unlock(&dest->root_item_lock); in btrfs_delete_subvolume()
4490 dest->root_key.objectid); in btrfs_delete_subvolume()
4491 ret = -EPERM; in btrfs_delete_subvolume()
4494 if (atomic_read(&dest->nr_swapfiles)) { in btrfs_delete_subvolume()
4495 spin_unlock(&dest->root_item_lock); in btrfs_delete_subvolume()
4498 root->root_key.objectid); in btrfs_delete_subvolume()
4499 ret = -EPERM; in btrfs_delete_subvolume()
4502 root_flags = btrfs_root_flags(&dest->root_item); in btrfs_delete_subvolume()
4503 btrfs_set_root_flags(&dest->root_item, in btrfs_delete_subvolume()
4505 spin_unlock(&dest->root_item_lock); in btrfs_delete_subvolume()
4534 trans->block_rsv = &block_rsv; in btrfs_delete_subvolume()
4535 trans->bytes_reserved = block_rsv.size; in btrfs_delete_subvolume()
4551 memset(&dest->root_item.drop_progress, 0, in btrfs_delete_subvolume()
4552 sizeof(dest->root_item.drop_progress)); in btrfs_delete_subvolume()
4553 btrfs_set_root_drop_level(&dest->root_item, 0); in btrfs_delete_subvolume()
4554 btrfs_set_root_refs(&dest->root_item, 0); in btrfs_delete_subvolume()
4556 if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) { in btrfs_delete_subvolume()
4558 fs_info->tree_root, in btrfs_delete_subvolume()
4559 dest->root_key.objectid); in btrfs_delete_subvolume()
4566 ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid, in btrfs_delete_subvolume()
4568 dest->root_key.objectid); in btrfs_delete_subvolume()
4569 if (ret && ret != -ENOENT) { in btrfs_delete_subvolume()
4573 if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) { in btrfs_delete_subvolume()
4575 dest->root_item.received_uuid, in btrfs_delete_subvolume()
4577 dest->root_key.objectid); in btrfs_delete_subvolume()
4578 if (ret && ret != -ENOENT) { in btrfs_delete_subvolume()
4584 free_anon_bdev(dest->anon_dev); in btrfs_delete_subvolume()
4585 dest->anon_dev = 0; in btrfs_delete_subvolume()
4587 trans->block_rsv = NULL; in btrfs_delete_subvolume()
4588 trans->bytes_reserved = 0; in btrfs_delete_subvolume()
4590 inode->i_flags |= S_DEAD; in btrfs_delete_subvolume()
4592 btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL); in btrfs_delete_subvolume()
4597 spin_lock(&dest->root_item_lock); in btrfs_delete_subvolume()
4598 root_flags = btrfs_root_flags(&dest->root_item); in btrfs_delete_subvolume()
4599 btrfs_set_root_flags(&dest->root_item, in btrfs_delete_subvolume()
4601 spin_unlock(&dest->root_item_lock); in btrfs_delete_subvolume()
4604 up_write(&fs_info->subvol_sem); in btrfs_delete_subvolume()
4608 ASSERT(dest->send_in_progress == 0); in btrfs_delete_subvolume()
4617 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; in btrfs_rmdir()
4622 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) in btrfs_rmdir()
4623 return -ENOTEMPTY; in btrfs_rmdir()
4628 return -EOPNOTSUPP; in btrfs_rmdir()
4633 ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname); in btrfs_rmdir()
4637 /* This needs to handle no-key deletions later on */ in btrfs_rmdir()
4659 if (BTRFS_I(inode)->last_unlink_trans >= trans->transid) in btrfs_rmdir()
4686 * btrfs_truncate_block - read, zero a chunk and write a block
4687 * @inode - inode that we're zeroing
4688 * @from - the offset to start zeroing
4689 * @len - the length to zero, 0 to zero the entire range respective to the offset in btrfs_truncate_block()
4691 * @front - zero up to the offset instead of from the offset on
4699 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_truncate_block()
4700 struct address_space *mapping = inode->vfs_inode.i_mapping; in btrfs_truncate_block()
4701 struct extent_io_tree *io_tree = &inode->io_tree; in btrfs_truncate_block()
4706 u32 blocksize = fs_info->sectorsize; in btrfs_truncate_block()
4708 unsigned offset = from & (blocksize - 1); in btrfs_truncate_block()
4721 block_end = block_start + blocksize - 1; in btrfs_truncate_block()
4746 ret = -ENOMEM; in btrfs_truncate_block()
4753 if (page->mapping != mapping) { in btrfs_truncate_block()
4759 ret = -EIO; in btrfs_truncate_block()
4765 * We unlock the page after the io is completed and then re-lock it in btrfs_truncate_block()
4788 clear_extent_bit(&inode->io_tree, block_start, block_end, in btrfs_truncate_block()
4801 len = blocksize - offset; in btrfs_truncate_block()
4803 memzero_page(page, (block_start - page_offset(page)), in btrfs_truncate_block()
4806 memzero_page(page, (block_start - page_offset(page)) + offset, in btrfs_truncate_block()
4810 block_end + 1 - block_start); in btrfs_truncate_block()
4811 btrfs_page_set_dirty(fs_info, page, block_start, block_end + 1 - block_start); in btrfs_truncate_block()
4815 set_extent_bit(&inode->io_tree, block_start, block_end, in btrfs_truncate_block()
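/*
 * Illustrative user-space sketch (not part of inode.c) of the zeroing math in
 * btrfs_truncate_block() above: @from picks one fs block (blocksize is a power
 * of two), and either the head of that block (up to @from, when @front is set)
 * or the tail (from @from onwards) is zeroed.  The len == 0 default follows the
 * "@len ... 0 to zero the entire range" doc line; block_start being
 * round_down(from, blocksize) is an assumption consistent with block_end above.
 */
#include <stdint.h>

static void truncate_block_zero_range(uint64_t from, uint32_t blocksize,
				      uint64_t len, int front,
				      uint64_t *zero_start, uint64_t *zero_len)
{
	uint64_t offset = from & (blocksize - 1);	/* byte offset inside the block */
	uint64_t block_start = from - offset;		/* round_down(from, blocksize) */

	if (len == 0)
		len = blocksize - offset;		/* default: to the end of the block */

	if (front) {					/* zero [block_start, from) */
		*zero_start = block_start;
		*zero_len = offset;
	} else {					/* zero [from, from + len) */
		*zero_start = from;
		*zero_len = len;
	}
}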
4839 struct btrfs_fs_info *fs_info = root->fs_info; in maybe_insert_hole()
4854 * 1 - for the one we're dropping in maybe_insert_hole()
4855 * 1 - for the one we're adding in maybe_insert_hole()
4856 * 1 - for updating the inode. in maybe_insert_hole()
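/*
 * Illustrative note (not part of inode.c): the three "1 - ..." lines above add
 * up to the item count that maybe_insert_hole() presumably passes when it
 * starts its transaction, along the lines of
 *
 *	trans = btrfs_start_transaction(root, 3);
 *
 * one reservation unit each for dropping the old file extent item, inserting
 * the hole extent item and updating the inode item.
 */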
4892 struct btrfs_root *root = inode->root; in btrfs_cont_expand()
4893 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_cont_expand()
4894 struct extent_io_tree *io_tree = &inode->io_tree; in btrfs_cont_expand()
4897 u64 hole_start = ALIGN(oldsize, fs_info->sectorsize); in btrfs_cont_expand()
4898 u64 block_end = ALIGN(size, fs_info->sectorsize); in btrfs_cont_expand()
4916 btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1, in btrfs_cont_expand()
4921 block_end - cur_offset); in btrfs_cont_expand()
4928 last_byte = ALIGN(last_byte, fs_info->sectorsize); in btrfs_cont_expand()
4929 hole_size = last_byte - cur_offset; in btrfs_cont_expand()
4931 if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { in btrfs_cont_expand()
4947 cur_offset + hole_size - 1, in btrfs_cont_expand()
4952 hole_em->start = cur_offset; in btrfs_cont_expand()
4953 hole_em->len = hole_size; in btrfs_cont_expand()
4954 hole_em->orig_start = cur_offset; in btrfs_cont_expand()
4956 hole_em->block_start = EXTENT_MAP_HOLE; in btrfs_cont_expand()
4957 hole_em->block_len = 0; in btrfs_cont_expand()
4958 hole_em->orig_block_len = 0; in btrfs_cont_expand()
4959 hole_em->ram_bytes = hole_size; in btrfs_cont_expand()
4960 hole_em->compress_type = BTRFS_COMPRESS_NONE; in btrfs_cont_expand()
4961 hole_em->generation = fs_info->generation; in btrfs_cont_expand()
4979 unlock_extent(io_tree, hole_start, block_end - 1, &cached_state); in btrfs_cont_expand()
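/*
 * Illustrative user-space sketch (not part of inode.c): when i_size grows, the
 * range btrfs_cont_expand() has to back with hole extents is the sector-aligned
 * span between the old and the new size, matching the ALIGN() calls above.
 * Worked example: oldsize = 10000, size = 20000, sectorsize = 4096 gives the
 * hole range [12288, 20480).
 */
#include <stdint.h>

static uint64_t align_up(uint64_t x, uint32_t a)	/* a must be a power of two */
{
	return (x + a - 1) & ~((uint64_t)a - 1);
}

static void cont_expand_hole(uint64_t oldsize, uint64_t size, uint32_t sectorsize,
			     uint64_t *hole_start, uint64_t *block_end)
{
	*hole_start = align_up(oldsize, sectorsize);	/* first byte that may need a hole */
	*block_end = align_up(size, sectorsize);	/* end of the range to cover */
}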
4985 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_setsize()
4988 loff_t newsize = attr->ia_size; in btrfs_setsize()
4989 int mask = attr->ia_valid; in btrfs_setsize()
5001 inode->i_mtime = inode_set_ctime_current(inode); in btrfs_setsize()
5009 * state of this file - if the snapshot captures this expanding in btrfs_setsize()
5013 btrfs_drew_write_lock(&root->snapshot_lock); in btrfs_setsize()
5016 btrfs_drew_write_unlock(&root->snapshot_lock); in btrfs_setsize()
5022 btrfs_drew_write_unlock(&root->snapshot_lock); in btrfs_setsize()
5030 btrfs_drew_write_unlock(&root->snapshot_lock); in btrfs_setsize()
5033 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_setsize()
5037 ALIGN(newsize, fs_info->sectorsize), in btrfs_setsize()
5038 (u64)-1); in btrfs_setsize()
5050 &BTRFS_I(inode)->runtime_flags); in btrfs_setsize()
5057 if (ret && inode->i_nlink) { in btrfs_setsize()
5061 * Truncate failed, so fix up the in-memory size. We in btrfs_setsize()
5064 * in-memory size to match. in btrfs_setsize()
5066 err = btrfs_wait_ordered_range(inode, 0, (u64)-1); in btrfs_setsize()
5069 i_size_write(inode, BTRFS_I(inode)->disk_i_size); in btrfs_setsize()
5080 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_setattr()
5084 return -EROFS; in btrfs_setattr()
5090 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) { in btrfs_setattr()
5096 if (attr->ia_valid) { in btrfs_setattr()
5101 if (!err && attr->ia_valid & ATTR_MODE) in btrfs_setattr()
5102 err = posix_acl_chmod(idmap, dentry, inode->i_mode); in btrfs_setattr()
5123 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in evict_inode_truncate_pages()
5126 ASSERT(inode->i_state & I_FREEING); in evict_inode_truncate_pages()
5127 truncate_inode_pages_final(&inode->i_data); in evict_inode_truncate_pages()
5129 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false); in evict_inode_truncate_pages()
5139 * queue kthread), inode references (inode->i_count) were not taken in evict_inode_truncate_pages()
5143 * reference count - if we don't do it, when they access the inode's in evict_inode_truncate_pages()
5145 * use-after-free issue. in evict_inode_truncate_pages()
5147 spin_lock(&io_tree->lock); in evict_inode_truncate_pages()
5148 while (!RB_EMPTY_ROOT(&io_tree->state)) { in evict_inode_truncate_pages()
5155 node = rb_first(&io_tree->state); in evict_inode_truncate_pages()
5157 start = state->start; in evict_inode_truncate_pages()
5158 end = state->end; in evict_inode_truncate_pages()
5159 state_flags = state->state; in evict_inode_truncate_pages()
5160 spin_unlock(&io_tree->lock); in evict_inode_truncate_pages()
5174 end - start + 1, NULL); in evict_inode_truncate_pages()
5181 spin_lock(&io_tree->lock); in evict_inode_truncate_pages()
5183 spin_unlock(&io_tree->lock); in evict_inode_truncate_pages()
5189 struct btrfs_fs_info *fs_info = root->fs_info; in evict_refill_and_join()
5201 * above. We reserve our extra bit here because we generate a ton of in evict_refill_and_join()
5205 * if we fail to make this reservation we can re-try without the in evict_refill_and_join()
5208 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra, in evict_refill_and_join()
5211 ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size, in evict_refill_and_join()
5216 return ERR_PTR(-ENOSPC); in evict_refill_and_join()
5226 trans->block_rsv = &fs_info->trans_block_rsv; in evict_refill_and_join()
5227 trans->bytes_reserved = delayed_refs_extra; in evict_refill_and_join()
5228 btrfs_block_rsv_migrate(rsv, trans->block_rsv, in evict_refill_and_join()
5236 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_evict_inode()
5238 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_evict_inode()
5252 if (inode->i_nlink && in btrfs_evict_inode()
5253 ((btrfs_root_refs(&root->root_item) != 0 && in btrfs_evict_inode()
5254 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) || in btrfs_evict_inode()
5261 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) in btrfs_evict_inode()
5264 if (inode->i_nlink > 0) { in btrfs_evict_inode()
5265 BUG_ON(btrfs_root_refs(&root->root_item) != 0 && in btrfs_evict_inode()
5266 root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID); in btrfs_evict_inode()
5289 rsv->size = btrfs_calc_metadata_size(fs_info, 1); in btrfs_evict_inode()
5290 rsv->failfast = true; in btrfs_evict_inode()
5306 trans->block_rsv = rsv; in btrfs_evict_inode()
5309 trans->block_rsv = &fs_info->trans_block_rsv; in btrfs_evict_inode()
5317 if (ret && ret != -ENOSPC && ret != -EAGAIN) in btrfs_evict_inode()
5334 trans->block_rsv = rsv; in btrfs_evict_inode()
5336 trans->block_rsv = &fs_info->trans_block_rsv; in btrfs_evict_inode()
5356 * If no dir entries were found, returns -ENOENT.
5357 * If found a corrupted location in dir entry, returns -EUCLEAN.
5364 struct btrfs_root *root = dir->root; in btrfs_inode_by_name()
5370 return -ENOMEM; in btrfs_inode_by_name()
5372 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname); in btrfs_inode_by_name()
5381 /* This needs to handle no-key deletions later on */ in btrfs_inode_by_name()
5386 ret = di ? PTR_ERR(di) : -ENOENT; in btrfs_inode_by_name()
5390 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); in btrfs_inode_by_name()
5391 if (location->type != BTRFS_INODE_ITEM_KEY && in btrfs_inode_by_name()
5392 location->type != BTRFS_ROOT_ITEM_KEY) { in btrfs_inode_by_name()
5393 ret = -EUCLEAN; in btrfs_inode_by_name()
5394 btrfs_warn(root->fs_info, in btrfs_inode_by_name()
5397 location->objectid, location->type, location->offset); in btrfs_inode_by_name()
5400 *type = btrfs_dir_ftype(path->nodes[0], di); in btrfs_inode_by_name()
5427 ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname); in fixup_tree_root_location()
5433 err = -ENOMEM; in fixup_tree_root_location()
5437 err = -ENOENT; in fixup_tree_root_location()
5438 key.objectid = dir->root->root_key.objectid; in fixup_tree_root_location()
5440 key.offset = location->objectid; in fixup_tree_root_location()
5442 ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); in fixup_tree_root_location()
5449 leaf = path->nodes[0]; in fixup_tree_root_location()
5450 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); in fixup_tree_root_location()
5462 new_root = btrfs_get_fs_root(fs_info, location->objectid, true); in fixup_tree_root_location()
5469 location->objectid = btrfs_root_dirid(&new_root->root_item); in fixup_tree_root_location()
5470 location->type = BTRFS_INODE_ITEM_KEY; in fixup_tree_root_location()
5471 location->offset = 0; in fixup_tree_root_location()
5481 struct btrfs_root *root = inode->root; in inode_tree_add()
5485 struct rb_node *new = &inode->rb_node; in inode_tree_add()
5488 if (inode_unhashed(&inode->vfs_inode)) in inode_tree_add()
5491 spin_lock(&root->inode_lock); in inode_tree_add()
5492 p = &root->inode_tree.rb_node; in inode_tree_add()
5498 p = &parent->rb_left; in inode_tree_add()
5500 p = &parent->rb_right; in inode_tree_add()
5502 WARN_ON(!(entry->vfs_inode.i_state & in inode_tree_add()
5504 rb_replace_node(parent, new, &root->inode_tree); in inode_tree_add()
5506 spin_unlock(&root->inode_lock); in inode_tree_add()
5511 rb_insert_color(new, &root->inode_tree); in inode_tree_add()
5512 spin_unlock(&root->inode_lock); in inode_tree_add()
5517 struct btrfs_root *root = inode->root; in inode_tree_del()
5520 spin_lock(&root->inode_lock); in inode_tree_del()
5521 if (!RB_EMPTY_NODE(&inode->rb_node)) { in inode_tree_del()
5522 rb_erase(&inode->rb_node, &root->inode_tree); in inode_tree_del()
5523 RB_CLEAR_NODE(&inode->rb_node); in inode_tree_del()
5524 empty = RB_EMPTY_ROOT(&root->inode_tree); in inode_tree_del()
5526 spin_unlock(&root->inode_lock); in inode_tree_del()
5528 if (empty && btrfs_root_refs(&root->root_item) == 0) { in inode_tree_del()
5529 spin_lock(&root->inode_lock); in inode_tree_del()
5530 empty = RB_EMPTY_ROOT(&root->inode_tree); in inode_tree_del()
5531 spin_unlock(&root->inode_lock); in inode_tree_del()
5542 inode->i_ino = args->ino; in btrfs_init_locked_inode()
5543 BTRFS_I(inode)->location.objectid = args->ino; in btrfs_init_locked_inode()
5544 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY; in btrfs_init_locked_inode()
5545 BTRFS_I(inode)->location.offset = 0; in btrfs_init_locked_inode()
5546 BTRFS_I(inode)->root = btrfs_grab_root(args->root); in btrfs_init_locked_inode()
5547 BUG_ON(args->root && !BTRFS_I(inode)->root); in btrfs_init_locked_inode()
5549 if (args->root && args->root == args->root->fs_info->tree_root && in btrfs_init_locked_inode()
5550 args->ino != BTRFS_BTREE_INODE_OBJECTID) in btrfs_init_locked_inode()
5552 &BTRFS_I(inode)->runtime_flags); in btrfs_init_locked_inode()
5560 return args->ino == BTRFS_I(inode)->location.objectid && in btrfs_find_actor()
5561 args->root == BTRFS_I(inode)->root; in btrfs_find_actor()
5593 return ERR_PTR(-ENOMEM); in btrfs_iget_path()
5595 if (inode->i_state & I_NEW) { in btrfs_iget_path()
5610 ret = -ENOENT; in btrfs_iget_path()
5627 struct inode *inode = new_inode(dir->i_sb); in new_simple_dir()
5630 return ERR_PTR(-ENOMEM); in new_simple_dir()
5632 BTRFS_I(inode)->root = btrfs_grab_root(root); in new_simple_dir()
5633 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); in new_simple_dir()
5634 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); in new_simple_dir()
5636 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; in new_simple_dir()
5638 * We only need lookup, the rest is read-only and there's no inode in new_simple_dir()
5641 inode->i_op = &simple_dir_inode_operations; in new_simple_dir()
5642 inode->i_opflags &= ~IOP_XATTR; in new_simple_dir()
5643 inode->i_fop = &simple_dir_operations; in new_simple_dir()
5644 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; in new_simple_dir()
5645 inode->i_mtime = inode_set_ctime_current(inode); in new_simple_dir()
5646 inode->i_atime = dir->i_atime; in new_simple_dir()
5647 BTRFS_I(inode)->i_otime = inode->i_mtime; in new_simple_dir()
5648 inode->i_uid = dir->i_uid; in new_simple_dir()
5649 inode->i_gid = dir->i_gid; in new_simple_dir()
5665 return fs_umode_to_ftype(inode->i_mode); in btrfs_inode_type()
5670 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); in btrfs_lookup_dentry()
5672 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_lookup_dentry()
5678 if (dentry->d_name.len > BTRFS_NAME_LEN) in btrfs_lookup_dentry()
5679 return ERR_PTR(-ENAMETOOLONG); in btrfs_lookup_dentry()
5686 inode = btrfs_iget(dir->i_sb, location.objectid, root); in btrfs_lookup_dentry()
5690 /* Do extra check against inode mode with di_type */ in btrfs_lookup_dentry()
5694 inode->i_mode, btrfs_inode_type(inode), in btrfs_lookup_dentry()
5697 return ERR_PTR(-EUCLEAN); in btrfs_lookup_dentry()
5705 if (ret != -ENOENT) in btrfs_lookup_dentry()
5710 inode = btrfs_iget(dir->i_sb, location.objectid, sub_root); in btrfs_lookup_dentry()
5716 down_read(&fs_info->cleanup_work_sem); in btrfs_lookup_dentry()
5717 if (!sb_rdonly(inode->i_sb)) in btrfs_lookup_dentry()
5719 up_read(&fs_info->cleanup_work_sem); in btrfs_lookup_dentry()
5735 inode = d_inode(dentry->d_parent); in btrfs_dentry_delete()
5738 root = BTRFS_I(inode)->root; in btrfs_dentry_delete()
5739 if (btrfs_root_refs(&root->root_item) == 0) in btrfs_dentry_delete()
5753 if (inode == ERR_PTR(-ENOENT)) in btrfs_lookup()
5760 * in-memory index_cnt variable to the first free sequence number.
5764 struct btrfs_root *root = inode->root; in btrfs_set_inode_index_count()
5772 key.offset = (u64)-1; in btrfs_set_inode_index_count()
5776 return -ENOMEM; in btrfs_set_inode_index_count()
5786 if (path->slots[0] == 0) { in btrfs_set_inode_index_count()
5787 inode->index_cnt = BTRFS_DIR_START_INDEX; in btrfs_set_inode_index_count()
5791 path->slots[0]--; in btrfs_set_inode_index_count()
5793 leaf = path->nodes[0]; in btrfs_set_inode_index_count()
5794 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in btrfs_set_inode_index_count()
5798 inode->index_cnt = BTRFS_DIR_START_INDEX; in btrfs_set_inode_index_count()
5802 inode->index_cnt = found_key.offset + 1; in btrfs_set_inode_index_count()
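/*
 * Illustrative sketch (not part of inode.c): index_cnt is simply "largest
 * existing DIR_INDEX offset + 1", falling back to the first usable index when
 * the directory has no entries.  BTRFS_DIR_START_INDEX is assumed to be 2 here
 * (the first two indexes are reserved for "." and "..").
 */
#include <stddef.h>
#include <stdint.h>

#define DEMO_DIR_START_INDEX 2	/* stand-in for BTRFS_DIR_START_INDEX */

static uint64_t next_dir_index(const uint64_t *used, size_t n)
{
	/* 'used' holds the existing DIR_INDEX offsets in ascending order */
	return n ? used[n - 1] + 1 : DEMO_DIR_START_INDEX;
}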
5813 if (dir->index_cnt == (u64)-1) { in btrfs_get_dir_last_index()
5823 *index = dir->index_cnt - 1; in btrfs_get_dir_last_index()
5851 return -ENOMEM; in btrfs_opendir()
5852 private->last_index = last_index; in btrfs_opendir()
5853 private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); in btrfs_opendir()
5854 if (!private->filldir_buf) { in btrfs_opendir()
5856 return -ENOMEM; in btrfs_opendir()
5858 file->private_data = private; in btrfs_opendir()
5864 struct btrfs_file_private *private = file->private_data; in btrfs_dir_llseek()
5868 &private->last_index); in btrfs_dir_llseek()
5884 while (entries--) { in btrfs_filldir()
5888 ctx->pos = get_unaligned(&entry->offset); in btrfs_filldir()
5889 if (!dir_emit(ctx, name, get_unaligned(&entry->name_len), in btrfs_filldir()
5890 get_unaligned(&entry->ino), in btrfs_filldir()
5891 get_unaligned(&entry->type))) in btrfs_filldir()
5894 get_unaligned(&entry->name_len); in btrfs_filldir()
5895 ctx->pos++; in btrfs_filldir()
5903 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_real_readdir()
5904 struct btrfs_file_private *private = file->private_data; in btrfs_real_readdir()
5925 return -ENOMEM; in btrfs_real_readdir()
5927 addr = private->filldir_buf; in btrfs_real_readdir()
5928 path->reada = READA_FORWARD; in btrfs_real_readdir()
5930 put = btrfs_readdir_get_delayed_items(inode, private->last_index, in btrfs_real_readdir()
5935 key.offset = ctx->pos; in btrfs_real_readdir()
5940 struct extent_buffer *leaf = path->nodes[0]; in btrfs_real_readdir()
5947 if (found_key.offset < ctx->pos) in btrfs_real_readdir()
5949 if (found_key.offset > private->last_index) in btrfs_real_readdir()
5953 di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item); in btrfs_real_readdir()
5958 ret = btrfs_filldir(private->filldir_buf, entries, ctx); in btrfs_real_readdir()
5961 addr = private->filldir_buf; in btrfs_real_readdir()
5972 put_unaligned(name_len, &entry->name_len); in btrfs_real_readdir()
5973 put_unaligned(fs_ftype_to_dtype(ftype), &entry->type); in btrfs_real_readdir()
5975 put_unaligned(location.objectid, &entry->ino); in btrfs_real_readdir()
5976 put_unaligned(found_key.offset, &entry->offset); in btrfs_real_readdir()
5987 ret = btrfs_filldir(private->filldir_buf, entries, ctx); in btrfs_real_readdir()
6003 * they're returned by readdir. Until we re-use freed offsets in btrfs_real_readdir()
6012 if (ctx->pos >= INT_MAX) in btrfs_real_readdir()
6013 ctx->pos = LLONG_MAX; in btrfs_real_readdir()
6015 ctx->pos = INT_MAX; in btrfs_real_readdir()
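/*
 * Illustrative user-space sketch (not part of inode.c) of the filldir_buf
 * scheme used above: readdir packs variable-length records (a small header
 * immediately followed by the name bytes) back to back into one page-sized
 * buffer, then drains it with dir_emit().  The header layout below is an
 * assumption for the sketch, not the kernel's struct; the unaligned stores and
 * loads are modelled with memcpy.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_entry {		/* hypothetical header, name bytes follow it */
	uint64_t ino;
	uint64_t offset;
	uint32_t name_len;
	uint8_t type;
} __attribute__((packed));

static size_t pack_entry(char *buf, size_t pos, size_t bufsize,
			 uint64_t ino, uint64_t offset, uint8_t type,
			 const char *name)
{
	size_t name_len = strlen(name);
	struct demo_entry e = { ino, offset, (uint32_t)name_len, type };

	if (pos + sizeof(e) + name_len > bufsize)
		return 0;			/* buffer full: caller must drain first */
	memcpy(buf + pos, &e, sizeof(e));	/* unaligned-safe store */
	memcpy(buf + pos + sizeof(e), name, name_len);
	return pos + sizeof(e) + name_len;
}

static void drain_entries(const char *buf, size_t used)
{
	size_t pos = 0;

	while (pos < used) {
		struct demo_entry e;

		memcpy(&e, buf + pos, sizeof(e));	/* unaligned-safe load */
		printf("ino %llu off %llu type %u name %.*s\n",
		       (unsigned long long)e.ino, (unsigned long long)e.offset,
		       e.type, (int)e.name_len, buf + pos + sizeof(e));
		pos += sizeof(e) + e.name_len;		/* advance past header + name */
	}
}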
6033 struct btrfs_root *root = inode->root; in btrfs_dirty_inode()
6034 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_dirty_inode()
6038 if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags)) in btrfs_dirty_inode()
6046 if (ret && (ret == -ENOSPC || ret == -EDQUOT)) { in btrfs_dirty_inode()
6056 if (inode->delayed_node) in btrfs_dirty_inode()
6068 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_update_time()
6072 return -EROFS; in btrfs_update_time()
6086 if (dir->index_cnt == (u64)-1) { in btrfs_set_inode_index()
6095 *index = dir->index_cnt; in btrfs_set_inode_index()
6096 dir->index_cnt++; in btrfs_set_inode_index()
6105 args.ino = BTRFS_I(inode)->location.objectid; in btrfs_insert_inode_locked()
6106 args.root = BTRFS_I(inode)->root; in btrfs_insert_inode_locked()
6109 btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root), in btrfs_insert_inode_locked()
6116 struct inode *dir = args->dir; in btrfs_new_inode_prepare()
6117 struct inode *inode = args->inode; in btrfs_new_inode_prepare()
6120 if (!args->orphan) { in btrfs_new_inode_prepare()
6121 ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0, in btrfs_new_inode_prepare()
6122 &args->fname); in btrfs_new_inode_prepare()
6127 ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl); in btrfs_new_inode_prepare()
6129 fscrypt_free_filename(&args->fname); in btrfs_new_inode_prepare()
6136 if (BTRFS_I(dir)->prop_compress) in btrfs_new_inode_prepare()
6139 if (args->default_acl) in btrfs_new_inode_prepare()
6142 if (args->acl) in btrfs_new_inode_prepare()
6146 if (dir->i_security) in btrfs_new_inode_prepare()
6149 if (args->orphan) { in btrfs_new_inode_prepare()
6169 posix_acl_release(args->acl); in btrfs_new_inode_args_destroy()
6170 posix_acl_release(args->default_acl); in btrfs_new_inode_args_destroy()
6171 fscrypt_free_filename(&args->fname); in btrfs_new_inode_args_destroy()
6183 flags = dir->flags; in btrfs_inherit_iflags()
6186 inode->flags &= ~BTRFS_INODE_COMPRESS; in btrfs_inherit_iflags()
6187 inode->flags |= BTRFS_INODE_NOCOMPRESS; in btrfs_inherit_iflags()
6189 inode->flags &= ~BTRFS_INODE_NOCOMPRESS; in btrfs_inherit_iflags()
6190 inode->flags |= BTRFS_INODE_COMPRESS; in btrfs_inherit_iflags()
6194 inode->flags |= BTRFS_INODE_NODATACOW; in btrfs_inherit_iflags()
6195 if (S_ISREG(inode->vfs_inode.i_mode)) in btrfs_inherit_iflags()
6196 inode->flags |= BTRFS_INODE_NODATASUM; in btrfs_inherit_iflags()
6199 btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode); in btrfs_inherit_iflags()
6205 struct inode *dir = args->dir; in btrfs_create_new_inode()
6206 struct inode *inode = args->inode; in btrfs_create_new_inode()
6207 const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name; in btrfs_create_new_inode()
6208 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); in btrfs_create_new_inode()
6223 return -ENOMEM; in btrfs_create_new_inode()
6225 if (!args->subvol) in btrfs_create_new_inode()
6226 BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root); in btrfs_create_new_inode()
6227 root = BTRFS_I(inode)->root; in btrfs_create_new_inode()
6232 inode->i_ino = objectid; in btrfs_create_new_inode()
6234 if (args->orphan) { in btrfs_create_new_inode()
6243 ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index); in btrfs_create_new_inode()
6248 BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX; in btrfs_create_new_inode()
6249 BTRFS_I(inode)->generation = trans->transid; in btrfs_create_new_inode()
6250 inode->i_generation = BTRFS_I(inode)->generation; in btrfs_create_new_inode()
6257 if (!args->subvol) in btrfs_create_new_inode()
6260 if (S_ISREG(inode->i_mode)) { in btrfs_create_new_inode()
6262 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM; in btrfs_create_new_inode()
6264 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW | in btrfs_create_new_inode()
6268 location = &BTRFS_I(inode)->location; in btrfs_create_new_inode()
6269 location->objectid = objectid; in btrfs_create_new_inode()
6270 location->offset = 0; in btrfs_create_new_inode()
6271 location->type = BTRFS_INODE_ITEM_KEY; in btrfs_create_new_inode()
6275 if (!args->orphan) in btrfs_create_new_inode()
6276 BTRFS_I(dir)->index_cnt--; in btrfs_create_new_inode()
6294 if (!args->orphan) { in btrfs_create_new_inode()
6303 if (args->subvol) { in btrfs_create_new_inode()
6308 sizes[1] = name->len + sizeof(*ref); in btrfs_create_new_inode()
6314 batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]); in btrfs_create_new_inode()
6315 batch.nr = args->orphan ? 1 : 2; in btrfs_create_new_inode()
6322 inode->i_mtime = inode_set_ctime_current(inode); in btrfs_create_new_inode()
6323 inode->i_atime = inode->i_mtime; in btrfs_create_new_inode()
6324 BTRFS_I(inode)->i_otime = inode->i_mtime; in btrfs_create_new_inode()
6331 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], in btrfs_create_new_inode()
6333 memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item, in btrfs_create_new_inode()
6335 fill_inode_item(trans, path->nodes[0], inode_item, inode); in btrfs_create_new_inode()
6337 if (!args->orphan) { in btrfs_create_new_inode()
6338 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1, in btrfs_create_new_inode()
6341 if (args->subvol) { in btrfs_create_new_inode()
6342 btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2); in btrfs_create_new_inode()
6343 btrfs_set_inode_ref_index(path->nodes[0], ref, 0); in btrfs_create_new_inode()
6344 write_extent_buffer(path->nodes[0], "..", ptr, 2); in btrfs_create_new_inode()
6346 btrfs_set_inode_ref_name_len(path->nodes[0], ref, in btrfs_create_new_inode()
6347 name->len); in btrfs_create_new_inode()
6348 btrfs_set_inode_ref_index(path->nodes[0], ref, in btrfs_create_new_inode()
6349 BTRFS_I(inode)->dir_index); in btrfs_create_new_inode()
6350 write_extent_buffer(path->nodes[0], name->name, ptr, in btrfs_create_new_inode()
6351 name->len); in btrfs_create_new_inode()
6355 btrfs_mark_buffer_dirty(trans, path->nodes[0]); in btrfs_create_new_inode()
6364 if (args->subvol) { in btrfs_create_new_inode()
6371 parent = btrfs_iget(fs_info->sb, BTRFS_FIRST_FREE_OBJECTID, in btrfs_create_new_inode()
6372 BTRFS_I(dir)->root); in btrfs_create_new_inode()
6385 btrfs_ino(BTRFS_I(inode)), root->root_key.objectid, in btrfs_create_new_inode()
6393 if (!args->subvol) { in btrfs_create_new_inode()
6408 if (args->orphan) { in btrfs_create_new_inode()
6412 0, BTRFS_I(inode)->dir_index); in btrfs_create_new_inode()
6445 struct btrfs_root *root = parent_inode->root; in btrfs_add_link()
6450 memcpy(&key, &inode->root->root_key, sizeof(key)); in btrfs_add_link()
6459 root->root_key.objectid, parent_ino, in btrfs_add_link()
6471 btrfs_inode_type(&inode->vfs_inode), index); in btrfs_add_link()
6472 if (ret == -EEXIST || ret == -EOVERFLOW) in btrfs_add_link()
6479 btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size + in btrfs_add_link()
6480 name->len * 2); in btrfs_add_link()
6481 inode_inc_iversion(&parent_inode->vfs_inode); in btrfs_add_link()
6488 if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags)) in btrfs_add_link()
6489 parent_inode->vfs_inode.i_mtime = in btrfs_add_link()
6490 inode_set_ctime_current(&parent_inode->vfs_inode); in btrfs_add_link()
6502 root->root_key.objectid, parent_ino, in btrfs_add_link()
6523 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); in btrfs_create_common()
6524 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_create_common()
6563 inode = new_inode(dir->i_sb); in btrfs_mknod()
6565 return -ENOMEM; in btrfs_mknod()
6567 inode->i_op = &btrfs_special_inode_operations; in btrfs_mknod()
6568 init_special_inode(inode, inode->i_mode, rdev); in btrfs_mknod()
6577 inode = new_inode(dir->i_sb); in btrfs_create()
6579 return -ENOMEM; in btrfs_create()
6581 inode->i_fop = &btrfs_file_operations; in btrfs_create()
6582 inode->i_op = &btrfs_file_inode_operations; in btrfs_create()
6583 inode->i_mapping->a_ops = &btrfs_aops; in btrfs_create()
6591 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_link()
6593 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_link()
6600 if (root->root_key.objectid != BTRFS_I(inode)->root->root_key.objectid) in btrfs_link()
6601 return -EXDEV; in btrfs_link()
6603 if (inode->i_nlink >= BTRFS_LINK_MAX) in btrfs_link()
6604 return -EMLINK; in btrfs_link()
6606 err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname); in btrfs_link()
6620 trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6); in btrfs_link()
6628 BTRFS_I(inode)->dir_index = 0ULL; in btrfs_link()
6633 set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); in btrfs_link()
6641 struct dentry *parent = dentry->d_parent; in btrfs_link()
6646 if (inode->i_nlink == 1) { in btrfs_link()
6676 inode = new_inode(dir->i_sb); in btrfs_mkdir()
6678 return -ENOMEM; in btrfs_mkdir()
6680 inode->i_op = &btrfs_dir_inode_operations; in btrfs_mkdir()
6681 inode->i_fop = &btrfs_dir_file_operations; in btrfs_mkdir()
6690 struct extent_buffer *leaf = path->nodes[0]; in uncompress_inline()
6699 inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]); in uncompress_inline()
6702 return -ENOMEM; in uncompress_inline()
6719 memzero_page(page, max_size, PAGE_SIZE - max_size); in uncompress_inline()
6736 fi = btrfs_item_ptr(path->nodes[0], path->slots[0], in read_inline_extent()
6738 if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE) in read_inline_extent()
6742 btrfs_file_extent_ram_bytes(path->nodes[0], fi)); in read_inline_extent()
6744 read_extent_buffer(path->nodes[0], kaddr, in read_inline_extent()
6748 memzero_page(page, copy_size, PAGE_SIZE - copy_size); in read_inline_extent()
6762 * it from the B-tree and caching it if necessary. Note that there may be more
6768 * Return: ERR_PTR on error, non-NULL extent_map on success.
6774 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_get_extent()
6779 int extent_type = -1; in btrfs_get_extent()
6781 struct btrfs_root *root = inode->root; in btrfs_get_extent()
6786 struct extent_map_tree *em_tree = &inode->extent_tree; in btrfs_get_extent()
6788 read_lock(&em_tree->lock); in btrfs_get_extent()
6790 read_unlock(&em_tree->lock); in btrfs_get_extent()
6793 if (em->start > start || em->start + em->len <= start) in btrfs_get_extent()
6795 else if (em->block_start == EXTENT_MAP_INLINE && page) in btrfs_get_extent()
6802 ret = -ENOMEM; in btrfs_get_extent()
6805 em->start = EXTENT_MAP_HOLE; in btrfs_get_extent()
6806 em->orig_start = EXTENT_MAP_HOLE; in btrfs_get_extent()
6807 em->len = (u64)-1; in btrfs_get_extent()
6808 em->block_len = (u64)-1; in btrfs_get_extent()
6812 ret = -ENOMEM; in btrfs_get_extent()
6817 path->reada = READA_FORWARD; in btrfs_get_extent()
6825 path->search_commit_root = 1; in btrfs_get_extent()
6826 path->skip_locking = 1; in btrfs_get_extent()
6833 if (path->slots[0] == 0) in btrfs_get_extent()
6835 path->slots[0]--; in btrfs_get_extent()
6839 leaf = path->nodes[0]; in btrfs_get_extent()
6840 item = btrfs_item_ptr(leaf, path->slots[0], in btrfs_get_extent()
6842 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in btrfs_get_extent()
6861 if (!S_ISREG(inode->vfs_inode.i_mode)) { in btrfs_get_extent()
6862 ret = -EUCLEAN; in btrfs_get_extent()
6864 "regular/prealloc extent found for non-regular inode %llu", in btrfs_get_extent()
6872 path->slots[0], in btrfs_get_extent()
6877 path->slots[0]++; in btrfs_get_extent()
6878 if (path->slots[0] >= btrfs_header_nritems(leaf)) { in btrfs_get_extent()
6885 leaf = path->nodes[0]; in btrfs_get_extent()
6887 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); in btrfs_get_extent()
6897 em->start = start; in btrfs_get_extent()
6898 em->orig_start = start; in btrfs_get_extent()
6899 em->len = found_key.offset - start; in btrfs_get_extent()
6900 em->block_start = EXTENT_MAP_HOLE; in btrfs_get_extent()
6912 * ensured by tree-checker and inline extent creation path. in btrfs_get_extent()
6917 ASSERT(em->start == 0); in btrfs_get_extent()
6925 ASSERT(em->block_start == EXTENT_MAP_INLINE); in btrfs_get_extent()
6926 ASSERT(em->len == fs_info->sectorsize); in btrfs_get_extent()
6934 em->start = start; in btrfs_get_extent()
6935 em->orig_start = start; in btrfs_get_extent()
6936 em->len = len; in btrfs_get_extent()
6937 em->block_start = EXTENT_MAP_HOLE; in btrfs_get_extent()
6941 if (em->start > start || extent_map_end(em) <= start) { in btrfs_get_extent()
6944 em->start, em->len, start, len); in btrfs_get_extent()
6945 ret = -EIO; in btrfs_get_extent()
6949 write_lock(&em_tree->lock); in btrfs_get_extent()
6951 write_unlock(&em_tree->lock); in btrfs_get_extent()
6995 start + len - 1, false); in btrfs_create_dio_extent()
6999 ASSERT(!dio_data->ordered); in btrfs_create_dio_extent()
7000 dio_data->ordered = ordered; in btrfs_create_dio_extent()
7011 struct btrfs_root *root = inode->root; in btrfs_new_extent_direct()
7012 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_new_extent_direct()
7020 ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize, in btrfs_new_extent_direct()
7022 if (ret == -EAGAIN) { in btrfs_new_extent_direct()
7024 wait_on_bit_io(&inode->root->fs_info->flags, BTRFS_FS_NEED_ZONE_FINISH, in btrfs_new_extent_direct()
7048 if (!block_group || block_group->ro) in btrfs_extent_readonly()
7062 * @orig_len: (optional) Return the original on-disk length of the file extent
7079 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in can_nocow_extent()
7084 struct btrfs_root *root = BTRFS_I(inode)->root; in can_nocow_extent()
7085 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in can_nocow_extent()
7092 return -ENOMEM; in can_nocow_extent()
7093 path->nowait = nowait; in can_nocow_extent()
7101 if (path->slots[0] == 0) { in can_nocow_extent()
7106 path->slots[0]--; in can_nocow_extent()
7109 leaf = path->nodes[0]; in can_nocow_extent()
7110 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); in can_nocow_extent()
7125 fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); in can_nocow_extent()
7131 nocow_args.end = offset + *len - 1; in can_nocow_extent()
7149 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && in can_nocow_extent()
7154 root->fs_info->sectorsize) - 1; in can_nocow_extent()
7158 ret = -EAGAIN; in can_nocow_extent()
7164 *orig_start = key.offset - nocow_args.extent_offset; in can_nocow_extent()
7181 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in lock_extent_direct()
7189 return -EAGAIN; in lock_extent_direct()
7199 lockend - lockstart + 1); in lock_extent_direct()
7209 (!writing || !filemap_range_has_page(inode->i_mapping, in lock_extent_direct()
7218 ret = -EAGAIN; in lock_extent_direct()
7237 test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) in lock_extent_direct()
7240 ret = nowait ? -EAGAIN : -ENOTBLK; in lock_extent_direct()
7256 ret = nowait ? -EAGAIN : -ENOTBLK; in lock_extent_direct()
7285 return ERR_PTR(-ENOMEM); in create_io_em()
7287 em->start = start; in create_io_em()
7288 em->orig_start = orig_start; in create_io_em()
7289 em->len = len; in create_io_em()
7290 em->block_len = block_len; in create_io_em()
7291 em->block_start = block_start; in create_io_em()
7292 em->orig_block_len = orig_block_len; in create_io_em()
7293 em->ram_bytes = ram_bytes; in create_io_em()
7294 em->generation = -1; in create_io_em()
7295 set_bit(EXTENT_FLAG_PINNED, &em->flags); in create_io_em()
7297 set_bit(EXTENT_FLAG_FILLING, &em->flags); in create_io_em()
7299 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags); in create_io_em()
7300 em->compress_type = compress_type; in create_io_em()
7321 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_get_blocks_direct_write()
7341 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || in btrfs_get_blocks_direct_write()
7342 ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && in btrfs_get_blocks_direct_write()
7343 em->block_start != EXTENT_MAP_HOLE)) { in btrfs_get_blocks_direct_write()
7344 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) in btrfs_get_blocks_direct_write()
7348 len = min(len, em->len - (start - em->start)); in btrfs_get_blocks_direct_write()
7349 block_start = em->block_start + (start - em->start); in btrfs_get_blocks_direct_write()
7371 if (nowait && (ret == -ENOSPC || ret == -EDQUOT)) in btrfs_get_blocks_direct_write()
7372 ret = -EAGAIN; in btrfs_get_blocks_direct_write()
7393 dio_data->nocow_done = true; in btrfs_get_blocks_direct_write()
7400 ret = -EAGAIN; in btrfs_get_blocks_direct_write()
7408 if (!dio_data->data_space_reserved) { in btrfs_get_blocks_direct_write()
7409 ret = -ENOSPC; in btrfs_get_blocks_direct_write()
7429 len = min(len, em->len - (start - em->start)); in btrfs_get_blocks_direct_write()
7432 prev_len - len, true); in btrfs_get_blocks_direct_write()
7461 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_dio_iomap_begin()
7464 struct btrfs_dio_data *dio_data = iter->private; in btrfs_dio_iomap_begin()
7481 * -EAGAIN at this point so that the normal path is used. in btrfs_dio_iomap_begin()
7484 return -EAGAIN; in btrfs_dio_iomap_begin()
7491 len = min_t(u64, len, fs_info->sectorsize * BTRFS_MAX_BIO_SECTORS); in btrfs_dio_iomap_begin()
7494 lockend = start + len - 1; in btrfs_dio_iomap_begin()
7500 * outstanding dirty pages are on disk - the first flush only starts in btrfs_dio_iomap_begin()
7508 * dirty or under writeback (same as for the non-compression case). in btrfs_dio_iomap_begin()
7515 &BTRFS_I(inode)->runtime_flags)) { in btrfs_dio_iomap_begin()
7517 if (filemap_range_needs_writeback(inode->i_mapping, in btrfs_dio_iomap_begin()
7519 return -EAGAIN; in btrfs_dio_iomap_begin()
7521 ret = filemap_fdatawrite_range(inode->i_mapping, start, in btrfs_dio_iomap_begin()
7522 start + length - 1); in btrfs_dio_iomap_begin()
7541 &dio_data->data_reserved, in btrfs_dio_iomap_begin()
7544 dio_data->data_space_reserved = true; in btrfs_dio_iomap_begin()
7545 else if (ret && !(BTRFS_I(inode)->flags & in btrfs_dio_iomap_begin()
7575 * We return -ENOTBLK because that's what makes DIO go ahead and go back in btrfs_dio_iomap_begin()
7579 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || in btrfs_dio_iomap_begin()
7580 em->block_start == EXTENT_MAP_INLINE) { in btrfs_dio_iomap_begin()
7583 * If we are in a NOWAIT context, return -EAGAIN in order to in btrfs_dio_iomap_begin()
7587 * space - this happens if we were able to read some data from in btrfs_dio_iomap_begin()
7588 * previous non-compressed extents and then when we fallback to in btrfs_dio_iomap_begin()
7592 * of bytes previously read is > 0, so it does not return -EFAULT). in btrfs_dio_iomap_begin()
7594 ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK; in btrfs_dio_iomap_begin()
7598 len = min(len, em->len - (start - em->start)); in btrfs_dio_iomap_begin()
7602 * (or a mix of extents and holes), then we return -EAGAIN to make the in btrfs_dio_iomap_begin()
7618 * which we return back to our caller - we should only return EIOCBQUEUED in btrfs_dio_iomap_begin()
7623 ret = -EAGAIN; in btrfs_dio_iomap_begin()
7634 len = min(len, em->len - (start - em->start)); in btrfs_dio_iomap_begin()
7635 if (dio_data->data_space_reserved) { in btrfs_dio_iomap_begin()
7639 if (dio_data->nocow_done) { in btrfs_dio_iomap_begin()
7644 release_len = data_alloc_len - len; in btrfs_dio_iomap_begin()
7649 dio_data->data_reserved, in btrfs_dio_iomap_begin()
7664 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, in btrfs_dio_iomap_begin()
7674 if ((em->block_start == EXTENT_MAP_HOLE) || in btrfs_dio_iomap_begin()
7675 (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) { in btrfs_dio_iomap_begin()
7676 iomap->addr = IOMAP_NULL_ADDR; in btrfs_dio_iomap_begin()
7677 iomap->type = IOMAP_HOLE; in btrfs_dio_iomap_begin()
7679 iomap->addr = em->block_start + (start - em->start); in btrfs_dio_iomap_begin()
7680 iomap->type = IOMAP_MAPPED; in btrfs_dio_iomap_begin()
7682 iomap->offset = start; in btrfs_dio_iomap_begin()
7683 iomap->bdev = fs_info->fs_devices->latest_dev->bdev; in btrfs_dio_iomap_begin()
7684 iomap->length = len; in btrfs_dio_iomap_begin()
7690 unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend, in btrfs_dio_iomap_begin()
7693 if (dio_data->data_space_reserved) { in btrfs_dio_iomap_begin()
7695 dio_data->data_reserved, in btrfs_dio_iomap_begin()
7697 extent_changeset_free(dio_data->data_reserved); in btrfs_dio_iomap_begin()
7707 struct btrfs_dio_data *dio_data = iter->private; in btrfs_dio_iomap_end()
7708 size_t submitted = dio_data->submitted; in btrfs_dio_iomap_end()
7712 if (!write && (iomap->type == IOMAP_HOLE)) { in btrfs_dio_iomap_end()
7714 unlock_extent(&BTRFS_I(inode)->io_tree, pos, pos + length - 1, in btrfs_dio_iomap_end()
7721 length -= submitted; in btrfs_dio_iomap_end()
7723 btrfs_finish_ordered_extent(dio_data->ordered, NULL, in btrfs_dio_iomap_end()
7726 unlock_extent(&BTRFS_I(inode)->io_tree, pos, in btrfs_dio_iomap_end()
7727 pos + length - 1, NULL); in btrfs_dio_iomap_end()
7728 ret = -ENOTBLK; in btrfs_dio_iomap_end()
7731 btrfs_put_ordered_extent(dio_data->ordered); in btrfs_dio_iomap_end()
7732 dio_data->ordered = NULL; in btrfs_dio_iomap_end()
7736 extent_changeset_free(dio_data->data_reserved); in btrfs_dio_iomap_end()
7744 struct btrfs_inode *inode = bbio->inode; in btrfs_dio_end_io()
7745 struct bio *bio = &bbio->bio; in btrfs_dio_end_io()
7747 if (bio->bi_status) { in btrfs_dio_end_io()
7748 btrfs_warn(inode->root->fs_info, in btrfs_dio_end_io()
7750 btrfs_ino(inode), bio->bi_opf, in btrfs_dio_end_io()
7751 dip->file_offset, dip->bytes, bio->bi_status); in btrfs_dio_end_io()
7755 btrfs_finish_ordered_extent(bbio->ordered, NULL, in btrfs_dio_end_io()
7756 dip->file_offset, dip->bytes, in btrfs_dio_end_io()
7757 !bio->bi_status); in btrfs_dio_end_io()
7759 unlock_extent(&inode->io_tree, dip->file_offset, in btrfs_dio_end_io()
7760 dip->file_offset + dip->bytes - 1, NULL); in btrfs_dio_end_io()
7763 bbio->bio.bi_private = bbio->private; in btrfs_dio_end_io()
7773 struct btrfs_dio_data *dio_data = iter->private; in btrfs_dio_submit_io()
7775 btrfs_bio_init(bbio, BTRFS_I(iter->inode)->root->fs_info, in btrfs_dio_submit_io()
7776 btrfs_dio_end_io, bio->bi_private); in btrfs_dio_submit_io()
7777 bbio->inode = BTRFS_I(iter->inode); in btrfs_dio_submit_io()
7778 bbio->file_offset = file_offset; in btrfs_dio_submit_io()
7780 dip->file_offset = file_offset; in btrfs_dio_submit_io()
7781 dip->bytes = bio->bi_iter.bi_size; in btrfs_dio_submit_io()
7783 dio_data->submitted += bio->bi_iter.bi_size; in btrfs_dio_submit_io()
7792 if (iter->flags & IOMAP_WRITE) { in btrfs_dio_submit_io()
7795 ret = btrfs_extract_ordered_extent(bbio, dio_data->ordered); in btrfs_dio_submit_io()
7797 btrfs_finish_ordered_extent(dio_data->ordered, NULL, in btrfs_dio_submit_io()
7798 file_offset, dip->bytes, in btrfs_dio_submit_io()
7800 bio->bi_status = errno_to_blk_status(ret); in btrfs_dio_submit_io()
7859 if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) { in btrfs_fiemap()
7873 if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC) { in btrfs_fiemap()
7901 * If we continue to release/invalidate the page, we could cause use-after-free
7907 struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb); in wait_subpage_spinlock()
7913 ASSERT(PagePrivate(page) && page->private); in wait_subpage_spinlock()
7914 subpage = (struct btrfs_subpage *)page->private; in wait_subpage_spinlock()
7927 spin_lock_irq(&subpage->lock); in wait_subpage_spinlock()
7928 spin_unlock_irq(&subpage->lock); in wait_subpage_spinlock()
7933 int ret = try_release_extent_mapping(&folio->page, gfp_flags); in __btrfs_release_folio()
7936 wait_subpage_spinlock(&folio->page); in __btrfs_release_folio()
7937 clear_page_extent_mapped(&folio->page); in __btrfs_release_folio()
7973 struct btrfs_inode *inode = BTRFS_I(folio->mapping->host); in btrfs_invalidate_folio()
7974 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_invalidate_folio()
7975 struct extent_io_tree *tree = &inode->io_tree; in btrfs_invalidate_folio()
7978 u64 page_end = page_start + folio_size(folio) - 1; in btrfs_invalidate_folio()
7980 int inode_evicting = inode->vfs_inode.i_state & I_FREEING; in btrfs_invalidate_folio()
7996 wait_subpage_spinlock(&folio->page); in btrfs_invalidate_folio()
8003 * shouldn't clear page extent mapped, as folio->private can still in btrfs_invalidate_folio()
8026 page_end + 1 - cur); in btrfs_invalidate_folio()
8036 if (ordered->file_offset > cur) { in btrfs_invalidate_folio()
8038 * There is a range between [cur, oe->file_offset) not in btrfs_invalidate_folio()
8043 range_end = ordered->file_offset - 1; in btrfs_invalidate_folio()
8048 range_end = min(ordered->file_offset + ordered->num_bytes - 1, in btrfs_invalidate_folio()
8050 ASSERT(range_end + 1 - cur < U32_MAX); in btrfs_invalidate_folio()
8051 range_len = range_end + 1 - cur; in btrfs_invalidate_folio()
8052 if (!btrfs_page_test_ordered(fs_info, &folio->page, cur, range_len)) { in btrfs_invalidate_folio()
8061 btrfs_page_clear_ordered(fs_info, &folio->page, cur, range_len); in btrfs_invalidate_folio()
8077 spin_lock_irq(&inode->ordered_tree.lock); in btrfs_invalidate_folio()
8078 set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags); in btrfs_invalidate_folio()
8079 ordered->truncated_len = min(ordered->truncated_len, in btrfs_invalidate_folio()
8080 cur - ordered->file_offset); in btrfs_invalidate_folio()
8081 spin_unlock_irq(&inode->ordered_tree.lock); in btrfs_invalidate_folio()
8090 cur, range_end + 1 - cur)) { in btrfs_invalidate_folio()
8116 btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL); in btrfs_invalidate_folio()
8131 btrfs_page_clear_checked(fs_info, &folio->page, folio_pos(folio), folio_size(folio)); in btrfs_invalidate_folio()
8134 clear_page_extent_mapped(&folio->page); in btrfs_invalidate_folio()
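/*
 * Illustrative user-space sketch (not part of inode.c) of how the loop above
 * splits the invalidated folio into sub-ranges: each step covers either the gap
 * before the next ordered extent or the overlap with one ordered extent,
 * clamped to the end of the folio.  'struct demo_oe' is a stand-in carrying
 * only the two fields the range math needs.
 */
#include <stdint.h>

struct demo_oe {
	uint64_t file_offset;
	uint64_t num_bytes;
};

/* Returns the inclusive end of the sub-range starting at @cur. */
static uint64_t subrange_end(uint64_t cur, uint64_t page_end,
			     const struct demo_oe *oe)
{
	uint64_t oe_end;

	if (!oe)
		return page_end;			/* no ordered extent left on the folio */
	if (oe->file_offset > cur)
		return oe->file_offset - 1;		/* gap before the ordered extent */
	oe_end = oe->file_offset + oe->num_bytes - 1;	/* inside the ordered extent */
	return oe_end < page_end ? oe_end : page_end;
}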
8154 struct page *page = vmf->page; in btrfs_page_mkwrite()
8155 struct inode *inode = file_inode(vmf->vma->vm_file); in btrfs_page_mkwrite()
8156 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in btrfs_page_mkwrite()
8157 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in btrfs_page_mkwrite()
8173 sb_start_pagefault(inode->i_sb); in btrfs_page_mkwrite()
8175 page_end = page_start + PAGE_SIZE - 1; in btrfs_page_mkwrite()
8189 ret2 = file_update_time(vmf->vma->vm_file); in btrfs_page_mkwrite()
8201 down_read(&BTRFS_I(inode)->i_mmap_lock); in btrfs_page_mkwrite()
8205 if ((page->mapping != inode->i_mapping) || in btrfs_page_mkwrite()
8229 up_read(&BTRFS_I(inode)->i_mmap_lock); in btrfs_page_mkwrite()
8235 if (page->index == ((size - 1) >> PAGE_SHIFT)) { in btrfs_page_mkwrite()
8236 reserved_space = round_up(size - page_start, in btrfs_page_mkwrite()
8237 fs_info->sectorsize); in btrfs_page_mkwrite()
8239 end = page_start + reserved_space - 1; in btrfs_page_mkwrite()
8242 PAGE_SIZE - reserved_space, true); in btrfs_page_mkwrite()
8253 clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end, in btrfs_page_mkwrite()
8272 memzero_page(page, zero_start, PAGE_SIZE - zero_start); in btrfs_page_mkwrite()
8275 btrfs_page_set_dirty(fs_info, page, page_start, end + 1 - page_start); in btrfs_page_mkwrite()
8276 btrfs_page_set_uptodate(fs_info, page, page_start, end + 1 - page_start); in btrfs_page_mkwrite()
8281 up_read(&BTRFS_I(inode)->i_mmap_lock); in btrfs_page_mkwrite()
8284 sb_end_pagefault(inode->i_sb); in btrfs_page_mkwrite()
8290 up_read(&BTRFS_I(inode)->i_mmap_lock); in btrfs_page_mkwrite()
8296 sb_end_pagefault(inode->i_sb); in btrfs_page_mkwrite()
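/*
 * Illustrative user-space sketch (not part of inode.c) of the last-page math in
 * btrfs_page_mkwrite() above: when the faulting page straddles i_size, only the
 * sector-aligned part that is actually inside the file keeps its delalloc
 * reservation, the rest is released, and the tail of the page is zeroed.  The
 * "zero from EOF within the page" part is an assumption; the reservation math
 * mirrors the round_up() above.  Worked example (64K pages, 4K sectors):
 * size = 1000000 -> page_start = 983040, reserved_space = 20480,
 * released = 45056, zero from byte 16960 of the page onwards.
 */
#include <stdint.h>

#define DEMO_PAGE_SIZE 65536u

static void mkwrite_tail(uint64_t size, uint64_t page_start, uint32_t sectorsize,
			 uint64_t *reserved_space, uint64_t *released,
			 uint64_t *zero_start)
{
	uint64_t in_page = size - page_start;	/* bytes of the file on this page */

	*reserved_space = (in_page + sectorsize - 1) & ~((uint64_t)sectorsize - 1);
	*released = DEMO_PAGE_SIZE - *reserved_space;
	*zero_start = in_page;			/* assumed: zero from EOF to page end */
}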
8309 struct btrfs_root *root = inode->root; in btrfs_truncate()
8310 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_truncate()
8314 u64 mask = fs_info->sectorsize - 1; in btrfs_truncate()
8318 ret = btrfs_wait_ordered_range(&inode->vfs_inode, in btrfs_truncate()
8319 inode->vfs_inode.i_size & (~mask), in btrfs_truncate()
8320 (u64)-1); in btrfs_truncate()
8348 * 1) rsv - for the truncate reservation, which we will steal from the in btrfs_truncate()
8350 * 2) fs_info->trans_block_rsv - this will have 1 item's worth left for in btrfs_truncate()
8355 return -ENOMEM; in btrfs_truncate()
8356 rsv->size = min_size; in btrfs_truncate()
8357 rsv->failfast = true; in btrfs_truncate()
8370 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv, in btrfs_truncate()
8382 trans->block_rsv = rsv; in btrfs_truncate()
8386 const u64 new_size = inode->vfs_inode.i_size; in btrfs_truncate()
8387 const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize); in btrfs_truncate()
8390 lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state); in btrfs_truncate()
8397 ALIGN(new_size, fs_info->sectorsize), in btrfs_truncate()
8398 (u64)-1, false); in btrfs_truncate()
8402 inode_sub_bytes(&inode->vfs_inode, control.sub_bytes); in btrfs_truncate()
8405 unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state); in btrfs_truncate()
8407 trans->block_rsv = &fs_info->trans_block_rsv; in btrfs_truncate()
8408 if (ret != -ENOSPC && ret != -EAGAIN) in btrfs_truncate()
8425 btrfs_block_rsv_release(fs_info, rsv, -1, NULL); in btrfs_truncate()
8426 ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, in btrfs_truncate()
8436 trans->block_rsv = rsv; in btrfs_truncate()
8449 ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size, 0, 0); in btrfs_truncate()
8463 trans->block_rsv = &fs_info->trans_block_rsv; in btrfs_truncate()
8483 * fsync to truncate all the inode's items from the log and re-log them in btrfs_truncate()
8500 inode = new_inode(dir->i_sb); in btrfs_new_subvol_inode()
8508 inode->i_op = &btrfs_dir_inode_operations; in btrfs_new_subvol_inode()
8509 inode->i_fop = &btrfs_dir_file_operations; in btrfs_new_subvol_inode()
8524 ei->root = NULL; in btrfs_alloc_inode()
8525 ei->generation = 0; in btrfs_alloc_inode()
8526 ei->last_trans = 0; in btrfs_alloc_inode()
8527 ei->last_sub_trans = 0; in btrfs_alloc_inode()
8528 ei->logged_trans = 0; in btrfs_alloc_inode()
8529 ei->delalloc_bytes = 0; in btrfs_alloc_inode()
8530 ei->new_delalloc_bytes = 0; in btrfs_alloc_inode()
8531 ei->defrag_bytes = 0; in btrfs_alloc_inode()
8532 ei->disk_i_size = 0; in btrfs_alloc_inode()
8533 ei->flags = 0; in btrfs_alloc_inode()
8534 ei->ro_flags = 0; in btrfs_alloc_inode()
8535 ei->csum_bytes = 0; in btrfs_alloc_inode()
8536 ei->index_cnt = (u64)-1; in btrfs_alloc_inode()
8537 ei->dir_index = 0; in btrfs_alloc_inode()
8538 ei->last_unlink_trans = 0; in btrfs_alloc_inode()
8539 ei->last_reflink_trans = 0; in btrfs_alloc_inode()
8540 ei->last_log_commit = 0; in btrfs_alloc_inode()
8542 spin_lock_init(&ei->lock); in btrfs_alloc_inode()
8543 ei->outstanding_extents = 0; in btrfs_alloc_inode()
8544 if (sb->s_magic != BTRFS_TEST_MAGIC) in btrfs_alloc_inode()
8545 btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv, in btrfs_alloc_inode()
8547 ei->runtime_flags = 0; in btrfs_alloc_inode()
8548 ei->prop_compress = BTRFS_COMPRESS_NONE; in btrfs_alloc_inode()
8549 ei->defrag_compress = BTRFS_COMPRESS_NONE; in btrfs_alloc_inode()
8551 ei->delayed_node = NULL; in btrfs_alloc_inode()
8553 ei->i_otime.tv_sec = 0; in btrfs_alloc_inode()
8554 ei->i_otime.tv_nsec = 0; in btrfs_alloc_inode()
8556 inode = &ei->vfs_inode; in btrfs_alloc_inode()
8557 extent_map_tree_init(&ei->extent_tree); in btrfs_alloc_inode()
8558 extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO); in btrfs_alloc_inode()
8559 ei->io_tree.inode = ei; in btrfs_alloc_inode()
8560 extent_io_tree_init(fs_info, &ei->file_extent_tree, in btrfs_alloc_inode()
8562 mutex_init(&ei->log_mutex); in btrfs_alloc_inode()
8563 btrfs_ordered_inode_tree_init(&ei->ordered_tree); in btrfs_alloc_inode()
8564 INIT_LIST_HEAD(&ei->delalloc_inodes); in btrfs_alloc_inode()
8565 INIT_LIST_HEAD(&ei->delayed_iput); in btrfs_alloc_inode()
8566 RB_CLEAR_NODE(&ei->rb_node); in btrfs_alloc_inode()
8567 init_rwsem(&ei->i_mmap_lock); in btrfs_alloc_inode()
8575 btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false); in btrfs_test_destroy_inode()
8589 struct btrfs_root *root = inode->root; in btrfs_destroy_inode()
8592 WARN_ON(!hlist_empty(&vfs_inode->i_dentry)); in btrfs_destroy_inode()
8593 WARN_ON(vfs_inode->i_data.nrpages); in btrfs_destroy_inode()
8594 WARN_ON(inode->block_rsv.reserved); in btrfs_destroy_inode()
8595 WARN_ON(inode->block_rsv.size); in btrfs_destroy_inode()
8596 WARN_ON(inode->outstanding_extents); in btrfs_destroy_inode()
8597 if (!S_ISDIR(vfs_inode->i_mode)) { in btrfs_destroy_inode()
8598 WARN_ON(inode->delalloc_bytes); in btrfs_destroy_inode()
8599 WARN_ON(inode->new_delalloc_bytes); in btrfs_destroy_inode()
8601 WARN_ON(inode->csum_bytes); in btrfs_destroy_inode()
8602 WARN_ON(inode->defrag_bytes); in btrfs_destroy_inode()
8619 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); in btrfs_destroy_inode()
8623 btrfs_err(root->fs_info, in btrfs_destroy_inode()
8625 ordered->file_offset, ordered->num_bytes); in btrfs_destroy_inode()
8628 btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent); in btrfs_destroy_inode()
8637 btrfs_drop_extent_map_range(inode, 0, (u64)-1, false); in btrfs_destroy_inode()
8638 btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1); in btrfs_destroy_inode()
8639 btrfs_put_root(inode->root); in btrfs_destroy_inode()
8644 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_drop_inode()
8650 if (btrfs_root_refs(&root->root_item) == 0) in btrfs_drop_inode()
8660 inode_init_once(&ei->vfs_inode); in init_once()
8691 return -ENOMEM; in btrfs_init_cachep()
8700 struct inode *inode = d_inode(path->dentry); in btrfs_getattr()
8701 u32 blocksize = btrfs_sb(inode->i_sb)->sectorsize; in btrfs_getattr()
8702 u32 bi_flags = BTRFS_I(inode)->flags; in btrfs_getattr()
8703 u32 bi_ro_flags = BTRFS_I(inode)->ro_flags; in btrfs_getattr()
8705 stat->result_mask |= STATX_BTIME; in btrfs_getattr()
8706 stat->btime.tv_sec = BTRFS_I(inode)->i_otime.tv_sec; in btrfs_getattr()
8707 stat->btime.tv_nsec = BTRFS_I(inode)->i_otime.tv_nsec; in btrfs_getattr()
8709 stat->attributes |= STATX_ATTR_APPEND; in btrfs_getattr()
8711 stat->attributes |= STATX_ATTR_COMPRESSED; in btrfs_getattr()
8713 stat->attributes |= STATX_ATTR_IMMUTABLE; in btrfs_getattr()
8715 stat->attributes |= STATX_ATTR_NODUMP; in btrfs_getattr()
8717 stat->attributes |= STATX_ATTR_VERITY; in btrfs_getattr()
8719 stat->attributes_mask |= (STATX_ATTR_APPEND | in btrfs_getattr()
8725 stat->dev = BTRFS_I(inode)->root->anon_dev; in btrfs_getattr()
8727 spin_lock(&BTRFS_I(inode)->lock); in btrfs_getattr()
8728 delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes; in btrfs_getattr()
8730 spin_unlock(&BTRFS_I(inode)->lock); in btrfs_getattr()
8731 stat->blocks = (ALIGN(inode_bytes, blocksize) + in btrfs_getattr()
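
The st_blocks value reported by btrfs_getattr() counts both the bytes already accounted on disk and the still-unflushed delalloc bytes, each rounded up to the filesystem block size and then expressed in 512-byte units. A minimal userspace sketch of that arithmetic (the helper and the sample numbers below are hypothetical, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Round x up to the next multiple of a (a is a power of two). */
static uint64_t align_up(uint64_t x, uint64_t a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	uint64_t blocksize = 4096;        /* fs sector size */
	uint64_t inode_bytes = 10000;     /* bytes already accounted on disk */
	uint64_t delalloc_bytes = 3000;   /* dirty, not yet allocated */

	/* 512-byte units, as stat(2) expects in st_blocks. */
	uint64_t blocks = (align_up(inode_bytes, blocksize) +
			   align_up(delalloc_bytes, blocksize)) >> 9;

	printf("st_blocks = %llu\n", (unsigned long long)blocks);
	return 0;
}
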
8741 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); in btrfs_rename_exchange()
8744 struct btrfs_root *root = BTRFS_I(old_dir)->root; in btrfs_rename_exchange()
8745 struct btrfs_root *dest = BTRFS_I(new_dir)->root; in btrfs_rename_exchange()
8746 struct inode *new_inode = new_dentry->d_inode; in btrfs_rename_exchange()
8747 struct inode *old_inode = old_dentry->d_inode; in btrfs_rename_exchange()
8762 * For non-subvolumes allow exchange only within one subvolume, in the in btrfs_rename_exchange()
8769 return -EXDEV; in btrfs_rename_exchange()
8771 ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname); in btrfs_rename_exchange()
8775 ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname); in btrfs_rename_exchange()
8787 down_read(&fs_info->subvol_sem); in btrfs_rename_exchange()
8843 BTRFS_I(old_inode)->dir_index = 0ULL; in btrfs_rename_exchange()
8844 BTRFS_I(new_inode)->dir_index = 0ULL; in btrfs_rename_exchange()
8906 if (old_dentry->d_parent != new_dentry->d_parent) { in btrfs_rename_exchange()
8918 BTRFS_I(old_dentry->d_inode), in btrfs_rename_exchange()
8933 BTRFS_I(new_dentry->d_inode), in btrfs_rename_exchange()
8957 if (old_inode->i_nlink == 1) in btrfs_rename_exchange()
8958 BTRFS_I(old_inode)->dir_index = old_idx; in btrfs_rename_exchange()
8959 if (new_inode->i_nlink == 1) in btrfs_rename_exchange()
8960 BTRFS_I(new_inode)->dir_index = new_idx; in btrfs_rename_exchange()
8970 old_rename_ctx.index, new_dentry->d_parent); in btrfs_rename_exchange()
8972 new_rename_ctx.index, old_dentry->d_parent); in btrfs_rename_exchange()
8985 up_read(&fs_info->subvol_sem); in btrfs_rename_exchange()
8997 inode = new_inode(dir->i_sb); in new_whiteout_inode()
9001 inode->i_op = &btrfs_special_inode_operations; in new_whiteout_inode()
9002 init_special_inode(inode, inode->i_mode, WHITEOUT_DEV); in new_whiteout_inode()
9012 struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb); in btrfs_rename()
9019 struct btrfs_root *root = BTRFS_I(old_dir)->root; in btrfs_rename()
9020 struct btrfs_root *dest = BTRFS_I(new_dir)->root; in btrfs_rename()
9032 return -EPERM; in btrfs_rename()
9036 return -EXDEV; in btrfs_rename()
9040 return -ENOTEMPTY; in btrfs_rename()
9042 if (S_ISDIR(old_inode->i_mode) && new_inode && in btrfs_rename()
9043 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) in btrfs_rename()
9044 return -ENOTEMPTY; in btrfs_rename()
9046 ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname); in btrfs_rename()
9050 ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname); in btrfs_rename()
9057 ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name); in btrfs_rename()
9059 if (ret == -EEXIST) { in btrfs_rename()
9066 /* maybe -EOVERFLOW */ in btrfs_rename()
9076 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size) in btrfs_rename()
9077 filemap_flush(old_inode->i_mapping); in btrfs_rename()
9082 ret = -ENOMEM; in btrfs_rename()
9095 down_read(&fs_info->subvol_sem); in btrfs_rename()
9147 BTRFS_I(old_inode)->dir_index = 0ULL; in btrfs_rename()
9187 if (old_dentry->d_parent != new_dentry->d_parent) in btrfs_rename()
9210 BUG_ON(new_inode->i_nlink == 0); in btrfs_rename()
9216 if (!ret && new_inode->i_nlink == 0) in btrfs_rename()
9232 if (old_inode->i_nlink == 1) in btrfs_rename()
9233 BTRFS_I(old_inode)->dir_index = index; in btrfs_rename()
9237 rename_ctx.index, new_dentry->d_parent); in btrfs_rename()
9259 up_read(&fs_info->subvol_sem); in btrfs_rename()
9278 return -EINVAL; in btrfs_rename2()
9287 btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info); in btrfs_rename2()
9306 inode = delalloc_work->inode; in btrfs_run_delalloc_work()
9307 filemap_flush(inode->i_mapping); in btrfs_run_delalloc_work()
9309 &BTRFS_I(inode)->runtime_flags)) in btrfs_run_delalloc_work()
9310 filemap_flush(inode->i_mapping); in btrfs_run_delalloc_work()
9313 complete(&delalloc_work->completion); in btrfs_run_delalloc_work()
9324 init_completion(&work->completion); in btrfs_alloc_delalloc_work()
9325 INIT_LIST_HEAD(&work->list); in btrfs_alloc_delalloc_work()
9326 work->inode = inode; in btrfs_alloc_delalloc_work()
9327 btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL); in btrfs_alloc_delalloc_work()
9346 bool full_flush = wbc->nr_to_write == LONG_MAX; in start_delalloc_inodes()
9348 mutex_lock(&root->delalloc_mutex); in start_delalloc_inodes()
9349 spin_lock(&root->delalloc_lock); in start_delalloc_inodes()
9350 list_splice_init(&root->delalloc_inodes, &splice); in start_delalloc_inodes()
9355 list_move_tail(&binode->delalloc_inodes, in start_delalloc_inodes()
9356 &root->delalloc_inodes); in start_delalloc_inodes()
9359 test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags)) in start_delalloc_inodes()
9362 inode = igrab(&binode->vfs_inode); in start_delalloc_inodes()
9364 cond_resched_lock(&root->delalloc_lock); in start_delalloc_inodes()
9367 spin_unlock(&root->delalloc_lock); in start_delalloc_inodes()
9371 &binode->runtime_flags); in start_delalloc_inodes()
9376 ret = -ENOMEM; in start_delalloc_inodes()
9379 list_add_tail(&work->list, &works); in start_delalloc_inodes()
9380 btrfs_queue_work(root->fs_info->flush_workers, in start_delalloc_inodes()
9381 &work->work); in start_delalloc_inodes()
9383 ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc); in start_delalloc_inodes()
9385 if (ret || wbc->nr_to_write <= 0) in start_delalloc_inodes()
9389 spin_lock(&root->delalloc_lock); in start_delalloc_inodes()
9391 spin_unlock(&root->delalloc_lock); in start_delalloc_inodes()
9395 list_del_init(&work->list); in start_delalloc_inodes()
9396 wait_for_completion(&work->completion); in start_delalloc_inodes()
9401 spin_lock(&root->delalloc_lock); in start_delalloc_inodes()
9402 list_splice_tail(&splice, &root->delalloc_inodes); in start_delalloc_inodes()
9403 spin_unlock(&root->delalloc_lock); in start_delalloc_inodes()
9405 mutex_unlock(&root->delalloc_mutex); in start_delalloc_inodes()
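
start_delalloc_inodes() drains the per-root delalloc list by splicing it onto a private list under the spinlock, then doing the expensive flushing with the lock dropped and re-splicing any leftovers afterwards. A minimal userspace analog of that splice-then-process pattern, assuming a toy item type and pthread locking (none of these names come from the kernel):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy analog of the delalloc-inode list: work items protected by a lock,
 * drained onto a private list so the heavy per-item work runs unlocked. */
struct item {
	int id;
	struct item *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *pending;	/* analog of root->delalloc_inodes */

static void queue_item(int id)
{
	struct item *it = malloc(sizeof(*it));

	it->id = id;
	pthread_mutex_lock(&list_lock);
	it->next = pending;
	pending = it;
	pthread_mutex_unlock(&list_lock);
}

static void flush_all(void)
{
	struct item *splice;

	/* Take everything in one shot, like list_splice_init(). */
	pthread_mutex_lock(&list_lock);
	splice = pending;
	pending = NULL;
	pthread_mutex_unlock(&list_lock);

	while (splice) {
		struct item *it = splice;

		splice = it->next;
		/* Heavy work (filemap_flush() in the kernel) runs unlocked. */
		printf("flushing item %d\n", it->id);
		free(it);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		queue_item(i);
	flush_all();
	return 0;
}
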
9417 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_start_delalloc_snapshot()
9420 return -EROFS; in btrfs_start_delalloc_snapshot()
9439 return -EROFS; in btrfs_start_delalloc_roots()
9441 mutex_lock(&fs_info->delalloc_root_mutex); in btrfs_start_delalloc_roots()
9442 spin_lock(&fs_info->delalloc_root_lock); in btrfs_start_delalloc_roots()
9443 list_splice_init(&fs_info->delalloc_roots, &splice); in btrfs_start_delalloc_roots()
9456 list_move_tail(&root->delalloc_root, in btrfs_start_delalloc_roots()
9457 &fs_info->delalloc_roots); in btrfs_start_delalloc_roots()
9458 spin_unlock(&fs_info->delalloc_root_lock); in btrfs_start_delalloc_roots()
9464 spin_lock(&fs_info->delalloc_root_lock); in btrfs_start_delalloc_roots()
9466 spin_unlock(&fs_info->delalloc_root_lock); in btrfs_start_delalloc_roots()
9471 spin_lock(&fs_info->delalloc_root_lock); in btrfs_start_delalloc_roots()
9472 list_splice_tail(&splice, &fs_info->delalloc_roots); in btrfs_start_delalloc_roots()
9473 spin_unlock(&fs_info->delalloc_root_lock); in btrfs_start_delalloc_roots()
9475 mutex_unlock(&fs_info->delalloc_root_mutex); in btrfs_start_delalloc_roots()
9482 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); in btrfs_symlink()
9484 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_symlink()
9502 return -ENAMETOOLONG; in btrfs_symlink()
9504 inode = new_inode(dir->i_sb); in btrfs_symlink()
9506 return -ENOMEM; in btrfs_symlink()
9508 inode->i_op = &btrfs_symlink_inode_operations; in btrfs_symlink()
9510 inode->i_mapping->a_ops = &btrfs_aops; in btrfs_symlink()
9533 err = -ENOMEM; in btrfs_symlink()
9552 leaf = path->nodes[0]; in btrfs_symlink()
9553 ei = btrfs_item_ptr(leaf, path->slots[0], in btrfs_symlink()
9555 btrfs_set_file_extent_generation(leaf, ei, trans->transid); in btrfs_symlink()
9591 u64 start = ins->objectid; in insert_prealloc_file_extent()
9592 u64 len = ins->offset; in insert_prealloc_file_extent()
9632 ret = -ENOMEM; in insert_prealloc_file_extent()
9637 file_offset + len - 1, &extent_info, in insert_prealloc_file_extent()
9652 btrfs_qgroup_free_refroot(inode->root->fs_info, in insert_prealloc_file_extent()
9653 inode->root->root_key.objectid, qgroup_released, in insert_prealloc_file_extent()
9663 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); in __btrfs_prealloc_file_range()
9665 struct btrfs_root *root = BTRFS_I(inode)->root; in __btrfs_prealloc_file_range()
9671 u64 last_alloc = (u64)-1; in __btrfs_prealloc_file_range()
9674 u64 end = start + num_bytes - 1; in __btrfs_prealloc_file_range()
9695 * ->bytes_may_use to ->bytes_reserved. Any error that happens in __btrfs_prealloc_file_range()
9722 cur_offset + ins.offset - 1, false); in __btrfs_prealloc_file_range()
9727 em->start = cur_offset; in __btrfs_prealloc_file_range()
9728 em->orig_start = cur_offset; in __btrfs_prealloc_file_range()
9729 em->len = ins.offset; in __btrfs_prealloc_file_range()
9730 em->block_start = ins.objectid; in __btrfs_prealloc_file_range()
9731 em->block_len = ins.offset; in __btrfs_prealloc_file_range()
9732 em->orig_block_len = ins.offset; in __btrfs_prealloc_file_range()
9733 em->ram_bytes = ins.offset; in __btrfs_prealloc_file_range()
9734 set_bit(EXTENT_FLAG_PREALLOC, &em->flags); in __btrfs_prealloc_file_range()
9735 em->generation = trans->transid; in __btrfs_prealloc_file_range()
9740 num_bytes -= ins.offset; in __btrfs_prealloc_file_range()
9746 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; in __btrfs_prealloc_file_range()
9748 (actual_len > inode->i_size) && in __btrfs_prealloc_file_range()
9749 (cur_offset > inode->i_size)) { in __btrfs_prealloc_file_range()
9774 end - clear_offset + 1); in __btrfs_prealloc_file_range()
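
The preallocation loop above reserves one extent per iteration and walks through the requested range, shrinking num_bytes by however much the allocator actually returned (ins.offset) and advancing the file offset by the same amount. A rough sketch of that loop shape only, assuming a made-up allocator and an illustrative per-extent cap that is not the kernel's real limit:

#include <stdint.h>
#include <stdio.h>

#define MAX_EXTENT_SIZE (128ULL * 1024 * 1024)	/* illustrative cap only */

/* Stand-in for the allocator: pretend it grants the requested length. */
static uint64_t reserve_extent(uint64_t len)
{
	return len;
}

int main(void)
{
	uint64_t start = 0, num_bytes = 300ULL * 1024 * 1024;
	uint64_t cur_offset = start;

	/* Carve the preallocation into one extent per iteration, advancing
	 * the file offset by however much was actually granted. */
	while (num_bytes > 0) {
		uint64_t want = num_bytes < MAX_EXTENT_SIZE ? num_bytes : MAX_EXTENT_SIZE;
		uint64_t got = reserve_extent(want);

		printf("extent at file offset %llu, len %llu\n",
		       (unsigned long long)cur_offset, (unsigned long long)got);
		num_bytes -= got;
		cur_offset += got;
	}
	return 0;
}
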
9799 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_permission()
9800 umode_t mode = inode->i_mode; in btrfs_permission()
9805 return -EROFS; in btrfs_permission()
9806 if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) in btrfs_permission()
9807 return -EACCES; in btrfs_permission()
9815 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb); in btrfs_tmpfile()
9817 struct btrfs_root *root = BTRFS_I(dir)->root; in btrfs_tmpfile()
9821 .dentry = file->f_path.dentry, in btrfs_tmpfile()
9827 inode = new_inode(dir->i_sb); in btrfs_tmpfile()
9829 return -ENOMEM; in btrfs_tmpfile()
9831 inode->i_fop = &btrfs_file_operations; in btrfs_tmpfile()
9832 inode->i_op = &btrfs_file_inode_operations; in btrfs_tmpfile()
9833 inode->i_mapping->a_ops = &btrfs_aops; in btrfs_tmpfile()
9853 * d_tmpfile() -> inode_dec_link_count() -> drop_nlink() in btrfs_tmpfile()
9875 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_set_range_writeback()
9881 ASSERT(end + 1 - start <= U32_MAX); in btrfs_set_range_writeback()
9882 len = end + 1 - start; in btrfs_set_range_writeback()
9884 page = find_get_page(inode->vfs_inode.i_mapping, index); in btrfs_set_range_writeback()
9906 if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K) in btrfs_encoded_io_compression_from_extent()
9907 return -EINVAL; in btrfs_encoded_io_compression_from_extent()
9909 (fs_info->sectorsize_bits - 12); in btrfs_encoded_io_compression_from_extent()
9913 return -EUCLEAN; in btrfs_encoded_io_compression_from_extent()
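
For LZO the encoded-I/O compression value depends on the sector size: 4K through 64K map to consecutive variants starting at the 4K one, which is what the (fs_info->sectorsize_bits - 12) offset computes. A small standalone restatement of that mapping; the enum values below are placeholders, not the real uapi constants:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical numbering; only the spacing between variants matters here. */
enum {
	ENCODED_LZO_4K = 1,
	ENCODED_LZO_8K,
	ENCODED_LZO_16K,
	ENCODED_LZO_32K,
	ENCODED_LZO_64K,
};

static int lzo_variant(uint32_t sectorsize)
{
	uint32_t bits = 0;

	if (sectorsize < 4096 || sectorsize > 65536)
		return -1;	/* outside the supported 4K..64K range */
	while ((1U << bits) < sectorsize)
		bits++;
	/* 4K sectors -> bits == 12 -> the 4K variant, and so on upward. */
	return ENCODED_LZO_4K + (bits - 12);
}

int main(void)
{
	printf("4K  -> %d\n", lzo_variant(4096));
	printf("16K -> %d\n", lzo_variant(16384));
	printf("64K -> %d\n", lzo_variant(65536));
	return 0;
}
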
9926 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); in btrfs_encoded_read_inline()
9927 struct btrfs_root *root = inode->root; in btrfs_encoded_read_inline()
9928 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_encoded_read_inline()
9929 struct extent_io_tree *io_tree = &inode->io_tree; in btrfs_encoded_read_inline()
9940 ret = -ENOMEM; in btrfs_encoded_read_inline()
9948 ret = -EIO; in btrfs_encoded_read_inline()
9952 leaf = path->nodes[0]; in btrfs_encoded_read_inline()
9953 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item); in btrfs_encoded_read_inline()
9958 encoded->len = min_t(u64, extent_start + ram_bytes, in btrfs_encoded_read_inline()
9959 inode->vfs_inode.i_size) - iocb->ki_pos; in btrfs_encoded_read_inline()
9964 encoded->compression = ret; in btrfs_encoded_read_inline()
9965 if (encoded->compression) { in btrfs_encoded_read_inline()
9969 path->slots[0]); in btrfs_encoded_read_inline()
9971 ret = -ENOBUFS; in btrfs_encoded_read_inline()
9975 encoded->unencoded_len = ram_bytes; in btrfs_encoded_read_inline()
9976 encoded->unencoded_offset = iocb->ki_pos - extent_start; in btrfs_encoded_read_inline()
9978 count = min_t(u64, count, encoded->len); in btrfs_encoded_read_inline()
9979 encoded->len = count; in btrfs_encoded_read_inline()
9980 encoded->unencoded_len = count; in btrfs_encoded_read_inline()
9981 ptr += iocb->ki_pos - extent_start; in btrfs_encoded_read_inline()
9986 ret = -ENOMEM; in btrfs_encoded_read_inline()
9997 ret = -EFAULT; in btrfs_encoded_read_inline()
10012 struct btrfs_encoded_read_private *priv = bbio->private; in btrfs_encoded_read_endio()
10014 if (bbio->bio.bi_status) { in btrfs_encoded_read_endio()
10023 WRITE_ONCE(priv->status, bbio->bio.bi_status); in btrfs_encoded_read_endio()
10025 if (atomic_dec_and_test(&priv->pending)) in btrfs_encoded_read_endio()
10026 wake_up(&priv->wait); in btrfs_encoded_read_endio()
10027 bio_put(&bbio->bio); in btrfs_encoded_read_endio()
10034 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_encoded_read_regular_fill_pages()
10045 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; in btrfs_encoded_read_regular_fill_pages()
10046 bbio->inode = inode; in btrfs_encoded_read_regular_fill_pages()
10051 if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) { in btrfs_encoded_read_regular_fill_pages()
10057 bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; in btrfs_encoded_read_regular_fill_pages()
10058 bbio->inode = inode; in btrfs_encoded_read_regular_fill_pages()
10064 disk_io_size -= bytes; in btrfs_encoded_read_regular_fill_pages()
10084 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); in btrfs_encoded_read_regular()
10085 struct extent_io_tree *io_tree = &inode->io_tree; in btrfs_encoded_read_regular()
10095 return -ENOMEM; in btrfs_encoded_read_regular()
10098 ret = -ENOMEM; in btrfs_encoded_read_regular()
10115 i = (iocb->ki_pos - start) >> PAGE_SHIFT; in btrfs_encoded_read_regular()
10116 page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1); in btrfs_encoded_read_regular()
10120 size_t bytes = min_t(size_t, count - cur, in btrfs_encoded_read_regular()
10121 PAGE_SIZE - page_offset); in btrfs_encoded_read_regular()
10125 ret = -EFAULT; in btrfs_encoded_read_regular()
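
Once the extent has been read into the page array, btrfs_encoded_read_regular() copies it out to the user buffer page by page, starting partway into the first page when iocb->ki_pos is not page-aligned. A minimal sketch of that index/offset arithmetic with made-up sample values:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  4096u
#define PAGE_SHIFT 12

int main(void)
{
	/* Hypothetical read: the extent was read into pages starting at file
	 * offset 'start', and 'count' bytes are wanted beginning at 'pos'. */
	uint64_t start = 0x10000, pos = 0x11234, count = 10000;

	uint64_t i = (pos - start) >> PAGE_SHIFT;              /* first page index */
	uint64_t page_off = (pos - start) & (PAGE_SIZE - 1);   /* offset within it */
	uint64_t cur = 0;

	while (cur < count) {
		uint64_t bytes = count - cur;

		if (bytes > PAGE_SIZE - page_off)
			bytes = PAGE_SIZE - page_off;
		printf("copy %4llu bytes from page %llu at offset %llu\n",
		       (unsigned long long)bytes, (unsigned long long)i,
		       (unsigned long long)page_off);
		cur += bytes;
		page_off = 0;	/* later pages are copied from their start */
		i++;
	}
	return 0;
}
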
10145 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); in btrfs_encoded_read()
10146 struct btrfs_fs_info *fs_info = inode->root->fs_info; in btrfs_encoded_read()
10147 struct extent_io_tree *io_tree = &inode->io_tree; in btrfs_encoded_read()
10155 file_accessed(iocb->ki_filp); in btrfs_encoded_read()
10159 if (iocb->ki_pos >= inode->vfs_inode.i_size) { in btrfs_encoded_read()
10163 start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize); in btrfs_encoded_read()
10165 * We don't know how long the extent containing iocb->ki_pos is, but if in btrfs_encoded_read()
10168 lockend = start + BTRFS_MAX_UNCOMPRESSED - 1; in btrfs_encoded_read()
10173 ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, in btrfs_encoded_read()
10174 lockend - start + 1); in btrfs_encoded_read()
10179 lockend - start + 1); in btrfs_encoded_read()
10187 em = btrfs_get_extent(inode, NULL, 0, start, lockend - start + 1); in btrfs_encoded_read()
10193 if (em->block_start == EXTENT_MAP_INLINE) { in btrfs_encoded_read()
10194 u64 extent_start = em->start; in btrfs_encoded_read()
10212 encoded->len = min_t(u64, extent_map_end(em), in btrfs_encoded_read()
10213 inode->vfs_inode.i_size) - iocb->ki_pos; in btrfs_encoded_read()
10214 if (em->block_start == EXTENT_MAP_HOLE || in btrfs_encoded_read()
10215 test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { in btrfs_encoded_read()
10217 count = min_t(u64, count, encoded->len); in btrfs_encoded_read()
10218 encoded->len = count; in btrfs_encoded_read()
10219 encoded->unencoded_len = count; in btrfs_encoded_read()
10220 } else if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { in btrfs_encoded_read()
10221 disk_bytenr = em->block_start; in btrfs_encoded_read()
10226 if (em->block_len > count) { in btrfs_encoded_read()
10227 ret = -ENOBUFS; in btrfs_encoded_read()
10230 disk_io_size = em->block_len; in btrfs_encoded_read()
10231 count = em->block_len; in btrfs_encoded_read()
10232 encoded->unencoded_len = em->ram_bytes; in btrfs_encoded_read()
10233 encoded->unencoded_offset = iocb->ki_pos - em->orig_start; in btrfs_encoded_read()
10235 em->compress_type); in btrfs_encoded_read()
10238 encoded->compression = ret; in btrfs_encoded_read()
10240 disk_bytenr = em->block_start + (start - em->start); in btrfs_encoded_read()
10241 if (encoded->len > count) in btrfs_encoded_read()
10242 encoded->len = count; in btrfs_encoded_read()
10247 disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start; in btrfs_encoded_read()
10248 count = start + disk_io_size - iocb->ki_pos; in btrfs_encoded_read()
10249 encoded->len = count; in btrfs_encoded_read()
10250 encoded->unencoded_len = count; in btrfs_encoded_read()
10251 disk_io_size = ALIGN(disk_io_size, fs_info->sectorsize); in btrfs_encoded_read()
10262 ret = -EFAULT; in btrfs_encoded_read()
10267 encoded->compression, in btrfs_encoded_read()
10273 iocb->ki_pos += encoded->len; in btrfs_encoded_read()
10288 struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp)); in btrfs_do_encoded_write()
10289 struct btrfs_root *root = inode->root; in btrfs_do_encoded_write()
10290 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_do_encoded_write()
10291 struct extent_io_tree *io_tree = &inode->io_tree; in btrfs_do_encoded_write()
10306 switch (encoded->compression) { in btrfs_do_encoded_write()
10319 if (encoded->compression - in btrfs_do_encoded_write()
10321 fs_info->sectorsize_bits) in btrfs_do_encoded_write()
10322 return -EINVAL; in btrfs_do_encoded_write()
10326 return -EINVAL; in btrfs_do_encoded_write()
10328 if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE) in btrfs_do_encoded_write()
10329 return -EINVAL; in btrfs_do_encoded_write()
10335 if (inode->flags & BTRFS_INODE_NODATASUM) in btrfs_do_encoded_write()
10336 return -EINVAL; in btrfs_do_encoded_write()
10341 if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED || in btrfs_do_encoded_write()
10343 return -EINVAL; in btrfs_do_encoded_write()
10358 if (orig_count >= encoded->unencoded_len) in btrfs_do_encoded_write()
10359 return -EINVAL; in btrfs_do_encoded_write()
10362 start = iocb->ki_pos; in btrfs_do_encoded_write()
10363 if (!IS_ALIGNED(start, fs_info->sectorsize)) in btrfs_do_encoded_write()
10364 return -EINVAL; in btrfs_do_encoded_write()
10371 if (start + encoded->len < inode->vfs_inode.i_size && in btrfs_do_encoded_write()
10372 !IS_ALIGNED(start + encoded->len, fs_info->sectorsize)) in btrfs_do_encoded_write()
10373 return -EINVAL; in btrfs_do_encoded_write()
10375 /* Finally, the offset in the unencoded data must be sector-aligned. */ in btrfs_do_encoded_write()
10376 if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize)) in btrfs_do_encoded_write()
10377 return -EINVAL; in btrfs_do_encoded_write()
10379 num_bytes = ALIGN(encoded->len, fs_info->sectorsize); in btrfs_do_encoded_write()
10380 ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize); in btrfs_do_encoded_write()
10381 end = start + num_bytes - 1; in btrfs_do_encoded_write()
10385 * sector-aligned. For convenience, we extend it with zeroes if it in btrfs_do_encoded_write()
10388 disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize); in btrfs_do_encoded_write()
10392 return -ENOMEM; in btrfs_do_encoded_write()
10399 ret = -ENOMEM; in btrfs_do_encoded_write()
10405 ret = -EFAULT; in btrfs_do_encoded_write()
10409 memset(kaddr + bytes, 0, PAGE_SIZE - bytes); in btrfs_do_encoded_write()
10416 ret = btrfs_wait_ordered_range(&inode->vfs_inode, start, num_bytes); in btrfs_do_encoded_write()
10419 ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping, in btrfs_do_encoded_write()
10427 !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end)) in btrfs_do_encoded_write()
10436 * We don't use the higher-level delalloc space functions because our in btrfs_do_encoded_write()
10451 if (start == 0 && encoded->unencoded_len == encoded->len && in btrfs_do_encoded_write()
10452 encoded->unencoded_offset == 0) { in btrfs_do_encoded_write()
10453 ret = cow_file_range_inline(inode, encoded->len, orig_count, in btrfs_do_encoded_write()
10469 start - encoded->unencoded_offset, ins.objectid, in btrfs_do_encoded_write()
10480 encoded->unencoded_offset, in btrfs_do_encoded_write()
10491 if (start + encoded->len > inode->vfs_inode.i_size) in btrfs_do_encoded_write()
10492 i_size_write(&inode->vfs_inode, start + encoded->len); in btrfs_do_encoded_write()
10528 iocb->ki_pos += encoded->len; in btrfs_do_encoded_write()
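
Before doing any work, btrfs_do_encoded_write() rejects ranges that are not sector-aligned: the start must be aligned, the end must be aligned unless the write lands at or beyond i_size, and the offset into the unencoded data must be aligned as well. A standalone restatement of those checks, using a hypothetical struct whose fields only mirror the excerpt:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct encoded_args {
	uint64_t pos;              /* iocb->ki_pos */
	uint64_t len;              /* encoded->len */
	uint64_t unencoded_offset; /* encoded->unencoded_offset */
};

static bool aligned(uint64_t x, uint64_t a)
{
	return (x & (a - 1)) == 0;
}

static bool encoded_write_ok(const struct encoded_args *a,
			     uint64_t sectorsize, uint64_t i_size)
{
	if (!aligned(a->pos, sectorsize))
		return false;			/* start must be aligned */
	if (a->pos + a->len < i_size &&
	    !aligned(a->pos + a->len, sectorsize))
		return false;			/* end aligned unless at/past EOF */
	if (!aligned(a->unencoded_offset, sectorsize))
		return false;
	return true;
}

int main(void)
{
	struct encoded_args a = { .pos = 65536, .len = 6000, .unencoded_offset = 0 };

	/* Unaligned end is allowed here because the write reaches i_size. */
	printf("ok = %d\n", encoded_write_ok(&a, 4096, 65536 + 6000));
	return 0;
}
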
10541 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; in btrfs_add_swapfile_pin()
10548 return -ENOMEM; in btrfs_add_swapfile_pin()
10549 sp->ptr = ptr; in btrfs_add_swapfile_pin()
10550 sp->inode = inode; in btrfs_add_swapfile_pin()
10551 sp->is_block_group = is_block_group; in btrfs_add_swapfile_pin()
10552 sp->bg_extent_count = 1; in btrfs_add_swapfile_pin()
10554 spin_lock(&fs_info->swapfile_pins_lock); in btrfs_add_swapfile_pin()
10555 p = &fs_info->swapfile_pins.rb_node; in btrfs_add_swapfile_pin()
10559 if (sp->ptr < entry->ptr || in btrfs_add_swapfile_pin()
10560 (sp->ptr == entry->ptr && sp->inode < entry->inode)) { in btrfs_add_swapfile_pin()
10561 p = &(*p)->rb_left; in btrfs_add_swapfile_pin()
10562 } else if (sp->ptr > entry->ptr || in btrfs_add_swapfile_pin()
10563 (sp->ptr == entry->ptr && sp->inode > entry->inode)) { in btrfs_add_swapfile_pin()
10564 p = &(*p)->rb_right; in btrfs_add_swapfile_pin()
10567 entry->bg_extent_count++; in btrfs_add_swapfile_pin()
10568 spin_unlock(&fs_info->swapfile_pins_lock); in btrfs_add_swapfile_pin()
10573 rb_link_node(&sp->node, parent, p); in btrfs_add_swapfile_pin()
10574 rb_insert_color(&sp->node, &fs_info->swapfile_pins); in btrfs_add_swapfile_pin()
10575 spin_unlock(&fs_info->swapfile_pins_lock); in btrfs_add_swapfile_pin()
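
The swapfile pin tree above is ordered first by the pinned pointer (block group or device) and then by the owning inode; finding an equal key means the pin already exists and only its extent count is bumped. The same comparison rule, restated as a plain function with placeholder key types:

#include <stdint.h>
#include <stdio.h>

/* Placeholder key: in the kernel these are the pinned object pointer and
 * the swapfile's inode pointer. */
struct pin_key {
	uintptr_t ptr;
	uintptr_t inode;
};

static int pin_cmp(const struct pin_key *a, const struct pin_key *b)
{
	if (a->ptr < b->ptr || (a->ptr == b->ptr && a->inode < b->inode))
		return -1;	/* descend left */
	if (a->ptr > b->ptr || (a->ptr == b->ptr && a->inode > b->inode))
		return 1;	/* descend right */
	return 0;		/* same pin: reuse the entry, bump its count */
}

int main(void)
{
	struct pin_key a = { 0x1000, 0x2000 };
	struct pin_key b = { 0x1000, 0x3000 };

	printf("cmp = %d\n", pin_cmp(&a, &b));	/* -1: a sorts before b */
	return 0;
}
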
10582 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; in btrfs_free_swapfile_pins()
10586 spin_lock(&fs_info->swapfile_pins_lock); in btrfs_free_swapfile_pins()
10587 node = rb_first(&fs_info->swapfile_pins); in btrfs_free_swapfile_pins()
10591 if (sp->inode == inode) { in btrfs_free_swapfile_pins()
10592 rb_erase(&sp->node, &fs_info->swapfile_pins); in btrfs_free_swapfile_pins()
10593 if (sp->is_block_group) { in btrfs_free_swapfile_pins()
10594 btrfs_dec_block_group_swap_extents(sp->ptr, in btrfs_free_swapfile_pins()
10595 sp->bg_extent_count); in btrfs_free_swapfile_pins()
10596 btrfs_put_block_group(sp->ptr); in btrfs_free_swapfile_pins()
10602 spin_unlock(&fs_info->swapfile_pins_lock); in btrfs_free_swapfile_pins()
10628 if (bsi->nr_pages >= sis->max) in btrfs_add_swap_extent()
10631 max_pages = sis->max - bsi->nr_pages; in btrfs_add_swap_extent()
10632 first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT; in btrfs_add_swap_extent()
10633 next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT; in btrfs_add_swap_extent()
10637 nr_pages = next_ppage - first_ppage; in btrfs_add_swap_extent()
10641 if (bsi->start == 0) in btrfs_add_swap_extent()
10643 if (bsi->lowest_ppage > first_ppage_reported) in btrfs_add_swap_extent()
10644 bsi->lowest_ppage = first_ppage_reported; in btrfs_add_swap_extent()
10645 if (bsi->highest_ppage < (next_ppage - 1)) in btrfs_add_swap_extent()
10646 bsi->highest_ppage = next_ppage - 1; in btrfs_add_swap_extent()
10648 ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage); in btrfs_add_swap_extent()
10651 bsi->nr_extents += ret; in btrfs_add_swap_extent()
10652 bsi->nr_pages += nr_pages; in btrfs_add_swap_extent()
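
btrfs_add_swap_extent() converts a physical byte extent into whole swap pages by rounding the start up and the end down to page boundaries; an extent that does not cover at least one full page contributes nothing. A small worked example of that rounding (page size and sample extent are hypothetical):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  4096ull
#define PAGE_SHIFT 12

static uint64_t align_up(uint64_t x)   { return (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); }
static uint64_t align_down(uint64_t x) { return x & ~(PAGE_SIZE - 1); }

int main(void)
{
	/* Hypothetical physical extent of the swapfile on disk. */
	uint64_t block_start = 0x12345600, block_len = 3 * PAGE_SIZE + 512;

	/* Only whole pages inside the extent are usable for swapping, so
	 * round the start up and the end down before converting to pages. */
	uint64_t first_ppage = align_up(block_start) >> PAGE_SHIFT;
	uint64_t next_ppage  = align_down(block_start + block_len) >> PAGE_SHIFT;

	if (next_ppage > first_ppage)
		printf("first page %llu, %llu usable pages\n",
		       (unsigned long long)first_ppage,
		       (unsigned long long)(next_ppage - first_ppage));
	else
		printf("extent too small to contribute any whole page\n");
	return 0;
}
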
10661 atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles); in btrfs_swap_deactivate()
10668 struct btrfs_root *root = BTRFS_I(inode)->root; in btrfs_swap_activate()
10669 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_swap_activate()
10670 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; in btrfs_swap_activate()
10675 .lowest_ppage = (sector_t)-1ULL, in btrfs_swap_activate()
10686 ret = btrfs_wait_ordered_range(inode, 0, (u64)-1); in btrfs_swap_activate()
10693 if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) { in btrfs_swap_activate()
10695 return -EINVAL; in btrfs_swap_activate()
10697 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) { in btrfs_swap_activate()
10698 btrfs_warn(fs_info, "swapfile must not be copy-on-write"); in btrfs_swap_activate()
10699 return -EINVAL; in btrfs_swap_activate()
10701 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { in btrfs_swap_activate()
10703 return -EINVAL; in btrfs_swap_activate()
10710 * fs_info->swapfile_pins prevents them from running while the swap in btrfs_swap_activate()
10718 return -EBUSY; in btrfs_swap_activate()
10728 if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) { in btrfs_swap_activate()
10732 return -EINVAL; in btrfs_swap_activate()
10744 spin_lock(&root->root_item_lock); in btrfs_swap_activate()
10746 spin_unlock(&root->root_item_lock); in btrfs_swap_activate()
10748 btrfs_drew_write_unlock(&root->snapshot_lock); in btrfs_swap_activate()
10752 root->root_key.objectid); in btrfs_swap_activate()
10753 return -EPERM; in btrfs_swap_activate()
10755 atomic_inc(&root->nr_swapfiles); in btrfs_swap_activate()
10756 spin_unlock(&root->root_item_lock); in btrfs_swap_activate()
10758 isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize); in btrfs_swap_activate()
10760 lock_extent(io_tree, 0, isize - 1, &cached_state); in btrfs_swap_activate()
10765 u64 len = isize - start; in btrfs_swap_activate()
10773 if (em->block_start == EXTENT_MAP_HOLE) { in btrfs_swap_activate()
10775 ret = -EINVAL; in btrfs_swap_activate()
10778 if (em->block_start == EXTENT_MAP_INLINE) { in btrfs_swap_activate()
10787 ret = -EINVAL; in btrfs_swap_activate()
10790 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) { in btrfs_swap_activate()
10792 ret = -EINVAL; in btrfs_swap_activate()
10796 logical_block_start = em->block_start + (start - em->start); in btrfs_swap_activate()
10797 len = min(len, em->len - (start - em->start)); in btrfs_swap_activate()
10808 "swapfile must not be copy-on-write"); in btrfs_swap_activate()
10809 ret = -EINVAL; in btrfs_swap_activate()
10819 if (em->map_lookup->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { in btrfs_swap_activate()
10822 ret = -EINVAL; in btrfs_swap_activate()
10827 device = em->map_lookup->stripes[0].dev; in btrfs_swap_activate()
10833 } else if (device != em->map_lookup->stripes[0].dev) { in btrfs_swap_activate()
10835 ret = -EINVAL; in btrfs_swap_activate()
10839 physical_block_start = (em->map_lookup->stripes[0].physical + in btrfs_swap_activate()
10840 (logical_block_start - em->start)); in btrfs_swap_activate()
10841 len = min(len, em->len - (logical_block_start - em->start)); in btrfs_swap_activate()
10849 ret = -EINVAL; in btrfs_swap_activate()
10855 "block group for swapfile at %llu is read-only%s", in btrfs_swap_activate()
10856 bg->start, in btrfs_swap_activate()
10857 atomic_read(&fs_info->scrubs_running) ? in btrfs_swap_activate()
10860 ret = -EINVAL; in btrfs_swap_activate()
10899 unlock_extent(io_tree, 0, isize - 1, &cached_state); in btrfs_swap_activate()
10904 btrfs_drew_write_unlock(&root->snapshot_lock); in btrfs_swap_activate()
10912 sis->bdev = device->bdev; in btrfs_swap_activate()
10913 *span = bsi.highest_ppage - bsi.lowest_ppage + 1; in btrfs_swap_activate()
10914 sis->max = bsi.nr_pages; in btrfs_swap_activate()
10915 sis->pages = bsi.nr_pages - 1; in btrfs_swap_activate()
10916 sis->highest_bit = bsi.nr_pages - 1; in btrfs_swap_activate()
10927 return -EOPNOTSUPP; in btrfs_swap_activate()
10944 spin_lock(&inode->lock); in btrfs_update_inode_bytes()
10946 inode_sub_bytes(&inode->vfs_inode, del_bytes); in btrfs_update_inode_bytes()
10948 inode_add_bytes(&inode->vfs_inode, add_bytes); in btrfs_update_inode_bytes()
10949 spin_unlock(&inode->lock); in btrfs_update_inode_bytes()
10968 struct btrfs_root *root = inode->root; in btrfs_assert_inode_range_clean()
10974 ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start); in btrfs_assert_inode_range_clean()
10976 btrfs_err(root->fs_info, in btrfs_assert_inode_range_clean()
10978 start, end, btrfs_ino(inode), root->root_key.objectid, in btrfs_assert_inode_range_clean()
10979 ordered->file_offset, in btrfs_assert_inode_range_clean()
10980 ordered->file_offset + ordered->num_bytes - 1); in btrfs_assert_inode_range_clean()