
Lines Matching full:root

15 static void fail_caching_thread(struct btrfs_root *root)  in fail_caching_thread()  argument
17 struct btrfs_fs_info *fs_info = root->fs_info; in fail_caching_thread()
22 spin_lock(&root->ino_cache_lock); in fail_caching_thread()
23 root->ino_cache_state = BTRFS_CACHE_ERROR; in fail_caching_thread()
24 spin_unlock(&root->ino_cache_lock); in fail_caching_thread()
25 wake_up(&root->ino_cache_wait); in fail_caching_thread()
30 struct btrfs_root *root = data; in caching_kthread() local
31 struct btrfs_fs_info *fs_info = root->fs_info; in caching_kthread()
32 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; in caching_kthread()
45 fail_caching_thread(root); in caching_kthread()
49 /* Since the commit root is read-only, we can safely skip locking. */ in caching_kthread()
61 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); in caching_kthread()
72 ret = btrfs_next_leaf(root, path); in caching_kthread()
91 root->ino_cache_progress = last; in caching_kthread()
104 if (key.objectid >= root->highest_objectid) in caching_kthread()
110 wake_up(&root->ino_cache_wait); in caching_kthread()
118 if (last < root->highest_objectid - 1) { in caching_kthread()
120 root->highest_objectid - last - 1); in caching_kthread()
123 spin_lock(&root->ino_cache_lock); in caching_kthread()
124 root->ino_cache_state = BTRFS_CACHE_FINISHED; in caching_kthread()
125 spin_unlock(&root->ino_cache_lock); in caching_kthread()
127 root->ino_cache_progress = (u64)-1; in caching_kthread()
128 btrfs_unpin_free_ino(root); in caching_kthread()
130 wake_up(&root->ino_cache_wait); in caching_kthread()
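
The fragments above are from caching_kthread(): it scans the root's commit tree (read-only, so locking can be skipped), remembers the last objectid it saw in root->ino_cache_progress, stops once it reaches root->highest_objectid, and finally marks the cache BTRFS_CACHE_FINISHED and wakes ino_cache_wait; the `last < root->highest_objectid - 1` check hands the trailing gap of unused ids to the free-ino ctl. Below is a minimal user-space model of that gap arithmetic only; every name in it is hypothetical, not a kernel API.

/* Hypothetical user-space model of the free-inode gap scan done by
 * caching_kthread(): given a sorted list of objectids that already have
 * inode items, every gap between consecutive items, plus the tail up to
 * highest - 1, is reported as a free range.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

static void add_free_range(uint64_t start, uint64_t count)
{
        /* Stand-in for handing a range to the free-ino ctl. */
        printf("free: %llu..%llu (%llu ids)\n",
               (unsigned long long)start,
               (unsigned long long)(start + count - 1),
               (unsigned long long)count);
}

static void scan_used_objectids(const uint64_t *used, size_t n,
                                uint64_t first_free, uint64_t highest)
{
        uint64_t last = first_free - 1;         /* nothing seen yet */
        size_t i;

        for (i = 0; i < n; i++) {
                if (used[i] >= highest)         /* mirrors key.objectid >= root->highest_objectid */
                        break;
                if (used[i] > last + 1)
                        add_free_range(last + 1, used[i] - last - 1);
                last = used[i];
        }
        /* Trailing gap, as in: last < root->highest_objectid - 1 */
        if (last < highest - 1)
                add_free_range(last + 1, highest - last - 1);
}

int main(void)
{
        const uint64_t used[] = { 256, 257, 260, 300 };

        scan_used_objectids(used, 4, 256, 305);
        return 0;
}
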
138 static void start_caching(struct btrfs_root *root) in start_caching() argument
140 struct btrfs_fs_info *fs_info = root->fs_info; in start_caching()
141 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; in start_caching()
149 spin_lock(&root->ino_cache_lock); in start_caching()
150 if (root->ino_cache_state != BTRFS_CACHE_NO) { in start_caching()
151 spin_unlock(&root->ino_cache_lock); in start_caching()
155 root->ino_cache_state = BTRFS_CACHE_STARTED; in start_caching()
156 spin_unlock(&root->ino_cache_lock); in start_caching()
158 ret = load_free_ino_cache(fs_info, root); in start_caching()
160 spin_lock(&root->ino_cache_lock); in start_caching()
161 root->ino_cache_state = BTRFS_CACHE_FINISHED; in start_caching()
162 spin_unlock(&root->ino_cache_lock); in start_caching()
163 wake_up(&root->ino_cache_wait); in start_caching()
174 ret = btrfs_find_free_objectid(root, &objectid); in start_caching()
180 tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu", in start_caching()
181 root->root_key.objectid); in start_caching()
183 fail_caching_thread(root); in start_caching()
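
start_caching() only does the expensive work if it can move root->ino_cache_state from BTRFS_CACHE_NO to BTRFS_CACHE_STARTED while holding ino_cache_lock; it then tries load_free_ino_cache() and otherwise launches the btrfs-ino-cache kthread, apparently calling fail_caching_thread() when kthread_run() fails. Below is a hedged user-space analogue of that claim-then-work pattern, with a pthread mutex standing in for the spinlock; none of these names are kernel APIs.

/* Hypothetical analogue of the guarded state transition in start_caching():
 * only the caller that moves the state from CACHE_NO to CACHE_STARTED goes
 * on to spawn the worker.
 */
#include <pthread.h>

enum cache_state { CACHE_NO, CACHE_STARTED, CACHE_FINISHED, CACHE_ERROR };

static enum cache_state state = CACHE_NO;
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void *cache_worker(void *arg)
{
        (void)arg;
        /* ... scan for free ids, then publish the result ... */
        pthread_mutex_lock(&state_lock);
        state = CACHE_FINISHED;
        pthread_mutex_unlock(&state_lock);
        return NULL;
}

static void start_caching(void)
{
        pthread_t tsk;

        pthread_mutex_lock(&state_lock);
        if (state != CACHE_NO) {                /* someone already started (or finished) it */
                pthread_mutex_unlock(&state_lock);
                return;
        }
        state = CACHE_STARTED;                  /* claim the transition before working */
        pthread_mutex_unlock(&state_lock);

        if (pthread_create(&tsk, NULL, cache_worker, NULL) != 0) {
                /* like fail_caching_thread(): record the error under the lock */
                pthread_mutex_lock(&state_lock);
                state = CACHE_ERROR;
                pthread_mutex_unlock(&state_lock);
                return;
        }
        pthread_detach(tsk);
}

int main(void)
{
        start_caching();
        start_caching();        /* second call returns early: state is no longer CACHE_NO */
        return 0;
}
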
186 int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid) in btrfs_find_free_ino() argument
188 if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE)) in btrfs_find_free_ino()
189 return btrfs_find_free_objectid(root, objectid); in btrfs_find_free_ino()
192 *objectid = btrfs_find_ino_for_alloc(root); in btrfs_find_free_ino()
197 start_caching(root); in btrfs_find_free_ino()
199 wait_event(root->ino_cache_wait, in btrfs_find_free_ino()
200 root->ino_cache_state == BTRFS_CACHE_FINISHED || in btrfs_find_free_ino()
201 root->ino_cache_state == BTRFS_CACHE_ERROR || in btrfs_find_free_ino()
202 root->free_ino_ctl->free_space > 0); in btrfs_find_free_ino()
204 if (root->ino_cache_state == BTRFS_CACHE_FINISHED && in btrfs_find_free_ino()
205 root->free_ino_ctl->free_space == 0) in btrfs_find_free_ino()
207 else if (root->ino_cache_state == BTRFS_CACHE_ERROR) in btrfs_find_free_ino()
208 return btrfs_find_free_objectid(root, objectid); in btrfs_find_free_ino()
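
btrfs_find_free_ino() waits on root->ino_cache_wait until the cache is finished, has errored out, or has at least one free id; it then gives up if the cache finished with nothing free, falls back to btrfs_find_free_objectid() on error, and otherwise takes an id from the cache (that last step is not among the matched lines). The sketch below models the wait with pthread_cond_wait() in a predicate loop, the user-space counterpart of wait_event(), with pthread_cond_broadcast() playing the role of wake_up(); everything in it is a hypothetical analogue, not kernel code.

/* Hypothetical user-space analogue of the wait in btrfs_find_free_ino(). */
#include <pthread.h>
#include <stdio.h>

enum cache_state { CACHE_NO, CACHE_STARTED, CACHE_FINISHED, CACHE_ERROR };

static enum cache_state state = CACHE_NO;
static unsigned long free_ids;          /* plays the role of free_ino_ctl->free_space */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cache_wait = PTHREAD_COND_INITIALIZER;    /* ~ ino_cache_wait */

static void *cacher(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        state = CACHE_STARTED;
        free_ids = 16;                          /* some ids have become available */
        pthread_cond_broadcast(&cache_wait);    /* ~ wake_up(&root->ino_cache_wait) */
        pthread_mutex_unlock(&lock);
        return NULL;
}

static int find_free_ino(unsigned long *objectid)
{
        int ret = 0;

        pthread_mutex_lock(&lock);
        /* sleep until finished, failed, or something is free */
        while (state != CACHE_FINISHED && state != CACHE_ERROR && free_ids == 0)
                pthread_cond_wait(&cache_wait, &lock);

        if (state == CACHE_FINISHED && free_ids == 0)
                ret = -1;       /* cache finished empty: -ENOSPC in the kernel */
        else if (state == CACHE_ERROR)
                ret = -1;       /* the kernel falls back to btrfs_find_free_objectid() */
        else
                *objectid = 256 + --free_ids;   /* toy allocation from the cache */
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        pthread_t t;
        unsigned long ino;

        pthread_create(&t, NULL, cacher, NULL);
        if (find_free_ino(&ino) == 0)
                printf("got objectid %lu\n", ino);
        pthread_join(t, NULL);
        return 0;
}
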
213 void btrfs_return_ino(struct btrfs_root *root, u64 objectid) in btrfs_return_ino() argument
215 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_return_ino()
216 struct btrfs_free_space_ctl *pinned = root->free_ino_pinned; in btrfs_return_ino()
221 if (root->ino_cache_state == BTRFS_CACHE_FINISHED) { in btrfs_return_ino()
225 spin_lock(&root->ino_cache_lock); in btrfs_return_ino()
226 if (root->ino_cache_state == BTRFS_CACHE_FINISHED) { in btrfs_return_ino()
227 spin_unlock(&root->ino_cache_lock); in btrfs_return_ino()
231 spin_unlock(&root->ino_cache_lock); in btrfs_return_ino()
233 start_caching(root); in btrfs_return_ino()
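
btrfs_return_ino() peeks at ino_cache_state without the lock for the common case, re-checks it under ino_cache_lock, and starts over if the state changed in between; on the slow path it also calls start_caching(). What each branch does with the returned id is largely outside the matched lines (only the free_ino_pinned lookup is visible), so the hedged sketch below keeps only the check/re-check/retry shape; all names are invented.

/* Hypothetical analogue of the control flow in btrfs_return_ino().  The
 * unlocked peek mirrors the kernel's fast path; a real multi-threaded
 * user-space program would want an atomic load there instead.
 */
#include <pthread.h>

enum cache_state { CACHE_NO, CACHE_STARTED, CACHE_FINISHED, CACHE_ERROR };

static enum cache_state state = CACHE_STARTED;
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void record_returned_ino(unsigned long objectid)
{
        (void)objectid;         /* stand-in: stash the id for later reuse */
}

static void return_ino(unsigned long objectid)
{
again:
        if (state == CACHE_FINISHED) {          /* fast path: cache is complete */
                record_returned_ino(objectid);
                return;
        }

        pthread_mutex_lock(&state_lock);
        if (state == CACHE_FINISHED) {          /* lost a race with the cacher: retry */
                pthread_mutex_unlock(&state_lock);
                goto again;
        }
        pthread_mutex_unlock(&state_lock);

        /* slow path: the kernel calls start_caching() here before recording the id */
        record_returned_ino(objectid);
}

int main(void)
{
        return_ino(300);
        return 0;
}
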
243 * smaller than root->ino_cache_progress from pinned tree to free_ino tree, and
244 * others will just be dropped, because the commit root we were searching has changed.
247 * Must be called with root->fs_info->commit_root_sem held
249 void btrfs_unpin_free_ino(struct btrfs_root *root) in btrfs_unpin_free_ino() argument
251 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; in btrfs_unpin_free_ino()
252 struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset; in btrfs_unpin_free_ino()
253 spinlock_t *rbroot_lock = &root->free_ino_pinned->tree_lock; in btrfs_unpin_free_ino()
258 if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE)) in btrfs_unpin_free_ino()
272 if (info->offset > root->ino_cache_progress) in btrfs_unpin_free_ino()
275 count = min(root->ino_cache_progress - info->offset + 1, in btrfs_unpin_free_ino()
281 __btrfs_add_free_space(root->fs_info, ctl, in btrfs_unpin_free_ino()
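
btrfs_unpin_free_ino() walks free_ino_pinned under its tree_lock and moves entries back into free_ino_ctl, but only the portion of each run at or below root->ino_cache_progress; runs starting beyond the progress mark are dropped, per the comment above, because the commit root they came from has changed. Since caching_kthread() sets ino_cache_progress to (u64)-1 when it finishes, a completed cache unpins everything. Offsets and "bytes" here are inode numbers and counts, as the inode map reuses the free-space-ctl machinery. A small self-contained model of the clamp (names hypothetical):

/* Hypothetical model of the range clamping in btrfs_unpin_free_ino(): a
 * pinned run [offset, offset + bytes) is only trusted up to the progress
 * mark; runs that start beyond it are dropped entirely.
 */
#include <stdio.h>
#include <stdint.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

/* Returns how many ids of the pinned run survive; 0 means "drop it". */
static uint64_t unpin_count(uint64_t offset, uint64_t bytes, uint64_t progress)
{
        if (offset > progress)
                return 0;
        return min(progress - offset + 1, bytes);
}

int main(void)
{
        /* progress = 300: a run at 290 of length 20 is clamped to 11 ids
         * (290..300); a run at 310 is dropped. */
        printf("%llu\n", (unsigned long long)unpin_count(290, 20, 300));        /* 11 */
        printf("%llu\n", (unsigned long long)unpin_count(310, 5, 300));         /* 0 */
        return 0;
}
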
366 void btrfs_init_free_ino_ctl(struct btrfs_root *root) in btrfs_init_free_ino_ctl() argument
368 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; in btrfs_init_free_ino_ctl()
369 struct btrfs_free_space_ctl *pinned = root->free_ino_pinned; in btrfs_init_free_ino_ctl()
394 int btrfs_save_ino_cache(struct btrfs_root *root, in btrfs_save_ino_cache() argument
397 struct btrfs_fs_info *fs_info = root->fs_info; in btrfs_save_ino_cache()
398 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; in btrfs_save_ino_cache()
410 if (root->root_key.objectid != BTRFS_FS_TREE_OBJECTID && in btrfs_save_ino_cache()
411 (root->root_key.objectid < BTRFS_FIRST_FREE_OBJECTID || in btrfs_save_ino_cache()
412 root->root_key.objectid > BTRFS_LAST_FREE_OBJECTID)) in btrfs_save_ino_cache()
415 /* Don't save inode cache if we are deleting this root */ in btrfs_save_ino_cache()
416 if (btrfs_root_refs(&root->root_item) == 0) in btrfs_save_ino_cache()
438 ret = btrfs_block_rsv_add(root, trans->block_rsv, in btrfs_save_ino_cache()
446 inode = lookup_free_ino_inode(root, path); in btrfs_save_ino_cache()
456 ret = create_free_ino_inode(root, trans, path); in btrfs_save_ino_cache()
463 ret = btrfs_update_inode(trans, root, inode); in btrfs_save_ino_cache()
478 spin_lock(&root->ino_cache_lock); in btrfs_save_ino_cache()
479 if (root->ino_cache_state != BTRFS_CACHE_FINISHED) { in btrfs_save_ino_cache()
481 spin_unlock(&root->ino_cache_lock); in btrfs_save_ino_cache()
484 spin_unlock(&root->ino_cache_lock); in btrfs_save_ino_cache()
507 ret = btrfs_write_out_ino_cache(root, trans, path, inode); in btrfs_save_ino_cache()
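
btrfs_save_ino_cache() bails out early for trees that never get a persisted inode cache: anything other than the fs tree or a root whose id falls in the BTRFS_FIRST_FREE_OBJECTID..BTRFS_LAST_FREE_OBJECTID window, and any root that is being deleted (zero refs in its root item). The small predicate below captures just those checks; the helper name is invented, and the constant values are assumed to mirror the kernel's btrfs_tree.h definitions (5, 256, and (u64)-256).

/* Hypothetical predicate mirroring the early-return checks in
 * btrfs_save_ino_cache().
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define BTRFS_FS_TREE_OBJECTID          5ULL
#define BTRFS_FIRST_FREE_OBJECTID       256ULL
#define BTRFS_LAST_FREE_OBJECTID        ((uint64_t)-256)

static bool should_save_ino_cache(uint64_t root_objectid, uint64_t root_refs)
{
        if (root_objectid != BTRFS_FS_TREE_OBJECTID &&
            (root_objectid < BTRFS_FIRST_FREE_OBJECTID ||
             root_objectid > BTRFS_LAST_FREE_OBJECTID))
                return false;   /* internal tree, e.g. the extent tree */
        if (root_refs == 0)
                return false;   /* root is being deleted */
        return true;
}

int main(void)
{
        printf("%d %d %d\n",
               should_save_ino_cache(5, 1),     /* fs tree     -> 1 */
               should_save_ino_cache(2, 1),     /* extent tree -> 0 */
               should_save_ino_cache(257, 1));  /* subvolume   -> 1 */
        return 0;
}
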
525 int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid) in btrfs_find_highest_objectid() argument
541 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); in btrfs_find_highest_objectid()
560 int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid) in btrfs_find_free_objectid() argument
563 mutex_lock(&root->objectid_mutex); in btrfs_find_free_objectid()
565 if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) { in btrfs_find_free_objectid()
566 btrfs_warn(root->fs_info, in btrfs_find_free_objectid()
567 "the objectid of root %llu reaches its highest value", in btrfs_find_free_objectid()
568 root->root_key.objectid); in btrfs_find_free_objectid()
573 *objectid = ++root->highest_objectid; in btrfs_find_free_objectid()
576 mutex_unlock(&root->objectid_mutex); in btrfs_find_free_objectid()
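
btrfs_find_free_objectid() is the slow path the cache falls back to: a per-root counter bumped under objectid_mutex, refusing to allocate once highest_objectid reaches BTRFS_LAST_FREE_OBJECTID. A user-space analogue follows; the starting value and all names are assumptions made for the sketch.

/* Hypothetical user-space analogue of btrfs_find_free_objectid(): a
 * monotonically increasing counter handed out under a mutex, with an
 * upper bound on the id space.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define LAST_FREE_OBJECTID ((uint64_t)-256)

/* One below the first id usable by regular inodes, so the first
 * allocation below returns 256. */
static uint64_t highest_objectid = 255;
static pthread_mutex_t objectid_mutex = PTHREAD_MUTEX_INITIALIZER;

static int find_free_objectid(uint64_t *objectid)
{
        int ret = 0;

        pthread_mutex_lock(&objectid_mutex);
        if (highest_objectid >= LAST_FREE_OBJECTID)
                ret = -ENOSPC;          /* the id space for this root is exhausted */
        else
                *objectid = ++highest_objectid;
        pthread_mutex_unlock(&objectid_mutex);
        return ret;
}

int main(void)
{
        uint64_t ino;

        if (find_free_objectid(&ino) == 0)
                printf("allocated objectid %llu\n", (unsigned long long)ino);
        return 0;
}
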