// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/kthread.h>
#include <linux/pagemap.h>

#include "ctree.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "transaction.h"
#include "delalloc-space.h"

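/*
 * Mark the inode number cache as broken: warn, clear the INODE_MAP_CACHE
 * option so we stop trying to cache, flag the root's cache state as
 * errored and wake anyone blocked in btrfs_find_free_ino(), who will
 * then fall back to btrfs_find_free_objectid().
 */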
static void fail_caching_thread(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	btrfs_warn(fs_info, "failed to start inode caching task");
	btrfs_clear_pending_and_info(fs_info, INODE_MAP_CACHE,
				     "disabling inode map caching");
	spin_lock(&root->ino_cache_lock);
	root->ino_cache_state = BTRFS_CACHE_ERROR;
	spin_unlock(&root->ino_cache_lock);
	wake_up(&root->ino_cache_wait);
}

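/*
 * Worker for the "btrfs-ino-cache" kthread.  It scans the inode items in
 * the root's commit root and records every gap between allocated inode
 * numbers as free space in root->free_ino_ctl, yielding periodically so
 * it doesn't hold up scheduling or transaction commits.
 */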
static int caching_kthread(void *data)
{
	struct btrfs_root *root = data;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u64 last = (u64)-1;
	int slot;
	int ret;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		fail_caching_thread(root);
		return -ENOMEM;
	}

	/* Since the commit root is read-only, we can safely skip locking. */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_INODE_ITEM_KEY;
again:
	/* Need to make sure the commit_root doesn't disappear. */
	down_read(&fs_info->commit_root_sem);

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		if (btrfs_fs_closing(fs_info))
			goto out;

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;

			if (need_resched() ||
			    btrfs_transaction_in_commit(fs_info)) {
				leaf = path->nodes[0];

				if (WARN_ON(btrfs_header_nritems(leaf) == 0))
					break;

				/*
				 * Save the key so we can advance forward
				 * in the next search.
				 */
				btrfs_item_key_to_cpu(leaf, &key, 0);
				btrfs_release_path(path);
				root->ino_cache_progress = last;
				up_read(&fs_info->commit_root_sem);
				schedule_timeout(1);
				goto again;
			} else
				continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.type != BTRFS_INODE_ITEM_KEY)
			goto next;

		if (key.objectid >= root->highest_objectid)
			break;

		if (last != (u64)-1 && last + 1 != key.objectid) {
			__btrfs_add_free_space(fs_info, ctl, last + 1,
					       key.objectid - last - 1, 0);
			wake_up(&root->ino_cache_wait);
		}

		last = key.objectid;
next:
		path->slots[0]++;
	}

	if (last < root->highest_objectid - 1) {
		__btrfs_add_free_space(fs_info, ctl, last + 1,
				       root->highest_objectid - last - 1, 0);
	}

	spin_lock(&root->ino_cache_lock);
	root->ino_cache_state = BTRFS_CACHE_FINISHED;
	spin_unlock(&root->ino_cache_lock);

	root->ino_cache_progress = (u64)-1;
	btrfs_unpin_free_ino(root);
out:
	wake_up(&root->ino_cache_wait);
	up_read(&fs_info->commit_root_sem);

	btrfs_free_path(path);

	return ret;
}

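/*
 * Bring the free-ino cache online for this root: try to load a cache
 * previously written to disk, immediately mark the range past the
 * current highest objectid as free, and spawn caching_kthread() to find
 * the holes below it.
 */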
static void start_caching(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct task_struct *tsk;
	int ret;
	u64 objectid;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return;

	spin_lock(&root->ino_cache_lock);
	if (root->ino_cache_state != BTRFS_CACHE_NO) {
		spin_unlock(&root->ino_cache_lock);
		return;
	}

	root->ino_cache_state = BTRFS_CACHE_STARTED;
	spin_unlock(&root->ino_cache_lock);

	ret = load_free_ino_cache(fs_info, root);
	if (ret == 1) {
		spin_lock(&root->ino_cache_lock);
		root->ino_cache_state = BTRFS_CACHE_FINISHED;
		spin_unlock(&root->ino_cache_lock);
		wake_up(&root->ino_cache_wait);
		return;
	}

	/*
	 * It can be quite time-consuming to fill the cache by searching
	 * through the extent tree, and this can keep the ino allocation
	 * path waiting. Therefore at the start we quickly find the highest
	 * inode number, since we know any inode number that falls in
	 * [highest_ino + 1, BTRFS_LAST_FREE_OBJECTID] is usable.
	 */
	ret = btrfs_find_free_objectid(root, &objectid);
	if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) {
		__btrfs_add_free_space(fs_info, ctl, objectid,
				       BTRFS_LAST_FREE_OBJECTID - objectid + 1,
				       0);
		wake_up(&root->ino_cache_wait);
	}

	tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu",
			  root->root_key.objectid);
	if (IS_ERR(tsk))
		fail_caching_thread(root);
}

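/*
 * Allocate a free inode number for a new inode.  With INODE_MAP_CACHE
 * enabled this consumes numbers from the in-memory cache, starting and
 * waiting on the caching thread as needed; otherwise (or if caching
 * failed) it falls back to btrfs_find_free_objectid().
 *
 * Illustrative caller, a hypothetical sketch rather than actual kernel
 * code:
 *
 *	u64 objectid;
 *	int ret = btrfs_find_free_ino(root, &objectid);
 *	if (ret)
 *		return ret;
 *	... create the inode keyed by 'objectid'; if creation fails, hand
 *	the number back with btrfs_return_ino(root, objectid) ...
 */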
int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
{
	if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
		return btrfs_find_free_objectid(root, objectid);

again:
	*objectid = btrfs_find_ino_for_alloc(root);

	if (*objectid != 0)
		return 0;

	start_caching(root);

	wait_event(root->ino_cache_wait,
		   root->ino_cache_state == BTRFS_CACHE_FINISHED ||
		   root->ino_cache_state == BTRFS_CACHE_ERROR ||
		   root->free_ino_ctl->free_space > 0);

	if (root->ino_cache_state == BTRFS_CACHE_FINISHED &&
	    root->free_ino_ctl->free_space == 0)
		return -ENOSPC;
	else if (root->ino_cache_state == BTRFS_CACHE_ERROR)
		return btrfs_find_free_objectid(root, objectid);
	else
		goto again;
}

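/*
 * Hand an inode number back to the cache, e.g. when inode creation fails
 * or the inode is deleted.  The number is parked in the pinned tree and
 * only becomes allocatable again once btrfs_unpin_free_ino() migrates it
 * after the commit root changes.
 */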
void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return;
again:
	if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
		__btrfs_add_free_space(fs_info, pinned, objectid, 1, 0);
	} else {
		down_write(&fs_info->commit_root_sem);
		spin_lock(&root->ino_cache_lock);
		if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
			spin_unlock(&root->ino_cache_lock);
			up_write(&fs_info->commit_root_sem);
			goto again;
		}
		spin_unlock(&root->ino_cache_lock);

		start_caching(root);

		__btrfs_add_free_space(fs_info, pinned, objectid, 1, 0);

		up_write(&fs_info->commit_root_sem);
	}
}

/*
 * When a transaction is committed, we'll move the inode numbers which are
 * no greater than root->ino_cache_progress from the pinned tree to the
 * free_ino tree; the others are simply dropped, because the commit root
 * we were searching has changed.
 *
 * Must be called with root->fs_info->commit_root_sem held.
 */
void btrfs_unpin_free_ino(struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
	spinlock_t *rbroot_lock = &root->free_ino_pinned->tree_lock;
	struct btrfs_free_space *info;
	struct rb_node *n;
	u64 count;

	if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
		return;

	while (1) {
		spin_lock(rbroot_lock);
		n = rb_first(rbroot);
		if (!n) {
			spin_unlock(rbroot_lock);
			break;
		}

		info = rb_entry(n, struct btrfs_free_space, offset_index);
		BUG_ON(info->bitmap); /* Logic error */

		if (info->offset > root->ino_cache_progress)
			count = 0;
		else
			count = min(root->ino_cache_progress - info->offset + 1,
				    info->bytes);

		rb_erase(&info->offset_index, rbroot);
		spin_unlock(rbroot_lock);
		if (count)
			__btrfs_add_free_space(root->fs_info, ctl,
					       info->offset, count, 0);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}
}

#define INIT_THRESHOLD	((SZ_32K / 2) / sizeof(struct btrfs_free_space))
#define INODES_PER_BITMAP (PAGE_SIZE * 8)
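
/*
 * Worked example (assuming 4 KiB pages): one bitmap covers
 * PAGE_SIZE * 8 = 32768 inode numbers, and INIT_THRESHOLD permits
 * roughly 16K (SZ_32K / 2) worth of struct btrfs_free_space extent
 * entries before use_bitmap() starts steering new entries into bitmaps.
 */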

/*
 * The goal is to keep the memory used by the free_ino tree from
 * exceeding what we would use if we tracked everything in bitmaps.
 */
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *n;
	int max_ino;
	int max_bitmaps;

	n = rb_last(&ctl->free_space_offset);
	if (!n) {
		ctl->extents_thresh = INIT_THRESHOLD;
		return;
	}
	info = rb_entry(n, struct btrfs_free_space, offset_index);

	/*
	 * Find the maximum inode number in the filesystem. Note we
	 * ignore the fact that this can be a bitmap, because we are
	 * not doing precise calculation.
	 */
	max_ino = info->bytes - 1;

	max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP;
	if (max_bitmaps <= ctl->total_bitmaps) {
		ctl->extents_thresh = 0;
		return;
	}

	ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
				PAGE_SIZE / sizeof(*info);
}

/*
 * We don't fall back to a bitmap if we are below the extents threshold
 * or if this chunk of inode numbers is a big one.
 */
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	if (ctl->free_extents < ctl->extents_thresh ||
	    info->bytes > INODES_PER_BITMAP / 10)
		return false;

	return true;
}

static const struct btrfs_free_space_op free_ino_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};

static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
{
}

static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	/*
	 * We always use extents for two reasons:
	 *
	 * - The pinned tree is only used while the caching work is in
	 *   progress.
	 * - It keeps the code simpler. See btrfs_unpin_free_ino().
	 */
	return false;
}

static const struct btrfs_free_space_op pinned_free_ino_op = {
	.recalc_thresholds	= pinned_recalc_thresholds,
	.use_bitmap		= pinned_use_bitmap,
};

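/*
 * Set up the two free-space controls backing the cache: free_ino_ctl
 * holds numbers that are free to hand out, free_ino_pinned holds numbers
 * returned while a scan or transaction is in flight.  Both count in
 * units of a single inode number (unit = 1).
 */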
void btrfs_init_free_ino_ctl(struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = 1;
	ctl->start = 0;
	ctl->private = NULL;
	ctl->op = &free_ino_op;
	INIT_LIST_HEAD(&ctl->trimming_ranges);
	mutex_init(&ctl->cache_writeout_mutex);

	/*
	 * Initially we allow 16K of RAM to cache chunks of inode numbers
	 * before we resort to bitmaps. This is somewhat arbitrary, but it
	 * will be adjusted at runtime.
	 */
	ctl->extents_thresh = INIT_THRESHOLD;

	spin_lock_init(&pinned->tree_lock);
	pinned->unit = 1;
	pinned->start = 0;
	pinned->private = NULL;
	pinned->extents_thresh = 0;
	pinned->op = &pinned_free_ino_op;
}

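/*
 * Persist the in-memory free-ino cache at transaction commit.  The cache
 * is written through a hidden free-space-cache inode in this root, which
 * is created on first use, truncated, preallocated and finally filled by
 * btrfs_write_out_ino_cache().
 */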
int btrfs_save_ino_cache(struct btrfs_root *root,
			 struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	struct btrfs_block_rsv *rsv;
	struct extent_changeset *data_reserved = NULL;
	u64 num_bytes;
	u64 alloc_hint = 0;
	int ret;
	int prealloc;
	bool retry = false;

	/* Only the fs tree and subvolumes/snapshots need the ino cache. */
	if (root->root_key.objectid != BTRFS_FS_TREE_OBJECTID &&
	    (root->root_key.objectid < BTRFS_FIRST_FREE_OBJECTID ||
	     root->root_key.objectid > BTRFS_LAST_FREE_OBJECTID))
		return 0;

	/* Don't save the inode cache if we are deleting this root. */
	if (btrfs_root_refs(&root->root_item) == 0)
		return 0;

	if (!btrfs_test_opt(fs_info, INODE_MAP_CACHE))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->trans_block_rsv;

	num_bytes = trans->bytes_reserved;
	/*
	 * 1 item for inode item insertion, if needed
	 * 4 items for inode item update (in the worst case)
	 * 1 item for slack space, if we need to do truncation
	 * 1 item for the free space object
	 * 3 items for preallocation
	 */
	trans->bytes_reserved = btrfs_calc_insert_metadata_size(fs_info, 10);
	ret = btrfs_block_rsv_add(root, trans->block_rsv,
				  trans->bytes_reserved,
				  BTRFS_RESERVE_NO_FLUSH);
	if (ret)
		goto out;
	trace_btrfs_space_reservation(fs_info, "ino_cache", trans->transid,
				      trans->bytes_reserved, 1);
again:
	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode) && (PTR_ERR(inode) != -ENOENT || retry)) {
		ret = PTR_ERR(inode);
		goto out_release;
	}

	if (IS_ERR(inode)) {
		BUG_ON(retry); /* Logic error */
		retry = true;

		ret = create_free_ino_inode(root, trans, path);
		if (ret)
			goto out_release;
		goto again;
	}

	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, root, inode);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_put;
	}

	if (i_size_read(inode) > 0) {
		ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
		if (ret) {
			if (ret != -ENOSPC)
				btrfs_abort_transaction(trans, ret);
			goto out_put;
		}
	}

	spin_lock(&root->ino_cache_lock);
	if (root->ino_cache_state != BTRFS_CACHE_FINISHED) {
		ret = -1;
		spin_unlock(&root->ino_cache_lock);
		goto out_put;
	}
	spin_unlock(&root->ino_cache_lock);

	spin_lock(&ctl->tree_lock);
	prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
	prealloc = ALIGN(prealloc, PAGE_SIZE);
	prealloc += ctl->total_bitmaps * PAGE_SIZE;
	spin_unlock(&ctl->tree_lock);

	/* Just to make sure we have enough space. */
	prealloc += 8 * PAGE_SIZE;

	ret = btrfs_delalloc_reserve_space(BTRFS_I(inode), &data_reserved, 0,
					   prealloc);
	if (ret)
		goto out_put;

	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
					      prealloc, prealloc, &alloc_hint);
	if (ret) {
		btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc);
		btrfs_delalloc_release_metadata(BTRFS_I(inode), prealloc, true);
		goto out_put;
	}

	ret = btrfs_write_out_ino_cache(root, trans, path, inode);
	btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc);
out_put:
	iput(inode);
out_release:
	trace_btrfs_space_reservation(fs_info, "ino_cache", trans->transid,
				      trans->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, trans->block_rsv,
				trans->bytes_reserved, NULL);
out:
	trans->block_rsv = rsv;
	trans->bytes_reserved = num_bytes;

	btrfs_free_path(path);
	extent_changeset_free(data_reserved);
	return ret;
}