/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>

#include "ctree.h"
#include "disk-io.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "transaction.h"

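/*
 * Scan the fs tree's commit root for inode items and record the gaps
 * between allocated inode numbers as free inode numbers in
 * root->free_ino_ctl.  Runs as a kernel thread started by
 * start_caching(); it periodically drops commit_root_sem and reschedules
 * so it does not block transaction commits.
 */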
static int caching_kthread(void *data)
{
	struct btrfs_root *root = data;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	u64 last = (u64)-1;
	int slot;
	int ret;

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Since the commit root is read-only, we can safely skip locking. */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 2;

	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_INODE_ITEM_KEY;
again:
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->commit_root_sem);

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		if (btrfs_fs_closing(fs_info))
			goto out;

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;

			if (need_resched() ||
			    btrfs_transaction_in_commit(fs_info)) {
				leaf = path->nodes[0];

				if (WARN_ON(btrfs_header_nritems(leaf) == 0))
					break;

				/*
				 * Save the key so we can advance forward
				 * in the next search.
				 */
				btrfs_item_key_to_cpu(leaf, &key, 0);
				btrfs_release_path(path);
				root->ino_cache_progress = last;
				up_read(&fs_info->commit_root_sem);
				schedule_timeout(1);
				goto again;
			} else
				continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.type != BTRFS_INODE_ITEM_KEY)
			goto next;

		if (key.objectid >= root->highest_objectid)
			break;

		if (last != (u64)-1 && last + 1 != key.objectid) {
			__btrfs_add_free_space(ctl, last + 1,
					       key.objectid - last - 1);
			wake_up(&root->ino_cache_wait);
		}

		last = key.objectid;
next:
		path->slots[0]++;
	}

	if (last < root->highest_objectid - 1) {
		__btrfs_add_free_space(ctl, last + 1,
				       root->highest_objectid - last - 1);
	}

	spin_lock(&root->ino_cache_lock);
	root->ino_cache_state = BTRFS_CACHE_FINISHED;
	spin_unlock(&root->ino_cache_lock);

	root->ino_cache_progress = (u64)-1;
	btrfs_unpin_free_ino(root);
out:
	wake_up(&root->ino_cache_wait);
	up_read(&fs_info->commit_root_sem);

	btrfs_free_path(path);

	return ret;
}

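/*
 * Kick off inode number caching for @root.  First try to load a free ino
 * cache previously saved by btrfs_save_ino_cache(); if that is not
 * available, publish the range above the current highest objectid
 * immediately so allocations need not wait, and start caching_kthread()
 * to fill in the holes below it.
 */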
static void start_caching(struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct task_struct *tsk;
	int ret;
	u64 objectid;

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return;

	spin_lock(&root->ino_cache_lock);
	if (root->ino_cache_state != BTRFS_CACHE_NO) {
		spin_unlock(&root->ino_cache_lock);
		return;
	}

	root->ino_cache_state = BTRFS_CACHE_STARTED;
	spin_unlock(&root->ino_cache_lock);

	ret = load_free_ino_cache(root->fs_info, root);
	if (ret == 1) {
		spin_lock(&root->ino_cache_lock);
		root->ino_cache_state = BTRFS_CACHE_FINISHED;
		spin_unlock(&root->ino_cache_lock);
		wake_up(&root->ino_cache_wait);
		return;
	}

	/*
	 * It can be quite time-consuming to fill the cache by searching
	 * through the extent tree, and this can keep the ino allocation
	 * path waiting. Therefore at the start we quickly find out the
	 * highest inode number and we know we can use inode numbers which
	 * fall in [highest_ino + 1, BTRFS_LAST_FREE_OBJECTID].
	 */
	ret = btrfs_find_free_objectid(root, &objectid);
	if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) {
		__btrfs_add_free_space(ctl, objectid,
				       BTRFS_LAST_FREE_OBJECTID - objectid + 1);
	}

	tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu",
			  root->root_key.objectid);
	if (IS_ERR(tsk)) {
		btrfs_warn(root->fs_info, "failed to start inode caching task");
		btrfs_clear_pending_and_info(root->fs_info, INODE_MAP_CACHE,
					     "disabling inode map caching");
	}
}

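/*
 * Allocate a free inode number for @root.  With the inode map cache
 * enabled, take a number from the free_ino tree, starting the caching
 * work and waiting for it to produce entries if the tree is empty;
 * otherwise fall back to btrfs_find_free_objectid().
 */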
int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
{
	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return btrfs_find_free_objectid(root, objectid);

again:
	*objectid = btrfs_find_ino_for_alloc(root);

	if (*objectid != 0)
		return 0;

	start_caching(root);

	wait_event(root->ino_cache_wait,
		   root->ino_cache_state == BTRFS_CACHE_FINISHED ||
		   root->free_ino_ctl->free_space > 0);

	if (root->ino_cache_state == BTRFS_CACHE_FINISHED &&
	    root->free_ino_ctl->free_space == 0)
		return -ENOSPC;
	else
		goto again;
}

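/*
 * Give an unused inode number back to the cache.  The number is added to
 * the pinned tree and only moved to the free_ino tree at transaction
 * commit time by btrfs_unpin_free_ino().
 */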
void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
{
	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return;
again:
	if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
		__btrfs_add_free_space(pinned, objectid, 1);
	} else {
		down_write(&root->fs_info->commit_root_sem);
		spin_lock(&root->ino_cache_lock);
		if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
			spin_unlock(&root->ino_cache_lock);
			up_write(&root->fs_info->commit_root_sem);
			goto again;
		}
		spin_unlock(&root->ino_cache_lock);

		start_caching(root);

		__btrfs_add_free_space(pinned, objectid, 1);

		up_write(&root->fs_info->commit_root_sem);
	}
}

/*
 * When a transaction is committed, we'll move those inode numbers which are
 * smaller than root->ino_cache_progress from the pinned tree to the free_ino
 * tree, and the others will just be dropped, because the commit root we were
 * searching has changed.
 *
 * Must be called with root->fs_info->commit_root_sem held.
 */
void btrfs_unpin_free_ino(struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
	spinlock_t *rbroot_lock = &root->free_ino_pinned->tree_lock;
	struct btrfs_free_space *info;
	struct rb_node *n;
	u64 count;

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return;

	while (1) {
		bool add_to_ctl = true;

		spin_lock(rbroot_lock);
		n = rb_first(rbroot);
		if (!n) {
			spin_unlock(rbroot_lock);
			break;
		}

		info = rb_entry(n, struct btrfs_free_space, offset_index);
		BUG_ON(info->bitmap); /* Logic error */

		if (info->offset > root->ino_cache_progress)
			add_to_ctl = false;
		else if (info->offset + info->bytes > root->ino_cache_progress)
			count = root->ino_cache_progress - info->offset + 1;
		else
			count = info->bytes;

		rb_erase(&info->offset_index, rbroot);
		spin_unlock(rbroot_lock);
		if (add_to_ctl)
			__btrfs_add_free_space(ctl, info->offset, count);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}
}

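/*
 * INIT_THRESHOLD allows roughly 16K of RAM worth of extent entries before
 * recalculate_thresholds() adjusts the limit; INODES_PER_BITMAP is the
 * number of inode numbers a single page-sized bitmap can track.
 */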
#define INIT_THRESHOLD (((1024 * 32) / 2) / sizeof(struct btrfs_free_space))
#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8)

/*
 * The goal is to keep the memory used by the free_ino tree from exceeding
 * the memory we would use if we used bitmaps only.
 */
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *n;
	int max_ino;
	int max_bitmaps;

	n = rb_last(&ctl->free_space_offset);
	if (!n) {
		ctl->extents_thresh = INIT_THRESHOLD;
		return;
	}
	info = rb_entry(n, struct btrfs_free_space, offset_index);

	/*
	 * Find the maximum inode number in the filesystem. Note we
	 * ignore the fact that this can be a bitmap, because we are
	 * not doing a precise calculation.
	 */
	max_ino = info->bytes - 1;

	max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP;
	if (max_bitmaps <= ctl->total_bitmaps) {
		ctl->extents_thresh = 0;
		return;
	}

	ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
				PAGE_CACHE_SIZE / sizeof(*info);
}

/*
 * We don't fall back to bitmaps if we are below the extents threshold
 * or if this chunk of inode numbers is a big one.
 */
static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	if (ctl->free_extents < ctl->extents_thresh ||
	    info->bytes > INODES_PER_BITMAP / 10)
		return false;

	return true;
}

static struct btrfs_free_space_op free_ino_op = {
	.recalc_thresholds = recalculate_thresholds,
	.use_bitmap = use_bitmap,
};

static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
{
}

static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	/*
	 * We always use extents for two reasons:
	 *
	 * - The pinned tree is only used while the caching work is in
	 *   progress.
	 * - It makes the code simpler. See btrfs_unpin_free_ino().
	 */
	return false;
}

static struct btrfs_free_space_op pinned_free_ino_op = {
	.recalc_thresholds = pinned_recalc_thresholds,
	.use_bitmap = pinned_use_bitmap,
};

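/*
 * Set up the two free-space controls used by the inode map cache: the
 * free_ino tree that allocations draw from, and the pinned tree that
 * holds returned numbers until the next transaction commit.  Both work
 * in units of single inode numbers rather than bytes.
 */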
void btrfs_init_free_ino_ctl(struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = 1;
	ctl->start = 0;
	ctl->private = NULL;
	ctl->op = &free_ino_op;
	INIT_LIST_HEAD(&ctl->trimming_ranges);
	mutex_init(&ctl->cache_writeout_mutex);

	/*
	 * Initially we allow 16K of RAM to cache chunks of inode numbers
	 * before we resort to bitmaps. This is somewhat arbitrary, but it
	 * will be adjusted at runtime.
	 */
	ctl->extents_thresh = INIT_THRESHOLD;

	spin_lock_init(&pinned->tree_lock);
	pinned->unit = 1;
	pinned->start = 0;
	pinned->private = NULL;
	pinned->extents_thresh = 0;
	pinned->op = &pinned_free_ino_op;
}

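/*
 * Write the in-memory free inode number cache to disk at transaction
 * commit time so it can later be reloaded by load_free_ino_cache().
 * Reserves metadata space, (re)creates and truncates the cache inode,
 * preallocates data space for the cache contents and then writes them
 * out via btrfs_write_out_ino_cache().
 */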
int btrfs_save_ino_cache(struct btrfs_root *root,
			 struct btrfs_trans_handle *trans)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	struct btrfs_block_rsv *rsv;
	u64 num_bytes;
	u64 alloc_hint = 0;
	int ret;
	int prealloc;
	bool retry = false;

	/* Only the fs tree and subvolume/snapshot trees need the ino cache. */
	if (root->root_key.objectid != BTRFS_FS_TREE_OBJECTID &&
	    (root->root_key.objectid < BTRFS_FIRST_FREE_OBJECTID ||
	     root->root_key.objectid > BTRFS_LAST_FREE_OBJECTID))
		return 0;

	/* Don't save the inode cache if we are deleting this root. */
	if (btrfs_root_refs(&root->root_item) == 0)
		return 0;

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->trans_block_rsv;

	num_bytes = trans->bytes_reserved;
	/*
	 * 1 item for inode item insertion if needed
	 * 4 items for inode item update (in the worst case)
	 * 1 item for slack space if we need to do truncation
	 * 1 item for the free space object
	 * 3 items for pre-allocation
	 */
	trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 10);
	ret = btrfs_block_rsv_add(root, trans->block_rsv,
				  trans->bytes_reserved,
				  BTRFS_RESERVE_NO_FLUSH);
	if (ret)
		goto out;
	trace_btrfs_space_reservation(root->fs_info, "ino_cache",
				      trans->transid, trans->bytes_reserved, 1);
again:
	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode) && (PTR_ERR(inode) != -ENOENT || retry)) {
		ret = PTR_ERR(inode);
		goto out_release;
	}

	if (IS_ERR(inode)) {
		BUG_ON(retry); /* Logic error */
		retry = true;

		ret = create_free_ino_inode(root, trans, path);
		if (ret)
			goto out_release;
		goto again;
	}

	BTRFS_I(inode)->generation = 0;
	ret = btrfs_update_inode(trans, root, inode);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto out_put;
	}

	if (i_size_read(inode) > 0) {
		ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
		if (ret) {
			if (ret != -ENOSPC)
				btrfs_abort_transaction(trans, root, ret);
			goto out_put;
		}
	}

	spin_lock(&root->ino_cache_lock);
	if (root->ino_cache_state != BTRFS_CACHE_FINISHED) {
		ret = -1;
		spin_unlock(&root->ino_cache_lock);
		goto out_put;
	}
	spin_unlock(&root->ino_cache_lock);

	spin_lock(&ctl->tree_lock);
	prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
	prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE);
	prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE;
	spin_unlock(&ctl->tree_lock);

	/* Just to make sure we have enough space */
	prealloc += 8 * PAGE_CACHE_SIZE;

	ret = btrfs_delalloc_reserve_space(inode, 0, prealloc);
	if (ret)
		goto out_put;

	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
					      prealloc, prealloc, &alloc_hint);
	if (ret) {
		btrfs_delalloc_release_space(inode, 0, prealloc);
		goto out_put;
	}
	btrfs_free_reserved_data_space(inode, 0, prealloc);

	ret = btrfs_write_out_ino_cache(root, trans, path, inode);
out_put:
	iput(inode);
out_release:
	trace_btrfs_space_reservation(root->fs_info, "ino_cache",
				      trans->transid, trans->bytes_reserved, 0);
	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
out:
	trans->block_rsv = rsv;
	trans->bytes_reserved = num_bytes;

	btrfs_free_path(path);
	return ret;
}

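/*
 * Find the highest inode number currently used in @root by looking at the
 * last item in the tree.  Stores at least BTRFS_FIRST_FREE_OBJECTID - 1
 * in *objectid.
 */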
int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
{
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *l;
	struct btrfs_key search_key;
	struct btrfs_key found_key;
	int slot;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	search_key.objectid = BTRFS_LAST_FREE_OBJECTID;
	search_key.type = -1;
	search_key.offset = (u64)-1;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret < 0)
		goto error;
	BUG_ON(ret == 0); /* Corruption */
	if (path->slots[0] > 0) {
		slot = path->slots[0] - 1;
		l = path->nodes[0];
		btrfs_item_key_to_cpu(l, &found_key, slot);
		*objectid = max_t(u64, found_key.objectid,
				  BTRFS_FIRST_FREE_OBJECTID - 1);
	} else {
		*objectid = BTRFS_FIRST_FREE_OBJECTID - 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}

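/*
 * Hand out the next unused objectid in @root by bumping
 * root->highest_objectid under objectid_mutex.  Fails with -ENOSPC once
 * BTRFS_LAST_FREE_OBJECTID is reached.
 */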
int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
{
	int ret;

	mutex_lock(&root->objectid_mutex);

	if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
		ret = -ENOSPC;
		goto out;
	}

	*objectid = ++root->highest_objectid;
	ret = 0;
out:
	mutex_unlock(&root->objectid_mutex);
	return ret;
}