1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2011 Fujitsu. All rights reserved.
4 * Written by Miao Xie <miaox@cn.fujitsu.com>
5 */
6
7 #include <linux/slab.h>
8 #include <linux/iversion.h>
9 #include "misc.h"
10 #include "delayed-inode.h"
11 #include "disk-io.h"
12 #include "transaction.h"
13 #include "ctree.h"
14 #include "qgroup.h"
15 #include "locking.h"
16 #include "inode-item.h"
17
18 #define BTRFS_DELAYED_WRITEBACK 512
19 #define BTRFS_DELAYED_BACKGROUND 128
20 #define BTRFS_DELAYED_BATCH 16
21
22 static struct kmem_cache *delayed_node_cache;
23
24 int __init btrfs_delayed_inode_init(void)
25 {
26 delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
27 sizeof(struct btrfs_delayed_node),
28 0,
29 SLAB_MEM_SPREAD,
30 NULL);
31 if (!delayed_node_cache)
32 return -ENOMEM;
33 return 0;
34 }
35
36 void __cold btrfs_delayed_inode_exit(void)
37 {
38 kmem_cache_destroy(delayed_node_cache);
39 }
40
41 static inline void btrfs_init_delayed_node(
42 struct btrfs_delayed_node *delayed_node,
43 struct btrfs_root *root, u64 inode_id)
44 {
45 delayed_node->root = root;
46 delayed_node->inode_id = inode_id;
47 refcount_set(&delayed_node->refs, 0);
48 delayed_node->ins_root = RB_ROOT_CACHED;
49 delayed_node->del_root = RB_ROOT_CACHED;
50 mutex_init(&delayed_node->mutex);
51 INIT_LIST_HEAD(&delayed_node->n_list);
52 INIT_LIST_HEAD(&delayed_node->p_list);
53 }
54
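/*
 * Get a reference on the delayed node of an inode. Check the inode's cached
 * pointer first, then fall back to the root's radix tree. Returns NULL if the
 * inode has no delayed node (or the node is in the middle of being freed).
 */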
55 static struct btrfs_delayed_node *btrfs_get_delayed_node(
56 struct btrfs_inode *btrfs_inode)
57 {
58 struct btrfs_root *root = btrfs_inode->root;
59 u64 ino = btrfs_ino(btrfs_inode);
60 struct btrfs_delayed_node *node;
61
62 node = READ_ONCE(btrfs_inode->delayed_node);
63 if (node) {
64 refcount_inc(&node->refs);
65 return node;
66 }
67
68 spin_lock(&root->inode_lock);
69 node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
70
71 if (node) {
72 if (btrfs_inode->delayed_node) {
73 refcount_inc(&node->refs); /* can be accessed */
74 BUG_ON(btrfs_inode->delayed_node != node);
75 spin_unlock(&root->inode_lock);
76 return node;
77 }
78
79 /*
80 * It's possible that we're racing into the middle of removing
81 * this node from the radix tree. In this case, the refcount
82 * was zero and it should never go back to one. Just return
83 * NULL like it was never in the radix at all; our release
84 * function is in the process of removing it.
85 *
86 * Some implementations of refcount_inc refuse to bump the
87 * refcount once it has hit zero. If we don't do this dance
88 * here, refcount_inc() may decide to just WARN_ONCE() instead
89 * of actually bumping the refcount.
90 *
91 * If this node is properly in the radix, we want to bump the
92 * refcount twice, once for the inode and once for this get
93 * operation.
94 */
95 if (refcount_inc_not_zero(&node->refs)) {
96 refcount_inc(&node->refs);
97 btrfs_inode->delayed_node = node;
98 } else {
99 node = NULL;
100 }
101
102 spin_unlock(&root->inode_lock);
103 return node;
104 }
105 spin_unlock(&root->inode_lock);
106
107 return NULL;
108 }
109
110 /* Will return either the node or PTR_ERR(-ENOMEM) */
111 static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
112 struct btrfs_inode *btrfs_inode)
113 {
114 struct btrfs_delayed_node *node;
115 struct btrfs_root *root = btrfs_inode->root;
116 u64 ino = btrfs_ino(btrfs_inode);
117 int ret;
118
119 again:
120 node = btrfs_get_delayed_node(btrfs_inode);
121 if (node)
122 return node;
123
124 node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
125 if (!node)
126 return ERR_PTR(-ENOMEM);
127 btrfs_init_delayed_node(node, root, ino);
128
129 /* cached in the btrfs inode and can be accessed */
130 refcount_set(&node->refs, 2);
131
132 ret = radix_tree_preload(GFP_NOFS);
133 if (ret) {
134 kmem_cache_free(delayed_node_cache, node);
135 return ERR_PTR(ret);
136 }
137
138 spin_lock(&root->inode_lock);
139 ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
140 if (ret == -EEXIST) {
141 spin_unlock(&root->inode_lock);
142 kmem_cache_free(delayed_node_cache, node);
143 radix_tree_preload_end();
144 goto again;
145 }
146 btrfs_inode->delayed_node = node;
147 spin_unlock(&root->inode_lock);
148 radix_tree_preload_end();
149
150 return node;
151 }
152
153 /*
154 * Call it when holding delayed_node->mutex
155 *
156 * If mod = 1, add this node into the prepared list.
157 */
158 static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
159 struct btrfs_delayed_node *node,
160 int mod)
161 {
162 spin_lock(&root->lock);
163 if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
164 if (!list_empty(&node->p_list))
165 list_move_tail(&node->p_list, &root->prepare_list);
166 else if (mod)
167 list_add_tail(&node->p_list, &root->prepare_list);
168 } else {
169 list_add_tail(&node->n_list, &root->node_list);
170 list_add_tail(&node->p_list, &root->prepare_list);
171 refcount_inc(&node->refs); /* inserted into list */
172 root->nodes++;
173 set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
174 }
175 spin_unlock(&root->lock);
176 }
177
178 /* Call it when holding delayed_node->mutex */
179 static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
180 struct btrfs_delayed_node *node)
181 {
182 spin_lock(&root->lock);
183 if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
184 root->nodes--;
185 refcount_dec(&node->refs); /* not in the list */
186 list_del_init(&node->n_list);
187 if (!list_empty(&node->p_list))
188 list_del_init(&node->p_list);
189 clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
190 }
191 spin_unlock(&root->lock);
192 }
193
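/*
 * Return the first delayed node on the delayed root's node list, with a
 * reference held, or NULL if the list is empty.
 */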
194 static struct btrfs_delayed_node *btrfs_first_delayed_node(
195 struct btrfs_delayed_root *delayed_root)
196 {
197 struct list_head *p;
198 struct btrfs_delayed_node *node = NULL;
199
200 spin_lock(&delayed_root->lock);
201 if (list_empty(&delayed_root->node_list))
202 goto out;
203
204 p = delayed_root->node_list.next;
205 node = list_entry(p, struct btrfs_delayed_node, n_list);
206 refcount_inc(&node->refs);
207 out:
208 spin_unlock(&delayed_root->lock);
209
210 return node;
211 }
212
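/*
 * Return the delayed node following @node on the delayed root's node list,
 * with a reference held. If @node is no longer on the list, start from the
 * head of the list. Returns NULL if there is no next node.
 */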
213 static struct btrfs_delayed_node *btrfs_next_delayed_node(
214 struct btrfs_delayed_node *node)
215 {
216 struct btrfs_delayed_root *delayed_root;
217 struct list_head *p;
218 struct btrfs_delayed_node *next = NULL;
219
220 delayed_root = node->root->fs_info->delayed_root;
221 spin_lock(&delayed_root->lock);
222 if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
223 /* not in the list */
224 if (list_empty(&delayed_root->node_list))
225 goto out;
226 p = delayed_root->node_list.next;
227 } else if (list_is_last(&node->n_list, &delayed_root->node_list))
228 goto out;
229 else
230 p = node->n_list.next;
231
232 next = list_entry(p, struct btrfs_delayed_node, n_list);
233 refcount_inc(&next->refs);
234 out:
235 spin_unlock(&delayed_root->lock);
236
237 return next;
238 }
239
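/*
 * Drop a reference on a delayed node. If the node still has pending items it
 * is (re)queued on the delayed root, otherwise it is dequeued. When the last
 * reference is dropped, the node is removed from the root's radix tree and
 * freed.
 */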
240 static void __btrfs_release_delayed_node(
241 struct btrfs_delayed_node *delayed_node,
242 int mod)
243 {
244 struct btrfs_delayed_root *delayed_root;
245
246 if (!delayed_node)
247 return;
248
249 delayed_root = delayed_node->root->fs_info->delayed_root;
250
251 mutex_lock(&delayed_node->mutex);
252 if (delayed_node->count)
253 btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
254 else
255 btrfs_dequeue_delayed_node(delayed_root, delayed_node);
256 mutex_unlock(&delayed_node->mutex);
257
258 if (refcount_dec_and_test(&delayed_node->refs)) {
259 struct btrfs_root *root = delayed_node->root;
260
261 spin_lock(&root->inode_lock);
262 /*
263 * Once our refcount goes to zero, nobody is allowed to bump it
264 * back up. We can delete it now.
265 */
266 ASSERT(refcount_read(&delayed_node->refs) == 0);
267 radix_tree_delete(&root->delayed_nodes_tree,
268 delayed_node->inode_id);
269 spin_unlock(&root->inode_lock);
270 kmem_cache_free(delayed_node_cache, delayed_node);
271 }
272 }
273
274 static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
275 {
276 __btrfs_release_delayed_node(node, 0);
277 }
278
279 static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
280 struct btrfs_delayed_root *delayed_root)
281 {
282 struct list_head *p;
283 struct btrfs_delayed_node *node = NULL;
284
285 spin_lock(&delayed_root->lock);
286 if (list_empty(&delayed_root->prepare_list))
287 goto out;
288
289 p = delayed_root->prepare_list.next;
290 list_del_init(p);
291 node = list_entry(p, struct btrfs_delayed_node, p_list);
292 refcount_inc(&node->refs);
293 out:
294 spin_unlock(&delayed_root->lock);
295
296 return node;
297 }
298
299 static inline void btrfs_release_prepared_delayed_node(
300 struct btrfs_delayed_node *node)
301 {
302 __btrfs_release_delayed_node(node, 1);
303 }
304
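/*
 * Allocate a delayed item with @data_len bytes of trailing data and initialize
 * it for the given delayed node and item type.
 */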
305 static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u16 data_len,
306 struct btrfs_delayed_node *node,
307 enum btrfs_delayed_item_type type)
308 {
309 struct btrfs_delayed_item *item;
310
311 item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
312 if (item) {
313 item->data_len = data_len;
314 item->type = type;
315 item->bytes_reserved = 0;
316 item->delayed_node = node;
317 RB_CLEAR_NODE(&item->rb_node);
318 INIT_LIST_HEAD(&item->log_list);
319 item->logged = false;
320 refcount_set(&item->refs, 1);
321 }
322 return item;
323 }
324
325 /*
326 * __btrfs_lookup_delayed_item - look up a delayed item by its dir index
327 * @root: the root of a delayed node's insertion or deletion rbtree
328 * @index: the dir index value to look up (offset of a dir index key)
329 *
330 * Returns the delayed item matching @index, or NULL if no such item is
331 * found in the rbtree.
332 */
333 static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
334 struct rb_root *root,
335 u64 index)
336 {
337 struct rb_node *node = root->rb_node;
338 struct btrfs_delayed_item *delayed_item = NULL;
339
340 while (node) {
341 delayed_item = rb_entry(node, struct btrfs_delayed_item,
342 rb_node);
343 if (delayed_item->index < index)
344 node = node->rb_right;
345 else if (delayed_item->index > index)
346 node = node->rb_left;
347 else
348 return delayed_item;
349 }
350
351 return NULL;
352 }
353
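/*
 * Insert a delayed item into the insertion or deletion rbtree of its delayed
 * node, keyed by the dir index. Returns -EEXIST if an item with the same
 * index already exists.
 */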
354 static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
355 struct btrfs_delayed_item *ins)
356 {
357 struct rb_node **p, *node;
358 struct rb_node *parent_node = NULL;
359 struct rb_root_cached *root;
360 struct btrfs_delayed_item *item;
361 bool leftmost = true;
362
363 if (ins->type == BTRFS_DELAYED_INSERTION_ITEM)
364 root = &delayed_node->ins_root;
365 else
366 root = &delayed_node->del_root;
367
368 p = &root->rb_root.rb_node;
369 node = &ins->rb_node;
370
371 while (*p) {
372 parent_node = *p;
373 item = rb_entry(parent_node, struct btrfs_delayed_item,
374 rb_node);
375
376 if (item->index < ins->index) {
377 p = &(*p)->rb_right;
378 leftmost = false;
379 } else if (item->index > ins->index) {
380 p = &(*p)->rb_left;
381 } else {
382 return -EEXIST;
383 }
384 }
385
386 rb_link_node(node, parent_node, p);
387 rb_insert_color_cached(node, root, leftmost);
388
389 if (ins->type == BTRFS_DELAYED_INSERTION_ITEM &&
390 ins->index >= delayed_node->index_cnt)
391 delayed_node->index_cnt = ins->index + 1;
392
393 delayed_node->count++;
394 atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
395 return 0;
396 }
397
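/*
 * Account for one completed delayed item and wake up any waiters once the
 * number of pending items drops below the background threshold or a full
 * batch has been completed.
 */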
398 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
399 {
400 int seq = atomic_inc_return(&delayed_root->items_seq);
401
402 /* atomic_dec_return implies a barrier */
403 if ((atomic_dec_return(&delayed_root->items) <
404 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0))
405 cond_wake_up_nomb(&delayed_root->wait);
406 }
407
408 static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
409 {
410 struct btrfs_delayed_node *delayed_node = delayed_item->delayed_node;
411 struct rb_root_cached *root;
412 struct btrfs_delayed_root *delayed_root;
413
414 /* Not inserted, ignore it. */
415 if (RB_EMPTY_NODE(&delayed_item->rb_node))
416 return;
417
418 /* If it's in an rbtree, then we need to have the delayed node locked. */
419 lockdep_assert_held(&delayed_node->mutex);
420
421 delayed_root = delayed_node->root->fs_info->delayed_root;
422
423 BUG_ON(!delayed_root);
424
425 if (delayed_item->type == BTRFS_DELAYED_INSERTION_ITEM)
426 root = &delayed_node->ins_root;
427 else
428 root = &delayed_node->del_root;
429
430 rb_erase_cached(&delayed_item->rb_node, root);
431 RB_CLEAR_NODE(&delayed_item->rb_node);
432 delayed_node->count--;
433
434 finish_one_item(delayed_root);
435 }
436
437 static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
438 {
439 if (item) {
440 __btrfs_remove_delayed_item(item);
441 if (refcount_dec_and_test(&item->refs))
442 kfree(item);
443 }
444 }
445
446 static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
447 struct btrfs_delayed_node *delayed_node)
448 {
449 struct rb_node *p;
450 struct btrfs_delayed_item *item = NULL;
451
452 p = rb_first_cached(&delayed_node->ins_root);
453 if (p)
454 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
455
456 return item;
457 }
458
459 static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
460 struct btrfs_delayed_node *delayed_node)
461 {
462 struct rb_node *p;
463 struct btrfs_delayed_item *item = NULL;
464
465 p = rb_first_cached(&delayed_node->del_root);
466 if (p)
467 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
468
469 return item;
470 }
471
472 static struct btrfs_delayed_item *__btrfs_next_delayed_item(
473 struct btrfs_delayed_item *item)
474 {
475 struct rb_node *p;
476 struct btrfs_delayed_item *next = NULL;
477
478 p = rb_next(&item->rb_node);
479 if (p)
480 next = rb_entry(p, struct btrfs_delayed_item, rb_node);
481
482 return next;
483 }
484
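/*
 * Reserve metadata space for a delayed item by migrating it from the
 * transaction's block reserve to the delayed block reserve. Only deletion
 * items record the reservation in item->bytes_reserved, insertion items are
 * accounted per leaf instead.
 */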
485 static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
486 struct btrfs_delayed_item *item)
487 {
488 struct btrfs_block_rsv *src_rsv;
489 struct btrfs_block_rsv *dst_rsv;
490 struct btrfs_fs_info *fs_info = trans->fs_info;
491 u64 num_bytes;
492 int ret;
493
494 if (!trans->bytes_reserved)
495 return 0;
496
497 src_rsv = trans->block_rsv;
498 dst_rsv = &fs_info->delayed_block_rsv;
499
500 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
501
502 /*
503 * Here we migrate space rsv from transaction rsv, since we have already
504 * reserved space when starting a transaction. So no need to reserve
505 * qgroup space here.
506 */
507 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
508 if (!ret) {
509 trace_btrfs_space_reservation(fs_info, "delayed_item",
510 item->delayed_node->inode_id,
511 num_bytes, 1);
512 /*
513 * For insertions we track reserved metadata space by accounting
514 * for the number of leaves that will be used, based on the delayed
515 * node's curr_index_batch_size and index_item_leaves fields.
516 */
517 if (item->type == BTRFS_DELAYED_DELETION_ITEM)
518 item->bytes_reserved = num_bytes;
519 }
520
521 return ret;
522 }
523
524 static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
525 struct btrfs_delayed_item *item)
526 {
527 struct btrfs_block_rsv *rsv;
528 struct btrfs_fs_info *fs_info = root->fs_info;
529
530 if (!item->bytes_reserved)
531 return;
532
533 rsv = &fs_info->delayed_block_rsv;
534 /*
535 * Check btrfs_delayed_item_reserve_metadata() to see why we don't need
536 * to release/reserve qgroup space.
537 */
538 trace_btrfs_space_reservation(fs_info, "delayed_item",
539 item->delayed_node->inode_id,
540 item->bytes_reserved, 0);
541 btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
542 }
543
544 static void btrfs_delayed_item_release_leaves(struct btrfs_delayed_node *node,
545 unsigned int num_leaves)
546 {
547 struct btrfs_fs_info *fs_info = node->root->fs_info;
548 const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, num_leaves);
549
550 /* There are no space reservations during log replay, bail out. */
551 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
552 return;
553
554 trace_btrfs_space_reservation(fs_info, "delayed_item", node->inode_id,
555 bytes, 0);
556 btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv, bytes, NULL);
557 }
558
559 static int btrfs_delayed_inode_reserve_metadata(
560 struct btrfs_trans_handle *trans,
561 struct btrfs_root *root,
562 struct btrfs_delayed_node *node)
563 {
564 struct btrfs_fs_info *fs_info = root->fs_info;
565 struct btrfs_block_rsv *src_rsv;
566 struct btrfs_block_rsv *dst_rsv;
567 u64 num_bytes;
568 int ret;
569
570 src_rsv = trans->block_rsv;
571 dst_rsv = &fs_info->delayed_block_rsv;
572
573 num_bytes = btrfs_calc_metadata_size(fs_info, 1);
574
575 /*
576 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
577 * which doesn't reserve space for speed. This is a problem since we
578 * still need to reserve space for this update, so try to reserve the
579 * space.
580 *
581 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
582 * we always reserve enough to update the inode item.
583 */
584 if (!src_rsv || (!trans->bytes_reserved &&
585 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
586 ret = btrfs_qgroup_reserve_meta(root, num_bytes,
587 BTRFS_QGROUP_RSV_META_PREALLOC, true);
588 if (ret < 0)
589 return ret;
590 ret = btrfs_block_rsv_add(fs_info, dst_rsv, num_bytes,
591 BTRFS_RESERVE_NO_FLUSH);
592 /* NO_FLUSH could only fail with -ENOSPC */
593 ASSERT(ret == 0 || ret == -ENOSPC);
594 if (ret)
595 btrfs_qgroup_free_meta_prealloc(root, num_bytes);
596 } else {
597 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, true);
598 }
599
600 if (!ret) {
601 trace_btrfs_space_reservation(fs_info, "delayed_inode",
602 node->inode_id, num_bytes, 1);
603 node->bytes_reserved = num_bytes;
604 }
605
606 return ret;
607 }
608
609 static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
610 struct btrfs_delayed_node *node,
611 bool qgroup_free)
612 {
613 struct btrfs_block_rsv *rsv;
614
615 if (!node->bytes_reserved)
616 return;
617
618 rsv = &fs_info->delayed_block_rsv;
619 trace_btrfs_space_reservation(fs_info, "delayed_inode",
620 node->inode_id, node->bytes_reserved, 0);
621 btrfs_block_rsv_release(fs_info, rsv, node->bytes_reserved, NULL);
622 if (qgroup_free)
623 btrfs_qgroup_free_meta_prealloc(node->root,
624 node->bytes_reserved);
625 else
626 btrfs_qgroup_convert_reserved_meta(node->root,
627 node->bytes_reserved);
628 node->bytes_reserved = 0;
629 }
630
631 /*
632 * Insert a single delayed item or a batch of delayed items, as many as possible
633 * that fit in a leaf. The delayed items (dir index keys) are sorted by their key
634 * in the rbtree, and if there's a gap between two consecutive dir index items,
635 * then it means at some point we had delayed dir indexes to add but they got
636 * removed (by btrfs_delete_delayed_dir_index()) before we attempted to flush them
637 * into the subvolume tree. Dir index keys also have their offsets coming from a
638 * monotonically increasing counter, so we can't get new keys with an offset that
639 * fits within a gap between delayed dir index items.
640 */
641 static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
642 struct btrfs_root *root,
643 struct btrfs_path *path,
644 struct btrfs_delayed_item *first_item)
645 {
646 struct btrfs_fs_info *fs_info = root->fs_info;
647 struct btrfs_delayed_node *node = first_item->delayed_node;
648 LIST_HEAD(item_list);
649 struct btrfs_delayed_item *curr;
650 struct btrfs_delayed_item *next;
651 const int max_size = BTRFS_LEAF_DATA_SIZE(fs_info);
652 struct btrfs_item_batch batch;
653 struct btrfs_key first_key;
654 const u32 first_data_size = first_item->data_len;
655 int total_size;
656 char *ins_data = NULL;
657 int ret;
658 bool continuous_keys_only = false;
659
660 lockdep_assert_held(&node->mutex);
661
662 /*
663 * During normal operation the delayed index offset is continuously
664 * increasing, so we can batch insert all items as there will not be any
665 * overlapping keys in the tree.
666 *
667 * The exception to this is log replay, where we may have interleaved
668 * offsets in the tree, so our batch needs to be continuous keys only in
669 * order to ensure we do not end up with out of order items in our leaf.
670 */
671 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
672 continuous_keys_only = true;
673
674 /*
675 * For delayed items to insert, we track reserved metadata bytes based
676 * on the number of leaves that we will use.
677 * See btrfs_insert_delayed_dir_index() and
678 * btrfs_delayed_item_reserve_metadata().
679 */
680 ASSERT(first_item->bytes_reserved == 0);
681
682 list_add_tail(&first_item->tree_list, &item_list);
683 batch.total_data_size = first_data_size;
684 batch.nr = 1;
685 total_size = first_data_size + sizeof(struct btrfs_item);
686 curr = first_item;
687
688 while (true) {
689 int next_size;
690
691 next = __btrfs_next_delayed_item(curr);
692 if (!next)
693 break;
694
695 /*
696 * We cannot allow gaps in the key space if we're doing log
697 * replay.
698 */
699 if (continuous_keys_only && (next->index != curr->index + 1))
700 break;
701
702 ASSERT(next->bytes_reserved == 0);
703
704 next_size = next->data_len + sizeof(struct btrfs_item);
705 if (total_size + next_size > max_size)
706 break;
707
708 list_add_tail(&next->tree_list, &item_list);
709 batch.nr++;
710 total_size += next_size;
711 batch.total_data_size += next->data_len;
712 curr = next;
713 }
714
715 if (batch.nr == 1) {
716 first_key.objectid = node->inode_id;
717 first_key.type = BTRFS_DIR_INDEX_KEY;
718 first_key.offset = first_item->index;
719 batch.keys = &first_key;
720 batch.data_sizes = &first_data_size;
721 } else {
722 struct btrfs_key *ins_keys;
723 u32 *ins_sizes;
724 int i = 0;
725
726 ins_data = kmalloc(batch.nr * sizeof(u32) +
727 batch.nr * sizeof(struct btrfs_key), GFP_NOFS);
728 if (!ins_data) {
729 ret = -ENOMEM;
730 goto out;
731 }
732 ins_sizes = (u32 *)ins_data;
733 ins_keys = (struct btrfs_key *)(ins_data + batch.nr * sizeof(u32));
734 batch.keys = ins_keys;
735 batch.data_sizes = ins_sizes;
736 list_for_each_entry(curr, &item_list, tree_list) {
737 ins_keys[i].objectid = node->inode_id;
738 ins_keys[i].type = BTRFS_DIR_INDEX_KEY;
739 ins_keys[i].offset = curr->index;
740 ins_sizes[i] = curr->data_len;
741 i++;
742 }
743 }
744
745 ret = btrfs_insert_empty_items(trans, root, path, &batch);
746 if (ret)
747 goto out;
748
749 list_for_each_entry(curr, &item_list, tree_list) {
750 char *data_ptr;
751
752 data_ptr = btrfs_item_ptr(path->nodes[0], path->slots[0], char);
753 write_extent_buffer(path->nodes[0], &curr->data,
754 (unsigned long)data_ptr, curr->data_len);
755 path->slots[0]++;
756 }
757
758 /*
759 * Now release our path before releasing the delayed items and their
760 * metadata reservations, so that we don't block other tasks for more
761 * time than needed.
762 */
763 btrfs_release_path(path);
764
765 ASSERT(node->index_item_leaves > 0);
766
767 /*
768 * For normal operations we will batch an entire leaf's worth of delayed
769 * items, so if there are more items to process we can decrement
770 * index_item_leaves by 1 as we inserted 1 leaf's worth of items.
771 *
772 * However for log replay we may not have inserted an entire leaf's
773 * worth of items, we may have not had continuous items, so decrementing
774 * here would mess up the index_item_leaves accounting. For this case
775 * only clean up the accounting when there are no items left.
776 */
777 if (next && !continuous_keys_only) {
778 /*
779 * We inserted one batch of items into a leaf and there are more
780 * items to flush in a future batch, so now release one unit of
781 * metadata space from the delayed block reserve, corresponding
782 * to the leaf we just flushed to.
783 */
784 btrfs_delayed_item_release_leaves(node, 1);
785 node->index_item_leaves--;
786 } else if (!next) {
787 /*
788 * There are no more items to insert. We can have a number of
789 * reserved leaves > 1 here - this happens when many dir index
790 * items are added and then removed before they are flushed (file
791 * names with a very short life, never span a transaction). So
792 * release all remaining leaves.
793 */
794 btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
795 node->index_item_leaves = 0;
796 }
797
798 list_for_each_entry_safe(curr, next, &item_list, tree_list) {
799 list_del(&curr->tree_list);
800 btrfs_release_delayed_item(curr);
801 }
802 out:
803 kfree(ins_data);
804 return ret;
805 }
806
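/*
 * Flush all pending insertion items of a delayed node into the subvolume
 * tree, one batch (at most one leaf) at a time.
 */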
807 static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
808 struct btrfs_path *path,
809 struct btrfs_root *root,
810 struct btrfs_delayed_node *node)
811 {
812 int ret = 0;
813
814 while (ret == 0) {
815 struct btrfs_delayed_item *curr;
816
817 mutex_lock(&node->mutex);
818 curr = __btrfs_first_delayed_insertion_item(node);
819 if (!curr) {
820 mutex_unlock(&node->mutex);
821 break;
822 }
823 ret = btrfs_insert_delayed_item(trans, root, path, curr);
824 mutex_unlock(&node->mutex);
825 }
826
827 return ret;
828 }
829
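/*
 * Delete a batch of consecutive dir index items from the leaf that @path
 * points to, starting with @item, and release the corresponding delayed
 * items and their metadata reservations.
 */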
830 static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
831 struct btrfs_root *root,
832 struct btrfs_path *path,
833 struct btrfs_delayed_item *item)
834 {
835 const u64 ino = item->delayed_node->inode_id;
836 struct btrfs_fs_info *fs_info = root->fs_info;
837 struct btrfs_delayed_item *curr, *next;
838 struct extent_buffer *leaf = path->nodes[0];
839 LIST_HEAD(batch_list);
840 int nitems, slot, last_slot;
841 int ret;
842 u64 total_reserved_size = item->bytes_reserved;
843
844 ASSERT(leaf != NULL);
845
846 slot = path->slots[0];
847 last_slot = btrfs_header_nritems(leaf) - 1;
848 /*
849 * Our caller always gives us a path pointing to an existing item, so
850 * this can not happen.
851 */
852 ASSERT(slot <= last_slot);
853 if (WARN_ON(slot > last_slot))
854 return -ENOENT;
855
856 nitems = 1;
857 curr = item;
858 list_add_tail(&curr->tree_list, &batch_list);
859
860 /*
861 * Keep checking if the next delayed item matches the next item in the
862 * leaf - if so, we can add it to the batch of items to delete from the
863 * leaf.
864 */
865 while (slot < last_slot) {
866 struct btrfs_key key;
867
868 next = __btrfs_next_delayed_item(curr);
869 if (!next)
870 break;
871
872 slot++;
873 btrfs_item_key_to_cpu(leaf, &key, slot);
874 if (key.objectid != ino ||
875 key.type != BTRFS_DIR_INDEX_KEY ||
876 key.offset != next->index)
877 break;
878 nitems++;
879 curr = next;
880 list_add_tail(&curr->tree_list, &batch_list);
881 total_reserved_size += curr->bytes_reserved;
882 }
883
884 ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
885 if (ret)
886 return ret;
887
888 /* In case of BTRFS_FS_LOG_RECOVERING items won't have reserved space */
889 if (total_reserved_size > 0) {
890 /*
891 * Check btrfs_delayed_item_reserve_metadata() to see why we
892 * don't need to release/reserve qgroup space.
893 */
894 trace_btrfs_space_reservation(fs_info, "delayed_item", ino,
895 total_reserved_size, 0);
896 btrfs_block_rsv_release(fs_info, &fs_info->delayed_block_rsv,
897 total_reserved_size, NULL);
898 }
899
900 list_for_each_entry_safe(curr, next, &batch_list, tree_list) {
901 list_del(&curr->tree_list);
902 btrfs_release_delayed_item(curr);
903 }
904
905 return 0;
906 }
907
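/*
 * Flush all pending deletion items of a delayed node, removing the matching
 * dir index items from the subvolume tree.
 */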
908 static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
909 struct btrfs_path *path,
910 struct btrfs_root *root,
911 struct btrfs_delayed_node *node)
912 {
913 struct btrfs_key key;
914 int ret = 0;
915
916 key.objectid = node->inode_id;
917 key.type = BTRFS_DIR_INDEX_KEY;
918
919 while (ret == 0) {
920 struct btrfs_delayed_item *item;
921
922 mutex_lock(&node->mutex);
923 item = __btrfs_first_delayed_deletion_item(node);
924 if (!item) {
925 mutex_unlock(&node->mutex);
926 break;
927 }
928
929 key.offset = item->index;
930 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
931 if (ret > 0) {
932 /*
933 * There's no matching item in the leaf. This means we
934 * have already deleted this item in a past run of the
935 * delayed items. We ignore errors when running delayed
936 * items from an async context, through a work queue job
937 * running btrfs_async_run_delayed_root(), and don't
938 * release delayed items that failed to complete. This
939 * is because we will retry later, and at transaction
940 * commit time we always run delayed items and will
941 * then deal with errors if they fail to run again.
942 *
943 * So just release delayed items for which we can't find
944 * an item in the tree, and move to the next item.
945 */
946 btrfs_release_path(path);
947 btrfs_release_delayed_item(item);
948 ret = 0;
949 } else if (ret == 0) {
950 ret = btrfs_batch_delete_items(trans, root, path, item);
951 btrfs_release_path(path);
952 }
953
954 /*
955 * We unlock and relock on each iteration, this is to prevent
956 * blocking other tasks for too long while we are being run from
957 * the async context (work queue job). Those tasks are typically
958 * running system calls like creat/mkdir/rename/unlink/etc which
959 * need to add delayed items to this delayed node.
960 */
961 mutex_unlock(&node->mutex);
962 }
963
964 return ret;
965 }
966
967 static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
968 {
969 struct btrfs_delayed_root *delayed_root;
970
971 if (delayed_node &&
972 test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
973 BUG_ON(!delayed_node->root);
974 clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
975 delayed_node->count--;
976
977 delayed_root = delayed_node->root->fs_info->delayed_root;
978 finish_one_item(delayed_root);
979 }
980 }
981
982 static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
983 {
984
985 if (test_and_clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags)) {
986 struct btrfs_delayed_root *delayed_root;
987
988 ASSERT(delayed_node->root);
989 delayed_node->count--;
990
991 delayed_root = delayed_node->root->fs_info->delayed_root;
992 finish_one_item(delayed_root);
993 }
994 }
995
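/*
 * Copy the delayed node's in-memory inode item into the subvolume tree and,
 * if BTRFS_DELAYED_NODE_DEL_IREF is set, delete the inode's single
 * INODE_REF/INODE_EXTREF item. Must be called with the delayed node's mutex
 * held.
 */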
996 static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
997 struct btrfs_root *root,
998 struct btrfs_path *path,
999 struct btrfs_delayed_node *node)
1000 {
1001 struct btrfs_fs_info *fs_info = root->fs_info;
1002 struct btrfs_key key;
1003 struct btrfs_inode_item *inode_item;
1004 struct extent_buffer *leaf;
1005 int mod;
1006 int ret;
1007
1008 key.objectid = node->inode_id;
1009 key.type = BTRFS_INODE_ITEM_KEY;
1010 key.offset = 0;
1011
1012 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1013 mod = -1;
1014 else
1015 mod = 1;
1016
1017 ret = btrfs_lookup_inode(trans, root, path, &key, mod);
1018 if (ret > 0)
1019 ret = -ENOENT;
1020 if (ret < 0)
1021 goto out;
1022
1023 leaf = path->nodes[0];
1024 inode_item = btrfs_item_ptr(leaf, path->slots[0],
1025 struct btrfs_inode_item);
1026 write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
1027 sizeof(struct btrfs_inode_item));
1028 btrfs_mark_buffer_dirty(leaf);
1029
1030 if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
1031 goto out;
1032
1033 path->slots[0]++;
1034 if (path->slots[0] >= btrfs_header_nritems(leaf))
1035 goto search;
1036 again:
1037 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
1038 if (key.objectid != node->inode_id)
1039 goto out;
1040
1041 if (key.type != BTRFS_INODE_REF_KEY &&
1042 key.type != BTRFS_INODE_EXTREF_KEY)
1043 goto out;
1044
1045 /*
1046 * Delayed iref deletion is only used for an inode that has a single
1047 * link, so there is only one iref. The case where several irefs are
1048 * in the same item does not exist.
1049 */
1050 btrfs_del_item(trans, root, path);
1051 out:
1052 btrfs_release_delayed_iref(node);
1053 btrfs_release_path(path);
1054 err_out:
1055 btrfs_delayed_inode_release_metadata(fs_info, node, (ret < 0));
1056 btrfs_release_delayed_inode(node);
1057
1058 /*
1059 * If we fail to update the delayed inode we need to abort the
1060 * transaction, because we could leave the inode with the improper
1061 * counts behind.
1062 */
1063 if (ret && ret != -ENOENT)
1064 btrfs_abort_transaction(trans, ret);
1065
1066 return ret;
1067
1068 search:
1069 btrfs_release_path(path);
1070
1071 key.type = BTRFS_INODE_EXTREF_KEY;
1072 key.offset = -1;
1073
1074 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1075 if (ret < 0)
1076 goto err_out;
1077 ASSERT(ret);
1078
1079 ret = 0;
1080 leaf = path->nodes[0];
1081 path->slots[0]--;
1082 goto again;
1083 }
1084
1085 static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
1086 struct btrfs_root *root,
1087 struct btrfs_path *path,
1088 struct btrfs_delayed_node *node)
1089 {
1090 int ret;
1091
1092 mutex_lock(&node->mutex);
1093 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
1094 mutex_unlock(&node->mutex);
1095 return 0;
1096 }
1097
1098 ret = __btrfs_update_delayed_inode(trans, root, path, node);
1099 mutex_unlock(&node->mutex);
1100 return ret;
1101 }
1102
1103 static inline int
1104 __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1105 struct btrfs_path *path,
1106 struct btrfs_delayed_node *node)
1107 {
1108 int ret;
1109
1110 ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1111 if (ret)
1112 return ret;
1113
1114 ret = btrfs_delete_delayed_items(trans, path, node->root, node);
1115 if (ret)
1116 return ret;
1117
1118 ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1119 return ret;
1120 }
1121
1122 /*
1123 * Called when committing the transaction.
1124 * Returns 0 on success.
1125 * Returns < 0 on error and returns with an aborted transaction with any
1126 * outstanding delayed items cleaned up.
1127 */
1128 static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
1129 {
1130 struct btrfs_fs_info *fs_info = trans->fs_info;
1131 struct btrfs_delayed_root *delayed_root;
1132 struct btrfs_delayed_node *curr_node, *prev_node;
1133 struct btrfs_path *path;
1134 struct btrfs_block_rsv *block_rsv;
1135 int ret = 0;
1136 bool count = (nr > 0);
1137
1138 if (TRANS_ABORTED(trans))
1139 return -EIO;
1140
1141 path = btrfs_alloc_path();
1142 if (!path)
1143 return -ENOMEM;
1144
1145 block_rsv = trans->block_rsv;
1146 trans->block_rsv = &fs_info->delayed_block_rsv;
1147
1148 delayed_root = fs_info->delayed_root;
1149
1150 curr_node = btrfs_first_delayed_node(delayed_root);
1151 while (curr_node && (!count || nr--)) {
1152 ret = __btrfs_commit_inode_delayed_items(trans, path,
1153 curr_node);
1154 if (ret) {
1155 btrfs_abort_transaction(trans, ret);
1156 break;
1157 }
1158
1159 prev_node = curr_node;
1160 curr_node = btrfs_next_delayed_node(curr_node);
1161 /*
1162 * See the comment below about releasing path before releasing
1163 * node. If the commit of delayed items was successful the path
1164 * should always be released, but in case of an error, it may
1165 * point to locked extent buffers (a leaf at the very least).
1166 */
1167 ASSERT(path->nodes[0] == NULL);
1168 btrfs_release_delayed_node(prev_node);
1169 }
1170
1171 /*
1172 * Release the path to avoid a potential deadlock and lockdep splat when
1173 * releasing the delayed node, as that requires taking the delayed node's
1174 * mutex. If another task starts running delayed items before we take
1175 * the mutex, it will first lock the mutex and then it may try to lock
1176 * the same btree path (leaf).
1177 */
1178 btrfs_free_path(path);
1179
1180 if (curr_node)
1181 btrfs_release_delayed_node(curr_node);
1182 trans->block_rsv = block_rsv;
1183
1184 return ret;
1185 }
1186
1187 int btrfs_run_delayed_items(struct btrfs_trans_handle *trans)
1188 {
1189 return __btrfs_run_delayed_items(trans, -1);
1190 }
1191
1192 int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans, int nr)
1193 {
1194 return __btrfs_run_delayed_items(trans, nr);
1195 }
1196
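/*
 * Flush all delayed items (insertions, deletions and the delayed inode
 * update) of a single inode in the context of the given transaction.
 */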
1197 int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1198 struct btrfs_inode *inode)
1199 {
1200 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1201 struct btrfs_path *path;
1202 struct btrfs_block_rsv *block_rsv;
1203 int ret;
1204
1205 if (!delayed_node)
1206 return 0;
1207
1208 mutex_lock(&delayed_node->mutex);
1209 if (!delayed_node->count) {
1210 mutex_unlock(&delayed_node->mutex);
1211 btrfs_release_delayed_node(delayed_node);
1212 return 0;
1213 }
1214 mutex_unlock(&delayed_node->mutex);
1215
1216 path = btrfs_alloc_path();
1217 if (!path) {
1218 btrfs_release_delayed_node(delayed_node);
1219 return -ENOMEM;
1220 }
1221
1222 block_rsv = trans->block_rsv;
1223 trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
1224
1225 ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1226
1227 btrfs_release_delayed_node(delayed_node);
1228 btrfs_free_path(path);
1229 trans->block_rsv = block_rsv;
1230
1231 return ret;
1232 }
1233
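/*
 * If the inode's delayed inode item is dirty, join a transaction and write
 * the inode item out to the subvolume tree.
 */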
1234 int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
1235 {
1236 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1237 struct btrfs_trans_handle *trans;
1238 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1239 struct btrfs_path *path;
1240 struct btrfs_block_rsv *block_rsv;
1241 int ret;
1242
1243 if (!delayed_node)
1244 return 0;
1245
1246 mutex_lock(&delayed_node->mutex);
1247 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1248 mutex_unlock(&delayed_node->mutex);
1249 btrfs_release_delayed_node(delayed_node);
1250 return 0;
1251 }
1252 mutex_unlock(&delayed_node->mutex);
1253
1254 trans = btrfs_join_transaction(delayed_node->root);
1255 if (IS_ERR(trans)) {
1256 ret = PTR_ERR(trans);
1257 goto out;
1258 }
1259
1260 path = btrfs_alloc_path();
1261 if (!path) {
1262 ret = -ENOMEM;
1263 goto trans_out;
1264 }
1265
1266 block_rsv = trans->block_rsv;
1267 trans->block_rsv = &fs_info->delayed_block_rsv;
1268
1269 mutex_lock(&delayed_node->mutex);
1270 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
1271 ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
1272 path, delayed_node);
1273 else
1274 ret = 0;
1275 mutex_unlock(&delayed_node->mutex);
1276
1277 btrfs_free_path(path);
1278 trans->block_rsv = block_rsv;
1279 trans_out:
1280 btrfs_end_transaction(trans);
1281 btrfs_btree_balance_dirty(fs_info);
1282 out:
1283 btrfs_release_delayed_node(delayed_node);
1284
1285 return ret;
1286 }
1287
1288 void btrfs_remove_delayed_node(struct btrfs_inode *inode)
1289 {
1290 struct btrfs_delayed_node *delayed_node;
1291
1292 delayed_node = READ_ONCE(inode->delayed_node);
1293 if (!delayed_node)
1294 return;
1295
1296 inode->delayed_node = NULL;
1297 btrfs_release_delayed_node(delayed_node);
1298 }
1299
1300 struct btrfs_async_delayed_work {
1301 struct btrfs_delayed_root *delayed_root;
1302 int nr;
1303 struct btrfs_work work;
1304 };
1305
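/*
 * Work queue callback: flush prepared delayed nodes in the background until
 * the number of pending items drops below half the background threshold or
 * the requested number of nodes has been processed.
 */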
1306 static void btrfs_async_run_delayed_root(struct btrfs_work *work)
1307 {
1308 struct btrfs_async_delayed_work *async_work;
1309 struct btrfs_delayed_root *delayed_root;
1310 struct btrfs_trans_handle *trans;
1311 struct btrfs_path *path;
1312 struct btrfs_delayed_node *delayed_node = NULL;
1313 struct btrfs_root *root;
1314 struct btrfs_block_rsv *block_rsv;
1315 int total_done = 0;
1316
1317 async_work = container_of(work, struct btrfs_async_delayed_work, work);
1318 delayed_root = async_work->delayed_root;
1319
1320 path = btrfs_alloc_path();
1321 if (!path)
1322 goto out;
1323
1324 do {
1325 if (atomic_read(&delayed_root->items) <
1326 BTRFS_DELAYED_BACKGROUND / 2)
1327 break;
1328
1329 delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
1330 if (!delayed_node)
1331 break;
1332
1333 root = delayed_node->root;
1334
1335 trans = btrfs_join_transaction(root);
1336 if (IS_ERR(trans)) {
1337 btrfs_release_path(path);
1338 btrfs_release_prepared_delayed_node(delayed_node);
1339 total_done++;
1340 continue;
1341 }
1342
1343 block_rsv = trans->block_rsv;
1344 trans->block_rsv = &root->fs_info->delayed_block_rsv;
1345
1346 __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
1347
1348 trans->block_rsv = block_rsv;
1349 btrfs_end_transaction(trans);
1350 btrfs_btree_balance_dirty_nodelay(root->fs_info);
1351
1352 btrfs_release_path(path);
1353 btrfs_release_prepared_delayed_node(delayed_node);
1354 total_done++;
1355
1356 } while ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK)
1357 || total_done < async_work->nr);
1358
1359 btrfs_free_path(path);
1360 out:
1361 wake_up(&delayed_root->wait);
1362 kfree(async_work);
1363 }
1364
1365
1366 static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
1367 struct btrfs_fs_info *fs_info, int nr)
1368 {
1369 struct btrfs_async_delayed_work *async_work;
1370
1371 async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
1372 if (!async_work)
1373 return -ENOMEM;
1374
1375 async_work->delayed_root = delayed_root;
1376 btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
1377 NULL);
1378 async_work->nr = nr;
1379
1380 btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
1381 return 0;
1382 }
1383
1384 void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
1385 {
1386 WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
1387 }
1388
1389 static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
1390 {
1391 int val = atomic_read(&delayed_root->items_seq);
1392
1393 if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
1394 return 1;
1395
1396 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
1397 return 1;
1398
1399 return 0;
1400 }
1401
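/*
 * Throttle producers of delayed items: once enough items have accumulated,
 * kick background flushing, and above the writeback threshold also wait
 * until the backlog has shrunk.
 */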
1402 void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
1403 {
1404 struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;
1405
1406 if ((atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) ||
1407 btrfs_workqueue_normal_congested(fs_info->delayed_workers))
1408 return;
1409
1410 if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
1411 int seq;
1412 int ret;
1413
1414 seq = atomic_read(&delayed_root->items_seq);
1415
1416 ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
1417 if (ret)
1418 return;
1419
1420 wait_event_interruptible(delayed_root->wait,
1421 could_end_wait(delayed_root, seq));
1422 return;
1423 }
1424
1425 btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
1426 }
1427
1428 static void btrfs_release_dir_index_item_space(struct btrfs_trans_handle *trans)
1429 {
1430 struct btrfs_fs_info *fs_info = trans->fs_info;
1431 const u64 bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
1432
1433 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1434 return;
1435
1436 /*
1437 * Adding the new dir index item does not require touching another
1438 * leaf, so we can release 1 unit of metadata that was previously
1439 * reserved when starting the transaction. This applies only to
1440 * the case where we had a transaction start and excludes the
1441 * transaction join case (when replaying log trees).
1442 */
1443 trace_btrfs_space_reservation(fs_info, "transaction",
1444 trans->transid, bytes, 0);
1445 btrfs_block_rsv_release(fs_info, trans->block_rsv, bytes, NULL);
1446 ASSERT(trans->bytes_reserved >= bytes);
1447 trans->bytes_reserved -= bytes;
1448 }
1449
1450 /* Will return 0, -ENOMEM or -EEXIST (index number collision, unexpected). */
1451 int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
1452 const char *name, int name_len,
1453 struct btrfs_inode *dir,
1454 struct btrfs_disk_key *disk_key, u8 type,
1455 u64 index)
1456 {
1457 struct btrfs_fs_info *fs_info = trans->fs_info;
1458 const unsigned int leaf_data_size = BTRFS_LEAF_DATA_SIZE(fs_info);
1459 struct btrfs_delayed_node *delayed_node;
1460 struct btrfs_delayed_item *delayed_item;
1461 struct btrfs_dir_item *dir_item;
1462 bool reserve_leaf_space;
1463 u32 data_len;
1464 int ret;
1465
1466 delayed_node = btrfs_get_or_create_delayed_node(dir);
1467 if (IS_ERR(delayed_node))
1468 return PTR_ERR(delayed_node);
1469
1470 delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len,
1471 delayed_node,
1472 BTRFS_DELAYED_INSERTION_ITEM);
1473 if (!delayed_item) {
1474 ret = -ENOMEM;
1475 goto release_node;
1476 }
1477
1478 delayed_item->index = index;
1479
1480 dir_item = (struct btrfs_dir_item *)delayed_item->data;
1481 dir_item->location = *disk_key;
1482 btrfs_set_stack_dir_transid(dir_item, trans->transid);
1483 btrfs_set_stack_dir_data_len(dir_item, 0);
1484 btrfs_set_stack_dir_name_len(dir_item, name_len);
1485 btrfs_set_stack_dir_type(dir_item, type);
1486 memcpy((char *)(dir_item + 1), name, name_len);
1487
1488 data_len = delayed_item->data_len + sizeof(struct btrfs_item);
1489
1490 mutex_lock(&delayed_node->mutex);
1491
1492 /*
1493 * First attempt to insert the delayed item. This is to make the error
1494 * handling path simpler in case we fail (-EEXIST). There's no risk of
1495 * any other task coming in and running the delayed item before we do
1496 * the metadata space reservation below, because we are holding the
1497 * delayed node's mutex and that mutex must also be locked before the
1498 * node's delayed items can be run.
1499 */
1500 ret = __btrfs_add_delayed_item(delayed_node, delayed_item);
1501 if (unlikely(ret)) {
1502 btrfs_err(trans->fs_info,
1503 "error adding delayed dir index item, name: %.*s, index: %llu, root: %llu, dir: %llu, dir->index_cnt: %llu, delayed_node->index_cnt: %llu, error: %d",
1504 name_len, name, index, btrfs_root_id(delayed_node->root),
1505 delayed_node->inode_id, dir->index_cnt,
1506 delayed_node->index_cnt, ret);
1507 btrfs_release_delayed_item(delayed_item);
1508 btrfs_release_dir_index_item_space(trans);
1509 mutex_unlock(&delayed_node->mutex);
1510 goto release_node;
1511 }
1512
1513 if (delayed_node->index_item_leaves == 0 ||
1514 delayed_node->curr_index_batch_size + data_len > leaf_data_size) {
1515 delayed_node->curr_index_batch_size = data_len;
1516 reserve_leaf_space = true;
1517 } else {
1518 delayed_node->curr_index_batch_size += data_len;
1519 reserve_leaf_space = false;
1520 }
1521
1522 if (reserve_leaf_space) {
1523 ret = btrfs_delayed_item_reserve_metadata(trans, delayed_item);
1524 /*
1525 * Space was reserved for a dir index item insertion when we
1526 * started the transaction, so getting a failure here should be
1527 * impossible.
1528 */
1529 if (WARN_ON(ret)) {
1530 btrfs_release_delayed_item(delayed_item);
1531 mutex_unlock(&delayed_node->mutex);
1532 goto release_node;
1533 }
1534
1535 delayed_node->index_item_leaves++;
1536 } else {
1537 btrfs_release_dir_index_item_space(trans);
1538 }
1539 mutex_unlock(&delayed_node->mutex);
1540
1541 release_node:
1542 btrfs_release_delayed_node(delayed_node);
1543 return ret;
1544 }
1545
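/*
 * If a delayed insertion item with the given index exists, release it (the
 * insertion and the deletion cancel each other out) and adjust the leaf
 * space accounting. Returns 0 if such an item was found and released, 1
 * otherwise.
 */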
1546 static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
1547 struct btrfs_delayed_node *node,
1548 u64 index)
1549 {
1550 struct btrfs_delayed_item *item;
1551
1552 mutex_lock(&node->mutex);
1553 item = __btrfs_lookup_delayed_item(&node->ins_root.rb_root, index);
1554 if (!item) {
1555 mutex_unlock(&node->mutex);
1556 return 1;
1557 }
1558
1559 /*
1560 * For delayed items to insert, we track reserved metadata bytes based
1561 * on the number of leaves that we will use.
1562 * See btrfs_insert_delayed_dir_index() and
1563 * btrfs_delayed_item_reserve_metadata().
1564 */
1565 ASSERT(item->bytes_reserved == 0);
1566 ASSERT(node->index_item_leaves > 0);
1567
1568 /*
1569 * If there's only one leaf reserved, we can decrement this item from the
1570 * current batch, otherwise we can not because we don't know which leaf
1571 * it belongs to. With the current limit on delayed items, we rarely
1572 * accumulate enough dir index items to fill more than one leaf (even
1573 * when using a leaf size of 4K).
1574 */
1575 if (node->index_item_leaves == 1) {
1576 const u32 data_len = item->data_len + sizeof(struct btrfs_item);
1577
1578 ASSERT(node->curr_index_batch_size >= data_len);
1579 node->curr_index_batch_size -= data_len;
1580 }
1581
1582 btrfs_release_delayed_item(item);
1583
1584 /* If we now have no more dir index items, we can release all leaves. */
1585 if (RB_EMPTY_ROOT(&node->ins_root.rb_root)) {
1586 btrfs_delayed_item_release_leaves(node, node->index_item_leaves);
1587 node->index_item_leaves = 0;
1588 }
1589
1590 mutex_unlock(&node->mutex);
1591 return 0;
1592 }
1593
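/*
 * Record the deletion of the dir index item with the given index. If a
 * matching delayed insertion item exists the two cancel out, otherwise a
 * delayed deletion item is queued on the directory's delayed node.
 */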
1594 int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
1595 struct btrfs_inode *dir, u64 index)
1596 {
1597 struct btrfs_delayed_node *node;
1598 struct btrfs_delayed_item *item;
1599 int ret;
1600
1601 node = btrfs_get_or_create_delayed_node(dir);
1602 if (IS_ERR(node))
1603 return PTR_ERR(node);
1604
1605 ret = btrfs_delete_delayed_insertion_item(trans->fs_info, node, index);
1606 if (!ret)
1607 goto end;
1608
1609 item = btrfs_alloc_delayed_item(0, node, BTRFS_DELAYED_DELETION_ITEM);
1610 if (!item) {
1611 ret = -ENOMEM;
1612 goto end;
1613 }
1614
1615 item->index = index;
1616
1617 ret = btrfs_delayed_item_reserve_metadata(trans, item);
1618 /*
1619 * We reserved enough space when we started the transaction, so a
1620 * metadata reservation failure here should be impossible.
1621 */
1622 if (ret < 0) {
1623 btrfs_err(trans->fs_info,
1624 "metadata reservation failed for delayed dir item deletion, should have been reserved");
1625 btrfs_release_delayed_item(item);
1626 goto end;
1627 }
1628
1629 mutex_lock(&node->mutex);
1630 ret = __btrfs_add_delayed_item(node, item);
1631 if (unlikely(ret)) {
1632 btrfs_err(trans->fs_info,
1633 "failed to add delayed dir index item (index: %llu) into the deletion tree of the delayed node (root id: %llu, inode id: %llu, errno: %d)",
1634 index, node->root->root_key.objectid,
1635 node->inode_id, ret);
1636 btrfs_delayed_item_release_metadata(dir->root, item);
1637 btrfs_release_delayed_item(item);
1638 }
1639 mutex_unlock(&node->mutex);
1640 end:
1641 btrfs_release_delayed_node(node);
1642 return ret;
1643 }
1644
1645 int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
1646 {
1647 struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
1648
1649 if (!delayed_node)
1650 return -ENOENT;
1651
1652 /*
1653 * Since we hold the i_mutex of this directory, no new directory index
1654 * can be added to the delayed node and index_cnt cannot be updated in
1655 * the meantime, so we don't need to lock the delayed node.
1656 */
1657 if (!delayed_node->index_cnt) {
1658 btrfs_release_delayed_node(delayed_node);
1659 return -EINVAL;
1660 }
1661
1662 inode->index_cnt = delayed_node->index_cnt;
1663 btrfs_release_delayed_node(delayed_node);
1664 return 0;
1665 }
1666
1667 bool btrfs_readdir_get_delayed_items(struct inode *inode,
1668 u64 last_index,
1669 struct list_head *ins_list,
1670 struct list_head *del_list)
1671 {
1672 struct btrfs_delayed_node *delayed_node;
1673 struct btrfs_delayed_item *item;
1674
1675 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1676 if (!delayed_node)
1677 return false;
1678
1679 /*
1680 * We can only do one readdir with delayed items at a time because of
1681 * item->readdir_list.
1682 */
1683 btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
1684 btrfs_inode_lock(inode, 0);
1685
1686 mutex_lock(&delayed_node->mutex);
1687 item = __btrfs_first_delayed_insertion_item(delayed_node);
1688 while (item && item->index <= last_index) {
1689 refcount_inc(&item->refs);
1690 list_add_tail(&item->readdir_list, ins_list);
1691 item = __btrfs_next_delayed_item(item);
1692 }
1693
1694 item = __btrfs_first_delayed_deletion_item(delayed_node);
1695 while (item && item->index <= last_index) {
1696 refcount_inc(&item->refs);
1697 list_add_tail(&item->readdir_list, del_list);
1698 item = __btrfs_next_delayed_item(item);
1699 }
1700 mutex_unlock(&delayed_node->mutex);
1701 /*
1702 * This delayed node is still cached in the btrfs inode, so refs
1703 * must be > 1 now, and we don't need to check whether it is going
1704 * to be freed or not.
1705 *
1706 * Besides that, this function is used to read the directory, and we
1707 * do not insert/delete delayed items during this period. So we also
1708 * don't need to requeue or dequeue this delayed node.
1709 */
1710 refcount_dec(&delayed_node->refs);
1711
1712 return true;
1713 }
1714
1715 void btrfs_readdir_put_delayed_items(struct inode *inode,
1716 struct list_head *ins_list,
1717 struct list_head *del_list)
1718 {
1719 struct btrfs_delayed_item *curr, *next;
1720
1721 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1722 list_del(&curr->readdir_list);
1723 if (refcount_dec_and_test(&curr->refs))
1724 kfree(curr);
1725 }
1726
1727 list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1728 list_del(&curr->readdir_list);
1729 if (refcount_dec_and_test(&curr->refs))
1730 kfree(curr);
1731 }
1732
1733 /*
1734 * The VFS is going to do up_read(), so we need to downgrade back to a
1735 * read lock.
1736 */
1737 downgrade_write(&inode->i_rwsem);
1738 }
1739
1740 int btrfs_should_delete_dir_index(struct list_head *del_list,
1741 u64 index)
1742 {
1743 struct btrfs_delayed_item *curr;
1744 int ret = 0;
1745
1746 list_for_each_entry(curr, del_list, readdir_list) {
1747 if (curr->index > index)
1748 break;
1749 if (curr->index == index) {
1750 ret = 1;
1751 break;
1752 }
1753 }
1754 return ret;
1755 }
1756
1757 /*
1758 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
1759 */
1761 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
1762 struct list_head *ins_list)
1763 {
1764 struct btrfs_dir_item *di;
1765 struct btrfs_delayed_item *curr, *next;
1766 struct btrfs_key location;
1767 char *name;
1768 int name_len;
1769 int over = 0;
1770 unsigned char d_type;
1771
1772 if (list_empty(ins_list))
1773 return 0;
1774
1775 /*
1776 * The data of a delayed item never changes, so we don't need to
1777 * lock the items. And since we hold the i_mutex of the directory,
1778 * nobody can delete any directory index right now.
1779 */
1780 list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1781 list_del(&curr->readdir_list);
1782
1783 if (curr->index < ctx->pos) {
1784 if (refcount_dec_and_test(&curr->refs))
1785 kfree(curr);
1786 continue;
1787 }
1788
1789 ctx->pos = curr->index;
1790
1791 di = (struct btrfs_dir_item *)curr->data;
1792 name = (char *)(di + 1);
1793 name_len = btrfs_stack_dir_name_len(di);
1794
1795 d_type = fs_ftype_to_dtype(di->type);
1796 btrfs_disk_key_to_cpu(&location, &di->location);
1797
1798 over = !dir_emit(ctx, name, name_len,
1799 location.objectid, d_type);
1800
1801 if (refcount_dec_and_test(&curr->refs))
1802 kfree(curr);
1803
1804 if (over)
1805 return 1;
1806 ctx->pos++;
1807 }
1808 return 0;
1809 }
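/*
 * Editorial sketch (not from the original source): how the readdir path is
 * expected to combine the three helpers above.  The real caller lives in
 * fs/btrfs/inode.c; the function below is a simplified, hypothetical
 * approximation that only shows the call order and the cleanup rule.
 */
static inline void demo_readdir_with_delayed_items(struct inode *dir,
						   struct dir_context *ctx,
						   u64 last_index)
{
	LIST_HEAD(ins_list);
	LIST_HEAD(del_list);
	bool put;

	/*
	 * Snapshot the pending delayed insertions/deletions.  When a delayed
	 * node exists this also trades the shared inode lock for an exclusive
	 * one, since only one readdir may use item->readdir_list at a time.
	 */
	put = btrfs_readdir_get_delayed_items(dir, last_index,
					      &ins_list, &del_list);

	/*
	 * A real readdir walks the on-disk dir index items here, skipping any
	 * index for which btrfs_should_delete_dir_index(&del_list, index)
	 * returns 1, then emits the entries that exist only in memory:
	 */
	btrfs_readdir_delayed_dir_index(ctx, &ins_list);

	/* Drop the per-item references and downgrade back to a read lock. */
	if (put)
		btrfs_readdir_put_delayed_items(dir, &ins_list, &del_list);
}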
1810
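/*
 * Editorial note: copy the current VFS inode state (ownership, size,
 * timestamps, flags, ...) into the given in-memory btrfs_inode_item, so the
 * delayed-inode machinery can later write it into the inode item in the btree.
 */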
1811 static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1812 struct btrfs_inode_item *inode_item,
1813 struct inode *inode)
1814 {
1815 u64 flags;
1816
1817 btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
1818 btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
1819 btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
1820 btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
1821 btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
1822 btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1823 btrfs_set_stack_inode_generation(inode_item,
1824 BTRFS_I(inode)->generation);
1825 btrfs_set_stack_inode_sequence(inode_item,
1826 inode_peek_iversion(inode));
1827 btrfs_set_stack_inode_transid(inode_item, trans->transid);
1828 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1829 flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
1830 BTRFS_I(inode)->ro_flags);
1831 btrfs_set_stack_inode_flags(inode_item, flags);
1832 btrfs_set_stack_inode_block_group(inode_item, 0);
1833
1834 btrfs_set_stack_timespec_sec(&inode_item->atime,
1835 inode->i_atime.tv_sec);
1836 btrfs_set_stack_timespec_nsec(&inode_item->atime,
1837 inode->i_atime.tv_nsec);
1838
1839 btrfs_set_stack_timespec_sec(&inode_item->mtime,
1840 inode->i_mtime.tv_sec);
1841 btrfs_set_stack_timespec_nsec(&inode_item->mtime,
1842 inode->i_mtime.tv_nsec);
1843
1844 btrfs_set_stack_timespec_sec(&inode_item->ctime,
1845 inode->i_ctime.tv_sec);
1846 btrfs_set_stack_timespec_nsec(&inode_item->ctime,
1847 inode->i_ctime.tv_nsec);
1848
1849 btrfs_set_stack_timespec_sec(&inode_item->otime,
1850 BTRFS_I(inode)->i_otime.tv_sec);
1851 btrfs_set_stack_timespec_nsec(&inode_item->otime,
1852 BTRFS_I(inode)->i_otime.tv_nsec);
1853 }
1854
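/*
 * Editorial note: if the inode has a dirty delayed inode item, populate the
 * VFS inode from it instead of from the btree copy, which may be stale.
 * Returns -ENOENT when the delayed node has nothing newer (typically used
 * from the inode read path).
 */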
1855 int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1856 {
1857 struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
1858 struct btrfs_delayed_node *delayed_node;
1859 struct btrfs_inode_item *inode_item;
1860
1861 delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
1862 if (!delayed_node)
1863 return -ENOENT;
1864
1865 mutex_lock(&delayed_node->mutex);
1866 if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1867 mutex_unlock(&delayed_node->mutex);
1868 btrfs_release_delayed_node(delayed_node);
1869 return -ENOENT;
1870 }
1871
1872 inode_item = &delayed_node->inode_item;
1873
1874 i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
1875 i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
1876 btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
1877 btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
1878 round_up(i_size_read(inode), fs_info->sectorsize));
1879 inode->i_mode = btrfs_stack_inode_mode(inode_item);
1880 set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1881 inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1882 BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1883 BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
1884
1885 inode_set_iversion_queried(inode,
1886 btrfs_stack_inode_sequence(inode_item));
1887 inode->i_rdev = 0;
1888 *rdev = btrfs_stack_inode_rdev(inode_item);
1889 btrfs_inode_split_flags(btrfs_stack_inode_flags(inode_item),
1890 &BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);
1891
1892 inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
1893 inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
1894
1895 inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
1896 inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
1897
1898 inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
1899 inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
1900
1901 BTRFS_I(inode)->i_otime.tv_sec =
1902 btrfs_stack_timespec_sec(&inode_item->otime);
1903 BTRFS_I(inode)->i_otime.tv_nsec =
1904 btrfs_stack_timespec_nsec(&inode_item->otime);
1905
1906 inode->i_generation = BTRFS_I(inode)->generation;
1907 BTRFS_I(inode)->index_cnt = (u64)-1;
1908
1909 mutex_unlock(&delayed_node->mutex);
1910 btrfs_release_delayed_node(delayed_node);
1911 return 0;
1912 }
1913
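/*
 * Editorial note: record an inode update in the delayed node instead of
 * updating the btree right away.  The first update reserves metadata space
 * and sets BTRFS_DELAYED_NODE_INODE_DIRTY; subsequent updates only refresh
 * the cached inode item, which keeps repeated inode updates cheap.
 */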
1914 int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1915 struct btrfs_root *root,
1916 struct btrfs_inode *inode)
1917 {
1918 struct btrfs_delayed_node *delayed_node;
1919 int ret = 0;
1920
1921 delayed_node = btrfs_get_or_create_delayed_node(inode);
1922 if (IS_ERR(delayed_node))
1923 return PTR_ERR(delayed_node);
1924
1925 mutex_lock(&delayed_node->mutex);
1926 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
1927 fill_stack_inode_item(trans, &delayed_node->inode_item,
1928 &inode->vfs_inode);
1929 goto release_node;
1930 }
1931
1932 ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
1933 if (ret)
1934 goto release_node;
1935
1936 fill_stack_inode_item(trans, &delayed_node->inode_item, &inode->vfs_inode);
1937 set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
1938 delayed_node->count++;
1939 atomic_inc(&root->fs_info->delayed_root->items);
1940 release_node:
1941 mutex_unlock(&delayed_node->mutex);
1942 btrfs_release_delayed_node(delayed_node);
1943 return ret;
1944 }
1945
1946 int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
1947 {
1948 struct btrfs_fs_info *fs_info = inode->root->fs_info;
1949 struct btrfs_delayed_node *delayed_node;
1950
1951 /*
1952 * We don't do delayed inode updates during log recovery because it
1953 * leads to ENOSPC problems. This means we also can't do delayed
1954 * inode ref deletions.
1955 */
1956 if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
1957 return -EAGAIN;
1958
1959 delayed_node = btrfs_get_or_create_delayed_node(inode);
1960 if (IS_ERR(delayed_node))
1961 return PTR_ERR(delayed_node);
1962
1963 /*
1964 * We don't reserve space for inode ref deletion because:
1965 * - We ONLY do async inode ref deletion for an inode that has only
1966 * one link (i_nlink == 1), which means there is only one inode ref.
1967 * In most cases the inode ref and the inode item are in the same
1968 * leaf, and we deal with them at the same time. Since we are sure
1969 * we will reserve space for the inode item, it is unnecessary to
1970 * reserve space for the inode ref deletion as well.
1971 * - If the inode ref and the inode item are not in the same leaf,
1972 * we still needn't worry about ENOSPC, because we reserve much more
1973 * space for the inode update than it needs.
1974 * - At the very worst, we can steal some space from the global
1975 * reservation, but that is very rare.
1976 */
1977 mutex_lock(&delayed_node->mutex);
1978 if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
1979 goto release_node;
1980
1981 set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
1982 delayed_node->count++;
1983 atomic_inc(&fs_info->delayed_root->items);
1984 release_node:
1985 mutex_unlock(&delayed_node->mutex);
1986 btrfs_release_delayed_node(delayed_node);
1987 return 0;
1988 }
1989
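/*
 * Editorial note: throw away all pending work of a delayed node: release
 * every queued insertion and deletion item (and their reserved metadata and
 * leaf space), and drop a pending inode ref deletion and a dirty inode item
 * if present.
 */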
1990 static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
1991 {
1992 struct btrfs_root *root = delayed_node->root;
1993 struct btrfs_fs_info *fs_info = root->fs_info;
1994 struct btrfs_delayed_item *curr_item, *prev_item;
1995
1996 mutex_lock(&delayed_node->mutex);
1997 curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
1998 while (curr_item) {
1999 prev_item = curr_item;
2000 curr_item = __btrfs_next_delayed_item(prev_item);
2001 btrfs_release_delayed_item(prev_item);
2002 }
2003
2004 if (delayed_node->index_item_leaves > 0) {
2005 btrfs_delayed_item_release_leaves(delayed_node,
2006 delayed_node->index_item_leaves);
2007 delayed_node->index_item_leaves = 0;
2008 }
2009
2010 curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
2011 while (curr_item) {
2012 btrfs_delayed_item_release_metadata(root, curr_item);
2013 prev_item = curr_item;
2014 curr_item = __btrfs_next_delayed_item(prev_item);
2015 btrfs_release_delayed_item(prev_item);
2016 }
2017
2018 btrfs_release_delayed_iref(delayed_node);
2019
2020 if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
2021 btrfs_delayed_inode_release_metadata(fs_info, delayed_node, false);
2022 btrfs_release_delayed_inode(delayed_node);
2023 }
2024 mutex_unlock(&delayed_node->mutex);
2025 }
2026
2027 void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
2028 {
2029 struct btrfs_delayed_node *delayed_node;
2030
2031 delayed_node = btrfs_get_delayed_node(inode);
2032 if (!delayed_node)
2033 return;
2034
2035 __btrfs_kill_delayed_node(delayed_node);
2036 btrfs_release_delayed_node(delayed_node);
2037 }
2038
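/*
 * Editorial note: kill the delayed nodes of every inode in this root.  The
 * radix tree is scanned in batches of 8, resuming after the highest inode id
 * seen; nodes whose refcount already hit zero are being freed by their last
 * holder and are skipped rather than resurrected.
 */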
2039 void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
2040 {
2041 u64 inode_id = 0;
2042 struct btrfs_delayed_node *delayed_nodes[8];
2043 int i, n;
2044
2045 while (1) {
2046 spin_lock(&root->inode_lock);
2047 n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
2048 (void **)delayed_nodes, inode_id,
2049 ARRAY_SIZE(delayed_nodes));
2050 if (!n) {
2051 spin_unlock(&root->inode_lock);
2052 break;
2053 }
2054
2055 inode_id = delayed_nodes[n - 1]->inode_id + 1;
2056 for (i = 0; i < n; i++) {
2057 /*
2058 * Don't increase refs in case the node is dead and
2059 * about to be removed from the tree in the loop below
2060 */
2061 if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
2062 delayed_nodes[i] = NULL;
2063 }
2064 spin_unlock(&root->inode_lock);
2065
2066 for (i = 0; i < n; i++) {
2067 if (!delayed_nodes[i])
2068 continue;
2069 __btrfs_kill_delayed_node(delayed_nodes[i]);
2070 btrfs_release_delayed_node(delayed_nodes[i]);
2071 }
2072 }
2073 }
2074
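/*
 * Editorial note: kill every delayed node still queued on the global
 * delayed_root, typically as part of transaction abort or unmount error
 * handling.
 */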
2075 void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
2076 {
2077 struct btrfs_delayed_node *curr_node, *prev_node;
2078
2079 curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
2080 while (curr_node) {
2081 __btrfs_kill_delayed_node(curr_node);
2082
2083 prev_node = curr_node;
2084 curr_node = btrfs_next_delayed_node(curr_node);
2085 btrfs_release_delayed_node(prev_node);
2086 }
2087 }
2088
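/*
 * Editorial note: collect the directory's pending delayed insertion and
 * deletion items onto the caller's log lists so tree-log can copy them,
 * without dequeuing them from the delayed node.  Items already on another
 * task's log list are skipped (see the comment in the loop below).
 */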
2089 void btrfs_log_get_delayed_items(struct btrfs_inode *inode,
2090 struct list_head *ins_list,
2091 struct list_head *del_list)
2092 {
2093 struct btrfs_delayed_node *node;
2094 struct btrfs_delayed_item *item;
2095
2096 node = btrfs_get_delayed_node(inode);
2097 if (!node)
2098 return;
2099
2100 mutex_lock(&node->mutex);
2101 item = __btrfs_first_delayed_insertion_item(node);
2102 while (item) {
2103 /*
2104 * It's possible that the item is already in a log list. This
2105 * can happen in case two tasks are trying to log the same
2106 * directory. For example if we have tasks A and task B:
2107 *
2108 * Task A collected the delayed items into a log list while
2109 * under the inode's log_mutex (at btrfs_log_inode()), but it
2110 * only releases the items after logging the inodes they point
2111 * to (if they are new inodes), which happens after unlocking
2112 * the log mutex;
2113 *
2114 * Task B enters btrfs_log_inode() and acquires the log_mutex
2115 * of the same directory inode, before task A releases the
2116 * delayed items. This can happen for example when logging some
2117 * inode we need to trigger logging of its parent directory, so
2118 * logging two files that have the same parent directory can
2119 * lead to this.
2120 *
2121 * If this happens, just ignore delayed items already in a log
2122 * list. All the tasks logging the directory are under a log
2123 * transaction and whichever finishes first can not sync the log
2124 * before the other completes and leaves the log transaction.
2125 */
2126 if (!item->logged && list_empty(&item->log_list)) {
2127 refcount_inc(&item->refs);
2128 list_add_tail(&item->log_list, ins_list);
2129 }
2130 item = __btrfs_next_delayed_item(item);
2131 }
2132
2133 item = __btrfs_first_delayed_deletion_item(node);
2134 while (item) {
2135 /* The item may already be on a log list, for the same reason mentioned above. */
2136 if (!item->logged && list_empty(&item->log_list)) {
2137 refcount_inc(&item->refs);
2138 list_add_tail(&item->log_list, del_list);
2139 }
2140 item = __btrfs_next_delayed_item(item);
2141 }
2142 mutex_unlock(&node->mutex);
2143
2144 /*
2145 * We are called during inode logging, which means the inode is in use
2146 * and can not be evicted before we finish logging the inode. So we never
2147 * have the last reference on the delayed inode.
2148 * Also, we don't use btrfs_release_delayed_node() because that would
2149 * requeue the delayed inode (change its order in the list of prepared
2150 * nodes) and we don't want to do such change because we don't create or
2151 * delete delayed items.
2152 */
2153 ASSERT(refcount_read(&node->refs) > 1);
2154 refcount_dec(&node->refs);
2155 }
2156
2157 void btrfs_log_put_delayed_items(struct btrfs_inode *inode,
2158 struct list_head *ins_list,
2159 struct list_head *del_list)
2160 {
2161 struct btrfs_delayed_node *node;
2162 struct btrfs_delayed_item *item;
2163 struct btrfs_delayed_item *next;
2164
2165 node = btrfs_get_delayed_node(inode);
2166 if (!node)
2167 return;
2168
2169 mutex_lock(&node->mutex);
2170
2171 list_for_each_entry_safe(item, next, ins_list, log_list) {
2172 item->logged = true;
2173 list_del_init(&item->log_list);
2174 if (refcount_dec_and_test(&item->refs))
2175 kfree(item);
2176 }
2177
2178 list_for_each_entry_safe(item, next, del_list, log_list) {
2179 item->logged = true;
2180 list_del_init(&item->log_list);
2181 if (refcount_dec_and_test(&item->refs))
2182 kfree(item);
2183 }
2184
2185 mutex_unlock(&node->mutex);
2186
2187 /*
2188 * We are called during inode logging, which means the inode is in use
2189 * and can not be evicted before we finish logging the inode. So we never
2190 * have the last reference on the delayed inode.
2191 * Also, we don't use btrfs_release_delayed_node() because that would
2192 * requeue the delayed inode (change its order in the list of prepared
2193 * nodes) and we don't want to do such change because we don't create or
2194 * delete delayed items.
2195 */
2196 ASSERT(refcount_read(&node->refs) > 1);
2197 refcount_dec(&node->refs);
2198 }
2199
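/*
 * Editorial sketch (not from the original source): how directory logging is
 * expected to bracket the two helpers above.  The real caller is in
 * fs/btrfs/tree-log.c; the function and the middle step below are
 * hypothetical and only illustrate the get/put pairing.
 */
static inline void demo_log_dir_delayed_items(struct btrfs_inode *dir)
{
	LIST_HEAD(ins_list);
	LIST_HEAD(del_list);

	/* Grab references to the items that must be represented in the log. */
	btrfs_log_get_delayed_items(dir, &ins_list, &del_list);

	/* ... copy the collected items into the log tree here ... */

	/* Mark them logged and drop the references taken above. */
	btrfs_log_put_delayed_items(dir, &ins_list, &del_list);
}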