/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16
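
/*
 * How the thresholds above are used below (summary, not authoritative):
 * once the number of pending delayed items reaches
 * BTRFS_DELAYED_BACKGROUND, btrfs_balance_delayed_items() kicks off an
 * async flush worker; at BTRFS_DELAYED_WRITEBACK the caller also blocks
 * and waits for the count to drop.  finish_one_item() wakes waiters when
 * the count falls below the background threshold or every
 * BTRFS_DELAYED_BATCH completions, so waiters are not woken for every
 * single completed item.
 */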

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	refcount_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}
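
/*
 * Example (illustrative only): two delayed items with keys
 * (257 BTRFS_DIR_INDEX_KEY 100) and (257 BTRFS_DIR_INDEX_KEY 101) are
 * "continuous" -- same directory inode, same type, adjacent index
 * offsets -- so they can be batched into a single leaf operation.  An
 * item with offset 103 following one with offset 101 would break the
 * run.
 */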

static struct btrfs_delayed_node *btrfs_get_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	struct btrfs_delayed_node *node;

	node = READ_ONCE(btrfs_inode->delayed_node);
	if (node) {
		refcount_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);

	if (node) {
		if (btrfs_inode->delayed_node) {
			refcount_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}

		/*
		 * It's possible that we're racing into the middle of removing
		 * this node from the radix tree.  In this case, the refcount
		 * was zero and it should never go back to one.  Just return
		 * NULL like it was never in the radix at all; our release
		 * function is in the process of removing it.
		 *
		 * Some implementations of refcount_inc refuse to bump the
		 * refcount once it has hit zero.  If we don't do this dance
		 * here, refcount_inc() may decide to just WARN_ONCE() instead
		 * of actually bumping the refcount.
		 *
		 * If this node is properly in the radix, we want to bump the
		 * refcount twice, once for the inode and once for this get
		 * operation.
		 */
		if (refcount_inc_not_zero(&node->refs)) {
			refcount_inc(&node->refs);
			btrfs_inode->delayed_node = node;
		} else {
			node = NULL;
		}

		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}
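
/*
 * A minimal sketch of the lookup-under-lock pattern used above
 * (illustrative, not part of the real API):
 *
 *	spin_lock(&lock);
 *	obj = lookup(tree, key);
 *	if (obj && !refcount_inc_not_zero(&obj->refs))
 *		obj = NULL;	/. release path already owns it ./
 *	spin_unlock(&lock);
 *
 * refcount_inc_not_zero() is what makes it safe to race with the release
 * side: once the count has hit zero it stays at zero.
 */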

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
		struct btrfs_inode *btrfs_inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(btrfs_inode);
	int ret;

again:
	node = btrfs_get_delayed_node(btrfs_inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	refcount_set(&node->refs, 2);

	ret = radix_tree_preload(GFP_NOFS);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}
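
/*
 * Typical caller pattern (see btrfs_insert_delayed_dir_index() below):
 *
 *	node = btrfs_get_or_create_delayed_node(dir);
 *	if (IS_ERR(node))
 *		return PTR_ERR(node);
 *	...	queue items under node->mutex	...
 *	btrfs_release_delayed_node(node);
 *
 * The reference taken here must always be dropped with
 * btrfs_release_delayed_node() or the prepared-list variant.
 */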

/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		refcount_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		refcount_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	refcount_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (refcount_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;

		spin_lock(&root->inode_lock);
		/*
		 * Once our refcount goes to zero, nobody is allowed to bump it
		 * back up.  We can delete it now.
		 */
		ASSERT(refcount_read(&delayed_node->refs) == 0);
		radix_tree_delete(&root->delayed_nodes_tree,
				  delayed_node->inode_id);
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, delayed_node);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	refcount_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		refcount_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @delayed_node: pointer to the delayed node
 * @key:	  the key to look up
 * @prev:	  used to store the prev item if the right item isn't found
 * @next:	  used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}
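
/*
 * Usage sketch for the prev/next out-parameters (hypothetical caller):
 *
 *	struct btrfs_delayed_item *prev, *next;
 *	item = __btrfs_lookup_delayed_item(&node->ins_root, &key,
 *					   &prev, &next);
 *	if (!item) {
 *		/. key absent: prev/next bracket where it would sit;
 *		   either may be NULL at the ends of the tree ./
 *	}
 *
 * Callers that only care about exact matches pass NULL for both, as
 * __btrfs_lookup_delayed_insertion_item() does below.
 */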

static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	return __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
}

static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				 rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
			delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (refcount_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_fs_info *fs_info,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(fs_info, rsv,
				item->bytes_reserved);
}

static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;
	bool release = false;

	src_rsv = trans->block_rsv;
	dst_rsv = &fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1);

	/*
	 * If our block_rsv is the delalloc block reserve then check and see if
	 * we have our extra reservation for updating the inode.  If not fall
	 * through and try to reserve space quickly.
	 *
	 * We used to try and steal from the delalloc block rsv or the global
	 * reserve, but we'd steal a full reservation, which isn't kind.  We are
	 * here through delalloc which means we've likely just cowed down close
	 * to the leaf that contains the inode, so we would steal less just
	 * doing the fallback inode update, so if we do end up having to steal
	 * from the global block rsv we hopefully only steal one or two blocks
	 * worth which is less likely to hurt us.
	 */
	if (src_rsv && src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
		spin_lock(&inode->lock);
		if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
				       &inode->runtime_flags))
			release = true;
		else
			src_rsv = NULL;
		spin_unlock(&inode->lock);
	}

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed.  This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we're accounted for.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	}

	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);

	/*
	 * Migrate only takes a reservation, it doesn't touch the size of the
	 * block_rsv.  This is to simplify people who don't normally have
	 * things migrated from their block rsv.  If they go to release their
	 * reservation, that will decrease the size as well, so if migrate
	 * reduced the size we'd end up with a negative size.  But for the
	 * delalloc_meta_reserved stuff we will only know to drop 1
	 * reservation, but we could in fact do this reserve/migrate dance
	 * several times between the time we did the original reservation and
	 * we'd clean it up.  So to take care of this, release the space for
	 * the meta reservation here.  I think it may be time for a
	 * documentation page on how block rsvs work.
	 */
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	if (release) {
		trace_btrfs_space_reservation(fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 0);
		btrfs_block_rsv_release(fs_info, src_rsv, num_bytes);
	}

	return ret;
}
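
/*
 * Rough summary of the three reservation paths above (descriptive only):
 *  1) delalloc rsv holding the extra inode-update reservation: migrate
 *     one unit into the delayed rsv, then release the delalloc copy;
 *  2) no usable source rsv: try a fresh BTRFS_RESERVE_NO_FLUSH add,
 *     mapping -EAGAIN to -ENOSPC for btrfs_dirty_inode;
 *  3) otherwise: migrate one unit from the transaction's rsv.
 */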

static void btrfs_delayed_inode_release_metadata(struct btrfs_fs_info *fs_info,
						struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(fs_info, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(fs_info, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of the continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * We need to allocate some memory, which might cause the task to
	 * sleep, so set all the locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(fs_info, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}
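
/*
 * Sizing note (illustrative): each batched item costs data_len bytes of
 * leaf data plus sizeof(struct btrfs_item) for its header, so the loop
 * above stops once the running total would exceed the leaf's free space,
 * the run of adjacent dir-index keys ends, or the items are exhausted.
 */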

/*
 * This helper handles the simple insertions that don't need to extend an
 * existing item with new data, such as directory name index insertion and
 * inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(fs_info, delayed_item);
	return 0;
}

/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(fs_info, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}

static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}

static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for the inode who has only one link,
	 * so there is only one iref. The case that several irefs are
	 * in the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(fs_info, node);
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}

/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	delayed_root = fs_info->delayed_root;

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_fs_info *fs_info)
{
	return __btrfs_run_delayed_items(trans, fs_info, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info, int nr)
{
	return __btrfs_run_delayed_items(trans, fs_info, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}

void btrfs_remove_delayed_node(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = READ_ONCE(inode->delayed_node);
	if (!delayed_node)
		return;

	inode->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

again:
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
		goto free_path;

	delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
	if (!delayed_node)
		goto free_path;

	path->leave_spinning = 1;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto release_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	trans->block_rsv = block_rsv;
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty_nodelay(root->fs_info);

release_path:
	btrfs_release_path(path);
	total_done++;

	btrfs_release_prepared_delayed_node(delayed_node);
	if ((async_work->nr == 0 && total_done < BTRFS_DELAYED_WRITEBACK) ||
	    total_done < async_work->nr)
		goto again;

free_path:
	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}


static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND ||
	    btrfs_workqueue_normal_congested(fs_info->delayed_workers))
		return 0;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
			btrfs_async_run_delayed_root, NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_fs_info *fs_info)
{
	WARN_ON(btrfs_first_delayed_node(fs_info->delayed_root));
}

static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}

void btrfs_balance_delayed_items(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_root *delayed_root = fs_info->delayed_root;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}
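
/*
 * Flow summary (descriptive only): below BTRFS_DELAYED_BACKGROUND (128)
 * pending items the function above is a no-op; between 128 and
 * BTRFS_DELAYED_WRITEBACK (512) an async worker is queued to flush up to
 * BTRFS_DELAYED_BATCH nodes; at 512 or more the caller additionally
 * sleeps until could_end_wait() sees either a lower item count or
 * BTRFS_DELAYED_BATCH items' worth of progress in items_seq.
 */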

/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   const char *name, int name_len,
				   struct btrfs_inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, delayed_item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(fs_info,
			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
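
/*
 * Layout sketch of the delayed item queued above (descriptive only): the
 * key is (dir ino, BTRFS_DIR_INDEX_KEY, index) and the data is a stack
 * btrfs_dir_item followed immediately by the name bytes -- the exact
 * bytes that the insert helpers later copy into the leaf with
 * write_extent_buffer().
 */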

static int btrfs_delete_delayed_insertion_item(struct btrfs_fs_info *fs_info,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(fs_info, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_inode *dir, u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	item_key.type = BTRFS_DIR_INDEX_KEY;
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(fs_info, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, fs_info, item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(fs_info,
			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->objectid, node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}
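
/*
 * Note on the fast path above (descriptive only): if the dir index being
 * deleted is still pending as a delayed insertion, the two simply cancel
 * out in memory and no deletion item is queued; only when the entry has
 * already reached the btree does a delayed deletion item get added.
 */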

int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we have held i_mutex of this directory, it is impossible that
	 * a new directory index is added into the delayed node and index_cnt
	 * is updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	inode->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

bool btrfs_readdir_get_delayed_items(struct inode *inode,
				     struct list_head *ins_list,
				     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return false;

	/*
	 * We can only do one readdir with delayed items at a time because of
	 * item->readdir_list.
	 */
	inode_unlock_shared(inode);
	inode_lock(inode);

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		refcount_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check whether it is going to be
	 * freed or not.
	 *
	 * Besides that, this function is used to read the directory, and we
	 * do not insert/delete delayed items during that period. So we also
	 * needn't requeue or dequeue this delayed node.
	 */
1647 	refcount_dec(&delayed_node->refs);
1648 
1649 	return true;
1650 }
1651 
btrfs_readdir_put_delayed_items(struct inode * inode,struct list_head * ins_list,struct list_head * del_list)1652 void btrfs_readdir_put_delayed_items(struct inode *inode,
1653 				     struct list_head *ins_list,
1654 				     struct list_head *del_list)
1655 {
1656 	struct btrfs_delayed_item *curr, *next;
1657 
1658 	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
1659 		list_del(&curr->readdir_list);
1660 		if (refcount_dec_and_test(&curr->refs))
1661 			kfree(curr);
1662 	}
1663 
1664 	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
1665 		list_del(&curr->readdir_list);
1666 		if (refcount_dec_and_test(&curr->refs))
1667 			kfree(curr);
1668 	}
1669 
1670 	/*
1671 	 * The VFS is going to do up_read(), so we need to downgrade back to a
1672 	 * read lock.
1673 	 */
1674 	downgrade_write(&inode->i_rwsem);
1675 }
1676 
int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr;
	int ret = 0;

	list_for_each_entry(curr, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;
		if (curr->key.offset == index) {
			ret = 1;
			break;
		}
	}
	return ret;
}

/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 *
 * Emit the directory entries collected on @ins_list and drop the reference
 * on each item once it has been consumed.
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * The data of a delayed item never changes, so we don't need to lock
	 * the items. And since we hold the directory's i_mutex, nobody can
	 * delete any directory index now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (refcount_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
			       location.objectid, d_type);

		if (refcount_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
		ctx->pos++;
	}
	return 0;
}

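/*
 * fill_stack_inode_item - copy an in-memory inode into a stack inode item
 *
 * Fill @inode_item with the current state of @inode (ownership, size,
 * mode, link count, timestamps and the btrfs specific fields) so it can
 * later be written back to the inode item in the fs tree.
 */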
static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(&inode_item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_nsec);
}

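/*
 * btrfs_fill_inode - initialize a VFS inode from its delayed inode item
 *
 * If the inode has a delayed node with a dirty inode item, copy that item
 * into @inode and report the device number through @rdev, so the caller
 * sees the most recent state.  Returns -ENOENT when there is no delayed
 * node or its inode item is not dirty.
 */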
int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;

	delayed_node = btrfs_get_delayed_node(BTRFS_I(inode));
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

	inode->i_version = btrfs_stack_inode_sequence(inode_item);
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_stack_timespec_sec(&inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_stack_timespec_nsec(&inode_item->otime);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

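/*
 * btrfs_delayed_update_inode - record an inode update in the delayed node
 *
 * Stash the current inode state in the delayed node's inode item so the
 * on-disk inode item can be updated later.  Metadata space is reserved
 * only the first time the item is made dirty.
 */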
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(BTRFS_I(inode));
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, BTRFS_I(inode),
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

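/*
 * btrfs_delayed_delete_inode_ref - schedule the deletion of an inode ref
 *
 * Mark the delayed node so that the single inode ref of @inode is deleted
 * when the delayed items are run.  Returns -EAGAIN during log recovery,
 * where delayed inode updates are not allowed.
 */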
int btrfs_delayed_delete_inode_ref(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_delayed_node *delayed_node;

	/*
	 * We don't do delayed inode updates during log recovery because it
	 * leads to ENOSPC problems. This means we also can't do delayed
	 * inode refs.
	 */
	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for deleting the inode ref because:
	 * - We ONLY do async inode ref deletion for an inode that has only
	 *   one link (i_nlink == 1), which means there is only one inode ref.
	 *   In most cases the inode ref and the inode item are in the same
	 *   leaf, and we deal with them at the same time.
	 *   Since we are sure we will reserve space for the inode item, it
	 *   is unnecessary to reserve space for the inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   we don't need to worry about ENOSPC either, because we reserve
	 *   much more space for the inode update than it needs.
	 * - At worst, we can steal some space from the global reservation.
	 *   That is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

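/*
 * __btrfs_kill_delayed_node - discard all pending work on a delayed node
 *
 * Release the metadata reservation of every queued insertion and deletion
 * item and drop the items, then do the same for a pending inode ref
 * deletion and a dirty inode item if they are present.
 */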
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(fs_info, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(fs_info, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(fs_info, delayed_node);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

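/*
 * btrfs_kill_delayed_inode_items - drop all delayed items of one inode
 *
 * Look up the inode's delayed node and, if it exists, kill it and drop
 * the reference taken by the lookup.
 */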
void btrfs_kill_delayed_inode_items(struct btrfs_inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}

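/*
 * btrfs_kill_all_delayed_nodes - drop the delayed nodes of an entire root
 *
 * Walk the root's radix tree of delayed nodes in batches of up to eight,
 * grab a reference on each node that is still alive while holding
 * inode_lock, then kill and release the nodes outside the lock.
 */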
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;
		for (i = 0; i < n; i++) {
			/*
			 * Don't increase refs in case the node is dead and
			 * about to be removed from the tree in the loop below
			 */
			if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
				delayed_nodes[i] = NULL;
		}
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			if (!delayed_nodes[i])
				continue;
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}

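/*
 * btrfs_destroy_delayed_inodes - kill every delayed node in the filesystem
 *
 * Iterate over the global list of delayed nodes and kill them one by one,
 * releasing the reference that the iteration holds on each node.
 */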
void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info)
{
	struct btrfs_delayed_node *curr_node, *prev_node;

	curr_node = btrfs_first_delayed_node(fs_info->delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}
