/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
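
/*
 * Typical usage (sketch): instead of updating the extent allocation
 * tree inline, callers (e.g. in extent-tree.c) queue a delayed ref:
 *
 *	ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr, num_bytes,
 *					 parent, ref_root, level,
 *					 BTRFS_ADD_DELAYED_REF, NULL);
 *
 * The queued refs are then run in batches, e.g. at transaction commit,
 * via btrfs_run_delayed_refs().
 */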

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1, int type)
{
	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * insert a new ref into the head ref rbtree; returns the existing
 * entry if one is already queued for this bytenr, NULL otherwise
 */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->node.bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->node.bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->node.bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->node.bytenr)
			n = n->rb_left;
		else if (bytenr > entry->node.bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->node.bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}

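/*
 * Take a head's mutex while the caller holds delayed_refs->lock.  If
 * the mutex is contended we grab a reference on the head, drop the
 * spinlock, sleep on the mutex, then retake the spinlock.  Returns
 * -EAGAIN if the head was removed from the tree while we slept (the
 * caller must restart), or 0 with the mutex held.
 */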
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}

static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	if (btrfs_delayed_ref_is_head(ref)) {
		head = btrfs_delayed_node_to_head(ref);
		rb_erase(&head->href_node, &delayed_refs->href_root);
	} else {
		assert_spin_locked(&head->lock);
		list_del(&ref->list);
	}
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
}

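/*
 * Try to merge @ref with the other refs queued on @head: two refs can
 * be merged when they have the same type and point at the same root or
 * parent, and neither is pinned by an active tree mod log user (seq).
 * Matching refs get their ref_mod counts combined, and refs whose count
 * reaches zero are dropped.  Returns true once @ref itself has been
 * dropped or merged away.
 */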
static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	bool done = false;

	next = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
				list);
	while (!done && &next->list != &head->ref_list) {
		int mod;
		struct btrfs_delayed_ref_node *next2;

		next2 = list_next_entry(next, list);

		if (next == ref)
			goto next;

		if (seq && next->seq >= seq)
			goto next;

		if (next->type != ref->type)
			goto next;

		if ((ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
		     ref->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
		    comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref),
				   btrfs_delayed_node_to_tree_ref(next),
				   ref->type))
			goto next;
		if ((ref->type == BTRFS_EXTENT_DATA_REF_KEY ||
		     ref->type == BTRFS_SHARED_DATA_REF_KEY) &&
		    comp_data_refs(btrfs_delayed_node_to_data_ref(ref),
				   btrfs_delayed_node_to_data_ref(next)))
			goto next;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
next:
		next = next2;
	}

	return done;
}

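/*
 * Merge all mergeable refs queued on @head.  Refs with a seq at or
 * above the lowest active tree mod log sequence number are skipped,
 * since backref walkers may still need to see those operations
 * individually.
 */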
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_node *ref;
	u64 seq = 0;

	assert_spin_locked(&head->lock);

	if (list_empty(&head->ref_list))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	read_lock(&fs_info->tree_mod_log_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	read_unlock(&fs_info->tree_mod_log_lock);

	ref = list_first_entry(&head->ref_list, struct btrfs_delayed_ref_node,
			       list);
	while (&ref->list != &head->ref_list) {
		if (seq && ref->seq >= seq)
			goto next;

		if (merge_ref(trans, delayed_refs, head, ref, seq)) {
			if (list_empty(&head->ref_list))
				break;
			ref = list_first_entry(&head->ref_list,
					       struct btrfs_delayed_ref_node,
					       list);
			continue;
		}
next:
		ref = list_next_entry(ref, list);
	}
}

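/*
 * Returns 1 if a delayed ref with the given seq must be held back
 * because a tree mod log user registered at or before that sequence
 * number is still active, 0 if it is safe to process now.
 */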
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	read_lock(&fs_info->tree_mod_log_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
				 (u32)(seq >> 32), (u32)seq,
				 (u32)(elem->seq >> 32), (u32)elem->seq,
				 delayed_refs);
			ret = 1;
		}
	}

	read_unlock(&fs_info->tree_mod_log_lock);
	return ret;
}

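/*
 * Pick the next head to run: find the first head at or after
 * run_delayed_start, wrapping around to the start of the tree at most
 * once, and skip heads that another thread is already processing.  The
 * chosen head is marked as processing and run_delayed_start is advanced
 * past it.
 */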
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->node.bytenr +
		head->node.num_bytes;
	return head;
}

/*
 * Helper to insert the ref_node to the tail or merge with tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int
add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_root *root,
			   struct btrfs_delayed_ref_head *href,
			   struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	/* Check whether we can merge the tail node with ref */
	if (list_empty(&href->ref_list))
		goto add_tail;
	exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
			   list);
	/* No need to compare bytenr nor is_head */
	if (exist->type != ref->type || exist->seq != ref->seq)
		goto add_tail;

	if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
	     exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
	    comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
			   btrfs_delayed_node_to_tree_ref(ref),
			   ref->type))
		goto add_tail;
	if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
	     exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
	    comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
			   btrfs_delayed_node_to_data_ref(ref)))
		goto add_tail;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;

add_tail:
	list_add_tail(&ref->list, &href->ref_list);
	atomic_inc(&root->num_entries);
	spin_unlock(&href->lock);
	return ret;
}

/*
 * helper function to update the accounting in the head ref;
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;
	int old_ref_mod;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	spin_lock(&existing_ref->lock);
	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			btrfs_free_delayed_extent_op(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation,
	 * only need the lock for this case cause we could be processing it
	 * currently, for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing_ref->total_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing_ref->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing_ref->is_data) {
		if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}
	spin_unlock(&existing_ref->lock);
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_node *ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved,
		     int action, int is_data)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *qexisting;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	INIT_LIST_HEAD(&head_ref->ref_list);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	head_ref->qgroup_reserved = 0;
	head_ref->qgroup_ref_root = 0;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (ref_root && reserved) {
			head_ref->qgroup_ref_root = ref_root;
			head_ref->qgroup_reserved = reserved;
		}

		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;

		qexisting = btrfs_qgroup_insert_dirty_extent(delayed_refs,
							     qrecord);
		if (qexisting)
			kfree(qrecord);
	}

	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	trace_add_delayed_ref_head(ref, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		WARN_ON(ref_root && reserved && existing->qgroup_ref_root
			&& existing->qgroup_reserved);
		update_existing_head_ref(delayed_refs, &existing->node, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (is_data && count_mod < 0)
			delayed_refs->pending_csums += num_bytes;
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	return head_ref;
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action)
{
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);
	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	/*
	 * XXX: memory should be freed at the same level it was allocated,
	 * but this bad practice exists elsewhere in the code.  Follow it
	 * for now; needs cleanup.
	 */
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action)
{
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		goto free_ref;

	if (fs_info->quota_enabled && is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record)
			goto free_head_ref;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
					bytenr, num_bytes, 0, 0, action, 0);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action);
	spin_unlock(&delayed_refs->lock);

	return 0;

free_head_ref:
	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_ref:
	kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	return -ENOMEM;
}

/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, u64 reserved, int action,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (fs_info->quota_enabled && is_fstree(ref_root)) {
		record = kmalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node, record,
					bytenr, num_bytes, ref_root, reserved,
					action, 1);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action);
	spin_unlock(&delayed_refs->lock);

	return 0;
}

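/*
 * Attach qgroup reservation info to an already-queued delayed ref head.
 * Returns -ENOENT if no head is queued for @bytenr, so the matching
 * delayed ref must have been added first.
 */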
int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info,
				     struct btrfs_trans_handle *trans,
				     u64 ref_root, u64 bytenr, u64 num_bytes)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *ref_head;
	int ret = 0;

	if (!fs_info->quota_enabled || !is_fstree(ref_root))
		return 0;

	delayed_refs = &trans->transaction->delayed_refs;

	spin_lock(&delayed_refs->lock);
	ref_head = find_ref_head(&delayed_refs->href_root, bytenr, 0);
	if (!ref_head) {
		ret = -ENOENT;
		goto out;
	}
	WARN_ON(ref_head->qgroup_reserved || ref_head->qgroup_ref_root);
	ref_head->qgroup_ref_root = ref_root;
	ref_head->qgroup_reserved = num_bytes;
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}

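/*
 * Queue a head-only update that carries an extent_op (a deferred flags
 * or key update for the extent item) without changing the ref count,
 * using the BTRFS_UPDATE_DELAYED_HEAD action.
 */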
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, NULL, bytenr,
			     num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

void btrfs_delayed_ref_exit(void)
{
	if (btrfs_delayed_ref_head_cachep)
		kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	if (btrfs_delayed_tree_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	if (btrfs_delayed_data_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	if (btrfs_delayed_extent_op_cachep)
		kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}