/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"

/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * then by whether the entry is a head node, then by type and sequence
 * number, and finally by the per-type content of the delayed backref.
 * A head node sorts after the plain ref nodes of the same bytenr.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
		      struct btrfs_delayed_ref_node *ref1)
{
	if (ref1->bytenr < ref2->bytenr)
		return -1;
	if (ref1->bytenr > ref2->bytenr)
		return 1;
	if (ref1->is_head && ref2->is_head)
		return 0;
	if (ref2->is_head)
		return -1;
	if (ref1->is_head)
		return 1;
	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	/* merging of sequenced refs is not allowed */
	if (ref1->seq < ref2->seq)
		return -1;
	if (ref1->seq > ref2->seq)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
				      btrfs_delayed_node_to_tree_ref(ref1));
	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
				      btrfs_delayed_node_to_data_ref(ref1));
	}
	BUG();
	return 0;
}
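
/*
 * Illustrative sketch, not part of the original file: with equal bytenr
 * and type, two tree block refs compare by root objectid.  Note the
 * inverted argument order: comp_entry(x, y) returns the sign of y
 * relative to x.  The struct layout comes from delayed-ref.h.
 */
static __maybe_unused int comp_entry_example(void)
{
	struct btrfs_delayed_tree_ref a = {}, b = {};

	a.node.bytenr = b.node.bytenr = 4096;
	a.node.type = b.node.type = BTRFS_TREE_BLOCK_REF_KEY;
	a.root = 5;
	b.root = 7;
	/* negative: "a" (root 5) orders before "b" (root 7) */
	return comp_entry(&b.node, &a.node);
}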

/*
 * insert a new ref into the rbtree.  This returns the existing ref that
 * compares equal to the new node under comp_entry(), or NULL if the new
 * node was properly inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	struct btrfs_delayed_ref_node *ins;
	int cmp;

	ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, ins);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}
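
/*
 * Contract sketch (hypothetical caller, mirroring the add_* helpers
 * below): on a duplicate the caller merges into the returned node and
 * frees its own copy; tree_insert() never replaces an existing entry.
 */
static __maybe_unused void tree_insert_example(struct rb_root *root,
					       struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *existing;

	existing = tree_insert(root, &ref->rb_node);
	if (existing) {
		/* merge @ref into @existing here, then free @ref */
	}
}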

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no
 * exact match is found.
 */
static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
				  u64 bytenr,
				  struct btrfs_delayed_ref_node **last,
				  int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_node *entry;
	int cmp = 0;

again:
	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);
		if (last)
			*last = entry;

		if (bytenr < entry->bytenr)
			cmp = -1;
		else if (bytenr > entry->bytenr)
			cmp = 1;
		else if (!btrfs_delayed_ref_is_head(entry))
			cmp = 1;
		else
			cmp = 0;

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (cmp > 0) {
			n = rb_next(&entry->rb_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_node,
					 rb_node);
			bytenr = entry->bytenr;
			return_bigger = 0;
			goto again;
		}
		return entry;
	}
	return NULL;
}
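
/*
 * A minimal sketch (hypothetical helper, not in the original file):
 * find the first delayed ref head at or after @bytenr, relying on the
 * wrap-around behaviour of return_bigger.  This is the same walk
 * btrfs_find_ref_cluster() below does when building clusters.
 */
static __maybe_unused struct btrfs_delayed_ref_head *
example_next_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	struct btrfs_delayed_ref_node *ref = NULL;
	struct rb_node *node;

	find_ref_head(&delayed_refs->root, bytenr, &ref, 1);
	if (!ref)
		return NULL;		/* the tree is empty */
	for (node = &ref->rb_node; node; node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (btrfs_delayed_ref_is_head(ref))
			return btrfs_delayed_node_to_head(ref);
	}
	return NULL;
}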

int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}
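
/*
 * Usage sketch (hypothetical caller; the real one lives in
 * extent-tree.c): btrfs_delayed_ref_lock() can drop and retake
 * delayed_refs->lock while sleeping on the head mutex, so -EAGAIN
 * means the head was run and removed in the meantime and the caller
 * has to look up a new head.
 */
static __maybe_unused int example_lock_one_head(struct btrfs_trans_handle *trans,
						struct btrfs_delayed_ref_head *head)
{
	int ret;

	ret = btrfs_delayed_ref_lock(trans, head);
	if (ret == -EAGAIN)
		return ret;	/* head went away while we slept */

	/* ... process the refs behind the locked head ... */
	mutex_unlock(&head->mutex);
	return 0;
}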

int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;

	assert_spin_locked(&delayed_refs->lock);
	if (list_empty(&delayed_refs->seq_head))
		return 0;

	elem = list_first_entry(&delayed_refs->seq_head, struct seq_list, list);
	if (seq >= elem->seq) {
		pr_debug("holding back delayed_ref %llu, lowest is %llu (%p)\n",
			 seq, elem->seq, delayed_refs);
		return 1;
	}
	return 0;
}
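
/*
 * Pairing sketch, assuming the btrfs_get_delayed_seq()/
 * btrfs_put_delayed_seq() helpers from delayed-ref.h: a backref walker
 * pins the current sequence number on seq_head, and ref processing
 * uses a check like this to defer anything newer.
 */
static __maybe_unused int example_must_defer(struct btrfs_delayed_ref_root *delayed_refs,
					     struct btrfs_delayed_ref_node *ref)
{
	int ret;

	spin_lock(&delayed_refs->lock);
	/* nonzero: someone pinned an older seq, hold this ref back */
	ret = ref->seq && btrfs_check_delayed_seq(delayed_refs, ref->seq);
	spin_unlock(&delayed_refs->lock);
	return ret;
}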

int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
			   struct list_head *cluster, u64 start)
{
	int count = 0;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *head;

	delayed_refs = &trans->transaction->delayed_refs;
	if (start == 0) {
		node = rb_first(&delayed_refs->root);
	} else {
		ref = NULL;
		find_ref_head(&delayed_refs->root, start + 1, &ref, 1);
		if (ref)
			node = &ref->rb_node;
		else
			node = rb_first(&delayed_refs->root);
	}
again:
	while (node && count < 32) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (btrfs_delayed_ref_is_head(ref)) {
			head = btrfs_delayed_node_to_head(ref);
			if (list_empty(&head->cluster)) {
				list_add_tail(&head->cluster, cluster);
				delayed_refs->run_delayed_start =
					head->node.bytenr;
				count++;

				WARN_ON(delayed_refs->num_heads_ready == 0);
				delayed_refs->num_heads_ready--;
			} else if (count) {
				/* the goal of the clustering is to find extents
				 * that are likely to end up in the same extent
				 * leaf on disk.  So, we don't want them spread
				 * all over the tree.  Stop now if we've hit
				 * a head that was already in use
				 */
				break;
			}
		}
		node = rb_next(node);
	}
	if (count) {
		return 0;
	} else if (start) {
		/*
		 * we've gone to the end of the rbtree without finding any
		 * clusters.  start from the beginning and try again
		 */
		start = 0;
		node = rb_first(&delayed_refs->root);
		goto again;
	}
	return 1;
}
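
/*
 * Usage sketch (hypothetical; modeled on btrfs_run_delayed_refs() in
 * extent-tree.c): keep grabbing clusters of heads until the return
 * value of 1 says the tree has nothing more to offer.
 */
static __maybe_unused void example_run_all_clusters(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	LIST_HEAD(cluster);

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	while (!btrfs_find_ref_cluster(trans, &cluster,
				       delayed_refs->run_delayed_start)) {
		/*
		 * run the heads on @cluster here; each one is taken off
		 * the list as it is processed
		 */
	}
	spin_unlock(&delayed_refs->lock);
}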

/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	if (update->action != existing->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop.  We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0) {
			rb_erase(&existing->rb_node,
				 &delayed_refs->root);
			existing->in_tree = 0;
			btrfs_put_delayed_ref(existing);
			delayed_refs->num_entries--;
			if (trans->delayed_ref_updates)
				trans->delayed_ref_updates--;
		} else {
			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	} else {
		WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
			existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}
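
/*
 * Worked example: an ADD_DELAYED_REF (ref_mod 1) followed by a
 * DROP_DELAYED_REF for the same ref cancels out above; the second
 * update drives ref_mod to 0 and the node is erased without the
 * extent allocation tree ever seeing either operation.
 */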

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;
	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			kfree(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation
	 */
	existing->ref_mod += update->ref_mod;
}
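
/*
 * Worked example: three inserted refs and one dropped ref merged into
 * the same head leave existing->ref_mod at +2, the single net count
 * that head processing later applies to the extent item on disk.
 */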

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
					struct btrfs_trans_handle *trans,
					struct btrfs_delayed_ref_node *ref,
					u64 bytenr, u64 num_bytes,
					int action, int is_data)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;

	INIT_LIST_HEAD(&head_ref->cluster);
	mutex_init(&head_ref->mutex);

	trace_btrfs_delayed_ref_head(ref, head_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_head_ref(existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(head_ref);
	} else {
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}
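
/*
 * Example of the bookkeeping above: for one extent, ADD (+1) followed
 * by UPDATE (0) and DROP (-1) sums to a ref_mod of 0 on the head; the
 * head itself stays queued so a pending extent_op and the reserved
 * space accounting still get processed.
 */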

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
					 struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, int level, int action,
					 int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = inc_delayed_seq(delayed_refs);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_btrfs_delayed_tree_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(full_ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}
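
/*
 * Note: a nonzero @parent selects a shared backref keyed on the parent
 * block's bytenr, while a zero parent selects a keyed backref on
 * @ref_root.  The data ref helper below applies the same rule.
 */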

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
					 struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, u64 owner, u64 offset,
					 int action, int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = inc_delayed_seq(delayed_refs);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_btrfs_delayed_data_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(full_ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}
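
/*
 * For data refs, @owner is the objectid of the inode that references
 * the extent and @offset is the file offset it is referenced at,
 * mirroring the fields of the on-disk btrfs_extent_data_ref.
 */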

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, action, 0);

	add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action,
			     for_cow);
	if (!need_ref_seq(for_cow, ref_root) &&
	    waitqueue_active(&delayed_refs->seq_wait))
		wake_up(&delayed_refs->seq_wait);
	spin_unlock(&delayed_refs->lock);
	return 0;
}
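
/*
 * A minimal call sketch with hypothetical values: queue one more
 * backref for a tree block that @root now points to.  A parent of 0
 * asks for a keyed backref on the root's objectid rather than a
 * shared one.
 */
static __maybe_unused int example_ref_tree_block(struct btrfs_trans_handle *trans,
						 struct btrfs_root *root,
						 u64 bytenr, u64 num_bytes,
						 int level)
{
	return btrfs_add_delayed_tree_ref(root->fs_info, trans, bytenr,
					  num_bytes, 0,
					  root->root_key.objectid, level,
					  BTRFS_ADD_DELAYED_REF, NULL, 0);
}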

/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, action, 1);

	add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action, for_cow);
	if (!need_ref_seq(for_cow, ref_root) &&
	    waitqueue_active(&delayed_refs->seq_wait))
		wake_up(&delayed_refs->seq_wait);
	spin_unlock(&delayed_refs->lock);
	return 0;
}

int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data);

	if (waitqueue_active(&delayed_refs->seq_wait))
		wake_up(&delayed_refs->seq_wait);
	spin_unlock(&delayed_refs->lock);
	return 0;
}
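
/*
 * Typical use (see btrfs_set_disk_extent_flags() in extent-tree.c): a
 * caller that only needs to adjust an extent item's flags or key
 * queues a bare head with BTRFS_UPDATE_DELAYED_HEAD instead of adding
 * or dropping a real ref.
 */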

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if any was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	ref = find_ref_head(&delayed_refs->root, bytenr, NULL, 0);
	if (ref)
		return btrfs_delayed_node_to_head(ref);
	return NULL;
}
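
/*
 * Usage sketch (hypothetical caller): look up the head for an extent
 * and lock it, all under the delayed ref spinlock.  The caller must
 * mutex_unlock(&head->mutex) when it is done.
 */
static __maybe_unused struct btrfs_delayed_ref_head *
example_find_and_lock(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head && btrfs_delayed_ref_lock(trans, head))
		head = NULL;	/* raced with processing; head is gone */
	spin_unlock(&delayed_refs->lock);
	return head;
}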