/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		      const struct btrfs_key *ins_key, struct btrfs_path *path,
		      int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_fs_info *fs_info,
			  struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);
static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb);

struct btrfs_path *btrfs_alloc_path(void)
{
	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
}

/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}

/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
}
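
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller that may sleep while holding path locks brackets the sleeping
 * region with the two helpers above, assuming the path's locks were
 * taken as spinning locks by a prior search.
 */
static void __maybe_unused example_sleep_with_path(struct btrfs_path *p)
{
	/* convert every held spinning lock to a blocking lock ... */
	btrfs_set_path_blocking(p);

	/* ... so that it is now safe to schedule */
	cond_resched();

	/* retake spinning locks before continuing tree manipulation */
	btrfs_clear_path_blocking(p, NULL, 0);
}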

/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths where no locks or extent buffers are
 * held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}
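
/*
 * Illustrative sketch, not part of the original file: because
 * btrfs_root_node() only pins the buffer, a hypothetical lockless reader
 * must drop the reference itself and must not assume the buffer is still
 * the root afterwards.
 */
static void __maybe_unused example_peek_root_level(struct btrfs_root *root)
{
	struct extent_buffer *eb = btrfs_root_node(root);

	/* safe to read: the reference keeps the buffer alive */
	pr_debug("root level %d\n", btrfs_header_level(eb));

	/* balance the reference taken by btrfs_root_node() */
	free_extent_buffer(eb);
}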

/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* loop around taking references on and read locking the root node of the
 * tree until you end up with a read lock on the root.  A read locked
 * buffer is returned, with a reference held.
 */
static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* Cow-only roots (everything that is not a reference-counted COW
 * subvolume) just get put onto a simple dirty list.  transaction.c walks
 * this list to make sure they get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&fs_info->trans_lock);
}

/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
			&disk_key, level, buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer_fsid(cow, fs_info->fsid);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_move {
	int dst_slot;
	int nr_items;
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 logical;
	u64 seq;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct tree_mod_move move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};

/*
 * Pull a new tree mod seq number for our operation.
 */
static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}

/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set. So when a caller expects
 * to record tree modifications, it should make sure elem->seq is zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	write_lock(&fs_info->tree_mod_log_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	write_unlock(&fs_info->tree_mod_log_lock);

	return elem->seq;
}

void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	write_lock(&fs_info->tree_mod_log_lock);
	list_del(&elem->list);
	elem->seq = 0;

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if (cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				write_unlock(&fs_info->tree_mod_log_lock);
				return;
			}
			min_seq = cur_elem->seq;
		}
	}

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = rb_entry(node, struct tree_mod_elem, node);
		if (tm->seq >= min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	write_unlock(&fs_info->tree_mod_log_lock);
}
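
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * reader that wants the tree mod log to retain entries while it inspects
 * an old tree state brackets its work with the get/put pair above.
 */
static void __maybe_unused example_time_travel_read(struct btrfs_fs_info *fs_info)
{
	struct seq_list elem = SEQ_LIST_INIT(elem);
	u64 time_seq;

	/* blocks the log from being pruned past this sequence number */
	time_seq = btrfs_get_tree_mod_seq(fs_info, &elem);
	pr_debug("reading trees as of seq %llu\n", time_seq);

	/* ... walk trees with btrfs_search_old_slot(..., time_seq) ... */

	/* unblock the log; entries older than the minimum may be freed */
	btrfs_put_tree_mod_seq(fs_info, &elem);
}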

/*
 * key order of the log:
 *       node/leaf start address -> sequence
 *
 * The 'start address' is the logical address of the *new* root node
 * for root replace operations, or the logical address of the affected
 * block for all other operations.
 *
 * Note: must be called with write lock for fs_info::tree_mod_log_lock.
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	tm->seq = btrfs_inc_tree_mod_seq(fs_info);

	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = rb_entry(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->logical < tm->logical)
			new = &((*new)->rb_left);
		else if (cur->logical > tm->logical)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}

/*
 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
 * returns zero with the tree_mod_log_lock acquired. The caller must then hold
 * the lock until all tree mod log insertions are recorded in the rb tree and
 * then write unlock fs_info::tree_mod_log_lock.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb) {
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 1;
	if (eb && btrfs_header_level(eb) == 0)
		return 1;

	write_lock(&fs_info->tree_mod_log_lock);
	if (list_empty(&(fs_info)->tree_mod_seq_list)) {
		write_unlock(&fs_info->tree_mod_log_lock);
		return 1;
	}

	return 0;
}

/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 0;
	if (eb && btrfs_header_level(eb) == 0)
		return 0;

	return 1;
}

static struct tree_mod_elem *
alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
		    enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return NULL;

	tm->logical = eb->start;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);
	RB_CLEAR_NODE(&tm->node);

	return tm;
}

static noinline int
tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
			struct extent_buffer *eb, int slot,
			enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	if (!tree_mod_need_log(fs_info, eb))
		return 0;

	tm = alloc_tree_mod_elem(eb, slot, op, flags);
	if (!tm)
		return -ENOMEM;

	if (tree_mod_dont_log(fs_info, eb)) {
		kfree(tm);
		return 0;
	}

	ret = __tree_mod_log_insert(fs_info, tm);
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	if (ret)
		kfree(tm);

	return ret;
}

static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int dst_slot, int src_slot,
			 int nr_items)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int ret = 0;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, eb))
		return 0;

	tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm = kzalloc(sizeof(*tm), GFP_NOFS);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = eb->start;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
		    MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, eb))
		goto free_tms;
	locked = 1;

	/*
	 * When we overwrite something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
	 */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret)
			goto free_tms;
	}

	ret = __tree_mod_log_insert(fs_info, tm);
	if (ret)
		goto free_tms;
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return 0;
free_tms:
	for (i = 0; i < nr_items; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		write_unlock(&eb->fs_info->tree_mod_log_lock);
	kfree(tm_list);
	kfree(tm);

	return ret;
}

static inline int
__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
		       struct tree_mod_elem **tm_list,
		       int nritems)
{
	int i, j;
	int ret;

	for (i = nritems - 1; i >= 0; i--) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret) {
			for (j = nritems - 1; j > i; j--)
				rb_erase(&tm_list[j]->node,
					 &fs_info->tree_mod_log);
			return ret;
		}
	}

	return 0;
}

static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *old_root,
			 struct extent_buffer *new_root,
			 int log_removal)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int ret = 0;
	int i;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (log_removal && btrfs_header_level(old_root) > 0) {
		nritems = btrfs_header_nritems(old_root);
		tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
				  GFP_NOFS);
		if (!tm_list) {
			ret = -ENOMEM;
			goto free_tms;
		}
		for (i = 0; i < nritems; i++) {
			tm_list[i] = alloc_tree_mod_elem(old_root, i,
			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
			if (!tm_list[i]) {
				ret = -ENOMEM;
				goto free_tms;
			}
		}
	}

	tm = kzalloc(sizeof(*tm), GFP_NOFS);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = new_root->start;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;

	if (tm_list)
		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	if (!ret)
		ret = __tree_mod_log_insert(fs_info, tm);

	write_unlock(&fs_info->tree_mod_log_lock);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return ret;

free_tms:
	if (tm_list) {
		for (i = 0; i < nritems; i++)
			kfree(tm_list[i]);
		kfree(tm_list);
	}
	kfree(tm);

	return ret;
}

static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;

	read_lock(&fs_info->tree_mod_log_lock);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = rb_entry(node, struct tree_mod_elem, node);
		if (cur->logical < start) {
			node = node->rb_left;
		} else if (cur->logical > start) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	read_unlock(&fs_info->tree_mod_log_lock);

	return found;
}

/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}

static noinline int
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret = 0;
	struct tree_mod_elem **tm_list = NULL;
	struct tree_mod_elem **tm_list_add, **tm_list_rem;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return 0;

	tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
			  GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm_list_add = tm_list;
	tm_list_rem = tm_list + nr_items;
	for (i = 0; i < nr_items; i++) {
		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
		if (!tm_list_rem[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}

		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
		    MOD_LOG_KEY_ADD, GFP_NOFS);
		if (!tm_list_add[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;
	locked = 1;

	for (i = 0; i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
		if (ret)
			goto free_tms;
		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
		if (ret)
			goto free_tms;
	}

	write_unlock(&fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nr_items * 2; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		write_unlock(&fs_info->tree_mod_log_lock);
	kfree(tm_list);

	return ret;
}

static inline void
tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     int dst_offset, int src_offset, int nr_items)
{
	int ret;
	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
				       nr_items);
	BUG_ON(ret < 0);
}

static noinline void
tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb, int slot, int atomic)
{
	int ret;

	ret = tree_mod_log_insert_key(fs_info, eb, slot,
					MOD_LOG_KEY_REPLACE,
					atomic ? GFP_ATOMIC : GFP_NOFS);
	BUG_ON(ret < 0);
}

static noinline int
tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
{
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int i;
	int ret = 0;

	if (btrfs_header_level(eb) == 0)
		return 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	nritems = btrfs_header_nritems(eb);
	tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	for (i = 0; i < nritems; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i,
		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, eb))
		goto free_tms;

	ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	write_unlock(&eb->fs_info->tree_mod_log_lock);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nritems; i++)
		kfree(tm_list[i]);
	kfree(tm_list);

	return ret;
}

static noinline void
tree_mod_log_set_root_pointer(struct btrfs_root *root,
			      struct extent_buffer *new_root_node,
			      int log_removal)
{
	int ret;
	ret = tree_mod_log_insert_root(root->fs_info, root->node,
				       new_root_node, log_removal);
	BUG_ON(ret < 0);
}

/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}
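
/*
 * Illustrative example, not part of the original file, assuming a
 * reference-counted subvolume tree: if the last snapshot of the root was
 * taken at generation 120, a block written at generation 100 predates the
 * snapshot and may be shared with it, so the function above returns 1; a
 * block written at generation 130 is newer than every snapshot and,
 * unless it carries the RELOC header flag, cannot be shared, so it
 * returns 0.
 */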

static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, fs_info, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_handle_fs_error(fs_info, ret, NULL);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			if (ret)
				return ret;

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				if (ret)
					return ret;
				ret = btrfs_inc_ref(trans, root, cow, 1);
				if (ret)
					return ret;
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans, fs_info,
							  buf->start,
							  buf->len,
							  new_flags, level, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			if (ret)
				return ret;
			ret = btrfs_dec_ref(trans, root, buf, 1);
			if (ret)
				return ret;
		}
		clean_tree_block(fs_info, buf);
		*last_ref = 1;
	}
	return 0;
}

/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start = 0;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent)
		parent_start = parent->start;

	cow = btrfs_alloc_tree_block(trans, root, parent_start,
			root->root_key.objectid, &disk_key, level,
			search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer_full(cow, buf);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer_fsid(cow, fs_info->fsid);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;

		extent_buffer_get(cow);
		tree_mod_log_set_root_pointer(root, cow, 1);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(fs_info, parent, parent_slot,
					MOD_LOG_KEY_REPLACE, GFP_NOFS);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = tree_mod_log_free_eb(fs_info, buf);
			if (ret) {
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}

/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *
__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *eb_root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = eb_root->start;
	int looped = 0;

	if (!time_seq)
		return NULL;

	/*
	 * the very last operation that's logged for a root is the
	 * replacement operation (if it is replaced at all). this has
	 * the logical address of the *new* root, making it the very
	 * first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return NULL;
		/*
		 * if there are no tree operations for the oldest root, we
		 * simply return it. this should only happen if that (old)
		 * root is at level 0.
		 */
		if (!tm)
			break;

		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root. normally, we'll find a
		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}

/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewound (until we reach something older than
 * time_seq).
 */
static void
__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		      u64 time_seq, struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	read_lock(&fs_info->tree_mod_log_lock);
	while (tm && tm->seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			/* Fallthrough */
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is no longer a root. we simply ignore that
			 * operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = rb_entry(next, struct tree_mod_elem, node);
		if (tm->logical != first_tm->logical)
			break;
	}
	read_unlock(&fs_info->tree_mod_log_lock);
	btrfs_set_header_nritems(eb, n);
}

/*
 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
 * is returned. If rewind operations happen, a fresh buffer is returned. The
 * returned buffer is always read-locked. If the returned buffer is not the
 * input buffer, the lock on the input buffer is released and the input buffer
 * is freed (its refcount is decremented).
 */
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		    struct extent_buffer *eb, u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	btrfs_set_path_blocking(path);
	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
	}

	btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
	btrfs_tree_read_unlock_blocking(eb);
	free_extent_buffer(eb);

	extent_buffer_get(eb_rewin);
	btrfs_tree_read_lock(eb_rewin);
	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
	WARN_ON(btrfs_header_nritems(eb_rewin) >
		BTRFS_NODEPTRS_PER_BLOCK(fs_info));

	return eb_rewin;
}

/*
 * get_old_root() rewinds the state of @root's root node to the given @time_seq
 * value. If there are no changes, the current root->root_node is returned. If
 * anything changed in between, there's a fresh buffer allocated on which the
 * rewind operations are done. In any case, the returned buffer is read locked.
 * Returns NULL on error (with no locks held).
 */
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct tree_mod_elem *tm;
	struct extent_buffer *eb = NULL;
	struct extent_buffer *eb_root;
	u64 eb_root_owner = 0;
	struct extent_buffer *old;
	struct tree_mod_root *old_root = NULL;
	u64 old_generation = 0;
	u64 logical;

	eb_root = btrfs_read_lock_root_node(root);
	tm = __tree_mod_log_oldest_root(fs_info, eb_root, time_seq);
	if (!tm)
		return eb_root;

	if (tm->op == MOD_LOG_ROOT_REPLACE) {
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
	} else {
		logical = eb_root->start;
	}

	tm = tree_mod_log_search(fs_info, logical, time_seq);
	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		old = read_tree_block(fs_info, logical, 0);
		if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
			if (!IS_ERR(old))
				free_extent_buffer(old);
			btrfs_warn(fs_info,
				   "failed to read tree block %llu from get_old_root",
				   logical);
		} else {
			eb = btrfs_clone_extent_buffer(old);
			free_extent_buffer(old);
		}
	} else if (old_root) {
		eb_root_owner = btrfs_header_owner(eb_root);
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		eb = alloc_dummy_extent_buffer(fs_info, logical);
	} else {
		btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
		eb = btrfs_clone_extent_buffer(eb_root);
		btrfs_tree_read_unlock_blocking(eb_root);
		free_extent_buffer(eb_root);
	}

	if (!eb)
		return NULL;
	extent_buffer_get(eb);
	btrfs_tree_read_lock(eb);
	if (old_root) {
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, eb_root_owner);
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
	if (tm)
		__tree_mod_log_rewind(fs_info, eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(fs_info));

	return eb;
}

int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	int level;
	struct extent_buffer *eb_root = btrfs_root_node(root);

	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
	if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
		level = tm->old_root.level;
	} else {
		level = btrfs_header_level(eb_root);
	}
	free_extent_buffer(eb_root);

	return level;
}

static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_is_testing(root->fs_info))
		return 0;

	/* ensure we can see the force_cow */
	smp_rmb();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create a snapshot during committing the transaction,
	 *    after we've finished copying the src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}
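
/*
 * Illustrative example, not part of the original file: a block already
 * COWed earlier in the current transaction has
 * btrfs_header_generation(buf) == trans->transid and no WRITTEN flag yet,
 * so should_cow_block() returns 0 and btrfs_cow_block() below can reuse
 * it without copying it a second time.
 */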

/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	u64 search_start;
	int ret;

	if (trans->transaction != fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid,
		       fs_info->running_transaction->transid);

	if (trans->transid != fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid, fs_info->generation);

	if (!should_cow_block(trans, root, buf)) {
		trans->dirty = true;
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)SZ_1G - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}

/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
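
/*
 * Illustrative example, not part of the original file, assuming a 16KiB
 * nodesize: close_blocks(0, 16384, 16384) sees a gap of 0 bytes between
 * the end of the first block and the start of the other and returns 1,
 * while close_blocks(0, 65536, 16384) sees a 48KiB gap, which is not
 * under the 32768-byte threshold, and returns 0.
 */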

/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(const struct btrfs_disk_key *disk,
		     const struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
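
/*
 * Illustrative example, not part of the original file: keys order first
 * by objectid, then by type, then by offset, so
 *   {257, BTRFS_EXTENT_DATA_KEY, 0} < {257, BTRFS_EXTENT_DATA_KEY, 4096}
 * and any key with objectid 258 sorts after both, regardless of its type
 * or offset.
 */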

/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);

	WARN_ON(trans->transaction != fs_info->running_transaction);
	WARN_ON(trans->transid != fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = fs_info->nodesize;
	end_slot = parent_nritems - 1;

	if (parent_nritems <= 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i <= end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = find_extent_buffer(fs_info, blocknr);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (!cur) {
				cur = read_tree_block(fs_info, blocknr, gen);
				if (IS_ERR(cur)) {
					return PTR_ERR(cur);
				} else if (!extent_buffer_uptodate(cur)) {
					free_extent_buffer(cur);
					return -EIO;
				}
			} else if (!uptodate) {
				err = btrfs_read_buffer(cur, gen);
				if (err) {
					free_extent_buffer(cur);
					return err;
				}
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}
1706 
1707 /*
1708  * search for key in the extent_buffer.  The items start at offset p,
1709  * and they are item_size apart.  There are 'max' items in p.
1710  *
1711  * the slot in the array is returned via slot, and it points to
1712  * the place where you would insert key if it is not found in
1713  * the array.
1714  *
1715  * slot may point to max if the key is bigger than all of the keys
1716  */
1717 static noinline int generic_bin_search(struct extent_buffer *eb,
1718 				       unsigned long p, int item_size,
1719 				       const struct btrfs_key *key,
1720 				       int max, int *slot)
1721 {
1722 	int low = 0;
1723 	int high = max;
1724 	int mid;
1725 	int ret;
1726 	struct btrfs_disk_key *tmp = NULL;
1727 	struct btrfs_disk_key unaligned;
1728 	unsigned long offset;
1729 	char *kaddr = NULL;
1730 	unsigned long map_start = 0;
1731 	unsigned long map_len = 0;
1732 	int err;
1733 
1734 	if (low > high) {
1735 		btrfs_err(eb->fs_info,
1736 		 "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
1737 			  __func__, low, high, eb->start,
1738 			  btrfs_header_owner(eb), btrfs_header_level(eb));
1739 		return -EINVAL;
1740 	}
1741 
1742 	while (low < high) {
1743 		mid = (low + high) / 2;
1744 		offset = p + mid * item_size;
1745 
1746 		if (!kaddr || offset < map_start ||
1747 		    (offset + sizeof(struct btrfs_disk_key)) >
1748 		    map_start + map_len) {
1749 
1750 			err = map_private_extent_buffer(eb, offset,
1751 						sizeof(struct btrfs_disk_key),
1752 						&kaddr, &map_start, &map_len);
1753 
1754 			if (!err) {
1755 				tmp = (struct btrfs_disk_key *)(kaddr + offset -
1756 							map_start);
1757 			} else if (err == 1) {
1758 				read_extent_buffer(eb, &unaligned,
1759 						   offset, sizeof(unaligned));
1760 				tmp = &unaligned;
1761 			} else {
1762 				return err;
1763 			}
1764 
1765 		} else {
1766 			tmp = (struct btrfs_disk_key *)(kaddr + offset -
1767 							map_start);
1768 		}
1769 		ret = comp_keys(tmp, key);
1770 
1771 		if (ret < 0)
1772 			low = mid + 1;
1773 		else if (ret > 0)
1774 			high = mid;
1775 		else {
1776 			*slot = mid;
1777 			return 0;
1778 		}
1779 	}
1780 	*slot = low;
1781 	return 1;
1782 }
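
/*
 * Self-contained illustration of the search contract above (added for
 * clarity, not kernel code): 'low' stays at the first candidate that might
 * be >= key and 'high' one past the last, so on a miss 'low' lands on the
 * insertion point, exactly like *slot here.
 *
 *	static int bin_search_ints(const int *arr, int max, int key, int *slot)
 *	{
 *		int low = 0, high = max;
 *
 *		while (low < high) {
 *			int mid = (low + high) / 2;
 *
 *			if (arr[mid] < key)
 *				low = mid + 1;
 *			else if (arr[mid] > key)
 *				high = mid;
 *			else {
 *				*slot = mid;
 *				return 0;
 *			}
 *		}
 *		*slot = low;
 *		return 1;
 *	}
 */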
1783 
1784 /*
1785  * simple bin_search frontend that does the right thing for
1786  * leaves vs nodes
1787  */
1788 static int bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
1789 		      int level, int *slot)
1790 {
1791 	if (level == 0)
1792 		return generic_bin_search(eb,
1793 					  offsetof(struct btrfs_leaf, items),
1794 					  sizeof(struct btrfs_item),
1795 					  key, btrfs_header_nritems(eb),
1796 					  slot);
1797 	else
1798 		return generic_bin_search(eb,
1799 					  offsetof(struct btrfs_node, ptrs),
1800 					  sizeof(struct btrfs_key_ptr),
1801 					  key, btrfs_header_nritems(eb),
1802 					  slot);
1803 }
1804 
1805 int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
1806 		     int level, int *slot)
1807 {
1808 	return bin_search(eb, key, level, slot);
1809 }
1810 
1811 static void root_add_used(struct btrfs_root *root, u32 size)
1812 {
1813 	spin_lock(&root->accounting_lock);
1814 	btrfs_set_root_used(&root->root_item,
1815 			    btrfs_root_used(&root->root_item) + size);
1816 	spin_unlock(&root->accounting_lock);
1817 }
1818 
1819 static void root_sub_used(struct btrfs_root *root, u32 size)
1820 {
1821 	spin_lock(&root->accounting_lock);
1822 	btrfs_set_root_used(&root->root_item,
1823 			    btrfs_root_used(&root->root_item) - size);
1824 	spin_unlock(&root->accounting_lock);
1825 }
1826 
1827 /* given a node and slot number, this reads the block it points to.  The
1828  * extent buffer is returned with a reference taken (but unlocked).
1829  */
1830 static noinline struct extent_buffer *
1831 read_node_slot(struct btrfs_fs_info *fs_info, struct extent_buffer *parent,
1832 	       int slot)
1833 {
1834 	int level = btrfs_header_level(parent);
1835 	struct extent_buffer *eb;
1836 
1837 	if (slot < 0 || slot >= btrfs_header_nritems(parent))
1838 		return ERR_PTR(-ENOENT);
1839 
1840 	BUG_ON(level == 0);
1841 
1842 	eb = read_tree_block(fs_info, btrfs_node_blockptr(parent, slot),
1843 			     btrfs_node_ptr_generation(parent, slot));
1844 	if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
1845 		free_extent_buffer(eb);
1846 		eb = ERR_PTR(-EIO);
1847 	}
1848 
1849 	return eb;
1850 }
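
/*
 * Usage sketch (illustrative only): this helper returns an ERR_PTR()
 * encoded errno on failure and never NULL, so callers handle it like so:
 *
 *	eb = read_node_slot(fs_info, parent, slot);
 *	if (IS_ERR(eb))
 *		return PTR_ERR(eb);
 *	... use eb ...
 *	free_extent_buffer(eb);		// drop the reference taken for us
 */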
1851 
1852 /*
1853  * node level balancing, used to make sure nodes are in proper order for
1854  * item deletion.  We balance from the top down, so we have to make sure
1855  * that a deletion won't leave a node completely empty later on.
1856  */
1857 static noinline int balance_level(struct btrfs_trans_handle *trans,
1858 			 struct btrfs_root *root,
1859 			 struct btrfs_path *path, int level)
1860 {
1861 	struct btrfs_fs_info *fs_info = root->fs_info;
1862 	struct extent_buffer *right = NULL;
1863 	struct extent_buffer *mid;
1864 	struct extent_buffer *left = NULL;
1865 	struct extent_buffer *parent = NULL;
1866 	int ret = 0;
1867 	int wret;
1868 	int pslot;
1869 	int orig_slot = path->slots[level];
1870 	u64 orig_ptr;
1871 
1872 	if (level == 0)
1873 		return 0;
1874 
1875 	mid = path->nodes[level];
1876 
1877 	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1878 		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1879 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
1880 
1881 	orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1882 
1883 	if (level < BTRFS_MAX_LEVEL - 1) {
1884 		parent = path->nodes[level + 1];
1885 		pslot = path->slots[level + 1];
1886 	}
1887 
1888 	/*
1889 	 * deal with the case where there is only one pointer in the root
1890 	 * by promoting the node below to a root
1891 	 */
1892 	if (!parent) {
1893 		struct extent_buffer *child;
1894 
1895 		if (btrfs_header_nritems(mid) != 1)
1896 			return 0;
1897 
1898 		/* promote the child to a root */
1899 		child = read_node_slot(fs_info, mid, 0);
1900 		if (IS_ERR(child)) {
1901 			ret = PTR_ERR(child);
1902 			btrfs_handle_fs_error(fs_info, ret, NULL);
1903 			goto enospc;
1904 		}
1905 
1906 		btrfs_tree_lock(child);
1907 		btrfs_set_lock_blocking(child);
1908 		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1909 		if (ret) {
1910 			btrfs_tree_unlock(child);
1911 			free_extent_buffer(child);
1912 			goto enospc;
1913 		}
1914 
1915 		tree_mod_log_set_root_pointer(root, child, 1);
1916 		rcu_assign_pointer(root->node, child);
1917 
1918 		add_root_to_dirty_list(root);
1919 		btrfs_tree_unlock(child);
1920 
1921 		path->locks[level] = 0;
1922 		path->nodes[level] = NULL;
1923 		clean_tree_block(fs_info, mid);
1924 		btrfs_tree_unlock(mid);
1925 		/* once for the path */
1926 		free_extent_buffer(mid);
1927 
1928 		root_sub_used(root, mid->len);
1929 		btrfs_free_tree_block(trans, root, mid, 0, 1);
1930 		/* once for the root ptr */
1931 		free_extent_buffer_stale(mid);
1932 		return 0;
1933 	}
1934 	if (btrfs_header_nritems(mid) >
1935 	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 4)
1936 		return 0;
1937 
1938 	left = read_node_slot(fs_info, parent, pslot - 1);
1939 	if (IS_ERR(left))
1940 		left = NULL;
1941 
1942 	if (left) {
1943 		btrfs_tree_lock(left);
1944 		btrfs_set_lock_blocking(left);
1945 		wret = btrfs_cow_block(trans, root, left,
1946 				       parent, pslot - 1, &left);
1947 		if (wret) {
1948 			ret = wret;
1949 			goto enospc;
1950 		}
1951 	}
1952 
1953 	right = read_node_slot(fs_info, parent, pslot + 1);
1954 	if (IS_ERR(right))
1955 		right = NULL;
1956 
1957 	if (right) {
1958 		btrfs_tree_lock(right);
1959 		btrfs_set_lock_blocking(right);
1960 		wret = btrfs_cow_block(trans, root, right,
1961 				       parent, pslot + 1, &right);
1962 		if (wret) {
1963 			ret = wret;
1964 			goto enospc;
1965 		}
1966 	}
1967 
1968 	/* first, try to make some room in the middle buffer */
1969 	if (left) {
1970 		orig_slot += btrfs_header_nritems(left);
1971 		wret = push_node_left(trans, fs_info, left, mid, 1);
1972 		if (wret < 0)
1973 			ret = wret;
1974 	}
1975 
1976 	/*
1977 	 * then try to empty the right most buffer into the middle
1978 	 */
1979 	if (right) {
1980 		wret = push_node_left(trans, fs_info, mid, right, 1);
1981 		if (wret < 0 && wret != -ENOSPC)
1982 			ret = wret;
1983 		if (btrfs_header_nritems(right) == 0) {
1984 			clean_tree_block(fs_info, right);
1985 			btrfs_tree_unlock(right);
1986 			del_ptr(root, path, level + 1, pslot + 1);
1987 			root_sub_used(root, right->len);
1988 			btrfs_free_tree_block(trans, root, right, 0, 1);
1989 			free_extent_buffer_stale(right);
1990 			right = NULL;
1991 		} else {
1992 			struct btrfs_disk_key right_key;
1993 			btrfs_node_key(right, &right_key, 0);
1994 			tree_mod_log_set_node_key(fs_info, parent,
1995 						  pslot + 1, 0);
1996 			btrfs_set_node_key(parent, &right_key, pslot + 1);
1997 			btrfs_mark_buffer_dirty(parent);
1998 		}
1999 	}
2000 	if (btrfs_header_nritems(mid) == 1) {
2001 		/*
2002 		 * we're not allowed to leave a node with one item in the
2003 		 * tree during a delete.  A deletion from lower in the tree
2004 		 * could try to delete the only pointer in this node.
2005 		 * So, pull some keys from the left.
2006 		 * There has to be a left pointer at this point because
2007 		 * otherwise we would have pulled some pointers from the
2008 		 * right
2009 		 */
2010 		if (!left) {
2011 			ret = -EROFS;
2012 			btrfs_handle_fs_error(fs_info, ret, NULL);
2013 			goto enospc;
2014 		}
2015 		wret = balance_node_right(trans, fs_info, mid, left);
2016 		if (wret < 0) {
2017 			ret = wret;
2018 			goto enospc;
2019 		}
2020 		if (wret == 1) {
2021 			wret = push_node_left(trans, fs_info, left, mid, 1);
2022 			if (wret < 0)
2023 				ret = wret;
2024 		}
2025 		BUG_ON(wret == 1);
2026 	}
2027 	if (btrfs_header_nritems(mid) == 0) {
2028 		clean_tree_block(fs_info, mid);
2029 		btrfs_tree_unlock(mid);
2030 		del_ptr(root, path, level + 1, pslot);
2031 		root_sub_used(root, mid->len);
2032 		btrfs_free_tree_block(trans, root, mid, 0, 1);
2033 		free_extent_buffer_stale(mid);
2034 		mid = NULL;
2035 	} else {
2036 		/* update the parent key to reflect our changes */
2037 		struct btrfs_disk_key mid_key;
2038 		btrfs_node_key(mid, &mid_key, 0);
2039 		tree_mod_log_set_node_key(fs_info, parent, pslot, 0);
2040 		btrfs_set_node_key(parent, &mid_key, pslot);
2041 		btrfs_mark_buffer_dirty(parent);
2042 	}
2043 
2044 	/* update the path */
2045 	if (left) {
2046 		if (btrfs_header_nritems(left) > orig_slot) {
2047 			extent_buffer_get(left);
2048 			/* left was locked after cow */
2049 			path->nodes[level] = left;
2050 			path->slots[level + 1] -= 1;
2051 			path->slots[level] = orig_slot;
2052 			if (mid) {
2053 				btrfs_tree_unlock(mid);
2054 				free_extent_buffer(mid);
2055 			}
2056 		} else {
2057 			orig_slot -= btrfs_header_nritems(left);
2058 			path->slots[level] = orig_slot;
2059 		}
2060 	}
2061 	/* double check we haven't messed things up */
2062 	if (orig_ptr !=
2063 	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
2064 		BUG();
2065 enospc:
2066 	if (right) {
2067 		btrfs_tree_unlock(right);
2068 		free_extent_buffer(right);
2069 	}
2070 	if (left) {
2071 		if (path->nodes[level] != left)
2072 			btrfs_tree_unlock(left);
2073 		free_extent_buffer(left);
2074 	}
2075 	return ret;
2076 }
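
/*
 * Summary of balance_level() outcomes (descriptive note, added for
 * clarity): the mid node is either promoted into the root (single-pointer
 * root case), emptied into a sibling and deleted, or left in place with
 * its parent key fixed up.  In every case path->nodes[level] must still
 * lead to the original blockptr, which the BUG_ON above double checks.
 */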
2077 
2078 /* Node balancing for insertion.  Here we only split or push nodes around
2079  * when they are completely full.  This is also done top down, so we
2080  * have to be pessimistic.
2081  */
2082 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2083 					  struct btrfs_root *root,
2084 					  struct btrfs_path *path, int level)
2085 {
2086 	struct btrfs_fs_info *fs_info = root->fs_info;
2087 	struct extent_buffer *right = NULL;
2088 	struct extent_buffer *mid;
2089 	struct extent_buffer *left = NULL;
2090 	struct extent_buffer *parent = NULL;
2091 	int ret = 0;
2092 	int wret;
2093 	int pslot;
2094 	int orig_slot = path->slots[level];
2095 
2096 	if (level == 0)
2097 		return 1;
2098 
2099 	mid = path->nodes[level];
2100 	WARN_ON(btrfs_header_generation(mid) != trans->transid);
2101 
2102 	if (level < BTRFS_MAX_LEVEL - 1) {
2103 		parent = path->nodes[level + 1];
2104 		pslot = path->slots[level + 1];
2105 	}
2106 
2107 	if (!parent)
2108 		return 1;
2109 
2110 	left = read_node_slot(fs_info, parent, pslot - 1);
2111 	if (IS_ERR(left))
2112 		left = NULL;
2113 
2114 	/* first, try to make some room in the middle buffer */
2115 	if (left) {
2116 		u32 left_nr;
2117 
2118 		btrfs_tree_lock(left);
2119 		btrfs_set_lock_blocking(left);
2120 
2121 		left_nr = btrfs_header_nritems(left);
2122 		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2123 			wret = 1;
2124 		} else {
2125 			ret = btrfs_cow_block(trans, root, left, parent,
2126 					      pslot - 1, &left);
2127 			if (ret)
2128 				wret = 1;
2129 			else {
2130 				wret = push_node_left(trans, fs_info,
2131 						      left, mid, 0);
2132 			}
2133 		}
2134 		if (wret < 0)
2135 			ret = wret;
2136 		if (wret == 0) {
2137 			struct btrfs_disk_key disk_key;
2138 			orig_slot += left_nr;
2139 			btrfs_node_key(mid, &disk_key, 0);
2140 			tree_mod_log_set_node_key(fs_info, parent, pslot, 0);
2141 			btrfs_set_node_key(parent, &disk_key, pslot);
2142 			btrfs_mark_buffer_dirty(parent);
2143 			if (btrfs_header_nritems(left) > orig_slot) {
2144 				path->nodes[level] = left;
2145 				path->slots[level + 1] -= 1;
2146 				path->slots[level] = orig_slot;
2147 				btrfs_tree_unlock(mid);
2148 				free_extent_buffer(mid);
2149 			} else {
2150 				orig_slot -=
2151 					btrfs_header_nritems(left);
2152 				path->slots[level] = orig_slot;
2153 				btrfs_tree_unlock(left);
2154 				free_extent_buffer(left);
2155 			}
2156 			return 0;
2157 		}
2158 		btrfs_tree_unlock(left);
2159 		free_extent_buffer(left);
2160 	}
2161 	right = read_node_slot(fs_info, parent, pslot + 1);
2162 	if (IS_ERR(right))
2163 		right = NULL;
2164 
2165 	/*
2166 	 * then try to empty the right most buffer into the middle
2167 	 */
2168 	if (right) {
2169 		u32 right_nr;
2170 
2171 		btrfs_tree_lock(right);
2172 		btrfs_set_lock_blocking(right);
2173 
2174 		right_nr = btrfs_header_nritems(right);
2175 		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 1) {
2176 			wret = 1;
2177 		} else {
2178 			ret = btrfs_cow_block(trans, root, right,
2179 					      parent, pslot + 1,
2180 					      &right);
2181 			if (ret)
2182 				wret = 1;
2183 			else {
2184 				wret = balance_node_right(trans, fs_info,
2185 							  right, mid);
2186 			}
2187 		}
2188 		if (wret < 0)
2189 			ret = wret;
2190 		if (wret == 0) {
2191 			struct btrfs_disk_key disk_key;
2192 
2193 			btrfs_node_key(right, &disk_key, 0);
2194 			tree_mod_log_set_node_key(fs_info, parent,
2195 						  pslot + 1, 0);
2196 			btrfs_set_node_key(parent, &disk_key, pslot + 1);
2197 			btrfs_mark_buffer_dirty(parent);
2198 
2199 			if (btrfs_header_nritems(mid) <= orig_slot) {
2200 				path->nodes[level] = right;
2201 				path->slots[level + 1] += 1;
2202 				path->slots[level] = orig_slot -
2203 					btrfs_header_nritems(mid);
2204 				btrfs_tree_unlock(mid);
2205 				free_extent_buffer(mid);
2206 			} else {
2207 				btrfs_tree_unlock(right);
2208 				free_extent_buffer(right);
2209 			}
2210 			return 0;
2211 		}
2212 		btrfs_tree_unlock(right);
2213 		free_extent_buffer(right);
2214 	}
2215 	return 1;
2216 }
2217 
2218 /*
2219  * readahead one full node of leaves, finding things that are close
2220  * to the block in 'slot', and triggering readahead on them.
2221  */
2222 static void reada_for_search(struct btrfs_fs_info *fs_info,
2223 			     struct btrfs_path *path,
2224 			     int level, int slot, u64 objectid)
2225 {
2226 	struct extent_buffer *node;
2227 	struct btrfs_disk_key disk_key;
2228 	u32 nritems;
2229 	u64 search;
2230 	u64 target;
2231 	u64 nread = 0;
2232 	struct extent_buffer *eb;
2233 	u32 nr;
2234 	u32 blocksize;
2235 	u32 nscan = 0;
2236 
2237 	if (level != 1)
2238 		return;
2239 
2240 	if (!path->nodes[level])
2241 		return;
2242 
2243 	node = path->nodes[level];
2244 
2245 	search = btrfs_node_blockptr(node, slot);
2246 	blocksize = fs_info->nodesize;
2247 	eb = find_extent_buffer(fs_info, search);
2248 	if (eb) {
2249 		free_extent_buffer(eb);
2250 		return;
2251 	}
2252 
2253 	target = search;
2254 
2255 	nritems = btrfs_header_nritems(node);
2256 	nr = slot;
2257 
2258 	while (1) {
2259 		if (path->reada == READA_BACK) {
2260 			if (nr == 0)
2261 				break;
2262 			nr--;
2263 		} else if (path->reada == READA_FORWARD) {
2264 			nr++;
2265 			if (nr >= nritems)
2266 				break;
2267 		}
2268 		if (path->reada == READA_BACK && objectid) {
2269 			btrfs_node_key(node, &disk_key, nr);
2270 			if (btrfs_disk_key_objectid(&disk_key) != objectid)
2271 				break;
2272 		}
2273 		search = btrfs_node_blockptr(node, nr);
2274 		if ((search <= target && target - search <= 65536) ||
2275 		    (search > target && search - target <= 65536)) {
2276 			readahead_tree_block(fs_info, search);
2277 			nread += blocksize;
2278 		}
2279 		nscan++;
2280 		if ((nread > 65536 || nscan > 32))
2281 			break;
2282 	}
2283 }
2284 
2285 static noinline void reada_for_balance(struct btrfs_fs_info *fs_info,
2286 				       struct btrfs_path *path, int level)
2287 {
2288 	int slot;
2289 	int nritems;
2290 	struct extent_buffer *parent;
2291 	struct extent_buffer *eb;
2292 	u64 gen;
2293 	u64 block1 = 0;
2294 	u64 block2 = 0;
2295 
2296 	parent = path->nodes[level + 1];
2297 	if (!parent)
2298 		return;
2299 
2300 	nritems = btrfs_header_nritems(parent);
2301 	slot = path->slots[level + 1];
2302 
2303 	if (slot > 0) {
2304 		block1 = btrfs_node_blockptr(parent, slot - 1);
2305 		gen = btrfs_node_ptr_generation(parent, slot - 1);
2306 		eb = find_extent_buffer(fs_info, block1);
2307 		/*
2308 		 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2309 		 * don't want to return -EAGAIN here.  That would loop
2310 		 * forever.
2311 		 */
2312 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2313 			block1 = 0;
2314 		free_extent_buffer(eb);
2315 	}
2316 	if (slot + 1 < nritems) {
2317 		block2 = btrfs_node_blockptr(parent, slot + 1);
2318 		gen = btrfs_node_ptr_generation(parent, slot + 1);
2319 		eb = find_extent_buffer(fs_info, block2);
2320 		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2321 			block2 = 0;
2322 		free_extent_buffer(eb);
2323 	}
2324 
2325 	if (block1)
2326 		readahead_tree_block(fs_info, block1);
2327 	if (block2)
2328 		readahead_tree_block(fs_info, block2);
2329 }
2330 
2331 
2332 /*
2333  * when we walk down the tree, it is usually safe to unlock the higher layers
2334  * in the tree.  The exceptions are when our path goes through slot 0, because
2335  * operations on the tree might require changing key pointers higher up in the
2336  * tree.
2337  *
2338  * callers might also have set path->keep_locks, which tells this code to keep
2339  * the lock if the path points to the last slot in the block.  This is part of
2340  * walking through the tree, and selecting the next slot in the higher block.
2341  *
2342  * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  So
2343  * if lowest_unlock is 1, level 0 won't be unlocked.
2344  */
2345 static noinline void unlock_up(struct btrfs_path *path, int level,
2346 			       int lowest_unlock, int min_write_lock_level,
2347 			       int *write_lock_level)
2348 {
2349 	int i;
2350 	int skip_level = level;
2351 	int no_skips = 0;
2352 	struct extent_buffer *t;
2353 
2354 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2355 		if (!path->nodes[i])
2356 			break;
2357 		if (!path->locks[i])
2358 			break;
2359 		if (!no_skips && path->slots[i] == 0) {
2360 			skip_level = i + 1;
2361 			continue;
2362 		}
2363 		if (!no_skips && path->keep_locks) {
2364 			u32 nritems;
2365 			t = path->nodes[i];
2366 			nritems = btrfs_header_nritems(t);
2367 			if (nritems < 1 || path->slots[i] >= nritems - 1) {
2368 				skip_level = i + 1;
2369 				continue;
2370 			}
2371 		}
2372 		if (skip_level < i && i >= lowest_unlock)
2373 			no_skips = 1;
2374 
2375 		t = path->nodes[i];
2376 		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2377 			btrfs_tree_unlock_rw(t, path->locks[i]);
2378 			path->locks[i] = 0;
2379 			if (write_lock_level &&
2380 			    i > min_write_lock_level &&
2381 			    i <= *write_lock_level) {
2382 				*write_lock_level = i - 1;
2383 			}
2384 		}
2385 	}
2386 }
2387 
2388 /*
2389  * This releases any locks held in the path starting at level and
2390  * going all the way up to the root.
2391  *
2392  * btrfs_search_slot will keep the lock held on higher nodes in a few
2393  * corner cases, such as COW of the block at slot zero in the node.  This
2394  * ignores those rules, and it should only be called when there are no
2395  * more updates to be done higher up in the tree.
2396  */
2397 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2398 {
2399 	int i;
2400 
2401 	if (path->keep_locks)
2402 		return;
2403 
2404 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2405 		if (!path->nodes[i])
2406 			continue;
2407 		if (!path->locks[i])
2408 			continue;
2409 		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2410 		path->locks[i] = 0;
2411 	}
2412 }
2413 
2414 /*
2415  * helper function for btrfs_search_slot.  The goal is to find a block
2416  * in cache without setting the path to blocking.  If we find the block
2417  * we return zero and the path is unchanged.
2418  *
2419  * If we can't find the block, we set the path blocking and do some
2420  * reada.  -EAGAIN is returned and the search must be repeated.
2421  */
2422 static int
2423 read_block_for_search(struct btrfs_root *root, struct btrfs_path *p,
2424 		      struct extent_buffer **eb_ret, int level, int slot,
2425 		      const struct btrfs_key *key)
2426 {
2427 	struct btrfs_fs_info *fs_info = root->fs_info;
2428 	u64 blocknr;
2429 	u64 gen;
2430 	struct extent_buffer *b = *eb_ret;
2431 	struct extent_buffer *tmp;
2432 	int ret;
2433 
2434 	blocknr = btrfs_node_blockptr(b, slot);
2435 	gen = btrfs_node_ptr_generation(b, slot);
2436 
2437 	tmp = find_extent_buffer(fs_info, blocknr);
2438 	if (tmp) {
2439 		/* first we do an atomic uptodate check */
2440 		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2441 			*eb_ret = tmp;
2442 			return 0;
2443 		}
2444 
2445 		/* the pages were up to date, but we failed
2446 		 * the generation number check.  Do a full
2447 		 * read for the generation number that is correct.
2448 		 * We must do this without dropping locks so
2449 		 * we can trust our generation number
2450 		 */
2451 		btrfs_set_path_blocking(p);
2452 
2453 		/* now we're allowed to do a blocking uptodate check */
2454 		ret = btrfs_read_buffer(tmp, gen);
2455 		if (!ret) {
2456 			*eb_ret = tmp;
2457 			return 0;
2458 		}
2459 		free_extent_buffer(tmp);
2460 		btrfs_release_path(p);
2461 		return -EIO;
2462 	}
2463 
2464 	/*
2465 	 * reduce lock contention at high levels
2466 	 * of the btree by dropping locks before
2467 	 * we read.  Don't release the lock on the current
2468 	 * level because we need to walk this node to figure
2469 	 * out which blocks to read.
2470 	 */
2471 	btrfs_unlock_up_safe(p, level + 1);
2472 	btrfs_set_path_blocking(p);
2473 
2474 	free_extent_buffer(tmp);
2475 	if (p->reada != READA_NONE)
2476 		reada_for_search(fs_info, p, level, slot, key->objectid);
2477 
2478 	ret = -EAGAIN;
2479 	tmp = read_tree_block(fs_info, blocknr, gen);
2480 	if (!IS_ERR(tmp)) {
2481 		/*
2482 		 * If the read above didn't mark this buffer up to date,
2483 		 * it will never end up being up to date.  Set ret to EIO now
2484 		 * and give up so that our caller doesn't loop forever
2485 		 * on our EAGAINs.
2486 		 */
2487 		if (!btrfs_buffer_uptodate(tmp, 0, 0))
2488 			ret = -EIO;
2489 		free_extent_buffer(tmp);
2490 	} else {
2491 		ret = PTR_ERR(tmp);
2492 	}
2493 
2494 	btrfs_release_path(p);
2495 	return ret;
2496 }
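
/*
 * The -EAGAIN contract above means callers must restart from the root,
 * since the path has been released.  A minimal sketch of the expected
 * caller loop (illustrative only; btrfs_search_slot below does exactly
 * this):
 *
 *	again:
 *		... walk down to 'level', 'slot' ...
 *		err = read_block_for_search(root, p, &b, level, slot, key);
 *		if (err == -EAGAIN)
 *			goto again;
 *		if (err)
 *			return err;
 */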
2497 
2498 /*
2499  * helper function for btrfs_search_slot.  This does all of the checks
2500  * for node-level blocks and does any balancing required based on
2501  * the ins_len.
2502  *
2503  * If no extra work was required, zero is returned.  If we had to
2504  * drop the path, -EAGAIN is returned and btrfs_search_slot must
2505  * start over
2506  */
2507 static int
2508 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2509 		       struct btrfs_root *root, struct btrfs_path *p,
2510 		       struct extent_buffer *b, int level, int ins_len,
2511 		       int *write_lock_level)
2512 {
2513 	struct btrfs_fs_info *fs_info = root->fs_info;
2514 	int ret;
2515 
2516 	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2517 	    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3) {
2518 		int sret;
2519 
2520 		if (*write_lock_level < level + 1) {
2521 			*write_lock_level = level + 1;
2522 			btrfs_release_path(p);
2523 			goto again;
2524 		}
2525 
2526 		btrfs_set_path_blocking(p);
2527 		reada_for_balance(fs_info, p, level);
2528 		sret = split_node(trans, root, p, level);
2529 		btrfs_clear_path_blocking(p, NULL, 0);
2530 
2531 		BUG_ON(sret > 0);
2532 		if (sret) {
2533 			ret = sret;
2534 			goto done;
2535 		}
2536 		b = p->nodes[level];
2537 	} else if (ins_len < 0 && btrfs_header_nritems(b) <
2538 		   BTRFS_NODEPTRS_PER_BLOCK(fs_info) / 2) {
2539 		int sret;
2540 
2541 		if (*write_lock_level < level + 1) {
2542 			*write_lock_level = level + 1;
2543 			btrfs_release_path(p);
2544 			goto again;
2545 		}
2546 
2547 		btrfs_set_path_blocking(p);
2548 		reada_for_balance(fs_info, p, level);
2549 		sret = balance_level(trans, root, p, level);
2550 		btrfs_clear_path_blocking(p, NULL, 0);
2551 
2552 		if (sret) {
2553 			ret = sret;
2554 			goto done;
2555 		}
2556 		b = p->nodes[level];
2557 		if (!b) {
2558 			btrfs_release_path(p);
2559 			goto again;
2560 		}
2561 		BUG_ON(btrfs_header_nritems(b) == 1);
2562 	}
2563 	return 0;
2564 
2565 again:
2566 	ret = -EAGAIN;
2567 done:
2568 	return ret;
2569 }
2570 
2571 static void key_search_validate(struct extent_buffer *b,
2572 				const struct btrfs_key *key,
2573 				int level)
2574 {
2575 #ifdef CONFIG_BTRFS_ASSERT
2576 	struct btrfs_disk_key disk_key;
2577 
2578 	btrfs_cpu_key_to_disk(&disk_key, key);
2579 
2580 	if (level == 0)
2581 		ASSERT(!memcmp_extent_buffer(b, &disk_key,
2582 		    offsetof(struct btrfs_leaf, items[0].key),
2583 		    sizeof(disk_key)));
2584 	else
2585 		ASSERT(!memcmp_extent_buffer(b, &disk_key,
2586 		    offsetof(struct btrfs_node, ptrs[0].key),
2587 		    sizeof(disk_key)));
2588 #endif
2589 }
2590 
2591 static int key_search(struct extent_buffer *b, const struct btrfs_key *key,
2592 		      int level, int *prev_cmp, int *slot)
2593 {
2594 	if (*prev_cmp != 0) {
2595 		*prev_cmp = bin_search(b, key, level, slot);
2596 		return *prev_cmp;
2597 	}
2598 
2599 	key_search_validate(b, key, level);
2600 	*slot = 0;
2601 
2602 	return 0;
2603 }
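
/*
 * Descriptive note (added for clarity): *prev_cmp caches the comparison
 * result from the level above.  Once a level matched exactly
 * (*prev_cmp == 0), the searched key must be the leftmost key of every
 * block we descend into, so slot 0 can be taken without re-running the
 * binary search; key_search_validate() asserts this when
 * CONFIG_BTRFS_ASSERT is set.
 */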
2604 
2605 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2606 		u64 iobjectid, u64 ioff, u8 key_type,
2607 		struct btrfs_key *found_key)
2608 {
2609 	int ret;
2610 	struct btrfs_key key;
2611 	struct extent_buffer *eb;
2612 
2613 	ASSERT(path);
2614 	ASSERT(found_key);
2615 
2616 	key.type = key_type;
2617 	key.objectid = iobjectid;
2618 	key.offset = ioff;
2619 
2620 	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2621 	if (ret < 0)
2622 		return ret;
2623 
2624 	eb = path->nodes[0];
2625 	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2626 		ret = btrfs_next_leaf(fs_root, path);
2627 		if (ret)
2628 			return ret;
2629 		eb = path->nodes[0];
2630 	}
2631 
2632 	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2633 	if (found_key->type != key.type ||
2634 			found_key->objectid != key.objectid)
2635 		return 1;
2636 
2637 	return 0;
2638 }
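
/*
 * Example use (a hedged sketch; the key type and values are hypothetical):
 * look up the first item of a given type for an inode and read back the
 * exact key that was found.
 *
 *	struct btrfs_key found_key;
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	int ret;
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_find_item(fs_root, path, ino, 0, BTRFS_INODE_REF_KEY,
 *			      &found_key);
 *	// ret == 0: found_key and path point at a matching item
 *	// ret == 1: no item with that objectid and type exists
 *	btrfs_free_path(path);
 */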
2639 
2640 /*
2641  * look for key in the tree.  path is filled in with nodes along the way.
2642  * if key is found, we return zero and you can find the item in the leaf
2643  * level of the path (level 0).
2644  *
2645  * If the key isn't found, the path points to the slot where it should
2646  * be inserted, and 1 is returned.  If there are other errors during the
2647  * search a negative error number is returned.
2648  *
2649  * if ins_len > 0, nodes and leaves will be split as we walk down the
2650  * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
2651  * possible)
2652  */
2653 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2654 		      const struct btrfs_key *key, struct btrfs_path *p,
2655 		      int ins_len, int cow)
2656 {
2657 	struct btrfs_fs_info *fs_info = root->fs_info;
2658 	struct extent_buffer *b;
2659 	int slot;
2660 	int ret;
2661 	int err;
2662 	int level;
2663 	int lowest_unlock = 1;
2664 	int root_lock;
2665 	/* everything at write_lock_level or lower must be write locked */
2666 	int write_lock_level = 0;
2667 	u8 lowest_level = 0;
2668 	int min_write_lock_level;
2669 	int prev_cmp;
2670 
2671 	lowest_level = p->lowest_level;
2672 	WARN_ON(lowest_level && ins_len > 0);
2673 	WARN_ON(p->nodes[0] != NULL);
2674 	BUG_ON(!cow && ins_len);
2675 
2676 	if (ins_len < 0) {
2677 		lowest_unlock = 2;
2678 
2679 		/* when we are removing items, we might have to go up to level
2680 		 * two as we update tree pointers.  Make sure we keep write
2681 		 * locks for those levels as well.
2682 		 */
2683 		write_lock_level = 2;
2684 	} else if (ins_len > 0) {
2685 		/*
2686 		 * for inserting items, make sure we have a write lock on
2687 		 * level 1 so we can update keys
2688 		 */
2689 		write_lock_level = 1;
2690 	}
2691 
2692 	if (!cow)
2693 		write_lock_level = -1;
2694 
2695 	if (cow && (p->keep_locks || p->lowest_level))
2696 		write_lock_level = BTRFS_MAX_LEVEL;
2697 
2698 	min_write_lock_level = write_lock_level;
2699 
2700 again:
2701 	prev_cmp = -1;
2702 	/*
2703 	 * we try very hard to do read locks on the root
2704 	 */
2705 	root_lock = BTRFS_READ_LOCK;
2706 	level = 0;
2707 	if (p->search_commit_root) {
2708 		/*
2709 		 * the commit roots are read only
2710 		 * so we always do read locks
2711 		 */
2712 		if (p->need_commit_sem)
2713 			down_read(&fs_info->commit_root_sem);
2714 		b = root->commit_root;
2715 		extent_buffer_get(b);
2716 		level = btrfs_header_level(b);
2717 		if (p->need_commit_sem)
2718 			up_read(&fs_info->commit_root_sem);
2719 		if (!p->skip_locking)
2720 			btrfs_tree_read_lock(b);
2721 	} else {
2722 		if (p->skip_locking) {
2723 			b = btrfs_root_node(root);
2724 			level = btrfs_header_level(b);
2725 		} else {
2726 			/* we don't know the level of the root node
2727 			 * until we actually have it read locked
2728 			 */
2729 			b = btrfs_read_lock_root_node(root);
2730 			level = btrfs_header_level(b);
2731 			if (level <= write_lock_level) {
2732 				/* whoops, must trade for write lock */
2733 				btrfs_tree_read_unlock(b);
2734 				free_extent_buffer(b);
2735 				b = btrfs_lock_root_node(root);
2736 				root_lock = BTRFS_WRITE_LOCK;
2737 
2738 				/* the level might have changed, check again */
2739 				level = btrfs_header_level(b);
2740 			}
2741 		}
2742 	}
2743 	p->nodes[level] = b;
2744 	if (!p->skip_locking)
2745 		p->locks[level] = root_lock;
2746 
2747 	while (b) {
2748 		level = btrfs_header_level(b);
2749 
2750 		/*
2751 		 * setup the path here so we can release it under lock
2752 		 * contention with the cow code
2753 		 */
2754 		if (cow) {
2755 			bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
2756 
2757 			/*
2758 			 * if we don't really need to cow this block
2759 			 * then we don't want to set the path blocking,
2760 			 * so we test it here
2761 			 */
2762 			if (!should_cow_block(trans, root, b)) {
2763 				trans->dirty = true;
2764 				goto cow_done;
2765 			}
2766 
2767 			/*
2768 			 * must have write locks on this node and the
2769 			 * parent
2770 			 */
2771 			if (level > write_lock_level ||
2772 			    (level + 1 > write_lock_level &&
2773 			    level + 1 < BTRFS_MAX_LEVEL &&
2774 			    p->nodes[level + 1])) {
2775 				write_lock_level = level + 1;
2776 				btrfs_release_path(p);
2777 				goto again;
2778 			}
2779 
2780 			btrfs_set_path_blocking(p);
2781 			if (last_level)
2782 				err = btrfs_cow_block(trans, root, b, NULL, 0,
2783 						      &b);
2784 			else
2785 				err = btrfs_cow_block(trans, root, b,
2786 						      p->nodes[level + 1],
2787 						      p->slots[level + 1], &b);
2788 			if (err) {
2789 				ret = err;
2790 				goto done;
2791 			}
2792 		}
2793 cow_done:
2794 		p->nodes[level] = b;
2795 		btrfs_clear_path_blocking(p, NULL, 0);
2796 
2797 		/*
2798 		 * we have a lock on b and as long as we aren't changing
2799 		 * the tree, there is no way for the items in b to change.
2800 		 * It is safe to drop the lock on our parent before we
2801 		 * go through the expensive btree search on b.
2802 		 *
2803 		 * If we're inserting or deleting (ins_len != 0), then we might
2804 		 * be changing slot zero, which may require changing the parent.
2805 		 * So, we can't drop the lock until after we know which slot
2806 		 * we're operating on.
2807 		 */
2808 		if (!ins_len && !p->keep_locks) {
2809 			int u = level + 1;
2810 
2811 			if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2812 				btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2813 				p->locks[u] = 0;
2814 			}
2815 		}
2816 
2817 		ret = key_search(b, key, level, &prev_cmp, &slot);
2818 		if (ret < 0)
2819 			goto done;
2820 
2821 		if (level != 0) {
2822 			int dec = 0;
2823 			if (ret && slot > 0) {
2824 				dec = 1;
2825 				slot -= 1;
2826 			}
2827 			p->slots[level] = slot;
2828 			err = setup_nodes_for_search(trans, root, p, b, level,
2829 					     ins_len, &write_lock_level);
2830 			if (err == -EAGAIN)
2831 				goto again;
2832 			if (err) {
2833 				ret = err;
2834 				goto done;
2835 			}
2836 			b = p->nodes[level];
2837 			slot = p->slots[level];
2838 
2839 			/*
2840 			 * slot 0 is special, if we change the key
2841 			 * we have to update the parent pointer
2842 			 * which means we must have a write lock
2843 			 * on the parent
2844 			 */
2845 			if (slot == 0 && ins_len &&
2846 			    write_lock_level < level + 1) {
2847 				write_lock_level = level + 1;
2848 				btrfs_release_path(p);
2849 				goto again;
2850 			}
2851 
2852 			unlock_up(p, level, lowest_unlock,
2853 				  min_write_lock_level, &write_lock_level);
2854 
2855 			if (level == lowest_level) {
2856 				if (dec)
2857 					p->slots[level]++;
2858 				goto done;
2859 			}
2860 
2861 			err = read_block_for_search(root, p, &b, level,
2862 						    slot, key);
2863 			if (err == -EAGAIN)
2864 				goto again;
2865 			if (err) {
2866 				ret = err;
2867 				goto done;
2868 			}
2869 
2870 			if (!p->skip_locking) {
2871 				level = btrfs_header_level(b);
2872 				if (level <= write_lock_level) {
2873 					err = btrfs_try_tree_write_lock(b);
2874 					if (!err) {
2875 						btrfs_set_path_blocking(p);
2876 						btrfs_tree_lock(b);
2877 						btrfs_clear_path_blocking(p, b,
2878 								  BTRFS_WRITE_LOCK);
2879 					}
2880 					p->locks[level] = BTRFS_WRITE_LOCK;
2881 				} else {
2882 					err = btrfs_tree_read_lock_atomic(b);
2883 					if (!err) {
2884 						btrfs_set_path_blocking(p);
2885 						btrfs_tree_read_lock(b);
2886 						btrfs_clear_path_blocking(p, b,
2887 								  BTRFS_READ_LOCK);
2888 					}
2889 					p->locks[level] = BTRFS_READ_LOCK;
2890 				}
2891 				p->nodes[level] = b;
2892 			}
2893 		} else {
2894 			p->slots[level] = slot;
2895 			if (ins_len > 0 &&
2896 			    btrfs_leaf_free_space(fs_info, b) < ins_len) {
2897 				if (write_lock_level < 1) {
2898 					write_lock_level = 1;
2899 					btrfs_release_path(p);
2900 					goto again;
2901 				}
2902 
2903 				btrfs_set_path_blocking(p);
2904 				err = split_leaf(trans, root, key,
2905 						 p, ins_len, ret == 0);
2906 				btrfs_clear_path_blocking(p, NULL, 0);
2907 
2908 				BUG_ON(err > 0);
2909 				if (err) {
2910 					ret = err;
2911 					goto done;
2912 				}
2913 			}
2914 			if (!p->search_for_split)
2915 				unlock_up(p, level, lowest_unlock,
2916 					  min_write_lock_level, &write_lock_level);
2917 			goto done;
2918 		}
2919 	}
2920 	ret = 1;
2921 done:
2922 	/*
2923 	 * we don't really know what they plan on doing with the path
2924 	 * from here on, so for now just mark it as blocking
2925 	 */
2926 	if (!p->leave_spinning)
2927 		btrfs_set_path_blocking(p);
2928 	if (ret < 0 && !p->skip_release_on_error)
2929 		btrfs_release_path(p);
2930 	return ret;
2931 }
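
/*
 * Canonical calling pattern (illustrative sketch; the item type shown is
 * an assumption, not something this function requires):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		goto out;		// hard error
 *	if (ret == 0) {
 *		struct btrfs_inode_item *ii;
 *
 *		ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
 *				    struct btrfs_inode_item);
 *		// read or modify the item here
 *	}
 *	// ret == 1: key not found, path points at the insert position
 * out:
 *	btrfs_free_path(path);
 */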
2932 
2933 /*
2934  * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2935  * current state of the tree together with the operations recorded in the tree
2936  * modification log to search for the key in a previous version of this tree, as
2937  * denoted by the time_seq parameter.
2938  *
2939  * Naturally, there is no support for insert, delete or cow operations.
2940  *
2941  * The resulting path and return value will be set up as if we called
2942  * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2943  */
2944 int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
2945 			  struct btrfs_path *p, u64 time_seq)
2946 {
2947 	struct btrfs_fs_info *fs_info = root->fs_info;
2948 	struct extent_buffer *b;
2949 	int slot;
2950 	int ret;
2951 	int err;
2952 	int level;
2953 	int lowest_unlock = 1;
2954 	u8 lowest_level = 0;
2955 	int prev_cmp = -1;
2956 
2957 	lowest_level = p->lowest_level;
2958 	WARN_ON(p->nodes[0] != NULL);
2959 
2960 	if (p->search_commit_root) {
2961 		BUG_ON(time_seq);
2962 		return btrfs_search_slot(NULL, root, key, p, 0, 0);
2963 	}
2964 
2965 again:
2966 	b = get_old_root(root, time_seq);
2967 	if (!b) {
2968 		ret = -EIO;
2969 		goto done;
2970 	}
2971 	level = btrfs_header_level(b);
2972 	p->locks[level] = BTRFS_READ_LOCK;
2973 
2974 	while (b) {
2975 		level = btrfs_header_level(b);
2976 		p->nodes[level] = b;
2977 		btrfs_clear_path_blocking(p, NULL, 0);
2978 
2979 		/*
2980 		 * we have a lock on b and as long as we aren't changing
2981 		 * the tree, there is no way for the items in b to change.
2982 		 * It is safe to drop the lock on our parent before we
2983 		 * go through the expensive btree search on b.
2984 		 */
2985 		btrfs_unlock_up_safe(p, level + 1);
2986 
2987 		/*
2988 		 * Since we can unwind ebs we want to do a real search every
2989 		 * time.
2990 		 */
2991 		prev_cmp = -1;
2992 		ret = key_search(b, key, level, &prev_cmp, &slot);
2993 
2994 		if (level != 0) {
2995 			int dec = 0;
2996 			if (ret && slot > 0) {
2997 				dec = 1;
2998 				slot -= 1;
2999 			}
3000 			p->slots[level] = slot;
3001 			unlock_up(p, level, lowest_unlock, 0, NULL);
3002 
3003 			if (level == lowest_level) {
3004 				if (dec)
3005 					p->slots[level]++;
3006 				goto done;
3007 			}
3008 
3009 			err = read_block_for_search(root, p, &b, level,
3010 						    slot, key);
3011 			if (err == -EAGAIN)
3012 				goto again;
3013 			if (err) {
3014 				ret = err;
3015 				goto done;
3016 			}
3017 
3018 			level = btrfs_header_level(b);
3019 			err = btrfs_tree_read_lock_atomic(b);
3020 			if (!err) {
3021 				btrfs_set_path_blocking(p);
3022 				btrfs_tree_read_lock(b);
3023 				btrfs_clear_path_blocking(p, b,
3024 							  BTRFS_READ_LOCK);
3025 			}
3026 			b = tree_mod_log_rewind(fs_info, p, b, time_seq);
3027 			if (!b) {
3028 				ret = -ENOMEM;
3029 				goto done;
3030 			}
3031 			p->locks[level] = BTRFS_READ_LOCK;
3032 			p->nodes[level] = b;
3033 		} else {
3034 			p->slots[level] = slot;
3035 			unlock_up(p, level, lowest_unlock, 0, NULL);
3036 			goto done;
3037 		}
3038 	}
3039 	ret = 1;
3040 done:
3041 	if (!p->leave_spinning)
3042 		btrfs_set_path_blocking(p);
3043 	if (ret < 0)
3044 		btrfs_release_path(p);
3045 
3046 	return ret;
3047 }
3048 
3049 /*
3050  * helper to use instead of search slot if no exact match is needed but
3051  * instead the next or previous item should be returned.
3052  * When find_higher is true, the next higher item is returned, the next lower
3053  * otherwise.
3054  * When return_any and find_higher are both true, and no higher item is found,
3055  * return the next lower instead.
3056  * When return_any is true and find_higher is false, and no lower item is found,
3057  * return the next higher instead.
3058  * It returns 0 if any item is found, 1 if none is found (tree empty), and
3059  * < 0 on error
3060  */
3061 int btrfs_search_slot_for_read(struct btrfs_root *root,
3062 			       const struct btrfs_key *key,
3063 			       struct btrfs_path *p, int find_higher,
3064 			       int return_any)
3065 {
3066 	int ret;
3067 	struct extent_buffer *leaf;
3068 
3069 again:
3070 	ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3071 	if (ret <= 0)
3072 		return ret;
3073 	/*
3074 	 * a return value of 1 means the path is at the position where the
3075 	 * item should be inserted. Normally this is the next bigger item,
3076 	 * but in case the previous item is the last in a leaf, path points
3077 	 * to the first free slot in the previous leaf, i.e. at an invalid
3078 	 * item.
3079 	 */
3080 	leaf = p->nodes[0];
3081 
3082 	if (find_higher) {
3083 		if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3084 			ret = btrfs_next_leaf(root, p);
3085 			if (ret <= 0)
3086 				return ret;
3087 			if (!return_any)
3088 				return 1;
3089 			/*
3090 			 * no higher item found, return the next
3091 			 * lower instead
3092 			 */
3093 			return_any = 0;
3094 			find_higher = 0;
3095 			btrfs_release_path(p);
3096 			goto again;
3097 		}
3098 	} else {
3099 		if (p->slots[0] == 0) {
3100 			ret = btrfs_prev_leaf(root, p);
3101 			if (ret < 0)
3102 				return ret;
3103 			if (!ret) {
3104 				leaf = p->nodes[0];
3105 				if (p->slots[0] == btrfs_header_nritems(leaf))
3106 					p->slots[0]--;
3107 				return 0;
3108 			}
3109 			if (!return_any)
3110 				return 1;
3111 			/*
3112 			 * no lower item found, return the next
3113 			 * higher instead
3114 			 */
3115 			return_any = 0;
3116 			find_higher = 1;
3117 			btrfs_release_path(p);
3118 			goto again;
3119 		} else {
3120 			--p->slots[0];
3121 		}
3122 	}
3123 	return 0;
3124 }
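
/*
 * Illustrative sketch (hedged, not from this file): iterate over all items
 * at or after 'key' using the find_higher mode:
 *
 *	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
 *	while (ret == 0) {
 *		btrfs_item_key_to_cpu(path->nodes[0], &found,
 *				      path->slots[0]);
 *		... consume the item ...
 *		ret = btrfs_next_item(root, path);
 *	}
 */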
3125 
3126 /*
3127  * adjust the pointers going up the tree, starting at level
3128  * making sure the right key of each node points to 'key'.
3129  * This is used after shifting pointers to the left, so it stops
3130  * fixing up pointers when a given leaf/node is not in slot 0 of the
3131  * higher levels
3132  *
3133  */
3134 static void fixup_low_keys(struct btrfs_fs_info *fs_info,
3135 			   struct btrfs_path *path,
3136 			   struct btrfs_disk_key *key, int level)
3137 {
3138 	int i;
3139 	struct extent_buffer *t;
3140 
3141 	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3142 		int tslot = path->slots[i];
3143 		if (!path->nodes[i])
3144 			break;
3145 		t = path->nodes[i];
3146 		tree_mod_log_set_node_key(fs_info, t, tslot, 1);
3147 		btrfs_set_node_key(t, key, tslot);
3148 		btrfs_mark_buffer_dirty(path->nodes[i]);
3149 		if (tslot != 0)
3150 			break;
3151 	}
3152 }
3153 
3154 /*
3155  * update item key.
3156  *
3157  * This function isn't completely safe. It's the caller's responsibility
3158  * to ensure that the new key won't break the key ordering.
3159  */
3160 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3161 			     struct btrfs_path *path,
3162 			     const struct btrfs_key *new_key)
3163 {
3164 	struct btrfs_disk_key disk_key;
3165 	struct extent_buffer *eb;
3166 	int slot;
3167 
3168 	eb = path->nodes[0];
3169 	slot = path->slots[0];
3170 	if (slot > 0) {
3171 		btrfs_item_key(eb, &disk_key, slot - 1);
3172 		BUG_ON(comp_keys(&disk_key, new_key) >= 0);
3173 	}
3174 	if (slot < btrfs_header_nritems(eb) - 1) {
3175 		btrfs_item_key(eb, &disk_key, slot + 1);
3176 		BUG_ON(comp_keys(&disk_key, new_key) <= 0);
3177 	}
3178 
3179 	btrfs_cpu_key_to_disk(&disk_key, new_key);
3180 	btrfs_set_item_key(eb, &disk_key, slot);
3181 	btrfs_mark_buffer_dirty(eb);
3182 	if (slot == 0)
3183 		fixup_low_keys(fs_info, path, &disk_key, 1);
3184 }
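
/*
 * Illustrative note (added for clarity): a typical use is shrinking an
 * item's key range in place, e.g. bumping key.offset forward after the
 * front of an extent has been dropped.  The new key must still sort
 * between its neighbours, which the BUG_ON()s above verify.
 */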
3185 
3186 /*
3187  * try to push data from one node into the next node left in the
3188  * tree.
3189  *
3190  * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3191  * error, and > 0 if there was no room in the left hand block.
3192  */
3193 static int push_node_left(struct btrfs_trans_handle *trans,
3194 			  struct btrfs_fs_info *fs_info,
3195 			  struct extent_buffer *dst,
3196 			  struct extent_buffer *src, int empty)
3197 {
3198 	int push_items = 0;
3199 	int src_nritems;
3200 	int dst_nritems;
3201 	int ret = 0;
3202 
3203 	src_nritems = btrfs_header_nritems(src);
3204 	dst_nritems = btrfs_header_nritems(dst);
3205 	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3206 	WARN_ON(btrfs_header_generation(src) != trans->transid);
3207 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
3208 
3209 	if (!empty && src_nritems <= 8)
3210 		return 1;
3211 
3212 	if (push_items <= 0)
3213 		return 1;
3214 
3215 	if (empty) {
3216 		push_items = min(src_nritems, push_items);
3217 		if (push_items < src_nritems) {
3218 			/* leave at least 8 pointers in the node if
3219 			 * we aren't going to empty it
3220 			 */
3221 			if (src_nritems - push_items < 8) {
3222 				if (push_items <= 8)
3223 					return 1;
3224 				push_items -= 8;
3225 			}
3226 		}
3227 	} else
3228 		push_items = min(src_nritems - 8, push_items);
3229 
3230 	ret = tree_mod_log_eb_copy(fs_info, dst, src, dst_nritems, 0,
3231 				   push_items);
3232 	if (ret) {
3233 		btrfs_abort_transaction(trans, ret);
3234 		return ret;
3235 	}
3236 	copy_extent_buffer(dst, src,
3237 			   btrfs_node_key_ptr_offset(dst_nritems),
3238 			   btrfs_node_key_ptr_offset(0),
3239 			   push_items * sizeof(struct btrfs_key_ptr));
3240 
3241 	if (push_items < src_nritems) {
3242 		/*
3243 		 * don't call tree_mod_log_eb_move here, key removal was already
3244 		 * fully logged by tree_mod_log_eb_copy above.
3245 		 */
3246 		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3247 				      btrfs_node_key_ptr_offset(push_items),
3248 				      (src_nritems - push_items) *
3249 				      sizeof(struct btrfs_key_ptr));
3250 	}
3251 	btrfs_set_header_nritems(src, src_nritems - push_items);
3252 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
3253 	btrfs_mark_buffer_dirty(src);
3254 	btrfs_mark_buffer_dirty(dst);
3255 
3256 	return ret;
3257 }
3258 
3259 /*
3260  * try to push data from one node into the next node right in the
3261  * tree.
3262  *
3263  * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3264  * error, and > 0 if there was no room in the right hand block.
3265  *
3266  * this will only push up to 1/2 the contents of the left node over
3267  */
3268 static int balance_node_right(struct btrfs_trans_handle *trans,
3269 			      struct btrfs_fs_info *fs_info,
3270 			      struct extent_buffer *dst,
3271 			      struct extent_buffer *src)
3272 {
3273 	int push_items = 0;
3274 	int max_push;
3275 	int src_nritems;
3276 	int dst_nritems;
3277 	int ret = 0;
3278 
3279 	WARN_ON(btrfs_header_generation(src) != trans->transid);
3280 	WARN_ON(btrfs_header_generation(dst) != trans->transid);
3281 
3282 	src_nritems = btrfs_header_nritems(src);
3283 	dst_nritems = btrfs_header_nritems(dst);
3284 	push_items = BTRFS_NODEPTRS_PER_BLOCK(fs_info) - dst_nritems;
3285 	if (push_items <= 0)
3286 		return 1;
3287 
3288 	if (src_nritems < 4)
3289 		return 1;
3290 
3291 	max_push = src_nritems / 2 + 1;
3292 	/* don't try to empty the node */
3293 	if (max_push >= src_nritems)
3294 		return 1;
3295 
3296 	if (max_push < push_items)
3297 		push_items = max_push;
3298 
3299 	tree_mod_log_eb_move(fs_info, dst, push_items, 0, dst_nritems);
3300 	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3301 				      btrfs_node_key_ptr_offset(0),
3302 				      (dst_nritems) *
3303 				      sizeof(struct btrfs_key_ptr));
3304 
3305 	ret = tree_mod_log_eb_copy(fs_info, dst, src, 0,
3306 				   src_nritems - push_items, push_items);
3307 	if (ret) {
3308 		btrfs_abort_transaction(trans, ret);
3309 		return ret;
3310 	}
3311 	copy_extent_buffer(dst, src,
3312 			   btrfs_node_key_ptr_offset(0),
3313 			   btrfs_node_key_ptr_offset(src_nritems - push_items),
3314 			   push_items * sizeof(struct btrfs_key_ptr));
3315 
3316 	btrfs_set_header_nritems(src, src_nritems - push_items);
3317 	btrfs_set_header_nritems(dst, dst_nritems + push_items);
3318 
3319 	btrfs_mark_buffer_dirty(src);
3320 	btrfs_mark_buffer_dirty(dst);
3321 
3322 	return ret;
3323 }
3324 
3325 /*
3326  * helper function to insert a new root level in the tree.
3327  * A new node is allocated, and a single item is inserted to
3328  * point to the existing root
3329  *
3330  * returns zero on success or < 0 on failure.
3331  */
3332 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3333 			   struct btrfs_root *root,
3334 			   struct btrfs_path *path, int level)
3335 {
3336 	struct btrfs_fs_info *fs_info = root->fs_info;
3337 	u64 lower_gen;
3338 	struct extent_buffer *lower;
3339 	struct extent_buffer *c;
3340 	struct extent_buffer *old;
3341 	struct btrfs_disk_key lower_key;
3342 
3343 	BUG_ON(path->nodes[level]);
3344 	BUG_ON(path->nodes[level-1] != root->node);
3345 
3346 	lower = path->nodes[level-1];
3347 	if (level == 1)
3348 		btrfs_item_key(lower, &lower_key, 0);
3349 	else
3350 		btrfs_node_key(lower, &lower_key, 0);
3351 
3352 	c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3353 				   &lower_key, level, root->node->start, 0);
3354 	if (IS_ERR(c))
3355 		return PTR_ERR(c);
3356 
3357 	root_add_used(root, fs_info->nodesize);
3358 
3359 	memzero_extent_buffer(c, 0, sizeof(struct btrfs_header));
3360 	btrfs_set_header_nritems(c, 1);
3361 	btrfs_set_header_level(c, level);
3362 	btrfs_set_header_bytenr(c, c->start);
3363 	btrfs_set_header_generation(c, trans->transid);
3364 	btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3365 	btrfs_set_header_owner(c, root->root_key.objectid);
3366 
3367 	write_extent_buffer_fsid(c, fs_info->fsid);
3368 	write_extent_buffer_chunk_tree_uuid(c, fs_info->chunk_tree_uuid);
3369 
3370 	btrfs_set_node_key(c, &lower_key, 0);
3371 	btrfs_set_node_blockptr(c, 0, lower->start);
3372 	lower_gen = btrfs_header_generation(lower);
3373 	WARN_ON(lower_gen != trans->transid);
3374 
3375 	btrfs_set_node_ptr_generation(c, 0, lower_gen);
3376 
3377 	btrfs_mark_buffer_dirty(c);
3378 
3379 	old = root->node;
3380 	tree_mod_log_set_root_pointer(root, c, 0);
3381 	rcu_assign_pointer(root->node, c);
3382 
3383 	/* the super has an extra ref to root->node */
3384 	free_extent_buffer(old);
3385 
3386 	add_root_to_dirty_list(root);
3387 	extent_buffer_get(c);
3388 	path->nodes[level] = c;
3389 	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
3390 	path->slots[level] = 0;
3391 	return 0;
3392 }
3393 
3394 /*
3395  * worker function to insert a single pointer in a node.
3396  * the node should have enough room for the pointer already
3397  *
3398  * slot and level indicate where you want the key to go, and
3399  * blocknr is the block the key points to.
3400  */
3401 static void insert_ptr(struct btrfs_trans_handle *trans,
3402 		       struct btrfs_fs_info *fs_info, struct btrfs_path *path,
3403 		       struct btrfs_disk_key *key, u64 bytenr,
3404 		       int slot, int level)
3405 {
3406 	struct extent_buffer *lower;
3407 	int nritems;
3408 	int ret;
3409 
3410 	BUG_ON(!path->nodes[level]);
3411 	btrfs_assert_tree_locked(path->nodes[level]);
3412 	lower = path->nodes[level];
3413 	nritems = btrfs_header_nritems(lower);
3414 	BUG_ON(slot > nritems);
3415 	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(fs_info));
3416 	if (slot != nritems) {
3417 		if (level)
3418 			tree_mod_log_eb_move(fs_info, lower, slot + 1,
3419 					     slot, nritems - slot);
3420 		memmove_extent_buffer(lower,
3421 			      btrfs_node_key_ptr_offset(slot + 1),
3422 			      btrfs_node_key_ptr_offset(slot),
3423 			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
3424 	}
3425 	if (level) {
3426 		ret = tree_mod_log_insert_key(fs_info, lower, slot,
3427 					      MOD_LOG_KEY_ADD, GFP_NOFS);
3428 		BUG_ON(ret < 0);
3429 	}
3430 	btrfs_set_node_key(lower, key, slot);
3431 	btrfs_set_node_blockptr(lower, slot, bytenr);
3432 	WARN_ON(trans->transid == 0);
3433 	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3434 	btrfs_set_header_nritems(lower, nritems + 1);
3435 	btrfs_mark_buffer_dirty(lower);
3436 }
3437 
3438 /*
3439  * split the node at the specified level in path in two.
3440  * The path is corrected to point to the appropriate node after the split
3441  *
3442  * Before splitting this tries to make some room in the node by pushing
3443  * left and right, if either one works, it returns right away.
3444  *
3445  * returns 0 on success and < 0 on failure
3446  */
3447 static noinline int split_node(struct btrfs_trans_handle *trans,
3448 			       struct btrfs_root *root,
3449 			       struct btrfs_path *path, int level)
3450 {
3451 	struct btrfs_fs_info *fs_info = root->fs_info;
3452 	struct extent_buffer *c;
3453 	struct extent_buffer *split;
3454 	struct btrfs_disk_key disk_key;
3455 	int mid;
3456 	int ret;
3457 	u32 c_nritems;
3458 
3459 	c = path->nodes[level];
3460 	WARN_ON(btrfs_header_generation(c) != trans->transid);
3461 	if (c == root->node) {
3462 		/*
3463 		 * trying to split the root, lets make a new one
3464 		 *
3465 		 * tree mod log: We don't log the removal of the old root in
3466 		 * insert_new_root, because that root buffer will be kept as a
3467 		 * normal node. We are going to log removal of half of the
3468 		 * elements below with tree_mod_log_eb_copy. We're holding a
3469 		 * tree lock on the buffer, which is why we cannot race with
3470 		 * other tree_mod_log users.
3471 		 */
3472 		ret = insert_new_root(trans, root, path, level + 1);
3473 		if (ret)
3474 			return ret;
3475 	} else {
3476 		ret = push_nodes_for_insert(trans, root, path, level);
3477 		c = path->nodes[level];
3478 		if (!ret && btrfs_header_nritems(c) <
3479 		    BTRFS_NODEPTRS_PER_BLOCK(fs_info) - 3)
3480 			return 0;
3481 		if (ret < 0)
3482 			return ret;
3483 	}
3484 
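	/*
	 * Split point: keys [mid, c_nritems) move to the new 'split'
	 * buffer below, while [0, mid) stay in 'c'.  The key at 'mid'
	 * becomes the first key of the new node.
	 */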
3485 	c_nritems = btrfs_header_nritems(c);
3486 	mid = (c_nritems + 1) / 2;
3487 	btrfs_node_key(c, &disk_key, mid);
3488 
3489 	split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3490 			&disk_key, level, c->start, 0);
3491 	if (IS_ERR(split))
3492 		return PTR_ERR(split);
3493 
3494 	root_add_used(root, fs_info->nodesize);
3495 
3496 	memzero_extent_buffer(split, 0, sizeof(struct btrfs_header));
3497 	btrfs_set_header_level(split, btrfs_header_level(c));
3498 	btrfs_set_header_bytenr(split, split->start);
3499 	btrfs_set_header_generation(split, trans->transid);
3500 	btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3501 	btrfs_set_header_owner(split, root->root_key.objectid);
3502 	write_extent_buffer_fsid(split, fs_info->fsid);
3503 	write_extent_buffer_chunk_tree_uuid(split, fs_info->chunk_tree_uuid);
3504 
3505 	ret = tree_mod_log_eb_copy(fs_info, split, c, 0, mid, c_nritems - mid);
3506 	if (ret) {
3507 		btrfs_abort_transaction(trans, ret);
3508 		return ret;
3509 	}
3510 	copy_extent_buffer(split, c,
3511 			   btrfs_node_key_ptr_offset(0),
3512 			   btrfs_node_key_ptr_offset(mid),
3513 			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3514 	btrfs_set_header_nritems(split, c_nritems - mid);
3515 	btrfs_set_header_nritems(c, mid);
3516 	ret = 0;
3517 
3518 	btrfs_mark_buffer_dirty(c);
3519 	btrfs_mark_buffer_dirty(split);
3520 
3521 	insert_ptr(trans, fs_info, path, &disk_key, split->start,
3522 		   path->slots[level + 1] + 1, level + 1);
3523 
3524 	if (path->slots[level] >= mid) {
3525 		path->slots[level] -= mid;
3526 		btrfs_tree_unlock(c);
3527 		free_extent_buffer(c);
3528 		path->nodes[level] = split;
3529 		path->slots[level + 1] += 1;
3530 	} else {
3531 		btrfs_tree_unlock(split);
3532 		free_extent_buffer(split);
3533 	}
3534 	return ret;
3535 }
3536 
3537 /*
3538  * how many bytes are required to store the items in a leaf.  start
3539  * and nr indicate which items in the leaf to check.  This totals up the
3540  * space used both by the item structs and the item data
3541  */
3542 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3543 {
3544 	struct btrfs_item *start_item;
3545 	struct btrfs_item *end_item;
3546 	struct btrfs_map_token token;
3547 	int data_len;
3548 	int nritems = btrfs_header_nritems(l);
3549 	int end = min(nritems, start + nr) - 1;
3550 
3551 	if (!nr)
3552 		return 0;
3553 	btrfs_init_map_token(&token);
3554 	start_item = btrfs_item_nr(start);
3555 	end_item = btrfs_item_nr(end);
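	/*
	 * Leaf item data is laid out back to front: item 'start' owns the
	 * highest data offsets and item 'end' the lowest.  Used space is
	 * the end of start_item's data minus the start of end_item's
	 * data, plus one struct btrfs_item header per item.
	 */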
3556 	data_len = btrfs_token_item_offset(l, start_item, &token) +
3557 		btrfs_token_item_size(l, start_item, &token);
3558 	data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3559 	data_len += sizeof(struct btrfs_item) * nr;
3560 	WARN_ON(data_len < 0);
3561 	return data_len;
3562 }
3563 
3564 /*
3565  * The space between the end of the leaf items and
3566  * the start of the leaf data.  IOW, how much room
3567  * the leaf has left for both items and data
3568  */
3569 noinline int btrfs_leaf_free_space(struct btrfs_fs_info *fs_info,
3570 				   struct extent_buffer *leaf)
3571 {
3572 	int nritems = btrfs_header_nritems(leaf);
3573 	int ret;
3574 
3575 	ret = BTRFS_LEAF_DATA_SIZE(fs_info) - leaf_space_used(leaf, 0, nritems);
3576 	if (ret < 0) {
3577 		btrfs_crit(fs_info,
3578 			   "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3579 			   ret,
3580 			   (unsigned long) BTRFS_LEAF_DATA_SIZE(fs_info),
3581 			   leaf_space_used(leaf, 0, nritems), nritems);
3582 	}
3583 	return ret;
3584 }
3585 
3586 /*
3587  * min slot controls the lowest index we're willing to push to the
3588  * right.  We'll push up to and including min_slot, but no lower
3589  */
3590 static noinline int __push_leaf_right(struct btrfs_fs_info *fs_info,
3591 				      struct btrfs_path *path,
3592 				      int data_size, int empty,
3593 				      struct extent_buffer *right,
3594 				      int free_space, u32 left_nritems,
3595 				      u32 min_slot)
3596 {
3597 	struct extent_buffer *left = path->nodes[0];
3598 	struct extent_buffer *upper = path->nodes[1];
3599 	struct btrfs_map_token token;
3600 	struct btrfs_disk_key disk_key;
3601 	int slot;
3602 	u32 i;
3603 	int push_space = 0;
3604 	int push_items = 0;
3605 	struct btrfs_item *item;
3606 	u32 nr;
3607 	u32 right_nritems;
3608 	u32 data_end;
3609 	u32 this_item_size;
3610 
3611 	btrfs_init_map_token(&token);
3612 
3613 	if (empty)
3614 		nr = 0;
3615 	else
3616 		nr = max_t(u32, 1, min_slot);
3617 
3618 	if (path->slots[0] >= left_nritems)
3619 		push_space += data_size;
3620 
3621 	slot = path->slots[1];
3622 	i = left_nritems - 1;
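	/*
	 * Walk from the rightmost item down toward 'nr', counting how
	 * many items fit in the right leaf.  When we reach the insertion
	 * slot, also reserve data_size so the pending insert still fits.
	 */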
3623 	while (i >= nr) {
3624 		item = btrfs_item_nr(i);
3625 
3626 		if (!empty && push_items > 0) {
3627 			if (path->slots[0] > i)
3628 				break;
3629 			if (path->slots[0] == i) {
3630 				int space = btrfs_leaf_free_space(fs_info, left);
3631 				if (space + push_space * 2 > free_space)
3632 					break;
3633 			}
3634 		}
3635 
3636 		if (path->slots[0] == i)
3637 			push_space += data_size;
3638 
3639 		this_item_size = btrfs_item_size(left, item);
3640 		if (this_item_size + sizeof(*item) + push_space > free_space)
3641 			break;
3642 
3643 		push_items++;
3644 		push_space += this_item_size + sizeof(*item);
3645 		if (i == 0)
3646 			break;
3647 		i--;
3648 	}
3649 
3650 	if (push_items == 0)
3651 		goto out_unlock;
3652 
3653 	WARN_ON(!empty && push_items == left_nritems);
3654 
3655 	/* push left to right */
3656 	right_nritems = btrfs_header_nritems(right);
3657 
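	/*
	 * Size of the item data being moved: from the start of the left
	 * leaf's data area up to the end of the first pushed item.
	 */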
3658 	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3659 	push_space -= leaf_data_end(fs_info, left);
3660 
3661 	/* make room in the right data area */
3662 	data_end = leaf_data_end(fs_info, right);
3663 	memmove_extent_buffer(right,
3664 			      BTRFS_LEAF_DATA_OFFSET + data_end - push_space,
3665 			      BTRFS_LEAF_DATA_OFFSET + data_end,
3666 			      BTRFS_LEAF_DATA_SIZE(fs_info) - data_end);
3667 
3668 	/* copy from the left data area */
3669 	copy_extent_buffer(right, left, BTRFS_LEAF_DATA_OFFSET +
3670 		     BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3671 		     BTRFS_LEAF_DATA_OFFSET + leaf_data_end(fs_info, left),
3672 		     push_space);
3673 
3674 	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3675 			      btrfs_item_nr_offset(0),
3676 			      right_nritems * sizeof(struct btrfs_item));
3677 
3678 	/* copy the items from left to right */
3679 	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3680 		   btrfs_item_nr_offset(left_nritems - push_items),
3681 		   push_items * sizeof(struct btrfs_item));
3682 
3683 	/* update the item pointers */
3684 	right_nritems += push_items;
3685 	btrfs_set_header_nritems(right, right_nritems);
3686 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3687 	for (i = 0; i < right_nritems; i++) {
3688 		item = btrfs_item_nr(i);
3689 		push_space -= btrfs_token_item_size(right, item, &token);
3690 		btrfs_set_token_item_offset(right, item, push_space, &token);
3691 	}
3692 
3693 	left_nritems -= push_items;
3694 	btrfs_set_header_nritems(left, left_nritems);
3695 
3696 	if (left_nritems)
3697 		btrfs_mark_buffer_dirty(left);
3698 	else
3699 		clean_tree_block(fs_info, left);
3700 
3701 	btrfs_mark_buffer_dirty(right);
3702 
3703 	btrfs_item_key(right, &disk_key, 0);
3704 	btrfs_set_node_key(upper, &disk_key, slot + 1);
3705 	btrfs_mark_buffer_dirty(upper);
3706 
3707 	/* then fixup the leaf pointer in the path */
3708 	if (path->slots[0] >= left_nritems) {
3709 		path->slots[0] -= left_nritems;
3710 		if (btrfs_header_nritems(path->nodes[0]) == 0)
3711 			clean_tree_block(fs_info, path->nodes[0]);
3712 		btrfs_tree_unlock(path->nodes[0]);
3713 		free_extent_buffer(path->nodes[0]);
3714 		path->nodes[0] = right;
3715 		path->slots[1] += 1;
3716 	} else {
3717 		btrfs_tree_unlock(right);
3718 		free_extent_buffer(right);
3719 	}
3720 	return 0;
3721 
3722 out_unlock:
3723 	btrfs_tree_unlock(right);
3724 	free_extent_buffer(right);
3725 	return 1;
3726 }
3727 
3728 /*
3729  * push some data in the path leaf to the right, trying to free up at
3730  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3731  *
3732  * returns 1 if the push failed because the other node didn't have enough
3733  * room, 0 if everything worked out and < 0 if there were major errors.
3734  *
3735  * this will push starting from min_slot to the end of the leaf.  It won't
3736  * push any slot lower than min_slot
3737  */
3738 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3739 			   *root, struct btrfs_path *path,
3740 			   int min_data_size, int data_size,
3741 			   int empty, u32 min_slot)
3742 {
3743 	struct btrfs_fs_info *fs_info = root->fs_info;
3744 	struct extent_buffer *left = path->nodes[0];
3745 	struct extent_buffer *right;
3746 	struct extent_buffer *upper;
3747 	int slot;
3748 	int free_space;
3749 	u32 left_nritems;
3750 	int ret;
3751 
3752 	if (!path->nodes[1])
3753 		return 1;
3754 
3755 	slot = path->slots[1];
3756 	upper = path->nodes[1];
3757 	if (slot >= btrfs_header_nritems(upper) - 1)
3758 		return 1;
3759 
3760 	btrfs_assert_tree_locked(path->nodes[1]);
3761 
3762 	right = read_node_slot(fs_info, upper, slot + 1);
3763 	/*
3764 	 * slot + 1 is not valid or we failed to read the right node,
3765 	 * no big deal, just return.
3766 	 */
3767 	if (IS_ERR(right))
3768 		return 1;
3769 
3770 	btrfs_tree_lock(right);
3771 	btrfs_set_lock_blocking(right);
3772 
3773 	free_space = btrfs_leaf_free_space(fs_info, right);
3774 	if (free_space < data_size)
3775 		goto out_unlock;
3776 
3777 	/* cow and double check */
3778 	ret = btrfs_cow_block(trans, root, right, upper,
3779 			      slot + 1, &right);
3780 	if (ret)
3781 		goto out_unlock;
3782 
3783 	free_space = btrfs_leaf_free_space(fs_info, right);
3784 	if (free_space < data_size)
3785 		goto out_unlock;
3786 
3787 	left_nritems = btrfs_header_nritems(left);
3788 	if (left_nritems == 0)
3789 		goto out_unlock;
3790 
3791 	if (path->slots[0] == left_nritems && !empty) {
3792 		/* Key greater than all keys in the leaf, right neighbor has
3793 		 * enough room for it and we're not emptying our leaf to delete
3794 		 * it, therefore use right neighbor to insert the new item and
3795 		 * no need to touch/dirty our left leaf. */
3796 		btrfs_tree_unlock(left);
3797 		free_extent_buffer(left);
3798 		path->nodes[0] = right;
3799 		path->slots[0] = 0;
3800 		path->slots[1]++;
3801 		return 0;
3802 	}
3803 
3804 	return __push_leaf_right(fs_info, path, min_data_size, empty,
3805 				right, free_space, left_nritems, min_slot);
3806 out_unlock:
3807 	btrfs_tree_unlock(right);
3808 	free_extent_buffer(right);
3809 	return 1;
3810 }
3811 
3812 /*
3813  * push some data in the path leaf to the left, trying to free up at
3814  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3815  *
3816  * max_slot can put a limit on how far into the leaf we'll push items.  The
3817  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us do all the
3818  * items
3819  */
3820 static noinline int __push_leaf_left(struct btrfs_fs_info *fs_info,
3821 				     struct btrfs_path *path, int data_size,
3822 				     int empty, struct extent_buffer *left,
3823 				     int free_space, u32 right_nritems,
3824 				     u32 max_slot)
3825 {
3826 	struct btrfs_disk_key disk_key;
3827 	struct extent_buffer *right = path->nodes[0];
3828 	int i;
3829 	int push_space = 0;
3830 	int push_items = 0;
3831 	struct btrfs_item *item;
3832 	u32 old_left_nritems;
3833 	u32 nr;
3834 	int ret = 0;
3835 	u32 this_item_size;
3836 	u32 old_left_item_size;
3837 	struct btrfs_map_token token;
3838 
3839 	btrfs_init_map_token(&token);
3840 
3841 	if (empty)
3842 		nr = min(right_nritems, max_slot);
3843 	else
3844 		nr = min(right_nritems - 1, max_slot);
3845 
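	/*
	 * Scan from slot 0 up to 'nr', counting how many items fit in the
	 * left leaf; never push past the insertion slot, and reserve
	 * data_size for the pending insert when we reach it.
	 */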
3846 	for (i = 0; i < nr; i++) {
3847 		item = btrfs_item_nr(i);
3848 
3849 		if (!empty && push_items > 0) {
3850 			if (path->slots[0] < i)
3851 				break;
3852 			if (path->slots[0] == i) {
3853 				int space = btrfs_leaf_free_space(fs_info, right);
3854 				if (space + push_space * 2 > free_space)
3855 					break;
3856 			}
3857 		}
3858 
3859 		if (path->slots[0] == i)
3860 			push_space += data_size;
3861 
3862 		this_item_size = btrfs_item_size(right, item);
3863 		if (this_item_size + sizeof(*item) + push_space > free_space)
3864 			break;
3865 
3866 		push_items++;
3867 		push_space += this_item_size + sizeof(*item);
3868 	}
3869 
3870 	if (push_items == 0) {
3871 		ret = 1;
3872 		goto out;
3873 	}
3874 	WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3875 
3876 	/* push data from right to left */
3877 	copy_extent_buffer(left, right,
3878 			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
3879 			   btrfs_item_nr_offset(0),
3880 			   push_items * sizeof(struct btrfs_item));
3881 
3882 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info) -
3883 		     btrfs_item_offset_nr(right, push_items - 1);
3884 
3885 	copy_extent_buffer(left, right, BTRFS_LEAF_DATA_OFFSET +
3886 		     leaf_data_end(fs_info, left) - push_space,
3887 		     BTRFS_LEAF_DATA_OFFSET +
3888 		     btrfs_item_offset_nr(right, push_items - 1),
3889 		     push_space);
3890 	old_left_nritems = btrfs_header_nritems(left);
3891 	BUG_ON(old_left_nritems <= 0);
3892 
3893 	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
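	/*
	 * The items appended to the left leaf still carry offsets
	 * relative to the right leaf's data area; rebase them against
	 * the end of the left leaf's old data.
	 */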
3894 	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3895 		u32 ioff;
3896 
3897 		item = btrfs_item_nr(i);
3898 
3899 		ioff = btrfs_token_item_offset(left, item, &token);
3900 		btrfs_set_token_item_offset(left, item,
3901 		      ioff - (BTRFS_LEAF_DATA_SIZE(fs_info) - old_left_item_size),
3902 		      &token);
3903 	}
3904 	btrfs_set_header_nritems(left, old_left_nritems + push_items);
3905 
3906 	/* fixup right node */
3907 	if (push_items > right_nritems)
3908 		WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3909 		       right_nritems);
3910 
3911 	if (push_items < right_nritems) {
3912 		push_space = btrfs_item_offset_nr(right, push_items - 1) -
3913 						  leaf_data_end(fs_info, right);
3914 		memmove_extent_buffer(right, BTRFS_LEAF_DATA_OFFSET +
3915 				      BTRFS_LEAF_DATA_SIZE(fs_info) - push_space,
3916 				      BTRFS_LEAF_DATA_OFFSET +
3917 				      leaf_data_end(fs_info, right), push_space);
3918 
3919 		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3920 			      btrfs_item_nr_offset(push_items),
3921 			     (btrfs_header_nritems(right) - push_items) *
3922 			     sizeof(struct btrfs_item));
3923 	}
3924 	right_nritems -= push_items;
3925 	btrfs_set_header_nritems(right, right_nritems);
3926 	push_space = BTRFS_LEAF_DATA_SIZE(fs_info);
3927 	for (i = 0; i < right_nritems; i++) {
3928 		item = btrfs_item_nr(i);
3929 
3930 		push_space = push_space - btrfs_token_item_size(right,
3931 								item, &token);
3932 		btrfs_set_token_item_offset(right, item, push_space, &token);
3933 	}
3934 
3935 	btrfs_mark_buffer_dirty(left);
3936 	if (right_nritems)
3937 		btrfs_mark_buffer_dirty(right);
3938 	else
3939 		clean_tree_block(fs_info, right);
3940 
3941 	btrfs_item_key(right, &disk_key, 0);
3942 	fixup_low_keys(fs_info, path, &disk_key, 1);
3943 
3944 	/* then fixup the leaf pointer in the path */
3945 	if (path->slots[0] < push_items) {
3946 		path->slots[0] += old_left_nritems;
3947 		btrfs_tree_unlock(path->nodes[0]);
3948 		free_extent_buffer(path->nodes[0]);
3949 		path->nodes[0] = left;
3950 		path->slots[1] -= 1;
3951 	} else {
3952 		btrfs_tree_unlock(left);
3953 		free_extent_buffer(left);
3954 		path->slots[0] -= push_items;
3955 	}
3956 	BUG_ON(path->slots[0] < 0);
3957 	return ret;
3958 out:
3959 	btrfs_tree_unlock(left);
3960 	free_extent_buffer(left);
3961 	return ret;
3962 }
3963 
3964 /*
3965  * push some data in the path leaf to the left, trying to free up at
3966  * least data_size bytes.  returns zero if the push worked, nonzero otherwise
3967  *
3968  * max_slot can put a limit on how far into the leaf we'll push items.  The
3969  * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
3970  * items
3971  */
3972 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3973 			  *root, struct btrfs_path *path, int min_data_size,
3974 			  int data_size, int empty, u32 max_slot)
3975 {
3976 	struct btrfs_fs_info *fs_info = root->fs_info;
3977 	struct extent_buffer *right = path->nodes[0];
3978 	struct extent_buffer *left;
3979 	int slot;
3980 	int free_space;
3981 	u32 right_nritems;
3982 	int ret = 0;
3983 
3984 	slot = path->slots[1];
3985 	if (slot == 0)
3986 		return 1;
3987 	if (!path->nodes[1])
3988 		return 1;
3989 
3990 	right_nritems = btrfs_header_nritems(right);
3991 	if (right_nritems == 0)
3992 		return 1;
3993 
3994 	btrfs_assert_tree_locked(path->nodes[1]);
3995 
3996 	left = read_node_slot(fs_info, path->nodes[1], slot - 1);
3997 	/*
3998 	 * slot - 1 is not valid or we failed to read the left node,
3999 	 * no big deal, just return.
4000 	 */
4001 	if (IS_ERR(left))
4002 		return 1;
4003 
4004 	btrfs_tree_lock(left);
4005 	btrfs_set_lock_blocking(left);
4006 
4007 	free_space = btrfs_leaf_free_space(fs_info, left);
4008 	if (free_space < data_size) {
4009 		ret = 1;
4010 		goto out;
4011 	}
4012 
4013 	/* cow and double check */
4014 	ret = btrfs_cow_block(trans, root, left,
4015 			      path->nodes[1], slot - 1, &left);
4016 	if (ret) {
4017 		/* we hit -ENOSPC, but it isn't fatal here */
4018 		if (ret == -ENOSPC)
4019 			ret = 1;
4020 		goto out;
4021 	}
4022 
4023 	free_space = btrfs_leaf_free_space(fs_info, left);
4024 	if (free_space < data_size) {
4025 		ret = 1;
4026 		goto out;
4027 	}
4028 
4029 	return __push_leaf_left(fs_info, path, min_data_size,
4030 			       empty, left, free_space, right_nritems,
4031 			       max_slot);
4032 out:
4033 	btrfs_tree_unlock(left);
4034 	free_extent_buffer(left);
4035 	return ret;
4036 }
4037 
4038 /*
4039  * split the path's leaf in two, making sure there is at least data_size
4040  * available for the resulting leaf level of the path.
4041  */
4042 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4043 				    struct btrfs_fs_info *fs_info,
4044 				    struct btrfs_path *path,
4045 				    struct extent_buffer *l,
4046 				    struct extent_buffer *right,
4047 				    int slot, int mid, int nritems)
4048 {
4049 	int data_copy_size;
4050 	int rt_data_off;
4051 	int i;
4052 	struct btrfs_disk_key disk_key;
4053 	struct btrfs_map_token token;
4054 
4055 	btrfs_init_map_token(&token);
4056 
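	/*
	 * Items [mid, nritems) and their data move to the fresh right
	 * leaf; rt_data_off below is the amount each copied item's data
	 * offset must be adjusted by for the new, emptier data area.
	 */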
4057 	nritems = nritems - mid;
4058 	btrfs_set_header_nritems(right, nritems);
4059 	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(fs_info, l);
4060 
4061 	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4062 			   btrfs_item_nr_offset(mid),
4063 			   nritems * sizeof(struct btrfs_item));
4064 
4065 	copy_extent_buffer(right, l,
4066 		     BTRFS_LEAF_DATA_OFFSET + BTRFS_LEAF_DATA_SIZE(fs_info) -
4067 		     data_copy_size, BTRFS_LEAF_DATA_OFFSET +
4068 		     leaf_data_end(fs_info, l), data_copy_size);
4069 
4070 	rt_data_off = BTRFS_LEAF_DATA_SIZE(fs_info) - btrfs_item_end_nr(l, mid);
4071 
4072 	for (i = 0; i < nritems; i++) {
4073 		struct btrfs_item *item = btrfs_item_nr(i);
4074 		u32 ioff;
4075 
4076 		ioff = btrfs_token_item_offset(right, item, &token);
4077 		btrfs_set_token_item_offset(right, item,
4078 					    ioff + rt_data_off, &token);
4079 	}
4080 
4081 	btrfs_set_header_nritems(l, mid);
4082 	btrfs_item_key(right, &disk_key, 0);
4083 	insert_ptr(trans, fs_info, path, &disk_key, right->start,
4084 		   path->slots[1] + 1, 1);
4085 
4086 	btrfs_mark_buffer_dirty(right);
4087 	btrfs_mark_buffer_dirty(l);
4088 	BUG_ON(path->slots[0] != slot);
4089 
4090 	if (mid <= slot) {
4091 		btrfs_tree_unlock(path->nodes[0]);
4092 		free_extent_buffer(path->nodes[0]);
4093 		path->nodes[0] = right;
4094 		path->slots[0] -= mid;
4095 		path->slots[1] += 1;
4096 	} else {
4097 		btrfs_tree_unlock(right);
4098 		free_extent_buffer(right);
4099 	}
4100 
4101 	BUG_ON(path->slots[0] < 0);
4102 }
4103 
4104 /*
4105  * double splits happen when we need to insert a big item in the middle
4106  * of a leaf.  A double split can leave us with 3 mostly empty leaves:
4107  * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4108  *          A                 B                 C
4109  *
4110  * We avoid this by trying to push the items on either side of our target
4111  * into the adjacent leaves.  If all goes well we can avoid the double split
4112  * completely.
4113  */
4114 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4115 					  struct btrfs_root *root,
4116 					  struct btrfs_path *path,
4117 					  int data_size)
4118 {
4119 	struct btrfs_fs_info *fs_info = root->fs_info;
4120 	int ret;
4121 	int progress = 0;
4122 	int slot;
4123 	u32 nritems;
4124 	int space_needed = data_size;
4125 
4126 	slot = path->slots[0];
4127 	if (slot < btrfs_header_nritems(path->nodes[0]))
4128 		space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
4129 
4130 	/*
4131 	 * try to push all the items after our slot into the
4132 	 * right leaf
4133 	 */
4134 	ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4135 	if (ret < 0)
4136 		return ret;
4137 
4138 	if (ret == 0)
4139 		progress++;
4140 
4141 	nritems = btrfs_header_nritems(path->nodes[0]);
4142 	/*
4143 	 * our goal is to get our slot at the start or end of a leaf.  If
4144 	 * we've done so we're done
4145 	 */
4146 	if (path->slots[0] == 0 || path->slots[0] == nritems)
4147 		return 0;
4148 
4149 	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
4150 		return 0;
4151 
4152 	/* try to push all the items before our slot into the next leaf */
4153 	slot = path->slots[0];
4154 	space_needed = data_size;
4155 	if (slot > 0)
4156 		space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]);
4157 	ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4158 	if (ret < 0)
4159 		return ret;
4160 
4161 	if (ret == 0)
4162 		progress++;
4163 
4164 	if (progress)
4165 		return 0;
4166 	return 1;
4167 }
4168 
4169 /*
4170  * split the path's leaf in two, making sure there is at least data_size
4171  * available for the resulting leaf level of the path.
4172  *
4173  * returns 0 if all went well and < 0 on failure.
4174  */
4175 static noinline int split_leaf(struct btrfs_trans_handle *trans,
4176 			       struct btrfs_root *root,
4177 			       const struct btrfs_key *ins_key,
4178 			       struct btrfs_path *path, int data_size,
4179 			       int extend)
4180 {
4181 	struct btrfs_disk_key disk_key;
4182 	struct extent_buffer *l;
4183 	u32 nritems;
4184 	int mid;
4185 	int slot;
4186 	struct extent_buffer *right;
4187 	struct btrfs_fs_info *fs_info = root->fs_info;
4188 	int ret = 0;
4189 	int wret;
4190 	int split;
4191 	int num_doubles = 0;
4192 	int tried_avoid_double = 0;
4193 
4194 	l = path->nodes[0];
4195 	slot = path->slots[0];
4196 	if (extend && data_size + btrfs_item_size_nr(l, slot) +
4197 	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(fs_info))
4198 		return -EOVERFLOW;
4199 
4200 	/* first try to make some room by pushing left and right */
4201 	if (data_size && path->nodes[1]) {
4202 		int space_needed = data_size;
4203 
4204 		if (slot < btrfs_header_nritems(l))
4205 			space_needed -= btrfs_leaf_free_space(fs_info, l);
4206 
4207 		wret = push_leaf_right(trans, root, path, space_needed,
4208 				       space_needed, 0, 0);
4209 		if (wret < 0)
4210 			return wret;
4211 		if (wret) {
4212 			space_needed = data_size;
4213 			if (slot > 0)
4214 				space_needed -= btrfs_leaf_free_space(fs_info,
4215 								      l);
4216 			wret = push_leaf_left(trans, root, path, space_needed,
4217 					      space_needed, 0, (u32)-1);
4218 			if (wret < 0)
4219 				return wret;
4220 		}
4221 		l = path->nodes[0];
4222 
4223 		/* did the pushes work? */
4224 		if (btrfs_leaf_free_space(fs_info, l) >= data_size)
4225 			return 0;
4226 	}
4227 
4228 	if (!path->nodes[1]) {
4229 		ret = insert_new_root(trans, root, path, 1);
4230 		if (ret)
4231 			return ret;
4232 	}
4233 again:
4234 	split = 1;
4235 	l = path->nodes[0];
4236 	slot = path->slots[0];
4237 	nritems = btrfs_header_nritems(l);
4238 	mid = (nritems + 1) / 2;
4239 
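	/*
	 * Decide how to split: 'split' stays 1 for a normal split at
	 * mid, becomes 0 when the new item should go alone into a fresh
	 * leaf, and 2 when neither half would have room and a double
	 * split is unavoidable.
	 */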
4240 	if (mid <= slot) {
4241 		if (nritems == 1 ||
4242 		    leaf_space_used(l, mid, nritems - mid) + data_size >
4243 			BTRFS_LEAF_DATA_SIZE(fs_info)) {
4244 			if (slot >= nritems) {
4245 				split = 0;
4246 			} else {
4247 				mid = slot;
4248 				if (mid != nritems &&
4249 				    leaf_space_used(l, mid, nritems - mid) +
4250 				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4251 					if (data_size && !tried_avoid_double)
4252 						goto push_for_double;
4253 					split = 2;
4254 				}
4255 			}
4256 		}
4257 	} else {
4258 		if (leaf_space_used(l, 0, mid) + data_size >
4259 			BTRFS_LEAF_DATA_SIZE(fs_info)) {
4260 			if (!extend && data_size && slot == 0) {
4261 				split = 0;
4262 			} else if ((extend || !data_size) && slot == 0) {
4263 				mid = 1;
4264 			} else {
4265 				mid = slot;
4266 				if (mid != nritems &&
4267 				    leaf_space_used(l, mid, nritems - mid) +
4268 				    data_size > BTRFS_LEAF_DATA_SIZE(fs_info)) {
4269 					if (data_size && !tried_avoid_double)
4270 						goto push_for_double;
4271 					split = 2;
4272 				}
4273 			}
4274 		}
4275 	}
4276 
4277 	if (split == 0)
4278 		btrfs_cpu_key_to_disk(&disk_key, ins_key);
4279 	else
4280 		btrfs_item_key(l, &disk_key, mid);
4281 
4282 	right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
4283 			&disk_key, 0, l->start, 0);
4284 	if (IS_ERR(right))
4285 		return PTR_ERR(right);
4286 
4287 	root_add_used(root, fs_info->nodesize);
4288 
4289 	memzero_extent_buffer(right, 0, sizeof(struct btrfs_header));
4290 	btrfs_set_header_bytenr(right, right->start);
4291 	btrfs_set_header_generation(right, trans->transid);
4292 	btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4293 	btrfs_set_header_owner(right, root->root_key.objectid);
4294 	btrfs_set_header_level(right, 0);
4295 	write_extent_buffer_fsid(right, fs_info->fsid);
4296 	write_extent_buffer_chunk_tree_uuid(right, fs_info->chunk_tree_uuid);
4297 
4298 	if (split == 0) {
4299 		if (mid <= slot) {
4300 			btrfs_set_header_nritems(right, 0);
4301 			insert_ptr(trans, fs_info, path, &disk_key,
4302 				   right->start, path->slots[1] + 1, 1);
4303 			btrfs_tree_unlock(path->nodes[0]);
4304 			free_extent_buffer(path->nodes[0]);
4305 			path->nodes[0] = right;
4306 			path->slots[0] = 0;
4307 			path->slots[1] += 1;
4308 		} else {
4309 			btrfs_set_header_nritems(right, 0);
4310 			insert_ptr(trans, fs_info, path, &disk_key,
4311 				   right->start, path->slots[1], 1);
4312 			btrfs_tree_unlock(path->nodes[0]);
4313 			free_extent_buffer(path->nodes[0]);
4314 			path->nodes[0] = right;
4315 			path->slots[0] = 0;
4316 			if (path->slots[1] == 0)
4317 				fixup_low_keys(fs_info, path, &disk_key, 1);
4318 		}
4319 		/*
4320 		 * We create a new leaf 'right' for the required ins_len and
4321 		 * we'll do btrfs_mark_buffer_dirty() on this leaf after copying
4322 		 * the new item of size ins_len into 'right'.
4323 		 */
4324 		return ret;
4325 	}
4326 
4327 	copy_for_split(trans, fs_info, path, l, right, slot, mid, nritems);
4328 
4329 	if (split == 2) {
4330 		BUG_ON(num_doubles != 0);
4331 		num_doubles++;
4332 		goto again;
4333 	}
4334 
4335 	return 0;
4336 
4337 push_for_double:
4338 	push_for_double_split(trans, root, path, data_size);
4339 	tried_avoid_double = 1;
4340 	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= data_size)
4341 		return 0;
4342 	goto again;
4343 }
4344 
4345 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4346 					 struct btrfs_root *root,
4347 					 struct btrfs_path *path, int ins_len)
4348 {
4349 	struct btrfs_fs_info *fs_info = root->fs_info;
4350 	struct btrfs_key key;
4351 	struct extent_buffer *leaf;
4352 	struct btrfs_file_extent_item *fi;
4353 	u64 extent_len = 0;
4354 	u32 item_size;
4355 	int ret;
4356 
4357 	leaf = path->nodes[0];
4358 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4359 
4360 	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4361 	       key.type != BTRFS_EXTENT_CSUM_KEY);
4362 
4363 	if (btrfs_leaf_free_space(fs_info, leaf) >= ins_len)
4364 		return 0;
4365 
4366 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4367 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
4368 		fi = btrfs_item_ptr(leaf, path->slots[0],
4369 				    struct btrfs_file_extent_item);
4370 		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4371 	}
4372 	btrfs_release_path(path);
4373 
4374 	path->keep_locks = 1;
4375 	path->search_for_split = 1;
4376 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4377 	path->search_for_split = 0;
4378 	if (ret > 0)
4379 		ret = -EAGAIN;
4380 	if (ret < 0)
4381 		goto err;
4382 
4383 	ret = -EAGAIN;
4384 	leaf = path->nodes[0];
4385 	/* if our item isn't there, return now */
4386 	if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4387 		goto err;
4388 
4389 	/* the leaf has changed, it now has room.  return now */
4390 	if (btrfs_leaf_free_space(fs_info, path->nodes[0]) >= ins_len)
4391 		goto err;
4392 
4393 	if (key.type == BTRFS_EXTENT_DATA_KEY) {
4394 		fi = btrfs_item_ptr(leaf, path->slots[0],
4395 				    struct btrfs_file_extent_item);
4396 		if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4397 			goto err;
4398 	}
4399 
4400 	btrfs_set_path_blocking(path);
4401 	ret = split_leaf(trans, root, &key, path, ins_len, 1);
4402 	if (ret)
4403 		goto err;
4404 
4405 	path->keep_locks = 0;
4406 	btrfs_unlock_up_safe(path, 1);
4407 	return 0;
4408 err:
4409 	path->keep_locks = 0;
4410 	return ret;
4411 }
4412 
4413 static noinline int split_item(struct btrfs_fs_info *fs_info,
4414 			       struct btrfs_path *path,
4415 			       const struct btrfs_key *new_key,
4416 			       unsigned long split_offset)
4417 {
4418 	struct extent_buffer *leaf;
4419 	struct btrfs_item *item;
4420 	struct btrfs_item *new_item;
4421 	int slot;
4422 	char *buf;
4423 	u32 nritems;
4424 	u32 item_size;
4425 	u32 orig_offset;
4426 	struct btrfs_disk_key disk_key;
4427 
4428 	leaf = path->nodes[0];
4429 	BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < sizeof(struct btrfs_item));
4430 
4431 	btrfs_set_path_blocking(path);
4432 
4433 	item = btrfs_item_nr(path->slots[0]);
4434 	orig_offset = btrfs_item_offset(leaf, item);
4435 	item_size = btrfs_item_size(leaf, item);
4436 
4437 	buf = kmalloc(item_size, GFP_NOFS);
4438 	if (!buf)
4439 		return -ENOMEM;
4440 
4441 	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4442 			    path->slots[0]), item_size);
4443 
4444 	slot = path->slots[0] + 1;
4445 	nritems = btrfs_header_nritems(leaf);
4446 	if (slot != nritems) {
4447 		/* shift the items */
4448 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4449 				btrfs_item_nr_offset(slot),
4450 				(nritems - slot) * sizeof(struct btrfs_item));
4451 	}
4452 
4453 	btrfs_cpu_key_to_disk(&disk_key, new_key);
4454 	btrfs_set_item_key(leaf, &disk_key, slot);
4455 
4456 	new_item = btrfs_item_nr(slot);
4457 
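	/*
	 * The original item shrinks to split_offset bytes and its data
	 * moves to the high end of its old range; the new item takes the
	 * remaining bytes at the old starting offset.
	 */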
4458 	btrfs_set_item_offset(leaf, new_item, orig_offset);
4459 	btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4460 
4461 	btrfs_set_item_offset(leaf, item,
4462 			      orig_offset + item_size - split_offset);
4463 	btrfs_set_item_size(leaf, item, split_offset);
4464 
4465 	btrfs_set_header_nritems(leaf, nritems + 1);
4466 
4467 	/* write the data for the start of the original item */
4468 	write_extent_buffer(leaf, buf,
4469 			    btrfs_item_ptr_offset(leaf, path->slots[0]),
4470 			    split_offset);
4471 
4472 	/* write the data for the new item */
4473 	write_extent_buffer(leaf, buf + split_offset,
4474 			    btrfs_item_ptr_offset(leaf, slot),
4475 			    item_size - split_offset);
4476 	btrfs_mark_buffer_dirty(leaf);
4477 
4478 	BUG_ON(btrfs_leaf_free_space(fs_info, leaf) < 0);
4479 	kfree(buf);
4480 	return 0;
4481 }
4482 
4483 /*
4484  * This function splits a single item into two items,
4485  * giving 'new_key' to the new item and splitting the
4486  * old one at split_offset (from the start of the item).
4487  *
4488  * The path may be released by this operation.  After
4489  * the split, the path is pointing to the old item.  The
4490  * new item is going to be in the same node as the old one.
4491  *
4492  * Note: the item being split must be small enough to live alone on
4493  * a tree block with room for one extra struct btrfs_item
4494  *
4495  * This allows us to split the item in place, keeping a lock on the
4496  * leaf the entire time.
4497  */
4498 int btrfs_split_item(struct btrfs_trans_handle *trans,
4499 		     struct btrfs_root *root,
4500 		     struct btrfs_path *path,
4501 		     const struct btrfs_key *new_key,
4502 		     unsigned long split_offset)
4503 {
4504 	int ret;
4505 	ret = setup_leaf_for_split(trans, root, path,
4506 				   sizeof(struct btrfs_item));
4507 	if (ret)
4508 		return ret;
4509 
4510 	ret = split_item(root->fs_info, path, new_key, split_offset);
4511 	return ret;
4512 }
4513 
4514 /*
4515  * This function duplicates an item, giving 'new_key' to the new item.
4516  * It guarantees both items live in the same tree leaf and the new item
4517  * is contiguous with the original item.
4518  *
4519  * This allows us to split a file extent in place, keeping a lock on the
4520  * leaf the entire time.
4521  */
4522 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4523 			 struct btrfs_root *root,
4524 			 struct btrfs_path *path,
4525 			 const struct btrfs_key *new_key)
4526 {
4527 	struct extent_buffer *leaf;
4528 	int ret;
4529 	u32 item_size;
4530 
4531 	leaf = path->nodes[0];
4532 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4533 	ret = setup_leaf_for_split(trans, root, path,
4534 				   item_size + sizeof(struct btrfs_item));
4535 	if (ret)
4536 		return ret;
4537 
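	/*
	 * Insert a fresh item of the same size right after the original,
	 * then copy the original item's data into it.
	 */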
4538 	path->slots[0]++;
4539 	setup_items_for_insert(root, path, new_key, &item_size,
4540 			       item_size, item_size +
4541 			       sizeof(struct btrfs_item), 1);
4542 	leaf = path->nodes[0];
4543 	memcpy_extent_buffer(leaf,
4544 			     btrfs_item_ptr_offset(leaf, path->slots[0]),
4545 			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4546 			     item_size);
4547 	return 0;
4548 }
4549 
4550 /*
4551  * make the item pointed to by the path smaller.  new_size indicates
4552  * how small to make it, and from_end tells us if we just chop bytes
4553  * off the end of the item or if we shift the item to chop bytes off
4554  * the front.
4555  */
4556 void btrfs_truncate_item(struct btrfs_fs_info *fs_info,
4557 			 struct btrfs_path *path, u32 new_size, int from_end)
4558 {
4559 	int slot;
4560 	struct extent_buffer *leaf;
4561 	struct btrfs_item *item;
4562 	u32 nritems;
4563 	unsigned int data_end;
4564 	unsigned int old_data_start;
4565 	unsigned int old_size;
4566 	unsigned int size_diff;
4567 	int i;
4568 	struct btrfs_map_token token;
4569 
4570 	btrfs_init_map_token(&token);
4571 
4572 	leaf = path->nodes[0];
4573 	slot = path->slots[0];
4574 
4575 	old_size = btrfs_item_size_nr(leaf, slot);
4576 	if (old_size == new_size)
4577 		return;
4578 
4579 	nritems = btrfs_header_nritems(leaf);
4580 	data_end = leaf_data_end(fs_info, leaf);
4581 
4582 	old_data_start = btrfs_item_offset_nr(leaf, slot);
4583 
4584 	size_diff = old_size - new_size;
4585 
4586 	BUG_ON(slot < 0);
4587 	BUG_ON(slot >= nritems);
4588 
4589 	/*
4590 	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4591 	 */
4592 	/* first correct the data pointers */
4593 	for (i = slot; i < nritems; i++) {
4594 		u32 ioff;
4595 		item = btrfs_item_nr(i);
4596 
4597 		ioff = btrfs_token_item_offset(leaf, item, &token);
4598 		btrfs_set_token_item_offset(leaf, item,
4599 					    ioff + size_diff, &token);
4600 	}
4601 
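	/*
	 * Truncating from the end only shifts the data; truncating from
	 * the front also has to preserve inline extent headers and bump
	 * the key's offset, since the item's logical start moves.
	 */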
4602 	/* shift the data */
4603 	if (from_end) {
4604 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4605 			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4606 			      data_end, old_data_start + new_size - data_end);
4607 	} else {
4608 		struct btrfs_disk_key disk_key;
4609 		u64 offset;
4610 
4611 		btrfs_item_key(leaf, &disk_key, slot);
4612 
4613 		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4614 			unsigned long ptr;
4615 			struct btrfs_file_extent_item *fi;
4616 
4617 			fi = btrfs_item_ptr(leaf, slot,
4618 					    struct btrfs_file_extent_item);
4619 			fi = (struct btrfs_file_extent_item *)(
4620 			     (unsigned long)fi - size_diff);
4621 
4622 			if (btrfs_file_extent_type(leaf, fi) ==
4623 			    BTRFS_FILE_EXTENT_INLINE) {
4624 				ptr = btrfs_item_ptr_offset(leaf, slot);
4625 				memmove_extent_buffer(leaf, ptr,
4626 				      (unsigned long)fi,
4627 				      BTRFS_FILE_EXTENT_INLINE_DATA_START);
4628 			}
4629 		}
4630 
4631 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4632 			      data_end + size_diff, BTRFS_LEAF_DATA_OFFSET +
4633 			      data_end, old_data_start - data_end);
4634 
4635 		offset = btrfs_disk_key_offset(&disk_key);
4636 		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4637 		btrfs_set_item_key(leaf, &disk_key, slot);
4638 		if (slot == 0)
4639 			fixup_low_keys(fs_info, path, &disk_key, 1);
4640 	}
4641 
4642 	item = btrfs_item_nr(slot);
4643 	btrfs_set_item_size(leaf, item, new_size);
4644 	btrfs_mark_buffer_dirty(leaf);
4645 
4646 	if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4647 		btrfs_print_leaf(leaf);
4648 		BUG();
4649 	}
4650 }
4651 
4652 /*
4653  * make the item pointed to by the path bigger, data_size is the added size.
4654  */
4655 void btrfs_extend_item(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
4656 		       u32 data_size)
4657 {
4658 	int slot;
4659 	struct extent_buffer *leaf;
4660 	struct btrfs_item *item;
4661 	u32 nritems;
4662 	unsigned int data_end;
4663 	unsigned int old_data;
4664 	unsigned int old_size;
4665 	int i;
4666 	struct btrfs_map_token token;
4667 
4668 	btrfs_init_map_token(&token);
4669 
4670 	leaf = path->nodes[0];
4671 
4672 	nritems = btrfs_header_nritems(leaf);
4673 	data_end = leaf_data_end(fs_info, leaf);
4674 
4675 	if (btrfs_leaf_free_space(fs_info, leaf) < data_size) {
4676 		btrfs_print_leaf(leaf);
4677 		BUG();
4678 	}
4679 	slot = path->slots[0];
4680 	old_data = btrfs_item_end_nr(leaf, slot);
4681 
4682 	BUG_ON(slot < 0);
4683 	if (slot >= nritems) {
4684 		btrfs_print_leaf(leaf);
4685 		btrfs_crit(fs_info, "slot %d too large, nritems %d",
4686 			   slot, nritems);
4687 		BUG_ON(1);
4688 	}
4689 
4690 	/*
4691 	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4692 	 */
4693 	/* first correct the data pointers */
4694 	for (i = slot; i < nritems; i++) {
4695 		u32 ioff;
4696 		item = btrfs_item_nr(i);
4697 
4698 		ioff = btrfs_token_item_offset(leaf, item, &token);
4699 		btrfs_set_token_item_offset(leaf, item,
4700 					    ioff - data_size, &token);
4701 	}
4702 
4703 	/* shift the data */
4704 	memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4705 		      data_end - data_size, BTRFS_LEAF_DATA_OFFSET +
4706 		      data_end, old_data - data_end);
4707 
4708 	data_end = old_data;
4709 	old_size = btrfs_item_size_nr(leaf, slot);
4710 	item = btrfs_item_nr(slot);
4711 	btrfs_set_item_size(leaf, item, old_size + data_size);
4712 	btrfs_mark_buffer_dirty(leaf);
4713 
4714 	if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4715 		btrfs_print_leaf(leaf);
4716 		BUG();
4717 	}
4718 }
4719 
4720 /*
4721  * this is a helper for btrfs_insert_empty_items, the main goal here is
4722  * to save stack depth by doing the bulk of the work in a function
4723  * that doesn't call btrfs_search_slot
4724  */
4725 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4726 			    const struct btrfs_key *cpu_key, u32 *data_size,
4727 			    u32 total_data, u32 total_size, int nr)
4728 {
4729 	struct btrfs_fs_info *fs_info = root->fs_info;
4730 	struct btrfs_item *item;
4731 	int i;
4732 	u32 nritems;
4733 	unsigned int data_end;
4734 	struct btrfs_disk_key disk_key;
4735 	struct extent_buffer *leaf;
4736 	int slot;
4737 	struct btrfs_map_token token;
4738 
4739 	if (path->slots[0] == 0) {
4740 		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4741 		fixup_low_keys(fs_info, path, &disk_key, 1);
4742 	}
4743 	btrfs_unlock_up_safe(path, 1);
4744 
4745 	btrfs_init_map_token(&token);
4746 
4747 	leaf = path->nodes[0];
4748 	slot = path->slots[0];
4749 
4750 	nritems = btrfs_header_nritems(leaf);
4751 	data_end = leaf_data_end(fs_info, leaf);
4752 
4753 	if (btrfs_leaf_free_space(fs_info, leaf) < total_size) {
4754 		btrfs_print_leaf(leaf);
4755 		btrfs_crit(fs_info, "not enough freespace need %u have %d",
4756 			   total_size, btrfs_leaf_free_space(fs_info, leaf));
4757 		BUG();
4758 	}
4759 
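	/*
	 * When inserting in the middle of the leaf, shift the item
	 * headers up by 'nr' slots and the item data down by total_data
	 * bytes to open a gap for the new items.
	 */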
4760 	if (slot != nritems) {
4761 		unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4762 
4763 		if (old_data < data_end) {
4764 			btrfs_print_leaf(leaf);
4765 			btrfs_crit(fs_info, "slot %d old_data %d data_end %d",
4766 				   slot, old_data, data_end);
4767 			BUG_ON(1);
4768 		}
4769 		/*
4770 		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4771 		 */
4772 		/* first correct the data pointers */
4773 		for (i = slot; i < nritems; i++) {
4774 			u32 ioff;
4775 
4776 			item = btrfs_item_nr(i);
4777 			ioff = btrfs_token_item_offset(leaf, item, &token);
4778 			btrfs_set_token_item_offset(leaf, item,
4779 						    ioff - total_data, &token);
4780 		}
4781 		/* shift the items */
4782 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4783 			      btrfs_item_nr_offset(slot),
4784 			      (nritems - slot) * sizeof(struct btrfs_item));
4785 
4786 		/* shift the data */
4787 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4788 			      data_end - total_data, BTRFS_LEAF_DATA_OFFSET +
4789 			      data_end, old_data - data_end);
4790 		data_end = old_data;
4791 	}
4792 
4793 	/* setup the item for the new data */
4794 	for (i = 0; i < nr; i++) {
4795 		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4796 		btrfs_set_item_key(leaf, &disk_key, slot + i);
4797 		item = btrfs_item_nr(slot + i);
4798 		btrfs_set_token_item_offset(leaf, item,
4799 					    data_end - data_size[i], &token);
4800 		data_end -= data_size[i];
4801 		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4802 	}
4803 
4804 	btrfs_set_header_nritems(leaf, nritems + nr);
4805 	btrfs_mark_buffer_dirty(leaf);
4806 
4807 	if (btrfs_leaf_free_space(fs_info, leaf) < 0) {
4808 		btrfs_print_leaf(leaf);
4809 		BUG();
4810 	}
4811 }
4812 
4813 /*
4814  * Given a key and some data, insert items into the tree.
4815  * This does all the path init required, making room in the tree if needed.
4816  */
4817 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4818 			    struct btrfs_root *root,
4819 			    struct btrfs_path *path,
4820 			    const struct btrfs_key *cpu_key, u32 *data_size,
4821 			    int nr)
4822 {
4823 	int ret = 0;
4824 	int slot;
4825 	int i;
4826 	u32 total_size = 0;
4827 	u32 total_data = 0;
4828 
4829 	for (i = 0; i < nr; i++)
4830 		total_data += data_size[i];
4831 
4832 	total_size = total_data + (nr * sizeof(struct btrfs_item));
4833 	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4834 	if (ret == 0)
4835 		return -EEXIST;
4836 	if (ret < 0)
4837 		return ret;
4838 
4839 	slot = path->slots[0];
4840 	BUG_ON(slot < 0);
4841 
4842 	setup_items_for_insert(root, path, cpu_key, data_size,
4843 			       total_data, total_size, nr);
4844 	return 0;
4845 }
4846 
4847 /*
4848  * Given a key and some data, insert an item into the tree.
4849  * This does all the path init required, making room in the tree if needed.
4850  */
4851 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4852 		      const struct btrfs_key *cpu_key, void *data,
4853 		      u32 data_size)
4854 {
4855 	int ret = 0;
4856 	struct btrfs_path *path;
4857 	struct extent_buffer *leaf;
4858 	unsigned long ptr;
4859 
4860 	path = btrfs_alloc_path();
4861 	if (!path)
4862 		return -ENOMEM;
4863 	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4864 	if (!ret) {
4865 		leaf = path->nodes[0];
4866 		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4867 		write_extent_buffer(leaf, data, ptr, data_size);
4868 		btrfs_mark_buffer_dirty(leaf);
4869 	}
4870 	btrfs_free_path(path);
4871 	return ret;
4872 }
4873 
4874 /*
4875  * delete the pointer from a given node.
4876  *
4877  * the tree should have been previously balanced so the deletion does not
4878  * empty a node.
4879  */
4880 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4881 		    int level, int slot)
4882 {
4883 	struct btrfs_fs_info *fs_info = root->fs_info;
4884 	struct extent_buffer *parent = path->nodes[level];
4885 	u32 nritems;
4886 	int ret;
4887 
4888 	nritems = btrfs_header_nritems(parent);
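	/*
	 * Unless we are removing the last pointer, shift the key/ptr
	 * pairs after 'slot' one position to the left to close the gap.
	 */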
4889 	if (slot != nritems - 1) {
4890 		if (level)
4891 			tree_mod_log_eb_move(fs_info, parent, slot,
4892 					     slot + 1, nritems - slot - 1);
4893 		memmove_extent_buffer(parent,
4894 			      btrfs_node_key_ptr_offset(slot),
4895 			      btrfs_node_key_ptr_offset(slot + 1),
4896 			      sizeof(struct btrfs_key_ptr) *
4897 			      (nritems - slot - 1));
4898 	} else if (level) {
4899 		ret = tree_mod_log_insert_key(fs_info, parent, slot,
4900 					      MOD_LOG_KEY_REMOVE, GFP_NOFS);
4901 		BUG_ON(ret < 0);
4902 	}
4903 
4904 	nritems--;
4905 	btrfs_set_header_nritems(parent, nritems);
4906 	if (nritems == 0 && parent == root->node) {
4907 		BUG_ON(btrfs_header_level(root->node) != 1);
4908 		/* just turn the root into a leaf and break */
4909 		btrfs_set_header_level(root->node, 0);
4910 	} else if (slot == 0) {
4911 		struct btrfs_disk_key disk_key;
4912 
4913 		btrfs_node_key(parent, &disk_key, 0);
4914 		fixup_low_keys(fs_info, path, &disk_key, level + 1);
4915 	}
4916 	btrfs_mark_buffer_dirty(parent);
4917 }
4918 
4919 /*
4920  * a helper function to delete the leaf pointed to by path->slots[1] and
4921  * path->nodes[1].
4922  *
4923  * This deletes the pointer in path->nodes[1] and frees the leaf
4924  * block extent.  zero is returned if it all worked out, < 0 otherwise.
4925  *
4926  * The path must have already been setup for deleting the leaf, including
4927  * all the proper balancing.  path->nodes[1] must be locked.
4928  */
4929 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4930 				    struct btrfs_root *root,
4931 				    struct btrfs_path *path,
4932 				    struct extent_buffer *leaf)
4933 {
4934 	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4935 	del_ptr(root, path, 1, path->slots[1]);
4936 
4937 	/*
4938 	 * btrfs_free_extent is expensive, we want to make sure we
4939 	 * aren't holding any locks when we call it
4940 	 */
4941 	btrfs_unlock_up_safe(path, 0);
4942 
4943 	root_sub_used(root, leaf->len);
4944 
4945 	extent_buffer_get(leaf);
4946 	btrfs_free_tree_block(trans, root, leaf, 0, 1);
4947 	free_extent_buffer_stale(leaf);
4948 }

4949 /*
4950  * delete the item at the leaf level in path.  If that empties
4951  * the leaf, remove it from the tree
4952  */
4953 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4954 		    struct btrfs_path *path, int slot, int nr)
4955 {
4956 	struct btrfs_fs_info *fs_info = root->fs_info;
4957 	struct extent_buffer *leaf;
4958 	struct btrfs_item *item;
4959 	u32 last_off;
4960 	u32 dsize = 0;
4961 	int ret = 0;
4962 	int wret;
4963 	int i;
4964 	u32 nritems;
4965 	struct btrfs_map_token token;
4966 
4967 	btrfs_init_map_token(&token);
4968 
4969 	leaf = path->nodes[0];
4970 	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4971 
4972 	for (i = 0; i < nr; i++)
4973 		dsize += btrfs_item_size_nr(leaf, slot + i);
4974 
4975 	nritems = btrfs_header_nritems(leaf);
4976 
4977 	if (slot + nr != nritems) {
4978 		int data_end = leaf_data_end(fs_info, leaf);
4979 
4980 		memmove_extent_buffer(leaf, BTRFS_LEAF_DATA_OFFSET +
4981 			      data_end + dsize,
4982 			      BTRFS_LEAF_DATA_OFFSET + data_end,
4983 			      last_off - data_end);
4984 
4985 		for (i = slot + nr; i < nritems; i++) {
4986 			u32 ioff;
4987 
4988 			item = btrfs_item_nr(i);
4989 			ioff = btrfs_token_item_offset(leaf, item, &token);
4990 			btrfs_set_token_item_offset(leaf, item,
4991 						    ioff + dsize, &token);
4992 		}
4993 
4994 		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4995 			      btrfs_item_nr_offset(slot + nr),
4996 			      sizeof(struct btrfs_item) *
4997 			      (nritems - slot - nr));
4998 	}
4999 	btrfs_set_header_nritems(leaf, nritems - nr);
5000 	nritems -= nr;
5001 
5002 	/* delete the leaf if we've emptied it */
5003 	if (nritems == 0) {
5004 		if (leaf == root->node) {
5005 			btrfs_set_header_level(leaf, 0);
5006 		} else {
5007 			btrfs_set_path_blocking(path);
5008 			clean_tree_block(fs_info, leaf);
5009 			btrfs_del_leaf(trans, root, path, leaf);
5010 		}
5011 	} else {
5012 		int used = leaf_space_used(leaf, 0, nritems);
5013 		if (slot == 0) {
5014 			struct btrfs_disk_key disk_key;
5015 
5016 			btrfs_item_key(leaf, &disk_key, 0);
5017 			fixup_low_keys(fs_info, path, &disk_key, 1);
5018 		}
5019 
5020 		/* delete the leaf if it is mostly empty */
5021 		if (used < BTRFS_LEAF_DATA_SIZE(fs_info) / 3) {
5022 			/* push_leaf_left fixes the path.
5023 			 * make sure the path still points to our leaf
5024 			 * for possible call to del_ptr below
5025 			 */
5026 			slot = path->slots[1];
5027 			extent_buffer_get(leaf);
5028 
5029 			btrfs_set_path_blocking(path);
5030 			wret = push_leaf_left(trans, root, path, 1, 1,
5031 					      1, (u32)-1);
5032 			if (wret < 0 && wret != -ENOSPC)
5033 				ret = wret;
5034 
5035 			if (path->nodes[0] == leaf &&
5036 			    btrfs_header_nritems(leaf)) {
5037 				wret = push_leaf_right(trans, root, path, 1,
5038 						       1, 1, 0);
5039 				if (wret < 0 && wret != -ENOSPC)
5040 					ret = wret;
5041 			}
5042 
5043 			if (btrfs_header_nritems(leaf) == 0) {
5044 				path->slots[1] = slot;
5045 				btrfs_del_leaf(trans, root, path, leaf);
5046 				free_extent_buffer(leaf);
5047 				ret = 0;
5048 			} else {
5049 				/* if we're still in the path, make sure
5050 				 * we're dirty.  Otherwise, one of the
5051 				 * push_leaf functions must have already
5052 				 * dirtied this buffer
5053 				 */
5054 				if (path->nodes[0] == leaf)
5055 					btrfs_mark_buffer_dirty(leaf);
5056 				free_extent_buffer(leaf);
5057 			}
5058 		} else {
5059 			btrfs_mark_buffer_dirty(leaf);
5060 		}
5061 	}
5062 	return ret;
5063 }
5064 
5065 /*
5066  * search the tree again to find a leaf with lesser keys
5067  * returns 0 if it found something or 1 if there are no lesser leaves.
5068  * returns < 0 on io errors.
5069  *
5070  * This may release the path, and so you may lose any locks held at the
5071  * time you call it.
5072  */
5073 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5074 {
5075 	struct btrfs_key key;
5076 	struct btrfs_disk_key found_key;
5077 	int ret;
5078 
5079 	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5080 
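	/*
	 * Build the largest key that sorts strictly before the first key
	 * of this leaf, then search for it; if we are already at the
	 * smallest possible key there is no previous leaf.
	 */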
5081 	if (key.offset > 0) {
5082 		key.offset--;
5083 	} else if (key.type > 0) {
5084 		key.type--;
5085 		key.offset = (u64)-1;
5086 	} else if (key.objectid > 0) {
5087 		key.objectid--;
5088 		key.type = (u8)-1;
5089 		key.offset = (u64)-1;
5090 	} else {
5091 		return 1;
5092 	}
5093 
5094 	btrfs_release_path(path);
5095 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5096 	if (ret < 0)
5097 		return ret;
5098 	btrfs_item_key(path->nodes[0], &found_key, 0);
5099 	ret = comp_keys(&found_key, &key);
5100 	/*
5101 	 * We might have had an item with the previous key in the tree right
5102 	 * before we released our path. And after we released our path, that
5103 	 * item might have been pushed to the first slot (0) of the leaf we
5104 	 * were holding due to a tree balance. Alternatively, an item with the
5105 	 * previous key can exist as the only element of a leaf (big fat item).
5106 	 * Therefore account for these 2 cases, so that our callers (like
5107 	 * btrfs_previous_item) don't miss an existing item with a key matching
5108 	 * the previous key we computed above.
5109 	 */
5110 	if (ret <= 0)
5111 		return 0;
5112 	return 1;
5113 }
5114 
5115 /*
5116  * A helper function to walk down the tree starting at min_key, and looking
5117  * for nodes or leaves that have a minimum transaction id.
5118  * This is used by the btree defrag code, and tree logging
5119  *
5120  * This does not cow, but it does stuff the starting key it finds back
5121  * into min_key, so you can call btrfs_search_slot with cow=1 on the
5122  * key and get a writable path.
5123  *
5124  * This does lock as it descends, and path->keep_locks should be set
5125  * to 1 by the caller.
5126  *
5127  * This honors path->lowest_level to prevent descent past a given level
5128  * of the tree.
5129  *
5130  * min_trans indicates the oldest transaction that you are interested
5131  * in walking through.  Any nodes or leaves older than min_trans are
5132  * skipped over (without reading them).
5133  *
5134  * returns zero if something useful was found, < 0 on error and 1 if there
5135  * was nothing in the tree that matched the search criteria.
5136  */
5137 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5138 			 struct btrfs_path *path,
5139 			 u64 min_trans)
5140 {
5141 	struct btrfs_fs_info *fs_info = root->fs_info;
5142 	struct extent_buffer *cur;
5143 	struct btrfs_key found_key;
5144 	int slot;
5145 	int sret;
5146 	u32 nritems;
5147 	int level;
5148 	int ret = 1;
5149 	int keep_locks = path->keep_locks;
5150 
5151 	path->keep_locks = 1;
5152 again:
5153 	cur = btrfs_read_lock_root_node(root);
5154 	level = btrfs_header_level(cur);
5155 	WARN_ON(path->nodes[level]);
5156 	path->nodes[level] = cur;
5157 	path->locks[level] = BTRFS_READ_LOCK;
5158 
5159 	if (btrfs_header_generation(cur) < min_trans) {
5160 		ret = 1;
5161 		goto out;
5162 	}
5163 	while (1) {
5164 		nritems = btrfs_header_nritems(cur);
5165 		level = btrfs_header_level(cur);
5166 		sret = bin_search(cur, min_key, level, &slot);
5167 
5168 		/* at the lowest level, we're done, set up the path and exit */
5169 		if (level == path->lowest_level) {
5170 			if (slot >= nritems)
5171 				goto find_next_key;
5172 			ret = 0;
5173 			path->slots[level] = slot;
5174 			btrfs_item_key_to_cpu(cur, &found_key, slot);
5175 			goto out;
5176 		}
5177 		if (sret && slot > 0)
5178 			slot--;
5179 		/*
5180 		 * check this node pointer against the min_trans parameter.
5181 		 * If it is too old, skip to the next one.
5182 		 */
5183 		while (slot < nritems) {
5184 			u64 gen;
5185 
5186 			gen = btrfs_node_ptr_generation(cur, slot);
5187 			if (gen < min_trans) {
5188 				slot++;
5189 				continue;
5190 			}
5191 			break;
5192 		}
5193 find_next_key:
5194 		/*
5195 		 * we didn't find a candidate key in this node, walk forward
5196 		 * and find another one
5197 		 */
5198 		if (slot >= nritems) {
5199 			path->slots[level] = slot;
5200 			btrfs_set_path_blocking(path);
5201 			sret = btrfs_find_next_key(root, path, min_key, level,
5202 						  min_trans);
5203 			if (sret == 0) {
5204 				btrfs_release_path(path);
5205 				goto again;
5206 			} else {
5207 				goto out;
5208 			}
5209 		}
5210 		/* save our key so we can return it to the caller */
5211 		btrfs_node_key_to_cpu(cur, &found_key, slot);
5212 		path->slots[level] = slot;
5213 		if (level == path->lowest_level) {
5214 			ret = 0;
5215 			goto out;
5216 		}
5217 		btrfs_set_path_blocking(path);
5218 		cur = read_node_slot(fs_info, cur, slot);
5219 		if (IS_ERR(cur)) {
5220 			ret = PTR_ERR(cur);
5221 			goto out;
5222 		}
5223 
5224 		btrfs_tree_read_lock(cur);
5225 
5226 		path->locks[level - 1] = BTRFS_READ_LOCK;
5227 		path->nodes[level - 1] = cur;
5228 		unlock_up(path, level, 1, 0, NULL);
5229 		btrfs_clear_path_blocking(path, NULL, 0);
5230 	}
5231 out:
5232 	path->keep_locks = keep_locks;
5233 	if (ret == 0) {
5234 		btrfs_unlock_up_safe(path, path->lowest_level + 1);
5235 		btrfs_set_path_blocking(path);
5236 		memcpy(min_key, &found_key, sizeof(found_key));
5237 	}
5238 	return ret;
5239 }
5240 
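/*
 * A hedged usage sketch (hypothetical caller, not in the original file):
 * visiting every leaf whose generation is at least min_trans, roughly the
 * way the defrag and tree-log code drive btrfs_search_forward().
 */
static int walk_newer_leaves_sketch(struct btrfs_root *root, u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	min_key.objectid = 0;
	min_key.type = 0;
	min_key.offset = 0;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, path, min_trans);
		if (ret) {		/* 1: nothing newer, < 0: error */
			if (ret == 1)
				ret = 0;
			break;
		}
		/* ... process the leaf at path->nodes[0] here ... */
		btrfs_release_path(path);

		/* advance min_key past the key we were just given */
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
		} else if (min_key.type < (u8)-1) {
			min_key.type++;
			min_key.offset = 0;
		} else if (min_key.objectid < (u64)-1) {
			min_key.objectid++;
			min_key.type = 0;
			min_key.offset = 0;
		} else {
			break;		/* wrapped: that was the last key */
		}
	}
	btrfs_free_path(path);
	return ret;
}
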
5241 static int tree_move_down(struct btrfs_fs_info *fs_info,
5242 			   struct btrfs_path *path,
5243 			   int *level)
5244 {
5245 	struct extent_buffer *eb;
5246 
5247 	BUG_ON(*level == 0);
5248 	eb = read_node_slot(fs_info, path->nodes[*level], path->slots[*level]);
5249 	if (IS_ERR(eb))
5250 		return PTR_ERR(eb);
5251 
5252 	path->nodes[*level - 1] = eb;
5253 	path->slots[*level - 1] = 0;
5254 	(*level)--;
5255 	return 0;
5256 }
5257 
5258 static int tree_move_next_or_upnext(struct btrfs_path *path,
5259 				    int *level, int root_level)
5260 {
5261 	int ret = 0;
5262 	int nritems;
5263 	nritems = btrfs_header_nritems(path->nodes[*level]);
5264 
5265 	path->slots[*level]++;
5266 
5267 	while (path->slots[*level] >= nritems) {
5268 		if (*level == root_level)
5269 			return -1;
5270 
5271 		/* move upnext */
5272 		path->slots[*level] = 0;
5273 		free_extent_buffer(path->nodes[*level]);
5274 		path->nodes[*level] = NULL;
5275 		(*level)++;
5276 		path->slots[*level]++;
5277 
5278 		nritems = btrfs_header_nritems(path->nodes[*level]);
5279 		ret = 1;
5280 	}
5281 	return ret;
5282 }
5283 
5284 /*
5285  * Returns 1 if it had to move up and next, 0 if it moved only next or down,
5286  * -1 when the end of the tree was reached, and any other negative value on
 * a read error.
5287  */
5288 static int tree_advance(struct btrfs_fs_info *fs_info,
5289 			struct btrfs_path *path,
5290 			int *level, int root_level,
5291 			int allow_down,
5292 			struct btrfs_key *key)
5293 {
5294 	int ret;
5295 
5296 	if (*level == 0 || !allow_down) {
5297 		ret = tree_move_next_or_upnext(path, level, root_level);
5298 	} else {
5299 		ret = tree_move_down(fs_info, path, level);
5300 	}
5301 	if (ret >= 0) {
5302 		if (*level == 0)
5303 			btrfs_item_key_to_cpu(path->nodes[*level], key,
5304 					path->slots[*level]);
5305 		else
5306 			btrfs_node_key_to_cpu(path->nodes[*level], key,
5307 					path->slots[*level]);
5308 	}
5309 	return ret;
5310 }
5311 
5312 static int tree_compare_item(struct btrfs_path *left_path,
5313 			     struct btrfs_path *right_path,
5314 			     char *tmp_buf)
5315 {
5316 	int cmp;
5317 	int len1, len2;
5318 	unsigned long off1, off2;
5319 
5320 	len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5321 	len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5322 	if (len1 != len2)
5323 		return 1;
5324 
5325 	off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5326 	off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5327 				right_path->slots[0]);
5328 
5329 	read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5330 
5331 	cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5332 	if (cmp)
5333 		return 1;
5334 	return 0;
5335 }
5336 
5337 #define ADVANCE 1
5338 #define ADVANCE_ONLY_NEXT -1
5339 
5340 /*
5341  * This function compares two trees and calls the provided callback for
5342  * every changed/new/deleted item it finds.
5343  * If shared tree blocks are encountered, whole subtrees are skipped, making
5344  * the compare pretty fast on snapshotted subvolumes.
5345  *
5346  * This currently works on commit roots only. As commit roots are read only,
5347  * we don't do any locking. The commit roots are protected with transactions.
5348  * Transactions are ended and rejoined when a commit is tried in between.
5349  *
5350  * This function checks for modifications done to the trees while comparing.
5351  * If it detects a change, it aborts immediately.
5352  */
5353 int btrfs_compare_trees(struct btrfs_root *left_root,
5354 			struct btrfs_root *right_root,
5355 			btrfs_changed_cb_t changed_cb, void *ctx)
5356 {
5357 	struct btrfs_fs_info *fs_info = left_root->fs_info;
5358 	int ret;
5359 	int cmp;
5360 	struct btrfs_path *left_path = NULL;
5361 	struct btrfs_path *right_path = NULL;
5362 	struct btrfs_key left_key;
5363 	struct btrfs_key right_key;
5364 	char *tmp_buf = NULL;
5365 	int left_root_level;
5366 	int right_root_level;
5367 	int left_level;
5368 	int right_level;
5369 	int left_end_reached;
5370 	int right_end_reached;
5371 	int advance_left;
5372 	int advance_right;
5373 	u64 left_blockptr;
5374 	u64 right_blockptr;
5375 	u64 left_gen;
5376 	u64 right_gen;
5377 
5378 	left_path = btrfs_alloc_path();
5379 	if (!left_path) {
5380 		ret = -ENOMEM;
5381 		goto out;
5382 	}
5383 	right_path = btrfs_alloc_path();
5384 	if (!right_path) {
5385 		ret = -ENOMEM;
5386 		goto out;
5387 	}
5388 
5389 	tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
5390 	if (!tmp_buf) {
5391 		ret = -ENOMEM;
5392 		goto out;
5393 	}
5394 
5395 	left_path->search_commit_root = 1;
5396 	left_path->skip_locking = 1;
5397 	right_path->search_commit_root = 1;
5398 	right_path->skip_locking = 1;
5399 
5400 	/*
5401 	 * Strategy: Go to the first items of both trees. Then do
5402 	 *
5403 	 * If both trees are at level 0
5404 	 *   Compare keys of current items
5405 	 *     If left < right treat left item as new, advance left tree
5406 	 *       and repeat
5407 	 *     If left > right treat right item as deleted, advance right tree
5408 	 *       and repeat
5409 	 *     If left == right do a deep compare of items, treat as changed if
5410 	 *       needed, advance both trees and repeat
5411 	 * If both trees are at the same level but not at level 0
5412 	 *   Compare keys of current nodes/leaves
5413 	 *     If left < right advance left tree and repeat
5414 	 *     If left > right advance right tree and repeat
5415 	 *     If left == right compare blockptrs of the next nodes/leaves
5416 	 *       If they match advance both trees but stay at the same level
5417 	 *         and repeat
5418 	 *       If they don't match advance both trees while allowing to go
5419 	 *         deeper and repeat
5420 	 * If tree levels are different
5421 	 *   Advance the tree that needs it and repeat
5422 	 *
5423 	 * Advancing a tree means:
5424 	 *   If we are at level 0, try to go to the next slot. If that's not
5425 	 *   possible, go one level up and repeat. Stop when we find a level
5426 	 *   where we can go to the next slot. We may at this point be on a
5427 	 *   node or a leaf.
5428 	 *
5429 	 *   If we are not at level 0 and not on shared tree blocks, go one
5430 	 *   level deeper.
5431 	 *
5432 	 *   If we are not at level 0 and on shared tree blocks, go one slot to
5433 	 *   the right if possible or go up and right.
5434 	 */
5435 
5436 	down_read(&fs_info->commit_root_sem);
5437 	left_level = btrfs_header_level(left_root->commit_root);
5438 	left_root_level = left_level;
5439 	left_path->nodes[left_level] =
5440 			btrfs_clone_extent_buffer(left_root->commit_root);
5441 	if (!left_path->nodes[left_level]) {
5442 		up_read(&fs_info->commit_root_sem);
5443 		ret = -ENOMEM;
5444 		goto out;
5445 	}
5446 	extent_buffer_get(left_path->nodes[left_level]);
5447 
5448 	right_level = btrfs_header_level(right_root->commit_root);
5449 	right_root_level = right_level;
5450 	right_path->nodes[right_level] =
5451 			btrfs_clone_extent_buffer(right_root->commit_root);
5452 	if (!right_path->nodes[right_level]) {
5453 		up_read(&fs_info->commit_root_sem);
5454 		ret = -ENOMEM;
5455 		goto out;
5456 	}
5457 	extent_buffer_get(right_path->nodes[right_level]);
5458 	up_read(&fs_info->commit_root_sem);
5459 
5460 	if (left_level == 0)
5461 		btrfs_item_key_to_cpu(left_path->nodes[left_level],
5462 				&left_key, left_path->slots[left_level]);
5463 	else
5464 		btrfs_node_key_to_cpu(left_path->nodes[left_level],
5465 				&left_key, left_path->slots[left_level]);
5466 	if (right_level == 0)
5467 		btrfs_item_key_to_cpu(right_path->nodes[right_level],
5468 				&right_key, right_path->slots[right_level]);
5469 	else
5470 		btrfs_node_key_to_cpu(right_path->nodes[right_level],
5471 				&right_key, right_path->slots[right_level]);
5472 
5473 	left_end_reached = right_end_reached = 0;
5474 	advance_left = advance_right = 0;
5475 
5476 	while (1) {
5477 		cond_resched();
5478 		if (advance_left && !left_end_reached) {
5479 			ret = tree_advance(fs_info, left_path, &left_level,
5480 					left_root_level,
5481 					advance_left != ADVANCE_ONLY_NEXT,
5482 					&left_key);
5483 			if (ret == -1)
5484 				left_end_reached = ADVANCE;
5485 			else if (ret < 0)
5486 				goto out;
5487 			advance_left = 0;
5488 		}
5489 		if (advance_right && !right_end_reached) {
5490 			ret = tree_advance(fs_info, right_path, &right_level,
5491 					right_root_level,
5492 					advance_right != ADVANCE_ONLY_NEXT,
5493 					&right_key);
5494 			if (ret == -1)
5495 				right_end_reached = ADVANCE;
5496 			else if (ret < 0)
5497 				goto out;
5498 			advance_right = 0;
5499 		}
5500 
5501 		if (left_end_reached && right_end_reached) {
5502 			ret = 0;
5503 			goto out;
5504 		} else if (left_end_reached) {
5505 			if (right_level == 0) {
5506 				ret = changed_cb(left_root, right_root,
5507 						left_path, right_path,
5508 						&right_key,
5509 						BTRFS_COMPARE_TREE_DELETED,
5510 						ctx);
5511 				if (ret < 0)
5512 					goto out;
5513 			}
5514 			advance_right = ADVANCE;
5515 			continue;
5516 		} else if (right_end_reached) {
5517 			if (left_level == 0) {
5518 				ret = changed_cb(left_root, right_root,
5519 						left_path, right_path,
5520 						&left_key,
5521 						BTRFS_COMPARE_TREE_NEW,
5522 						ctx);
5523 				if (ret < 0)
5524 					goto out;
5525 			}
5526 			advance_left = ADVANCE;
5527 			continue;
5528 		}
5529 
5530 		if (left_level == 0 && right_level == 0) {
5531 			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5532 			if (cmp < 0) {
5533 				ret = changed_cb(left_root, right_root,
5534 						left_path, right_path,
5535 						&left_key,
5536 						BTRFS_COMPARE_TREE_NEW,
5537 						ctx);
5538 				if (ret < 0)
5539 					goto out;
5540 				advance_left = ADVANCE;
5541 			} else if (cmp > 0) {
5542 				ret = changed_cb(left_root, right_root,
5543 						left_path, right_path,
5544 						&right_key,
5545 						BTRFS_COMPARE_TREE_DELETED,
5546 						ctx);
5547 				if (ret < 0)
5548 					goto out;
5549 				advance_right = ADVANCE;
5550 			} else {
5551 				enum btrfs_compare_tree_result result;
5552 
5553 				WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5554 				ret = tree_compare_item(left_path, right_path,
5555 							tmp_buf);
5556 				if (ret)
5557 					result = BTRFS_COMPARE_TREE_CHANGED;
5558 				else
5559 					result = BTRFS_COMPARE_TREE_SAME;
5560 				ret = changed_cb(left_root, right_root,
5561 						 left_path, right_path,
5562 						 &left_key, result, ctx);
5563 				if (ret < 0)
5564 					goto out;
5565 				advance_left = ADVANCE;
5566 				advance_right = ADVANCE;
5567 			}
5568 		} else if (left_level == right_level) {
5569 			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5570 			if (cmp < 0) {
5571 				advance_left = ADVANCE;
5572 			} else if (cmp > 0) {
5573 				advance_right = ADVANCE;
5574 			} else {
5575 				left_blockptr = btrfs_node_blockptr(
5576 						left_path->nodes[left_level],
5577 						left_path->slots[left_level]);
5578 				right_blockptr = btrfs_node_blockptr(
5579 						right_path->nodes[right_level],
5580 						right_path->slots[right_level]);
5581 				left_gen = btrfs_node_ptr_generation(
5582 						left_path->nodes[left_level],
5583 						left_path->slots[left_level]);
5584 				right_gen = btrfs_node_ptr_generation(
5585 						right_path->nodes[right_level],
5586 						right_path->slots[right_level]);
5587 				if (left_blockptr == right_blockptr &&
5588 				    left_gen == right_gen) {
5589 					/*
5590 					 * As we're on a shared block, don't
5591 					 * descend any deeper.
5592 					 */
5593 					advance_left = ADVANCE_ONLY_NEXT;
5594 					advance_right = ADVANCE_ONLY_NEXT;
5595 				} else {
5596 					advance_left = ADVANCE;
5597 					advance_right = ADVANCE;
5598 				}
5599 			}
5600 		} else if (left_level < right_level) {
5601 			advance_right = ADVANCE;
5602 		} else {
5603 			advance_left = ADVANCE;
5604 		}
5605 	}
5606 
5607 out:
5608 	btrfs_free_path(left_path);
5609 	btrfs_free_path(right_path);
5610 	kvfree(tmp_buf);
5611 	return ret;
5612 }
5613 
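/*
 * A hedged usage sketch (hypothetical callback and wrapper, not in the
 * original file): counting the differences between two snapshots, in the
 * same shape the send code drives btrfs_compare_trees().  The left root is
 * the "new" tree and the right root the "old" one, matching the NEW/DELETED
 * result semantics used above.
 */
static int count_changed_cb(struct btrfs_root *left_root,
			    struct btrfs_root *right_root,
			    struct btrfs_path *left_path,
			    struct btrfs_path *right_path,
			    struct btrfs_key *key,
			    enum btrfs_compare_tree_result result,
			    void *ctx)
{
	u64 *count = ctx;

	if (result != BTRFS_COMPARE_TREE_SAME)
		(*count)++;
	return 0;	/* a negative return aborts the compare */
}

static int count_tree_changes_sketch(struct btrfs_root *new_root,
				     struct btrfs_root *old_root, u64 *count)
{
	*count = 0;
	return btrfs_compare_trees(new_root, old_root, count_changed_cb,
				   count);
}
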
5614 /*
5615  * this is similar to btrfs_next_leaf, but does not try to preserve
5616  * and fix up the path.  It looks for and returns the next key in the
5617  * tree based on the current path and the min_trans parameters.
5618  *
5619  * 0 is returned if another key is found, < 0 if there are any errors
5620  * and 1 is returned if there are no higher keys in the tree
5621  *
5622  * path->keep_locks should be set to 1 on the search made before
5623  * calling this function.
5624  */
5625 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5626 			struct btrfs_key *key, int level, u64 min_trans)
5627 {
5628 	int slot;
5629 	struct extent_buffer *c;
5630 
5631 	WARN_ON(!path->keep_locks);
5632 	while (level < BTRFS_MAX_LEVEL) {
5633 		if (!path->nodes[level])
5634 			return 1;
5635 
5636 		slot = path->slots[level] + 1;
5637 		c = path->nodes[level];
5638 next:
5639 		if (slot >= btrfs_header_nritems(c)) {
5640 			int ret;
5641 			int orig_lowest;
5642 			struct btrfs_key cur_key;
5643 			if (level + 1 >= BTRFS_MAX_LEVEL ||
5644 			    !path->nodes[level + 1])
5645 				return 1;
5646 
5647 			if (path->locks[level + 1]) {
5648 				level++;
5649 				continue;
5650 			}
5651 
5652 			slot = btrfs_header_nritems(c) - 1;
5653 			if (level == 0)
5654 				btrfs_item_key_to_cpu(c, &cur_key, slot);
5655 			else
5656 				btrfs_node_key_to_cpu(c, &cur_key, slot);
5657 
5658 			orig_lowest = path->lowest_level;
5659 			btrfs_release_path(path);
5660 			path->lowest_level = level;
5661 			ret = btrfs_search_slot(NULL, root, &cur_key, path,
5662 						0, 0);
5663 			path->lowest_level = orig_lowest;
5664 			if (ret < 0)
5665 				return ret;
5666 
5667 			c = path->nodes[level];
5668 			slot = path->slots[level];
5669 			if (ret == 0)
5670 				slot++;
5671 			goto next;
5672 		}
5673 
5674 		if (level == 0)
5675 			btrfs_item_key_to_cpu(c, key, slot);
5676 		else {
5677 			u64 gen = btrfs_node_ptr_generation(c, slot);
5678 
5679 			if (gen < min_trans) {
5680 				slot++;
5681 				goto next;
5682 			}
5683 			btrfs_node_key_to_cpu(c, key, slot);
5684 		}
5685 		return 0;
5686 	}
5687 	return 1;
5688 }
5689 
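/*
 * A hedged usage sketch (hypothetical helper, not in the original file):
 * peeking at the key that follows the slot a search ended on, the way the
 * defrag code records a restart position.  The search that builds the path
 * must keep its locks, since btrfs_find_next_key() walks the parent nodes
 * still held in the path.
 */
static int peek_next_key_sketch(struct btrfs_root *root,
				const struct btrfs_key *key,
				struct btrfs_key *next_key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->keep_locks = 1;
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret >= 0)
		/* level 0, min_trans 0: no generation filtering */
		ret = btrfs_find_next_key(root, path, next_key, 0, 0);
	btrfs_free_path(path);
	return ret;
}
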
5690 /*
5691  * search the tree again to find a leaf with greater keys
5692  * returns 0 if it found something or 1 if there are no greater leaves.
5693  * returns < 0 on io errors.
5694  */
5695 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5696 {
5697 	return btrfs_next_old_leaf(root, path, 0);
5698 }
5699 
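/*
 * A hedged usage sketch (hypothetical walker, not in the original file):
 * the canonical forward iteration over a whole tree, advancing with
 * btrfs_next_leaf() whenever the current leaf is exhausted.
 */
static int walk_all_items_sketch(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = 0;		/* smallest possible key */
	key.type = 0;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* 1: no greater leaf, < 0: error */
				break;
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		/* ... process the item at (leaf, path->slots[0]) ... */
		path->slots[0]++;
	}
	if (ret > 0)
		ret = 0;		/* running off the end is not an error */
out:
	btrfs_free_path(path);
	return ret;
}
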
5700 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5701 			u64 time_seq)
5702 {
5703 	int slot;
5704 	int level;
5705 	struct extent_buffer *c;
5706 	struct extent_buffer *next;
5707 	struct btrfs_key key;
5708 	u32 nritems;
5709 	int ret;
5710 	int old_spinning = path->leave_spinning;
5711 	int next_rw_lock = 0;
5712 
5713 	nritems = btrfs_header_nritems(path->nodes[0]);
5714 	if (nritems == 0)
5715 		return 1;
5716 
5717 	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5718 again:
5719 	level = 1;
5720 	next = NULL;
5721 	next_rw_lock = 0;
5722 	btrfs_release_path(path);
5723 
5724 	path->keep_locks = 1;
5725 	path->leave_spinning = 1;
5726 
5727 	if (time_seq)
5728 		ret = btrfs_search_old_slot(root, &key, path, time_seq);
5729 	else
5730 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5731 	path->keep_locks = 0;
5732 
5733 	if (ret < 0)
5734 		return ret;
5735 
5736 	nritems = btrfs_header_nritems(path->nodes[0]);
5737 	/*
5738 	 * by releasing the path above we dropped all our locks.  A balance
5739 	 * could have added more items next to the key that used to be
5740 	 * at the very end of the block.  So, check again here and
5741 	 * advance the path if there are now more items available.
5742 	 */
5743 	if (nritems > 0 && path->slots[0] < nritems - 1) {
5744 		if (ret == 0)
5745 			path->slots[0]++;
5746 		ret = 0;
5747 		goto done;
5748 	}
5749 	/*
5750 	 * So the above check misses one case:
5751 	 * - after releasing the path above, someone has removed the item that
5752 	 *   used to be at the very end of the block, and balance between leaves
5753 	 *   gets another one with a bigger key.offset to replace it.
5754 	 *
5755 	 * This one should be returned as well, or we can get leaf corruption
5756 	 * later (esp. in __btrfs_drop_extents()).
5757 	 *
5758 	 * A bit more explanation about this check:
5759 	 * with ret > 0, the key wasn't found, and the path points to the slot
5760 	 * where it would be inserted, so the item at path->slots[0] must be the
5761 	 * bigger one.
5762 	 */
5763 	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5764 		ret = 0;
5765 		goto done;
5766 	}
5767 
5768 	while (level < BTRFS_MAX_LEVEL) {
5769 		if (!path->nodes[level]) {
5770 			ret = 1;
5771 			goto done;
5772 		}
5773 
5774 		slot = path->slots[level] + 1;
5775 		c = path->nodes[level];
5776 		if (slot >= btrfs_header_nritems(c)) {
5777 			level++;
5778 			if (level == BTRFS_MAX_LEVEL) {
5779 				ret = 1;
5780 				goto done;
5781 			}
5782 			continue;
5783 		}
5784 
5785 		if (next) {
5786 			btrfs_tree_unlock_rw(next, next_rw_lock);
5787 			free_extent_buffer(next);
5788 		}
5789 
5790 		next = c;
5791 		next_rw_lock = path->locks[level];
5792 		ret = read_block_for_search(root, path, &next, level,
5793 					    slot, &key);
5794 		if (ret == -EAGAIN)
5795 			goto again;
5796 
5797 		if (ret < 0) {
5798 			btrfs_release_path(path);
5799 			goto done;
5800 		}
5801 
5802 		if (!path->skip_locking) {
5803 			ret = btrfs_try_tree_read_lock(next);
5804 			if (!ret && time_seq) {
5805 				/*
5806 				 * If we don't get the lock, we may be racing
5807 				 * with push_leaf_left, which holds that lock
5808 				 * while itself waiting for the leaf we've
5809 				 * currently locked. To solve this, we give up
5810 				 * our lock and cycle.
5811 				 */
5812 				free_extent_buffer(next);
5813 				btrfs_release_path(path);
5814 				cond_resched();
5815 				goto again;
5816 			}
5817 			if (!ret) {
5818 				btrfs_set_path_blocking(path);
5819 				btrfs_tree_read_lock(next);
5820 				btrfs_clear_path_blocking(path, next,
5821 							  BTRFS_READ_LOCK);
5822 			}
5823 			next_rw_lock = BTRFS_READ_LOCK;
5824 		}
5825 		break;
5826 	}
5827 	path->slots[level] = slot;
5828 	while (1) {
5829 		level--;
5830 		c = path->nodes[level];
5831 		if (path->locks[level])
5832 			btrfs_tree_unlock_rw(c, path->locks[level]);
5833 
5834 		free_extent_buffer(c);
5835 		path->nodes[level] = next;
5836 		path->slots[level] = 0;
5837 		if (!path->skip_locking)
5838 			path->locks[level] = next_rw_lock;
5839 		if (!level)
5840 			break;
5841 
5842 		ret = read_block_for_search(root, path, &next, level,
5843 					    0, &key);
5844 		if (ret == -EAGAIN)
5845 			goto again;
5846 
5847 		if (ret < 0) {
5848 			btrfs_release_path(path);
5849 			goto done;
5850 		}
5851 
5852 		if (!path->skip_locking) {
5853 			ret = btrfs_try_tree_read_lock(next);
5854 			if (!ret) {
5855 				btrfs_set_path_blocking(path);
5856 				btrfs_tree_read_lock(next);
5857 				btrfs_clear_path_blocking(path, next,
5858 							  BTRFS_READ_LOCK);
5859 			}
5860 			next_rw_lock = BTRFS_READ_LOCK;
5861 		}
5862 	}
5863 	ret = 0;
5864 done:
5865 	unlock_up(path, 0, 1, 0, NULL);
5866 	path->leave_spinning = old_spinning;
5867 	if (!old_spinning)
5868 		btrfs_set_path_blocking(path);
5869 
5870 	return ret;
5871 }
5872 
5873 /*
5874  * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5875  * searching until it gets past min_objectid or finds an item of 'type'
5876  * searching until it gets past min_objectid or finds an item of 'type'.
5877  * returns 0 if something is found, 1 if nothing was found and < 0 on error
5878  */
5879 int btrfs_previous_item(struct btrfs_root *root,
5880 			struct btrfs_path *path, u64 min_objectid,
5881 			int type)
5882 {
5883 	struct btrfs_key found_key;
5884 	struct extent_buffer *leaf;
5885 	u32 nritems;
5886 	int ret;
5887 
5888 	while (1) {
5889 		if (path->slots[0] == 0) {
5890 			btrfs_set_path_blocking(path);
5891 			ret = btrfs_prev_leaf(root, path);
5892 			if (ret != 0)
5893 				return ret;
5894 		} else {
5895 			path->slots[0]--;
5896 		}
5897 		leaf = path->nodes[0];
5898 		nritems = btrfs_header_nritems(leaf);
5899 		if (nritems == 0)
5900 			return 1;
5901 		if (path->slots[0] == nritems)
5902 			path->slots[0]--;
5903 
5904 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5905 		if (found_key.objectid < min_objectid)
5906 			break;
5907 		if (found_key.type == type)
5908 			return 0;
5909 		if (found_key.objectid == min_objectid &&
5910 		    found_key.type < type)
5911 			break;
5912 	}
5913 	return 1;
5914 }
5915 
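/*
 * A hedged usage sketch (hypothetical helper, not in the original file):
 * positioning the path on the highest-offset item of a given type by
 * searching just past it and stepping back, a common pattern built on
 * btrfs_previous_item().
 */
static int find_last_of_type_sketch(struct btrfs_root *root,
				    struct btrfs_path *path,
				    u64 objectid, int type)
{
	struct btrfs_key key;
	int ret;

	key.objectid = objectid;
	key.type = type;
	key.offset = (u64)-1;	/* lands us right after the last item */

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	/* step back and let btrfs_previous_item verify objectid/type */
	return btrfs_previous_item(root, path, objectid, type);
}
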
5916 /*
5917  * search the extent tree to find a previous Metadata/Data extent item with
5918  * a minimum objectid.
5919  *
5920  * returns 0 if something is found, 1 if nothing was found and < 0 on error
5921  */
5922 int btrfs_previous_extent_item(struct btrfs_root *root,
5923 			struct btrfs_path *path, u64 min_objectid)
5924 {
5925 	struct btrfs_key found_key;
5926 	struct extent_buffer *leaf;
5927 	u32 nritems;
5928 	int ret;
5929 
5930 	while (1) {
5931 		if (path->slots[0] == 0) {
5932 			btrfs_set_path_blocking(path);
5933 			ret = btrfs_prev_leaf(root, path);
5934 			if (ret != 0)
5935 				return ret;
5936 		} else {
5937 			path->slots[0]--;
5938 		}
5939 		leaf = path->nodes[0];
5940 		nritems = btrfs_header_nritems(leaf);
5941 		if (nritems == 0)
5942 			return 1;
5943 		if (path->slots[0] == nritems)
5944 			path->slots[0]--;
5945 
5946 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5947 		if (found_key.objectid < min_objectid)
5948 			break;
5949 		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5950 		    found_key.type == BTRFS_METADATA_ITEM_KEY)
5951 			return 0;
5952 		if (found_key.objectid == min_objectid &&
5953 		    found_key.type < BTRFS_EXTENT_ITEM_KEY)
5954 			break;
5955 	}
5956 	return 1;
5957 }
5958
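/*
 * A hedged usage sketch (hypothetical helper, not in the original file):
 * finding the last extent item at or before @bytenr, similar in shape to
 * how the scrub code positions itself in the extent tree.
 */
static int find_extent_at_or_below_sketch(struct btrfs_root *extent_root,
					  struct btrfs_path *path, u64 bytenr)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	key.type = (u8)-1;	/* past both EXTENT_ITEM and METADATA_ITEM */
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	/* min_objectid 0 accepts any smaller extent start */
	return btrfs_previous_extent_item(extent_root, path, 0);
}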