// SPDX-License-Identifier: GPL-2.0

#include <linux/slab.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "extent-io-tree.h"
#include "btrfs_inode.h"
#include "misc.h"

static struct kmem_cache *extent_state_cache;

static inline bool extent_state_in_tree(const struct extent_state *state)
{
	return !RB_EMPTY_NODE(&state->rb_node);
}

#ifdef CONFIG_BTRFS_DEBUG
static LIST_HEAD(states);
static DEFINE_SPINLOCK(leak_lock);

static inline void btrfs_leak_debug_add_state(struct extent_state *state)
{
	unsigned long flags;

	spin_lock_irqsave(&leak_lock, flags);
	list_add(&state->leak_list, &states);
	spin_unlock_irqrestore(&leak_lock, flags);
}

static inline void btrfs_leak_debug_del_state(struct extent_state *state)
{
	unsigned long flags;

	spin_lock_irqsave(&leak_lock, flags);
	list_del(&state->leak_list);
	spin_unlock_irqrestore(&leak_lock, flags);
}

static inline void btrfs_extent_state_leak_debug_check(void)
{
	struct extent_state *state;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
		       state->start, state->end, state->state,
		       extent_state_in_tree(state),
		       refcount_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}
}

#define btrfs_debug_check_extent_io_range(tree, start, end)		\
	__btrfs_debug_check_extent_io_range(__func__, (tree), (start), (end))
static inline void __btrfs_debug_check_extent_io_range(const char *caller,
						       struct extent_io_tree *tree,
						       u64 start, u64 end)
{
	struct inode *inode = tree->private_data;
	u64 isize;

	if (!inode)
		return;

	isize = i_size_read(inode);
	if (end >= PAGE_SIZE && (end % 2) == 0 && end != isize - 1) {
		btrfs_debug_rl(BTRFS_I(inode)->root->fs_info,
		    "%s: ino %llu isize %llu odd range [%llu,%llu]",
			caller, btrfs_ino(BTRFS_I(inode)), isize, start, end);
	}
}
#else
#define btrfs_leak_debug_add_state(state)		do {} while (0)
#define btrfs_leak_debug_del_state(state)		do {} while (0)
#define btrfs_extent_state_leak_debug_check()		do {} while (0)
#define btrfs_debug_check_extent_io_range(c, s, e)	do {} while (0)
#endif

/*
 * For the file_extent_tree, we want to hold the inode lock when we look up
 * and update the disk_i_size, but lockdep will complain because with our
 * io_tree we hold the tree lock and then take the inode lock when setting
 * delalloc.  These two things are unrelated, so make a class for the
 * file_extent_tree so we don't get the two locking patterns mixed up.
 */
static struct lock_class_key file_extent_tree_class;

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

void extent_io_tree_init(struct btrfs_fs_info *fs_info,
			 struct extent_io_tree *tree, unsigned int owner,
			 void *private_data)
{
	tree->fs_info = fs_info;
	tree->state = RB_ROOT;
	spin_lock_init(&tree->lock);
	tree->private_data = private_data;
	tree->owner = owner;
	if (owner == IO_TREE_INODE_FILE_EXTENT)
		lockdep_set_class(&tree->lock, &file_extent_tree_class);
}

void extent_io_tree_release(struct extent_io_tree *tree)
{
	spin_lock(&tree->lock);
	/*
	 * Do a single barrier for the waitqueue_active check here, the state
	 * of the waitqueue should not change once extent_io_tree_release is
	 * called.
	 */
	smp_mb();
	while (!RB_EMPTY_ROOT(&tree->state)) {
		struct rb_node *node;
		struct extent_state *state;

		node = rb_first(&tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		rb_erase(&state->rb_node, &tree->state);
		RB_CLEAR_NODE(&state->rb_node);
		/*
		 * btree io trees aren't supposed to have tasks waiting for
		 * changes in the flags of extent states ever.
		 */
		ASSERT(!waitqueue_active(&state->wq));
		free_extent_state(state);

		cond_resched_lock(&tree->lock);
	}
	spin_unlock(&tree->lock);
}

static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;

	/*
	 * The given mask might not be appropriate for the slab allocator,
	 * drop the unsupported bits.
	 */
	mask &= ~(__GFP_DMA32|__GFP_HIGHMEM);
	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	RB_CLEAR_NODE(&state->rb_node);
	btrfs_leak_debug_add_state(state);
	refcount_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	trace_alloc_extent_state(state, mask, _RET_IP_);
	return state;
}

static struct extent_state *alloc_extent_state_atomic(struct extent_state *prealloc)
{
	if (!prealloc)
		prealloc = alloc_extent_state(GFP_ATOMIC);

	return prealloc;
}

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (refcount_dec_and_test(&state->refs)) {
		WARN_ON(extent_state_in_tree(state));
		btrfs_leak_debug_del_state(state);
		trace_free_extent_state(state, _RET_IP_);
		kmem_cache_free(extent_state_cache, state);
	}
}

static int add_extent_changeset(struct extent_state *state, u32 bits,
				 struct extent_changeset *changeset,
				 int set)
{
	int ret;

	if (!changeset)
		return 0;
	if (set && (state->state & bits) == bits)
		return 0;
	if (!set && (state->state & bits) == 0)
		return 0;
	changeset->bytes_changed += state->end - state->start + 1;
	ret = ulist_add(&changeset->range_changed, state->start, state->end,
			GFP_ATOMIC);
	return ret;
}

static inline struct extent_state *next_state(struct extent_state *state)
{
	struct rb_node *next = rb_next(&state->rb_node);

	if (next)
		return rb_entry(next, struct extent_state, rb_node);
	else
		return NULL;
}

static inline struct extent_state *prev_state(struct extent_state *state)
{
	struct rb_node *prev = rb_prev(&state->rb_node);

	if (prev)
		return rb_entry(prev, struct extent_state, rb_node);
	else
		return NULL;
}

/*
 * Search @tree for an entry that contains @offset. Such an entry would have
 * entry->start <= offset && entry->end >= offset.
 *
 * @tree:       the tree to search
 * @offset:     offset that should fall within an entry in @tree
 * @node_ret:   pointer where the new node should be anchored (used when
 *              inserting an entry in the tree)
 * @parent_ret: points to the entry which would have been the parent of the
 *              entry containing @offset
 *
 * Return a pointer to the entry that contains @offset byte address and don't
 * change @node_ret and @parent_ret.
 *
 * If no such entry exists, fill the parameters @node_ret and @parent_ret and
 * return a pointer to the first entry that ends after @offset, or NULL if
 * there is no such entry.
 */
static inline struct extent_state *tree_search_for_insert(struct extent_io_tree *tree,
							  u64 offset,
							  struct rb_node ***node_ret,
							  struct rb_node **parent_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node **node = &root->rb_node;
	struct rb_node *prev = NULL;
	struct extent_state *entry = NULL;

	while (*node) {
		prev = *node;
		entry = rb_entry(prev, struct extent_state, rb_node);

		if (offset < entry->start)
			node = &(*node)->rb_left;
		else if (offset > entry->end)
			node = &(*node)->rb_right;
		else
			return entry;
	}

	if (node_ret)
		*node_ret = node;
	if (parent_ret)
		*parent_ret = prev;

	/* Search neighbors until we find the first one past the end */
	while (entry && offset > entry->end)
		entry = next_state(entry);

	return entry;
}

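/*
 * Worked example (hypothetical tree contents, for illustration only): with
 * entries [0, 4095] and [8192, 12287] in the tree, searching for offset 100
 * returns the [0, 4095] entry and leaves @node_ret and @parent_ret untouched.
 * Searching for offset 5000 finds no containing entry, fills @node_ret and
 * @parent_ret with the rbtree insertion point for a new node, and returns
 * the [8192, 12287] entry, the first one that ends after 5000.
 */
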
/*
 * Search for @offset in the tree or fill the neighbor rbtree node pointers.
 *
 * @tree:      the tree to search
 * @offset:    offset that should fall within an entry in @tree
 * @next_ret:  pointer to the first entry whose range ends at or after @offset
 * @prev_ret:  pointer to the last entry whose range begins at or before @offset
 *
 * Return a pointer to the entry that contains @offset byte address. If no
 * such entry exists, then return NULL and fill @prev_ret and @next_ret.
 * Otherwise return the found entry and leave the other pointers untouched.
 */
static struct extent_state *tree_search_prev_next(struct extent_io_tree *tree,
						  u64 offset,
						  struct extent_state **prev_ret,
						  struct extent_state **next_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node **node = &root->rb_node;
	struct extent_state *orig_prev;
	struct extent_state *entry = NULL;

	ASSERT(prev_ret);
	ASSERT(next_ret);

	while (*node) {
		entry = rb_entry(*node, struct extent_state, rb_node);

		if (offset < entry->start)
			node = &(*node)->rb_left;
		else if (offset > entry->end)
			node = &(*node)->rb_right;
		else
			return entry;
	}

	orig_prev = entry;
	while (entry && offset > entry->end)
		entry = next_state(entry);
	*next_ret = entry;
	entry = orig_prev;

	while (entry && offset < entry->start)
		entry = prev_state(entry);
	*prev_ret = entry;

	return NULL;
}

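/*
 * Worked example (hypothetical tree contents, for illustration only): with
 * entries [0, 4095] and [8192, 12287], a call for offset 5000 returns NULL
 * and sets *prev_ret to the [0, 4095] entry and *next_ret to the
 * [8192, 12287] entry; a call for offset 9000 returns the [8192, 12287]
 * entry and leaves *prev_ret and *next_ret untouched.
 */
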
/*
 * Inexact rb-tree search, return the next entry if @offset is not found
 */
static inline struct extent_state *tree_search(struct extent_io_tree *tree, u64 offset)
{
	return tree_search_for_insert(tree, offset, NULL, NULL);
}

static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
{
	btrfs_panic(tree->fs_info, err,
	"locking error: extent tree was modified by another thread while locked");
}

/*
 * Utility function to look for merge candidates inside a given range.  Any
 * extents with matching state are merged together into a single extent in the
 * tree.  Extents with EXTENT_LOCKED or EXTENT_BOUNDARY in their state field
 * are not merged because the end_io handlers need to be able to do operations
 * on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static void merge_state(struct extent_io_tree *tree, struct extent_state *state)
{
	struct extent_state *other;

	if (state->state & (EXTENT_LOCKED | EXTENT_BOUNDARY))
		return;

	other = prev_state(state);
	if (other && other->end == state->start - 1 &&
	    other->state == state->state) {
		if (tree->private_data)
			btrfs_merge_delalloc_extent(tree->private_data,
						    state, other);
		state->start = other->start;
		rb_erase(&other->rb_node, &tree->state);
		RB_CLEAR_NODE(&other->rb_node);
		free_extent_state(other);
	}
	other = next_state(state);
	if (other && other->start == state->end + 1 &&
	    other->state == state->state) {
		if (tree->private_data)
			btrfs_merge_delalloc_extent(tree->private_data, state,
						    other);
		state->end = other->end;
		rb_erase(&other->rb_node, &tree->state);
		RB_CLEAR_NODE(&other->rb_node);
		free_extent_state(other);
	}
}

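/*
 * Worked example (for illustration only): if the tree holds [0, 4095] and
 * [4096, 8191] and both entries have exactly EXTENT_DIRTY set, merging the
 * neighbors leaves a single [0, 8191] entry.  If either entry also had
 * EXTENT_LOCKED or EXTENT_BOUNDARY set, or the state fields differed at all,
 * no merge would happen.
 */
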
static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   u32 bits, struct extent_changeset *changeset)
{
	u32 bits_to_set = bits & ~EXTENT_CTLBITS;
	int ret;

	if (tree->private_data)
		btrfs_set_delalloc_extent(tree->private_data, state, bits);

	ret = add_extent_changeset(state, bits_to_set, changeset, 1);
	BUG_ON(ret < 0);
	state->state |= bits_to_set;
}

/*
 * Insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state,
			u32 bits, struct extent_changeset *changeset)
{
	struct rb_node **node;
	struct rb_node *parent = NULL;
	const u64 end = state->end;

	set_state_bits(tree, state, bits, changeset);

	node = &tree->state.rb_node;
	while (*node) {
		struct extent_state *entry;

		parent = *node;
		entry = rb_entry(parent, struct extent_state, rb_node);

		if (end < entry->start) {
			node = &(*node)->rb_left;
		} else if (end > entry->end) {
			node = &(*node)->rb_right;
		} else {
			btrfs_err(tree->fs_info,
			       "found node %llu %llu on insert of %llu %llu",
			       entry->start, entry->end, state->start, end);
			return -EEXIST;
		}
	}

	rb_link_node(&state->rb_node, parent, node);
	rb_insert_color(&state->rb_node, &tree->state);

	merge_state(tree, state);
	return 0;
}

/*
 * Insert state to @tree to the location given by @node and @parent.
 */
static void insert_state_fast(struct extent_io_tree *tree,
			      struct extent_state *state, struct rb_node **node,
			      struct rb_node *parent, unsigned bits,
			      struct extent_changeset *changeset)
{
	set_state_bits(tree, state, bits, changeset);
	rb_link_node(&state->rb_node, parent, node);
	rb_insert_color(&state->rb_node, &tree->state);
	merge_state(tree, state);
}

/*
 * Split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end].  After
 * calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig:     [split, orig->end]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *parent = NULL;
	struct rb_node **node;

	if (tree->private_data)
		btrfs_split_delalloc_extent(tree->private_data, orig, split);

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	parent = &orig->rb_node;
	node = &parent;
	while (*node) {
		struct extent_state *entry;

		parent = *node;
		entry = rb_entry(parent, struct extent_state, rb_node);

		if (prealloc->end < entry->start) {
			node = &(*node)->rb_left;
		} else if (prealloc->end > entry->end) {
			node = &(*node)->rb_right;
		} else {
			free_extent_state(prealloc);
			return -EEXIST;
		}
	}

	rb_link_node(&prealloc->rb_node, parent, node);
	rb_insert_color(&prealloc->rb_node, &tree->state);

	return 0;
}

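/*
 * Worked example (for illustration only): splitting an entry [0, 8191] at
 * split == 4096 leaves 'prealloc' covering [0, 4095] and 'orig' covering
 * [4096, 8191], both carrying the same state bits the original entry had.
 */
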
/*
 * Utility function to clear some bits in an extent state struct.  It will
 * optionally wake up anyone waiting on this state (wake == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree.
 */
static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
					    struct extent_state *state,
					    u32 bits, int wake,
					    struct extent_changeset *changeset)
{
	struct extent_state *next;
	u32 bits_to_clear = bits & ~EXTENT_CTLBITS;
	int ret;

	if (tree->private_data)
		btrfs_clear_delalloc_extent(tree->private_data, state, bits);

	ret = add_extent_changeset(state, bits_to_clear, changeset, 0);
	BUG_ON(ret < 0);
	state->state &= ~bits_to_clear;
	if (wake)
		wake_up(&state->wq);
	if (state->state == 0) {
		next = next_state(state);
		if (extent_state_in_tree(state)) {
			rb_erase(&state->rb_node, &tree->state);
			RB_CLEAR_NODE(&state->rb_node);
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
		next = next_state(state);
	}
	return next;
}

/*
 * Clear some bits on a range in the tree.  This may require splitting or
 * inserting elements in the tree, so the gfp mask is used to indicate which
 * allocations or sleeping are allowed.
 *
 * Pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove the given
 * range from the tree regardless of state (i.e. for truncate).
 *
 * The range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns 0 on success (errors during tree
 * manipulation trigger extent_io_tree_panic()).
 */
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, struct extent_state **cached_state,
		       gfp_t mask, struct extent_changeset *changeset)
{
	struct extent_state *state;
	struct extent_state *cached;
	struct extent_state *prealloc = NULL;
	u64 last_end;
	int err;
	int clear = 0;
	int wake;
	int delete = (bits & EXTENT_CLEAR_ALL_BITS);

	btrfs_debug_check_extent_io_range(tree, start, end);
	trace_btrfs_clear_extent_bit(tree, start, end - start + 1, bits);

	if (delete)
		bits |= ~EXTENT_CTLBITS;

	if (bits & EXTENT_DELALLOC)
		bits |= EXTENT_NORESERVE;

	wake = (bits & EXTENT_LOCKED) ? 1 : 0;
	if (bits & (EXTENT_LOCKED | EXTENT_BOUNDARY))
		clear = 1;
again:
	if (!prealloc) {
		/*
		 * Don't care about allocation failure here because we might
		 * end up not needing the pre-allocated extent state at all,
		 * which is the case if we only have in the tree extent states
		 * that cover our input range and don't cover any other range.
		 * If we end up needing a new extent state we allocate it later.
		 */
		prealloc = alloc_extent_state(mask);
	}

	spin_lock(&tree->lock);
	if (cached_state) {
		cached = *cached_state;

		if (clear) {
			*cached_state = NULL;
			cached_state = NULL;
		}

		if (cached && extent_state_in_tree(cached) &&
		    cached->start <= start && cached->end > start) {
			if (clear)
				refcount_dec(&cached->refs);
			state = cached;
			goto hit_next;
		}
		if (clear)
			free_extent_state(cached);
	}

	/* This search will find the extents that end after our range starts. */
	state = tree_search(tree, start);
	if (!state)
		goto out;
hit_next:
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);
	last_end = state->end;

	/* The state doesn't have the wanted bits, go ahead. */
	if (!(state->state & bits)) {
		state = next_state(state);
		goto next;
	}

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on the
	 * second half.
	 *
	 * If the extent we found extends past our range, we just split and
	 * search again.  It'll get split again the next time though.
	 *
	 * If the extent we found is inside our range, we clear the desired bit
	 * on it.
	 */

	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc)
			goto search_again;
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state = clear_state_bit(tree, state, bits, wake, changeset);
			goto next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit on the first half.
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc)
			goto search_again;
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		if (wake)
			wake_up(&state->wq);

		clear_state_bit(tree, prealloc, bits, wake, changeset);

		prealloc = NULL;
		goto out;
	}

	state = clear_state_bit(tree, state, bits, wake, changeset);
next:
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	if (start <= end && state && !need_resched())
		goto hit_next;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (gfpflags_allow_blocking(mask))
		cond_resched();
	goto again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return 0;
}

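/*
 * Usage sketch (hypothetical caller, for illustration only): dropping
 * EXTENT_DIRTY from an inclusive byte range through the clear_extent_bit()
 * wrapper from extent-io-tree.h, which forwards here with GFP_NOFS and no
 * changeset:
 *
 *	clear_extent_bit(&inode->io_tree, 0, SZ_1M - 1, EXTENT_DIRTY, NULL);
 */
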
static void wait_on_state(struct extent_io_tree *tree,
			  struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
}

/*
 * Wait for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function.
 */
void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits)
{
	struct extent_state *state;

	btrfs_debug_check_extent_io_range(tree, start, end);

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * This search will find all the extents that end after our
		 * range starts.
		 */
		state = tree_search(tree, start);
process_node:
		if (!state)
			break;
		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			refcount_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (!cond_resched_lock(&tree->lock)) {
			state = next_state(state);
			goto process_node;
		}
	}
out:
	spin_unlock(&tree->lock);
}

static void cache_state_if_flags(struct extent_state *state,
				 struct extent_state **cached_ptr,
				 unsigned flags)
{
	if (cached_ptr && !(*cached_ptr)) {
		if (!flags || (state->state & flags)) {
			*cached_ptr = state;
			refcount_inc(&state->refs);
		}
	}
}

static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	return cache_state_if_flags(state, cached_ptr,
				    EXTENT_LOCKED | EXTENT_BOUNDARY);
}

/*
 * Find the first state struct with 'bits' set after 'start', and return it.
 * tree->lock must be held.  NULL will be returned if nothing was found after
 * 'start'.
 */
static struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
							u64 start, u32 bits)
{
	struct extent_state *state;

	/*
	 * This search will find all the extents that end after our range
	 * starts.
	 */
	state = tree_search(tree, start);
	while (state) {
		if (state->end >= start && (state->state & bits))
			return state;
		state = next_state(state);
	}
	return NULL;
}

/*
 * Find the first offset in the io tree with one or more @bits set.
 *
 * Note: If there are multiple bits set in @bits, any of them will match.
 *
 * Return 0 if we find something, and update @start_ret and @end_ret.
 * Return 1 if we found nothing.
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, u32 bits,
			  struct extent_state **cached_state)
{
	struct extent_state *state;
	int ret = 1;

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->end == start - 1 && extent_state_in_tree(state)) {
			while ((state = next_state(state)) != NULL) {
				if (state->state & bits)
					goto got_it;
			}
			free_extent_state(*cached_state);
			*cached_state = NULL;
			goto out;
		}
		free_extent_state(*cached_state);
		*cached_state = NULL;
	}

	state = find_first_extent_bit_state(tree, start, bits);
got_it:
	if (state) {
		cache_state_if_flags(state, cached_state, 0);
		*start_ret = state->start;
		*end_ret = state->end;
		ret = 0;
	}
out:
	spin_unlock(&tree->lock);
	return ret;
}

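/*
 * Usage sketch (hypothetical caller, for illustration only): finding the
 * first dirty range at or after byte 0:
 *
 *	u64 found_start, found_end;
 *
 *	if (find_first_extent_bit(tree, 0, &found_start, &found_end,
 *				  EXTENT_DIRTY, NULL) == 0) {
 *		... [found_start, found_end] is the first range with
 *		    EXTENT_DIRTY set ...
 *	}
 */
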
/*
 * Find a contiguous area of bits
 *
 * @tree:      io tree to check
 * @start:     offset to start the search from
 * @start_ret: the first offset we found with the bits set
 * @end_ret:   the end of the contiguous range of the bits that were set
 * @bits:      bits to look for
 *
 * set_extent_bit and clear_extent_bit can temporarily split contiguous ranges
 * to set bits appropriately, and then merge them again.  During this time they
 * will drop the tree->lock, so use this helper if you want to find the actual
 * contiguous area for given bits.  We will search for the first state we find
 * with the bits set, and then walk forward in the tree until we find a
 * non-contiguous area.  The area returned will be the full contiguous area
 * with the bits set.
 */
int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start,
			       u64 *start_ret, u64 *end_ret, u32 bits)
{
	struct extent_state *state;
	int ret = 1;

	spin_lock(&tree->lock);
	state = find_first_extent_bit_state(tree, start, bits);
	if (state) {
		*start_ret = state->start;
		*end_ret = state->end;
		while ((state = next_state(state)) != NULL) {
			if (state->start > (*end_ret + 1))
				break;
			*end_ret = state->end;
		}
		ret = 0;
	}
	spin_unlock(&tree->lock);
	return ret;
}

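/*
 * Worked example (for illustration only): if the tree holds [0, 4095],
 * [4096, 8191] and [16384, 20479], all with EXTENT_DIRTY set, then a call
 * with start == 0 and bits == EXTENT_DIRTY returns 0 with *start_ret == 0
 * and *end_ret == 8191; the range at 16384 is excluded because it is not
 * contiguous with the first two.
 */
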
/*
 * Find a contiguous range of bytes in the file marked as delalloc, not more
 * than 'max_bytes'.  @start and @end are used to return the range.
 *
 * True is returned if we find something, false if nothing was in the tree.
 */
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
			       u64 *end, u64 max_bytes,
			       struct extent_state **cached_state)
{
	struct extent_state *state;
	u64 cur_start = *start;
	bool found = false;
	u64 total_bytes = 0;

	spin_lock(&tree->lock);

	/*
	 * This search will find all the extents that end after our range
	 * starts.
	 */
	state = tree_search(tree, cur_start);
	if (!state) {
		*end = (u64)-1;
		goto out;
	}

	while (state) {
		if (found && (state->start != cur_start ||
			      (state->state & EXTENT_BOUNDARY))) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			if (!found)
				*end = state->end;
			goto out;
		}
		if (!found) {
			*start = state->start;
			*cached_state = state;
			refcount_inc(&state->refs);
		}
		found = true;
		*end = state->end;
		cur_start = state->end + 1;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
		state = next_state(state);
	}
out:
	spin_unlock(&tree->lock);
	return found;
}

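/*
 * Usage sketch (hypothetical caller, for illustration only): finding up to
 * 128K of contiguous delalloc starting at @cur:
 *
 *	struct extent_state *cached = NULL;
 *	u64 delalloc_start = cur;
 *	u64 delalloc_end;
 *
 *	if (btrfs_find_delalloc_range(tree, &delalloc_start, &delalloc_end,
 *				      SZ_128K, &cached)) {
 *		... [delalloc_start, delalloc_end] is delalloc, and 'cached'
 *		    holds a reference that must be dropped with
 *		    free_extent_state() ...
 *	}
 */
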
/*
 * Set some bits on a range in the tree.  This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set.  The start of the
 * existing range is returned in failed_start in this case.
 *
 * The range [start, end] is inclusive.  This takes the tree lock.
 */
static int __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
			    u32 bits, u64 *failed_start,
			    struct extent_state **cached_state,
			    struct extent_changeset *changeset, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node **p;
	struct rb_node *parent;
	int err = 0;
	u64 last_start;
	u64 last_end;
	u32 exclusive_bits = (bits & EXTENT_LOCKED);

	btrfs_debug_check_extent_io_range(tree, start, end);
	trace_btrfs_set_extent_bit(tree, start, end - start + 1, bits);

	if (exclusive_bits)
		ASSERT(failed_start);
	else
		ASSERT(failed_start == NULL);
again:
	if (!prealloc) {
		/*
		 * Don't care about allocation failure here because we might
		 * end up not needing the pre-allocated extent state at all,
		 * which is the case if we only have in the tree extent states
		 * that cover our input range and don't cover any other range.
		 * If we end up needing a new extent state we allocate it later.
		 */
		prealloc = alloc_extent_state(mask);
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    extent_state_in_tree(state))
			goto hit_next;
	}
	/*
	 * This search will find all the extents that end after our range
	 * starts.
	 */
	state = tree_search_for_insert(tree, start, &p, &parent);
	if (!state) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc)
			goto search_again;
		prealloc->start = start;
		prealloc->end = end;
		insert_state_fast(tree, prealloc, p, parent, bits, changeset);
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		goto out;
	}
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		if (state->state & exclusive_bits) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}

		set_state_bits(tree, state, bits, changeset);
		cache_state(state, cached_state);
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		state = next_state(state);
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on the
	 * second half.
	 *
	 * If the extent we found extends past our range, we just split and
	 * search again.  It'll get split again the next time though.
	 *
	 * If the extent we found is inside our range, we set the desired bit
	 * on it.
	 */
	if (state->start < start) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		/*
		 * If this extent already has all the bits we want set, then
		 * skip it, not necessary to split it or do anything with it.
		 */
		if ((state->state & bits) == bits) {
			start = state->end + 1;
			cache_state(state, cached_state);
			goto search_again;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc)
			goto search_again;
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);

		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, bits, changeset);
			cache_state(state, cached_state);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			state = next_state(state);
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and ignore the
	 * extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc)
			goto search_again;

		/*
		 * Avoid freeing 'prealloc' if it can be merged with the later
		 * extent.
		 */
		prealloc->start = start;
		prealloc->end = this_end;
		err = insert_state(tree, prealloc, bits, changeset);
		if (err)
			extent_io_tree_panic(tree, err);

		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 *
	 * We need to split the extent, and set the bit on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc)
			goto search_again;
		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, bits, changeset);
		cache_state(prealloc, cached_state);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (gfpflags_allow_blocking(mask))
		cond_resched();
	goto again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;
}

int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   u32 bits, struct extent_state **cached_state, gfp_t mask)
{
	return __set_extent_bit(tree, start, end, bits, NULL, cached_state,
				NULL, mask);
}

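/*
 * Usage sketch (hypothetical caller, for illustration only): marking an
 * inclusive byte range dirty, allowing the allocator to sleep:
 *
 *	int ret;
 *
 *	ret = set_extent_bit(&inode->io_tree, start, start + len - 1,
 *			     EXTENT_DIRTY, NULL, GFP_NOFS);
 */
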
/*
 * Convert all bits in a given range from one bit to another
 *
 * @tree:	the io tree to search
 * @start:	the start offset in bytes
 * @end:	the end offset in bytes (inclusive)
 * @bits:	the bits to set in this range
 * @clear_bits:	the bits to clear in this range
 * @cached_state:	state that we're going to cache
 *
 * This will go through and set bits for the given range.  If any states exist
 * already in this range they are set with the given bit and cleared of the
 * clear_bits.  This is only meant to be used by things that are mergeable, ie.
 * converting from say DELALLOC to DIRTY.  This is not meant to be used with
 * boundary bits like EXTENT_LOCKED.
 *
 * All allocations are done with GFP_NOFS.
 */
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		       u32 bits, u32 clear_bits,
		       struct extent_state **cached_state)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node **p;
	struct rb_node *parent;
	int err = 0;
	u64 last_start;
	u64 last_end;
	bool first_iteration = true;

	btrfs_debug_check_extent_io_range(tree, start, end);
	trace_btrfs_convert_extent_bit(tree, start, end - start + 1, bits,
				       clear_bits);

again:
	if (!prealloc) {
		/*
		 * Best effort, don't worry if extent state allocation fails
		 * here for the first iteration. We might have a cached state
		 * that matches exactly the target range, in which case no
		 * extent state allocations are needed. We'll only know this
		 * after locking the tree.
		 */
		prealloc = alloc_extent_state(GFP_NOFS);
		if (!prealloc && !first_iteration)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    extent_state_in_tree(state))
			goto hit_next;
	}

	/*
	 * This search will find all the extents that end after our range
	 * starts.
	 */
	state = tree_search_for_insert(tree, start, &p, &parent);
	if (!state) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		prealloc->start = start;
		prealloc->end = end;
		insert_state_fast(tree, prealloc, p, parent, bits, NULL);
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		goto out;
	}
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going.
	 */
	if (state->start == start && state->end <= end) {
		set_state_bits(tree, state, bits, NULL);
		cache_state(state, cached_state);
		state = clear_state_bit(tree, state, clear_bits, 0, NULL);
		if (last_end == (u64)-1)
			goto out;
		start = last_end + 1;
		if (start < end && state && state->start == start &&
		    !need_resched())
			goto hit_next;
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on the
	 * second half.
	 *
	 * If the extent we found extends past our range, we just split and
	 * search again.  It'll get split again the next time though.
	 *
	 * If the extent we found is inside our range, we set the desired bit
	 * on it.
	 */
	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		if (err)
			extent_io_tree_panic(tree, err);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, bits, NULL);
			cache_state(state, cached_state);
			state = clear_state_bit(tree, state, clear_bits, 0, NULL);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
			if (start < end && state && state->start == start &&
			    !need_resched())
				goto hit_next;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and ignore the
	 * extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * Avoid freeing 'prealloc' if it can be merged with the later
		 * extent.
		 */
		prealloc->start = start;
		prealloc->end = this_end;
		err = insert_state(tree, prealloc, bits, NULL);
		if (err)
			extent_io_tree_panic(tree, err);
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 *
	 * We need to split the extent, and set the bit on the first half.
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		if (!prealloc) {
			err = -ENOMEM;
			goto out;
		}

		err = split_state(tree, state, prealloc, end + 1);
		if (err)
			extent_io_tree_panic(tree, err);

		set_state_bits(tree, prealloc, bits, NULL);
		cache_state(prealloc, cached_state);
		clear_state_bit(tree, prealloc, clear_bits, 0, NULL);
		prealloc = NULL;
		goto out;
	}

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	cond_resched();
	first_iteration = false;
	goto again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;
}

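/*
 * Usage sketch (hypothetical caller, for illustration only): converting a
 * range from EXTENT_DELALLOC to EXTENT_DIRTY in one pass, as described in
 * the comment above:
 *
 *	int ret;
 *
 *	ret = convert_extent_bit(tree, start, end, EXTENT_DIRTY,
 *				 EXTENT_DELALLOC, &cached);
 */
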
/*
 * Find the first range that has @bits not set. This range could start before
 * @start.
 *
 * @tree:      the tree to search
 * @start:     offset at/after which the found extent should start
 * @start_ret: records the beginning of the range
 * @end_ret:   records the end of the range (inclusive)
 * @bits:      the set of bits which must be unset
 *
 * Since an unallocated range is also considered one which doesn't have the
 * bits set, it's possible that @end_ret contains -1; this happens in case the
 * range spans (last_range_end, end of device]. In this case it's up to the
 * caller to trim @end_ret to the appropriate size.
 */
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
				 u64 *start_ret, u64 *end_ret, u32 bits)
{
	struct extent_state *state;
	struct extent_state *prev = NULL, *next;

	spin_lock(&tree->lock);

	/* Find first extent with bits cleared */
	while (1) {
		state = tree_search_prev_next(tree, start, &prev, &next);
		if (!state && !next && !prev) {
			/*
			 * Tree is completely empty, send full range and let
			 * caller deal with it
			 */
			*start_ret = 0;
			*end_ret = -1;
			goto out;
		} else if (!state && !next) {
			/*
			 * We are past the last allocated chunk, set start at
			 * the end of the last extent.
			 */
			*start_ret = prev->end + 1;
			*end_ret = -1;
			goto out;
		} else if (!state) {
			state = next;
		}

		/*
		 * At this point 'state' either contains 'start' or start is
		 * before 'state'
		 */
		if (in_range(start, state->start, state->end - state->start + 1)) {
			if (state->state & bits) {
				/*
				 * |--range with bits set--|
				 *    |
				 *    start
				 */
				start = state->end + 1;
			} else {
				/*
				 * 'start' falls within a range that doesn't
				 * have the bits set, so take its start as the
				 * beginning of the desired range
				 *
				 * |--range with bits cleared----|
				 *      |
				 *      start
				 */
				*start_ret = state->start;
				break;
			}
		} else {
			/*
			 * |---prev range---|---hole/unset---|---node range---|
			 *                          |
			 *                        start
			 *
			 *                        or
			 *
			 * |---hole/unset--||--first node--|
			 * 0   |
			 *    start
			 */
			if (prev)
				*start_ret = prev->end + 1;
			else
				*start_ret = 0;
			break;
		}
	}

	/*
	 * Find the longest stretch from start until an entry which has the
	 * bits set
	 */
	while (state) {
		if (state->end >= start && !(state->state & bits)) {
			*end_ret = state->end;
		} else {
			*end_ret = state->start - 1;
			break;
		}
		state = next_state(state);
	}
out:
	spin_unlock(&tree->lock);
}

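/*
 * Worked example (hypothetical tree contents, for illustration only): if
 * the only entry with CHUNK_ALLOCATED set is [1M, 2M - 1], then a call with
 * start == 0 sets *start_ret == 0 and *end_ret == 1M - 1, while a call with
 * start == 3M sets *start_ret == 2M and *end_ret == (u64)-1, which the
 * caller is expected to trim to the device size.
 */
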
/*
 * Count the number of bytes in the tree that have a given bit(s) set.  This
 * can be fairly slow, except for EXTENT_DIRTY which is cached.  The total
 * number found is returned.
 */
u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end, u64 max_bytes,
		     u32 bits, int contig)
{
	struct extent_state *state;
	u64 cur_start = *start;
	u64 total_bytes = 0;
	u64 last = 0;
	int found = 0;

	if (WARN_ON(search_end < cur_start))
		return 0;

	spin_lock(&tree->lock);

	/*
	 * This search will find all the extents that end after our range
	 * starts.
	 */
	state = tree_search(tree, cur_start);
	while (state) {
		if (state->start > search_end)
			break;
		if (contig && found && state->start > last + 1)
			break;
		if (state->end >= cur_start && (state->state & bits) == bits) {
			total_bytes += min(search_end, state->end) + 1 -
				       max(cur_start, state->start);
			if (total_bytes >= max_bytes)
				break;
			if (!found) {
				*start = max(cur_start, state->start);
				found = 1;
			}
			last = state->end;
		} else if (contig && found) {
			break;
		}
		state = next_state(state);
	}
	spin_unlock(&tree->lock);
	return total_bytes;
}

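/*
 * Usage sketch (hypothetical caller, for illustration only): counting how
 * many bytes in [0, isize - 1] are marked dirty, without requiring them to
 * be contiguous:
 *
 *	u64 cur = 0;
 *	u64 dirty_bytes;
 *
 *	dirty_bytes = count_range_bits(tree, &cur, isize - 1, (u64)-1,
 *				       EXTENT_DIRTY, 0);
 */
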
/*
 * Search a range in the state tree for given bits.  If 'filled' == 1, this
 * returns 1 only if the whole range has the bits set.  Otherwise, 1 is
 * returned if any bit in the range is found set.
 */
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   u32 bits, int filled, struct extent_state *cached)
{
	struct extent_state *state = NULL;
	int bitset = 0;

	spin_lock(&tree->lock);
	if (cached && extent_state_in_tree(cached) && cached->start <= start &&
	    cached->end > start)
		state = cached;
	else
		state = tree_search(tree, start);
	while (state && start <= end) {
		if (filled && state->start > start) {
			bitset = 0;
			break;
		}

		if (state->start > end)
			break;

		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}

		if (state->end == (u64)-1)
			break;

		start = state->end + 1;
		if (start > end)
			break;
		state = next_state(state);
	}

	/* We ran out of states and were still inside of our range. */
	if (filled && !state)
		bitset = 0;
	spin_unlock(&tree->lock);
	return bitset;
}

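/*
 * Usage sketch (hypothetical caller, for illustration only): checking
 * whether every byte of an inclusive range is marked up to date:
 *
 *	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL)) {
 *		... the whole range has EXTENT_UPTODATE set ...
 *	}
 */
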
/* Wrappers around set/clear extent bit */
int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			   u32 bits, struct extent_changeset *changeset)
{
	/*
	 * We don't support EXTENT_LOCKED yet, as the current changeset will
	 * record any bits changed, so for the EXTENT_LOCKED case it will
	 * either fail with -EEXIST or the changeset will record the whole
	 * range.
	 */
	ASSERT(!(bits & EXTENT_LOCKED));

	return __set_extent_bit(tree, start, end, bits, NULL, NULL, changeset,
				GFP_NOFS);
}

int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
			     u32 bits, struct extent_changeset *changeset)
{
	/*
	 * Don't support the EXTENT_LOCKED case, for the same reason as in
	 * set_record_extent_bits().
	 */
	ASSERT(!(bits & EXTENT_LOCKED));

	return __clear_extent_bit(tree, start, end, bits, NULL, GFP_NOFS,
				  changeset);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
	int err;
	u64 failed_start;

	err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start,
			       NULL, NULL, GFP_NOFS);
	if (err == -EEXIST) {
		if (failed_start > start)
			clear_extent_bit(tree, start, failed_start - 1,
					 EXTENT_LOCKED, NULL);
		return 0;
	}
	return 1;
}

/*
 * Either insert or lock the state struct between start and end.  If part of
 * the range is already locked, wait (via wait_extent_bit()) for it to be
 * unlocked and retry.
 */
int lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		struct extent_state **cached_state)
{
	int err;
	u64 failed_start;

	err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start,
			       cached_state, NULL, GFP_NOFS);
	while (err == -EEXIST) {
		if (failed_start != start)
			clear_extent_bit(tree, start, failed_start - 1,
					 EXTENT_LOCKED, cached_state);

		wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
		err = __set_extent_bit(tree, start, end, EXTENT_LOCKED,
				       &failed_start, cached_state, NULL,
				       GFP_NOFS);
	}
	return err;
}

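/*
 * Usage sketch (hypothetical caller, for illustration only): locking a
 * byte range, operating on it, then unlocking it with the unlock_extent()
 * helper declared in extent-io-tree.h:
 *
 *	struct extent_state *cached = NULL;
 *
 *	lock_extent(&inode->io_tree, start, end, &cached);
 *	... operate on the locked [start, end] range ...
 *	unlock_extent(&inode->io_tree, start, end, &cached);
 */
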
void __cold extent_state_free_cachep(void)
{
	btrfs_extent_state_leak_debug_check();
	kmem_cache_destroy(extent_state_cache);
}

int __init extent_state_init_cachep(void)
{
	extent_state_cache = kmem_cache_create("btrfs_extent_state",
			sizeof(struct extent_state), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	return 0;
}