/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc's force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,
	CHUNK_ALLOC_LIMITED = 1,
	CHUNK_ALLOC_FORCE = 2,
};
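
/*
 * Illustrative sketch (not part of the original file, kept compiled
 * out): how a caller might pass one of these force levels to
 * do_chunk_alloc(), which is forward-declared below.  The trans and
 * extent_root arguments are assumed to come from the caller's context.
 */
#if 0
static int example_force_metadata_chunk(struct btrfs_trans_handle *trans,
					struct btrfs_root *extent_root)
{
	/* we really need a new metadata chunk: force the allocation */
	return do_chunk_alloc(trans, extent_root,
			      BTRFS_BLOCK_GROUP_METADATA,
			      CHUNK_ALLOC_FORCE);
}
#endif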

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
	RESERVE_FREE = 0,
	RESERVE_ALLOC = 1,
	RESERVE_ALLOC_NO_ACCOUNT = 2,
};
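
/*
 * Illustrative sketch (not part of the original file, kept compiled
 * out): a reservation made with RESERVE_ALLOC updates bytes_may_use
 * for ENOSPC accounting and can later be dropped with RESERVE_FREE via
 * btrfs_update_reserved_bytes(), forward-declared below.  The cache
 * pointer and byte count are assumed to be supplied by the caller.
 */
#if 0
static void example_reserve_and_release(struct btrfs_block_group_cache *cache,
					u64 num_bytes)
{
	/* reserve space and account it in bytes_may_use */
	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_ALLOC, 0);

	/* ... the allocation is abandoned ... */

	/* drop the reservation again */
	btrfs_update_reserved_bytes(cache, num_bytes, RESERVE_FREE, 0);
}
#endif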

static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 bytenr,
			      u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_delayed_ref_node *node, u64 parent,
				u64 root_objectid, u64 owner_objectid,
				u64 owner_offset, int refs_to_drop,
				struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 flags,
			  int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve,
				       int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num_bytes, int reserved);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
		cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);

	if (info->first_logical_byte > block_group->key.objectid)
		info->first_logical_byte = block_group->key.objectid;

	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
			info->first_logical_byte = ret->key.objectid;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		if (ret)
			return ret;
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		if (ret)
			return ret;

		while (nr--) {
			u64 start, len;

			if (logical[nr] > cache->key.objectid +
			    cache->key.offset)
				continue;

			if (logical[nr] + stripe_len <= cache->key.objectid)
				continue;

			start = logical[nr];
			if (start < cache->key.objectid) {
				start = cache->key.objectid;
				len = (logical[nr] + stripe_len) - start;
			} else {
				len = min_t(u64, stripe_len,
					    cache->key.objectid +
					    cache->key.offset - start);
			}

			cache->bytes_super += len;
			ret = add_excluded_extent(root, start, len);
			if (ret) {
				kfree(logical);
				return ret;
			}
		}

		kfree(logical);
	}
	return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}

#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_root *root,
				struct btrfs_block_group_cache *block_group)
{
	u64 start = block_group->key.objectid;
	u64 len = block_group->key.offset;
	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
		root->nodesize : root->sectorsize;
	u64 step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		if (len < step)
			len = 0;
		else
			len -= step;
	}
}
#endif

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents tree for any extents that
 * can't be used yet, because their free space will be released as soon as
 * the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}

static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = -ENOMEM;
	bool wakeup = true;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(extent_root, block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 1;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->commit_root_sem);

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				if (wakeup)
					caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				goto again;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto err;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			if (wakeup)
				caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->tree_root->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				if (wakeup)
					wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(extent_root, block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->key.offset -
			btrfs_block_group_used(&block_group->item);
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(extent_root, block_group);
	}
#endif

	caching_ctl->progress = (u64)-1;
err:
	btrfs_free_path(path);
	up_read(&fs_info->commit_root_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
out:
	if (ret) {
		spin_lock(&block_group->lock);
		block_group->caching_ctl = NULL;
		block_group->cached = BTRFS_CACHE_ERROR;
		spin_unlock(&block_group->lock);
	}
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	atomic_set(&caching_ctl->count, 1);
	btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
			caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but it can happen when one thread
	 * starts to load the space cache info and some other thread starts a
	 * transaction commit which tries to do an allocation while the first
	 * thread is still loading the space cache info.  The previous loop
	 * should have kept us from choosing this block group, but if we've
	 * moved to the state where we will wait on caching block groups we
	 * need to first check if we're doing a fast load here, so we can wait
	 * for it to finish; otherwise we could end up allocating from a block
	 * group whose cache gets evicted for one reason or another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		atomic_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
		mutex_lock(&caching_ctl->mutex);
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
			caching_ctl->progress = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
				cache->has_caching_ctl = 1;
			}
		}
		spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
		if (ret == 1 &&
		    btrfs_should_fragment_free_space(fs_info->extent_root,
						     cache)) {
			u64 bytes_used;

			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			bytes_used = cache->key.offset -
				btrfs_block_group_used(&cache->item);
			cache->space_info->bytes_used += bytes_used >> 1;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			fragment_free_space(fs_info->extent_root, cache);
		}
#endif
		mutex_unlock(&caching_ctl->mutex);

		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info->extent_root, cache);
			return 0;
		}
	} else {
		/*
		 * We are not going to do the fast caching, so set cached to
		 * the appropriate value and wake up any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
			cache->has_caching_ctl = 1;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->commit_root_sem);
	atomic_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->commit_root_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

	return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}
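
/*
 * Illustrative sketch (not part of the original file, kept compiled
 * out): looking up the block group containing a logical byte.  The
 * search takes a reference on the returned block group (see
 * block_group_cache_tree_search above), so the caller must drop it
 * with btrfs_put_block_group().  fs_info and bytenr are assumed to
 * come from the caller.
 */
#if 0
static void example_lookup_block_group(struct btrfs_fs_info *fs_info,
				       u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(fs_info, bytenr);
	if (cache) {
		/* ... use the block group ... */
		btrfs_put_block_group(cache);
	}
}
#endif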

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to lookup reference count and flags of a tree block.
 *
 * the head node for delayed refs is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  the head
 * node may also store the extent flags to set.  This way you can check
 * to see what the reference count and extent flags would be once all of
 * the delayed refs are processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different
	 */
	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
		offset = root->nodesize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}

search_again:
	key.objectid = bytenr;
	key.offset = offset;
	if (metadata)
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == root->nodesize)
				ret = 0;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and try
			 * again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto search_again;
		}
		spin_lock(&head->lock);
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}

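/*
 * Illustrative sketch (not part of the original file, kept compiled
 * out): querying the current reference count and flags of a tree
 * block, including pending delayed ref modifications.  The bytenr,
 * root, and level are assumed to come from the caller; with skinny
 * metadata, the block's level is passed as the offset and metadata=1.
 */
#if 0
static void example_query_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 bytenr, int level)
{
	u64 refs = 0;
	u64 flags = 0;

	if (!btrfs_lookup_extent_info(trans, root, bytenr, level, 1,
				      &refs, &flags))
		pr_debug("extent %llu: refs %llu flags 0x%llx\n",
			 bytenr, refs, flags);
}
#endif
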
/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) Differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Full back refs are actually generic and can
 * be used in all the cases implicit back refs are used. Their major
 * shortcoming is overhead: every time a tree block gets COWed, we have
 * to update the back ref entries for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for the
 * pointers in it. This means most tree-related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back ref conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for the
 * pointers in the block. Remove these full back refs and add implicit
 * back refs for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * the pointers in the block. Add full back refs for every pointer in the
 * block and increase the lower level extents' reference counts. The
 * original implicit back refs are entailed to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block and increase the lower level extents' reference counts.
 *
 * Back reference key composition:
 *
 * The key objectid corresponds to the first byte in the extent.
 * The key type is used to differentiate between types of back refs.
 * The key offset has different meanings for different types of
 * back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, implicit back refs are used.
 * The fields are filled in as:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist only of a key. The key offset for the implicit back refs is
 * the objectid of the block's owner tree. The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required. This information is stored in
 * the tree block info structure.
 */

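/*
 * Illustrative sketch (not part of the original file, kept compiled
 * out): composing the key of an implicit data back ref as described
 * above.  The extent start, inode number, and file offset are made-up
 * values; hash_extent_data_ref() is defined later in this file.
 */
#if 0
static void example_data_backref_key(void)
{
	struct btrfs_key key;

	key.objectid = 13631488;		/* first byte of the extent */
	key.type = BTRFS_EXTENT_DATA_REF_KEY;	/* implicit data back ref */
	/* offset is the hash of (subvol root, inode, original offset) */
	key.offset = hash_extent_data_ref(BTRFS_FS_TREE_OBJECTID, 257, 0);
}
#endif
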
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0); /* Corruption */
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret); /* Corruption */

	btrfs_extend_item(root, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop, int *last_ref)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
		*last_ref = 1;
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * look for an inline back ref.  if the back ref is found, *ref_ret is set
 * to the address of the inline back ref, and 0 is returned.
 *
 * if the back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;

	/*
	 * Owner is our parent level, so we can just add one to get the level
	 * for the block we are interested in.
	 */
	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = owner;
	}

again:
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = false;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
				ret = 0;
		}
		if (ret) {
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	} else if (WARN_ON(ret)) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add a new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add a new inline back
		 * ref if there is any kind of item for this block.
		 */
1704 		if (find_next_key(path, 0, &key) == 0 &&
1705 		    key.objectid == bytenr &&
1706 		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
1707 			err = -EAGAIN;
1708 			goto out;
1709 		}
1710 	}
1711 	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
1712 out:
1713 	if (insert) {
1714 		path->keep_locks = 0;
1715 		btrfs_unlock_up_safe(path, 1);
1716 	}
1717 	return err;
1718 }
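
/*
 * Return contract of lookup_inline_extent_backref() above, as relied on by
 * the callers that follow:
 *
 *   0       - *ref_ret points at a matching inline ref; update it in place.
 *   -ENOENT - no matching inline ref.  If @insert was set, the path is left
 *             positioned so setup_inline_extent_backref() can extend the
 *             item and add one.
 *   -EAGAIN - @insert was set but the extent item is already full (or a
 *             keyed ref item may exist for this block), so the caller must
 *             fall back to a separate backref item instead.
 */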
1719 
1720 /*
1721  * helper to add new inline back ref
1722  */
1723 static noinline_for_stack
1724 void setup_inline_extent_backref(struct btrfs_root *root,
1725 				 struct btrfs_path *path,
1726 				 struct btrfs_extent_inline_ref *iref,
1727 				 u64 parent, u64 root_objectid,
1728 				 u64 owner, u64 offset, int refs_to_add,
1729 				 struct btrfs_delayed_extent_op *extent_op)
1730 {
1731 	struct extent_buffer *leaf;
1732 	struct btrfs_extent_item *ei;
1733 	unsigned long ptr;
1734 	unsigned long end;
1735 	unsigned long item_offset;
1736 	u64 refs;
1737 	int size;
1738 	int type;
1739 
1740 	leaf = path->nodes[0];
1741 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1742 	item_offset = (unsigned long)iref - (unsigned long)ei;
1743 
1744 	type = extent_ref_type(parent, owner);
1745 	size = btrfs_extent_inline_ref_size(type);
1746 
1747 	btrfs_extend_item(root, path, size);
1748 
1749 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1750 	refs = btrfs_extent_refs(leaf, ei);
1751 	refs += refs_to_add;
1752 	btrfs_set_extent_refs(leaf, ei, refs);
1753 	if (extent_op)
1754 		__run_delayed_extent_op(extent_op, leaf, ei);
1755 
1756 	ptr = (unsigned long)ei + item_offset;
1757 	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1758 	if (ptr < end - size)
1759 		memmove_extent_buffer(leaf, ptr + size, ptr,
1760 				      end - size - ptr);
1761 
1762 	iref = (struct btrfs_extent_inline_ref *)ptr;
1763 	btrfs_set_extent_inline_ref_type(leaf, iref, type);
1764 	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1765 		struct btrfs_extent_data_ref *dref;
1766 		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1767 		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1768 		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1769 		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1770 		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1771 	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1772 		struct btrfs_shared_data_ref *sref;
1773 		sref = (struct btrfs_shared_data_ref *)(iref + 1);
1774 		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1775 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1776 	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1777 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1778 	} else {
1779 		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1780 	}
1781 	btrfs_mark_buffer_dirty(leaf);
1782 }
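
/*
 * Layout note for setup_inline_extent_backref() above: inline back refs are
 * packed right after the btrfs_extent_item (plus the btrfs_tree_block_info
 * for non-skinny metadata), ordered by ref type, so adding one is a byte
 * shuffle inside the leaf:
 *
 *   before: [extent_item][ref A][ref C]
 *   after:  [extent_item][ref A][new ref B][ref C]
 *
 * btrfs_extend_item() grows the item by the ref size, and the memmove shifts
 * the tail refs up to open a hole at the insertion point found earlier by
 * lookup_inline_extent_backref().
 */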
1783 
1784 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1785 				 struct btrfs_root *root,
1786 				 struct btrfs_path *path,
1787 				 struct btrfs_extent_inline_ref **ref_ret,
1788 				 u64 bytenr, u64 num_bytes, u64 parent,
1789 				 u64 root_objectid, u64 owner, u64 offset)
1790 {
1791 	int ret;
1792 
1793 	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1794 					   bytenr, num_bytes, parent,
1795 					   root_objectid, owner, offset, 0);
1796 	if (ret != -ENOENT)
1797 		return ret;
1798 
1799 	btrfs_release_path(path);
1800 	*ref_ret = NULL;
1801 
1802 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1803 		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1804 					    root_objectid);
1805 	} else {
1806 		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1807 					     root_objectid, owner, offset);
1808 	}
1809 	return ret;
1810 }
1811 
1812 /*
1813  * helper to update/remove inline back ref
1814  */
1815 static noinline_for_stack
1816 void update_inline_extent_backref(struct btrfs_root *root,
1817 				  struct btrfs_path *path,
1818 				  struct btrfs_extent_inline_ref *iref,
1819 				  int refs_to_mod,
1820 				  struct btrfs_delayed_extent_op *extent_op,
1821 				  int *last_ref)
1822 {
1823 	struct extent_buffer *leaf;
1824 	struct btrfs_extent_item *ei;
1825 	struct btrfs_extent_data_ref *dref = NULL;
1826 	struct btrfs_shared_data_ref *sref = NULL;
1827 	unsigned long ptr;
1828 	unsigned long end;
1829 	u32 item_size;
1830 	int size;
1831 	int type;
1832 	u64 refs;
1833 
1834 	leaf = path->nodes[0];
1835 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1836 	refs = btrfs_extent_refs(leaf, ei);
1837 	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1838 	refs += refs_to_mod;
1839 	btrfs_set_extent_refs(leaf, ei, refs);
1840 	if (extent_op)
1841 		__run_delayed_extent_op(extent_op, leaf, ei);
1842 
1843 	type = btrfs_extent_inline_ref_type(leaf, iref);
1844 
1845 	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1846 		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1847 		refs = btrfs_extent_data_ref_count(leaf, dref);
1848 	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1849 		sref = (struct btrfs_shared_data_ref *)(iref + 1);
1850 		refs = btrfs_shared_data_ref_count(leaf, sref);
1851 	} else {
1852 		refs = 1;
1853 		BUG_ON(refs_to_mod != -1);
1854 	}
1855 
1856 	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1857 	refs += refs_to_mod;
1858 
1859 	if (refs > 0) {
1860 		if (type == BTRFS_EXTENT_DATA_REF_KEY)
1861 			btrfs_set_extent_data_ref_count(leaf, dref, refs);
1862 		else
1863 			btrfs_set_shared_data_ref_count(leaf, sref, refs);
1864 	} else {
1865 		*last_ref = 1;
1866 		size = btrfs_extent_inline_ref_size(type);
1867 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1868 		ptr = (unsigned long)iref;
1869 		end = (unsigned long)ei + item_size;
1870 		if (ptr + size < end)
1871 			memmove_extent_buffer(leaf, ptr, ptr + size,
1872 					      end - ptr - size);
1873 		item_size -= size;
1874 		btrfs_truncate_item(root, path, item_size, 1);
1875 	}
1876 	btrfs_mark_buffer_dirty(leaf);
1877 }
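
/*
 * Note on the shrink path in update_inline_extent_backref() above: once the
 * per-ref count reaches zero, the inline ref is cut out of the item with
 * memmove_extent_buffer() and the item is shrunk via btrfs_truncate_item().
 * *last_ref tells the caller the final reference behind this inline ref is
 * gone, so it can decide whether the extent itself may now be freed.
 */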
1878 
1879 static noinline_for_stack
1880 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1881 				 struct btrfs_root *root,
1882 				 struct btrfs_path *path,
1883 				 u64 bytenr, u64 num_bytes, u64 parent,
1884 				 u64 root_objectid, u64 owner,
1885 				 u64 offset, int refs_to_add,
1886 				 struct btrfs_delayed_extent_op *extent_op)
1887 {
1888 	struct btrfs_extent_inline_ref *iref;
1889 	int ret;
1890 
1891 	ret = lookup_inline_extent_backref(trans, root, path, &iref,
1892 					   bytenr, num_bytes, parent,
1893 					   root_objectid, owner, offset, 1);
1894 	if (ret == 0) {
1895 		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1896 		update_inline_extent_backref(root, path, iref,
1897 					     refs_to_add, extent_op, NULL);
1898 	} else if (ret == -ENOENT) {
1899 		setup_inline_extent_backref(root, path, iref, parent,
1900 					    root_objectid, owner, offset,
1901 					    refs_to_add, extent_op);
1902 		ret = 0;
1903 	}
1904 	return ret;
1905 }
1906 
1907 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1908 				 struct btrfs_root *root,
1909 				 struct btrfs_path *path,
1910 				 u64 bytenr, u64 parent, u64 root_objectid,
1911 				 u64 owner, u64 offset, int refs_to_add)
1912 {
1913 	int ret;
1914 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1915 		BUG_ON(refs_to_add != 1);
1916 		ret = insert_tree_block_ref(trans, root, path, bytenr,
1917 					    parent, root_objectid);
1918 	} else {
1919 		ret = insert_extent_data_ref(trans, root, path, bytenr,
1920 					     parent, root_objectid,
1921 					     owner, offset, refs_to_add);
1922 	}
1923 	return ret;
1924 }
1925 
1926 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1927 				 struct btrfs_root *root,
1928 				 struct btrfs_path *path,
1929 				 struct btrfs_extent_inline_ref *iref,
1930 				 int refs_to_drop, int is_data, int *last_ref)
1931 {
1932 	int ret = 0;
1933 
1934 	BUG_ON(!is_data && refs_to_drop != 1);
1935 	if (iref) {
1936 		update_inline_extent_backref(root, path, iref,
1937 					     -refs_to_drop, NULL, last_ref);
1938 	} else if (is_data) {
1939 		ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
1940 					     last_ref);
1941 	} else {
1942 		*last_ref = 1;
1943 		ret = btrfs_del_item(trans, root, path);
1944 	}
1945 	return ret;
1946 }
1947 
1948 #define in_range(b, first, len)        ((b) >= (first) && (b) < (first) + (len))
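/*
 * Note that in_range() is half-open: in_range(b, first, len) holds for
 * first <= b < first + len, so e.g. in_range(4096, 0, 4096) is false while
 * in_range(4095, 0, 4096) is true.
 */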
1949 static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
1950 			       u64 *discarded_bytes)
1951 {
1952 	int j, ret = 0;
1953 	u64 bytes_left, end;
1954 	u64 aligned_start = ALIGN(start, 1 << 9);
1955 
1956 	if (WARN_ON(start != aligned_start)) {
1957 		len -= aligned_start - start;
1958 		len = round_down(len, 1 << 9);
1959 		start = aligned_start;
1960 	}
1961 
1962 	*discarded_bytes = 0;
1963 
1964 	if (!len)
1965 		return 0;
1966 
1967 	end = start + len;
1968 	bytes_left = len;
1969 
1970 	/* Skip any superblocks on this device. */
1971 	for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
1972 		u64 sb_start = btrfs_sb_offset(j);
1973 		u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
1974 		u64 size = sb_start - start;
1975 
1976 		if (!in_range(sb_start, start, bytes_left) &&
1977 		    !in_range(sb_end, start, bytes_left) &&
1978 		    !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
1979 			continue;
1980 
1981 		/*
1982 		 * Superblock spans beginning of range.  Adjust start and
1983 		 * try again.
1984 		 */
1985 		if (sb_start <= start) {
1986 			start += sb_end - start;
1987 			if (start > end) {
1988 				bytes_left = 0;
1989 				break;
1990 			}
1991 			bytes_left = end - start;
1992 			continue;
1993 		}
1994 
1995 		if (size) {
1996 			ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
1997 						   GFP_NOFS, 0);
1998 			if (!ret)
1999 				*discarded_bytes += size;
2000 			else if (ret != -EOPNOTSUPP)
2001 				return ret;
2002 		}
2003 
2004 		start = sb_end;
2005 		if (start > end) {
2006 			bytes_left = 0;
2007 			break;
2008 		}
2009 		bytes_left = end - start;
2010 	}
2011 
2012 	if (bytes_left) {
2013 		ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
2014 					   GFP_NOFS, 0);
2015 		if (!ret)
2016 			*discarded_bytes += bytes_left;
2017 	}
2018 	return ret;
2019 }
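
/*
 * Worked example for the superblock skipping in btrfs_issue_discard() above
 * (offsets are purely illustrative): with a superblock mirror at 64M and a
 * discard request covering [60M, 70M), the loop discards [60M, 64M), skips
 * [64M, 64M + BTRFS_SUPER_INFO_SIZE), and the trailing bytes_left discard
 * covers the remainder up to 70M.  Only bytes actually discarded are
 * reported through *discarded_bytes.
 */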
2020 
2021 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
2022 			 u64 num_bytes, u64 *actual_bytes)
2023 {
2024 	int ret;
2025 	u64 discarded_bytes = 0;
2026 	struct btrfs_bio *bbio = NULL;
2027 
2028 
2029 	/* Tell the block device(s) that the sectors can be discarded */
2030 	ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
2031 			      bytenr, &num_bytes, &bbio, 0);
2032 	/* Error condition is -ENOMEM */
2033 	if (!ret) {
2034 		struct btrfs_bio_stripe *stripe = bbio->stripes;
2035 		int i;
2036 
2037 
2038 		for (i = 0; i < bbio->num_stripes; i++, stripe++) {
2039 			u64 bytes;
2040 			if (!stripe->dev->can_discard)
2041 				continue;
2042 
2043 			ret = btrfs_issue_discard(stripe->dev->bdev,
2044 						  stripe->physical,
2045 						  stripe->length,
2046 						  &bytes);
2047 			if (!ret)
2048 				discarded_bytes += bytes;
2049 			else if (ret != -EOPNOTSUPP)
2050 				break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
2051 
2052 			/*
2053 			 * Just in case we get back EOPNOTSUPP for some reason,
2054 			 * just ignore the return value so we don't screw up
2055 			 * people calling discard_extent.
2056 			 */
2057 			ret = 0;
2058 		}
2059 		btrfs_put_bbio(bbio);
2060 	}
2061 
2062 	if (actual_bytes)
2063 		*actual_bytes = discarded_bytes;
2064 
2065 
2066 	if (ret == -EOPNOTSUPP)
2067 		ret = 0;
2068 	return ret;
2069 }
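
/*
 * Minimal usage sketch for btrfs_discard_extent() above (illustrative only:
 * the helper name and the 1M logical offset/length are made up; real callers
 * are the fitrim ioctl and the transaction commit path):
 */
#if 0
static void discard_example(struct btrfs_root *root)
{
	u64 trimmed = 0;
	int ret;

	/* Ask the underlying device(s) to discard one logical megabyte. */
	ret = btrfs_discard_extent(root, 1024 * 1024, 1024 * 1024, &trimmed);
	if (ret)
		pr_warn("btrfs: discard failed: %d\n", ret);
	else
		pr_info("btrfs: discarded %llu bytes\n", trimmed);
}
#endif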
2070 
2071 /* Can return -ENOMEM */
2072 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2073 			 struct btrfs_root *root,
2074 			 u64 bytenr, u64 num_bytes, u64 parent,
2075 			 u64 root_objectid, u64 owner, u64 offset)
2076 {
2077 	int ret;
2078 	struct btrfs_fs_info *fs_info = root->fs_info;
2079 
2080 	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
2081 	       root_objectid == BTRFS_TREE_LOG_OBJECTID);
2082 
2083 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2084 		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
2085 					num_bytes,
2086 					parent, root_objectid, (int)owner,
2087 					BTRFS_ADD_DELAYED_REF, NULL);
2088 	} else {
2089 		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
2090 					num_bytes, parent, root_objectid,
2091 					owner, offset, 0,
2092 					BTRFS_ADD_DELAYED_REF, NULL);
2093 	}
2094 	return ret;
2095 }
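
/*
 * The owner test above follows the usual btrfs convention: tree blocks are
 * tracked with owner == level, which is always below
 * BTRFS_FIRST_FREE_OBJECTID (256), while data extents carry the owning inode
 * number, so the comparison cleanly routes tree refs and data refs to their
 * respective delayed ref queues.
 */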
2096 
2097 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2098 				  struct btrfs_root *root,
2099 				  struct btrfs_delayed_ref_node *node,
2100 				  u64 parent, u64 root_objectid,
2101 				  u64 owner, u64 offset, int refs_to_add,
2102 				  struct btrfs_delayed_extent_op *extent_op)
2103 {
2104 	struct btrfs_fs_info *fs_info = root->fs_info;
2105 	struct btrfs_path *path;
2106 	struct extent_buffer *leaf;
2107 	struct btrfs_extent_item *item;
2108 	struct btrfs_key key;
2109 	u64 bytenr = node->bytenr;
2110 	u64 num_bytes = node->num_bytes;
2111 	u64 refs;
2112 	int ret;
2113 
2114 	path = btrfs_alloc_path();
2115 	if (!path)
2116 		return -ENOMEM;
2117 
2118 	path->reada = 1;
2119 	path->leave_spinning = 1;
2120 	/* this will setup the path even if it fails to insert the back ref */
2121 	ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
2122 					   bytenr, num_bytes, parent,
2123 					   root_objectid, owner, offset,
2124 					   refs_to_add, extent_op);
2125 	if ((ret < 0 && ret != -EAGAIN) || !ret)
2126 		goto out;
2127 
2128 	/*
2129 	 * Ok we had -EAGAIN which means we didn't have space to insert an
2130 	 * inline extent ref, so just update the reference count and add a
2131 	 * normal backref.
2132 	 */
2133 	leaf = path->nodes[0];
2134 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2135 	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2136 	refs = btrfs_extent_refs(leaf, item);
2137 	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2138 	if (extent_op)
2139 		__run_delayed_extent_op(extent_op, leaf, item);
2140 
2141 	btrfs_mark_buffer_dirty(leaf);
2142 	btrfs_release_path(path);
2143 
2144 	path->reada = 1;
2145 	path->leave_spinning = 1;
2146 	/* now insert the actual backref */
2147 	ret = insert_extent_backref(trans, root->fs_info->extent_root,
2148 				    path, bytenr, parent, root_objectid,
2149 				    owner, offset, refs_to_add);
2150 	if (ret)
2151 		btrfs_abort_transaction(trans, root, ret);
2152 out:
2153 	btrfs_free_path(path);
2154 	return ret;
2155 }
2156 
2157 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2158 				struct btrfs_root *root,
2159 				struct btrfs_delayed_ref_node *node,
2160 				struct btrfs_delayed_extent_op *extent_op,
2161 				int insert_reserved)
2162 {
2163 	int ret = 0;
2164 	struct btrfs_delayed_data_ref *ref;
2165 	struct btrfs_key ins;
2166 	u64 parent = 0;
2167 	u64 ref_root = 0;
2168 	u64 flags = 0;
2169 
2170 	ins.objectid = node->bytenr;
2171 	ins.offset = node->num_bytes;
2172 	ins.type = BTRFS_EXTENT_ITEM_KEY;
2173 
2174 	ref = btrfs_delayed_node_to_data_ref(node);
2175 	trace_run_delayed_data_ref(node, ref, node->action);
2176 
2177 	if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2178 		parent = ref->parent;
2179 	ref_root = ref->root;
2180 
2181 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2182 		if (extent_op)
2183 			flags |= extent_op->flags_to_set;
2184 		ret = alloc_reserved_file_extent(trans, root,
2185 						 parent, ref_root, flags,
2186 						 ref->objectid, ref->offset,
2187 						 &ins, node->ref_mod);
2188 	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
2189 		ret = __btrfs_inc_extent_ref(trans, root, node, parent,
2190 					     ref_root, ref->objectid,
2191 					     ref->offset, node->ref_mod,
2192 					     extent_op);
2193 	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
2194 		ret = __btrfs_free_extent(trans, root, node, parent,
2195 					  ref_root, ref->objectid,
2196 					  ref->offset, node->ref_mod,
2197 					  extent_op);
2198 	} else {
2199 		BUG();
2200 	}
2201 	return ret;
2202 }
2203 
2204 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2205 				    struct extent_buffer *leaf,
2206 				    struct btrfs_extent_item *ei)
2207 {
2208 	u64 flags = btrfs_extent_flags(leaf, ei);
2209 	if (extent_op->update_flags) {
2210 		flags |= extent_op->flags_to_set;
2211 		btrfs_set_extent_flags(leaf, ei, flags);
2212 	}
2213 
2214 	if (extent_op->update_key) {
2215 		struct btrfs_tree_block_info *bi;
2216 		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2217 		bi = (struct btrfs_tree_block_info *)(ei + 1);
2218 		btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2219 	}
2220 }
2221 
2222 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2223 				 struct btrfs_root *root,
2224 				 struct btrfs_delayed_ref_node *node,
2225 				 struct btrfs_delayed_extent_op *extent_op)
2226 {
2227 	struct btrfs_key key;
2228 	struct btrfs_path *path;
2229 	struct btrfs_extent_item *ei;
2230 	struct extent_buffer *leaf;
2231 	u32 item_size;
2232 	int ret;
2233 	int err = 0;
2234 	int metadata = !extent_op->is_data;
2235 
2236 	if (trans->aborted)
2237 		return 0;
2238 
2239 	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2240 		metadata = 0;
2241 
2242 	path = btrfs_alloc_path();
2243 	if (!path)
2244 		return -ENOMEM;
2245 
2246 	key.objectid = node->bytenr;
2247 
2248 	if (metadata) {
2249 		key.type = BTRFS_METADATA_ITEM_KEY;
2250 		key.offset = extent_op->level;
2251 	} else {
2252 		key.type = BTRFS_EXTENT_ITEM_KEY;
2253 		key.offset = node->num_bytes;
2254 	}
2255 
2256 again:
2257 	path->reada = 1;
2258 	path->leave_spinning = 1;
2259 	ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2260 				path, 0, 1);
2261 	if (ret < 0) {
2262 		err = ret;
2263 		goto out;
2264 	}
2265 	if (ret > 0) {
2266 		if (metadata) {
2267 			if (path->slots[0] > 0) {
2268 				path->slots[0]--;
2269 				btrfs_item_key_to_cpu(path->nodes[0], &key,
2270 						      path->slots[0]);
2271 				if (key.objectid == node->bytenr &&
2272 				    key.type == BTRFS_EXTENT_ITEM_KEY &&
2273 				    key.offset == node->num_bytes)
2274 					ret = 0;
2275 			}
2276 			if (ret > 0) {
2277 				btrfs_release_path(path);
2278 				metadata = 0;
2279 
2280 				key.objectid = node->bytenr;
2281 				key.offset = node->num_bytes;
2282 				key.type = BTRFS_EXTENT_ITEM_KEY;
2283 				goto again;
2284 			}
2285 		} else {
2286 			err = -EIO;
2287 			goto out;
2288 		}
2289 	}
2290 
2291 	leaf = path->nodes[0];
2292 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2293 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2294 	if (item_size < sizeof(*ei)) {
2295 		ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2296 					     path, (u64)-1, 0);
2297 		if (ret < 0) {
2298 			err = ret;
2299 			goto out;
2300 		}
2301 		leaf = path->nodes[0];
2302 		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2303 	}
2304 #endif
2305 	BUG_ON(item_size < sizeof(*ei));
2306 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2307 	__run_delayed_extent_op(extent_op, leaf, ei);
2308 
2309 	btrfs_mark_buffer_dirty(leaf);
2310 out:
2311 	btrfs_free_path(path);
2312 	return err;
2313 }
2314 
2315 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2316 				struct btrfs_root *root,
2317 				struct btrfs_delayed_ref_node *node,
2318 				struct btrfs_delayed_extent_op *extent_op,
2319 				int insert_reserved)
2320 {
2321 	int ret = 0;
2322 	struct btrfs_delayed_tree_ref *ref;
2323 	struct btrfs_key ins;
2324 	u64 parent = 0;
2325 	u64 ref_root = 0;
2326 	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2327 						 SKINNY_METADATA);
2328 
2329 	ref = btrfs_delayed_node_to_tree_ref(node);
2330 	trace_run_delayed_tree_ref(node, ref, node->action);
2331 
2332 	if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2333 		parent = ref->parent;
2334 	ref_root = ref->root;
2335 
2336 	ins.objectid = node->bytenr;
2337 	if (skinny_metadata) {
2338 		ins.offset = ref->level;
2339 		ins.type = BTRFS_METADATA_ITEM_KEY;
2340 	} else {
2341 		ins.offset = node->num_bytes;
2342 		ins.type = BTRFS_EXTENT_ITEM_KEY;
2343 	}
2344 
2345 	if (node->ref_mod != 1) {
2346 		btrfs_err(root->fs_info,
2347 	"btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
2348 			  node->bytenr, node->ref_mod, node->action, ref_root,
2349 			  parent);
2350 		return -EIO;
2351 	}
2352 	if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2353 		BUG_ON(!extent_op || !extent_op->update_flags);
2354 		ret = alloc_reserved_tree_block(trans, root,
2355 						parent, ref_root,
2356 						extent_op->flags_to_set,
2357 						&extent_op->key,
2358 						ref->level, &ins);
2359 	} else if (node->action == BTRFS_ADD_DELAYED_REF) {
2360 		ret = __btrfs_inc_extent_ref(trans, root, node,
2361 					     parent, ref_root,
2362 					     ref->level, 0, 1,
2363 					     extent_op);
2364 	} else if (node->action == BTRFS_DROP_DELAYED_REF) {
2365 		ret = __btrfs_free_extent(trans, root, node,
2366 					  parent, ref_root,
2367 					  ref->level, 0, 1, extent_op);
2368 	} else {
2369 		BUG();
2370 	}
2371 	return ret;
2372 }
2373 
2374 /* helper function to actually process a single delayed ref entry */
2375 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2376 			       struct btrfs_root *root,
2377 			       struct btrfs_delayed_ref_node *node,
2378 			       struct btrfs_delayed_extent_op *extent_op,
2379 			       int insert_reserved)
2380 {
2381 	int ret = 0;
2382 
2383 	if (trans->aborted) {
2384 		if (insert_reserved)
2385 			btrfs_pin_extent(root, node->bytenr,
2386 					 node->num_bytes, 1);
2387 		return 0;
2388 	}
2389 
2390 	if (btrfs_delayed_ref_is_head(node)) {
2391 		struct btrfs_delayed_ref_head *head;
2392 		/*
2393 		 * we've hit the end of the chain and we were supposed
2394 		 * to insert this extent into the tree.  But, it got
2395 		 * deleted before we ever needed to insert it, so all
2396 		 * we have to do is clean up the accounting
2397 		 */
2398 		BUG_ON(extent_op);
2399 		head = btrfs_delayed_node_to_head(node);
2400 		trace_run_delayed_ref_head(node, head, node->action);
2401 
2402 		if (insert_reserved) {
2403 			btrfs_pin_extent(root, node->bytenr,
2404 					 node->num_bytes, 1);
2405 			if (head->is_data) {
2406 				ret = btrfs_del_csums(trans, root,
2407 						      node->bytenr,
2408 						      node->num_bytes);
2409 			}
2410 		}
2411 
2412 		/* Also free its reserved qgroup space */
2413 		btrfs_qgroup_free_delayed_ref(root->fs_info,
2414 					      head->qgroup_ref_root,
2415 					      head->qgroup_reserved);
2416 		return ret;
2417 	}
2418 
2419 	if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2420 	    node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2421 		ret = run_delayed_tree_ref(trans, root, node, extent_op,
2422 					   insert_reserved);
2423 	else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2424 		 node->type == BTRFS_SHARED_DATA_REF_KEY)
2425 		ret = run_delayed_data_ref(trans, root, node, extent_op,
2426 					   insert_reserved);
2427 	else
2428 		BUG();
2429 	return ret;
2430 }
2431 
2432 static inline struct btrfs_delayed_ref_node *
2433 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2434 {
2435 	struct btrfs_delayed_ref_node *ref;
2436 
2437 	if (list_empty(&head->ref_list))
2438 		return NULL;
2439 
2440 	/*
2441 	 * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2442 	 * This is to prevent a ref count from going down to zero, which deletes
2443 	 * the extent item from the extent tree, when there still are references
2444 	 * to add, which would fail because they would not find the extent item.
2445 	 */
2446 	list_for_each_entry(ref, &head->ref_list, list) {
2447 		if (ref->action == BTRFS_ADD_DELAYED_REF)
2448 			return ref;
2449 	}
2450 
2451 	return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
2452 			  list);
2453 }
2454 
2455 /*
2456  * Returns 0 on success or if called with an already aborted transaction.
2457  * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2458  */
2459 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2460 					     struct btrfs_root *root,
2461 					     unsigned long nr)
2462 {
2463 	struct btrfs_delayed_ref_root *delayed_refs;
2464 	struct btrfs_delayed_ref_node *ref;
2465 	struct btrfs_delayed_ref_head *locked_ref = NULL;
2466 	struct btrfs_delayed_extent_op *extent_op;
2467 	struct btrfs_fs_info *fs_info = root->fs_info;
2468 	ktime_t start = ktime_get();
2469 	int ret;
2470 	unsigned long count = 0;
2471 	unsigned long actual_count = 0;
2472 	int must_insert_reserved = 0;
2473 
2474 	delayed_refs = &trans->transaction->delayed_refs;
2475 	while (1) {
2476 		if (!locked_ref) {
2477 			if (count >= nr)
2478 				break;
2479 
2480 			spin_lock(&delayed_refs->lock);
2481 			locked_ref = btrfs_select_ref_head(trans);
2482 			if (!locked_ref) {
2483 				spin_unlock(&delayed_refs->lock);
2484 				break;
2485 			}
2486 
2487 			/* grab the lock that says we are going to process
2488 			 * all the refs for this head */
2489 			ret = btrfs_delayed_ref_lock(trans, locked_ref);
2490 			spin_unlock(&delayed_refs->lock);
2491 			/*
2492 			 * we may have dropped the spin lock to get the head
2493 			 * mutex lock, and that might have given someone else
2494 			 * time to free the head.  If that's true, it has been
2495 			 * removed from our list and we can move on.
2496 			 */
2497 			if (ret == -EAGAIN) {
2498 				locked_ref = NULL;
2499 				count++;
2500 				continue;
2501 			}
2502 		}
2503 
2504 		/*
2505 		 * We need to try and merge add/drops of the same ref since we
2506 		 * can run into issues with relocate dropping the implicit ref
2507 		 * and then it being added back again before the drop can
2508 		 * finish.  If we merged anything we need to re-loop so we can
2509 		 * get a good ref.
2510 		 * Or we can get node references of the same type that weren't
2511 		 * merged when created due to bumps in the tree mod seq, and
2512 		 * we need to merge them to prevent adding an inline extent
2513 		 * backref before dropping it (triggering a BUG_ON at
2514 		 * insert_inline_extent_backref()).
2515 		 */
2516 		spin_lock(&locked_ref->lock);
2517 		btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2518 					 locked_ref);
2519 
2520 		/*
2521 		 * locked_ref is the head node, so we have to go one
2522 		 * node back for any delayed ref updates
2523 		 */
2524 		ref = select_delayed_ref(locked_ref);
2525 
2526 		if (ref && ref->seq &&
2527 		    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2528 			spin_unlock(&locked_ref->lock);
2529 			spin_lock(&delayed_refs->lock);
2530 			locked_ref->processing = 0;
2531 			delayed_refs->num_heads_ready++;
2532 			spin_unlock(&delayed_refs->lock);
2533 			btrfs_delayed_ref_unlock(locked_ref);
2534 			locked_ref = NULL;
2535 			cond_resched();
2536 			count++;
2537 			continue;
2538 		}
2539 
2540 		/*
2541 		 * record the must insert reserved flag before we
2542 		 * drop the spin lock.
2543 		 */
2544 		must_insert_reserved = locked_ref->must_insert_reserved;
2545 		locked_ref->must_insert_reserved = 0;
2546 
2547 		extent_op = locked_ref->extent_op;
2548 		locked_ref->extent_op = NULL;
2549 
2550 		if (!ref) {
2551 
2552 
2553 		/* All delayed refs have been processed, go ahead
2554 			 * and send the head node to run_one_delayed_ref,
2555 			 * so that any accounting fixes can happen
2556 			 */
2557 			ref = &locked_ref->node;
2558 
2559 			if (extent_op && must_insert_reserved) {
2560 				btrfs_free_delayed_extent_op(extent_op);
2561 				extent_op = NULL;
2562 			}
2563 
2564 			if (extent_op) {
2565 				spin_unlock(&locked_ref->lock);
2566 				ret = run_delayed_extent_op(trans, root,
2567 							    ref, extent_op);
2568 				btrfs_free_delayed_extent_op(extent_op);
2569 
2570 				if (ret) {
2571 					/*
2572 					 * Need to reset must_insert_reserved if
2573 					 * there was an error so the abort stuff
2574 					 * can cleanup the reserved space
2575 					 * properly.
2576 					 */
2577 					if (must_insert_reserved)
2578 						locked_ref->must_insert_reserved = 1;
2579 					spin_lock(&delayed_refs->lock);
2580 					locked_ref->processing = 0;
2581 					delayed_refs->num_heads_ready++;
2582 					spin_unlock(&delayed_refs->lock);
2583 					btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2584 					btrfs_delayed_ref_unlock(locked_ref);
2585 					return ret;
2586 				}
2587 				continue;
2588 			}
2589 
2590 			/*
2591 			 * Need to drop our head ref lock and re-acquire the
2592 			 * delayed ref lock and then re-check to make sure
2593 			 * nobody got added.
2594 			 */
2595 			spin_unlock(&locked_ref->lock);
2596 			spin_lock(&delayed_refs->lock);
2597 			spin_lock(&locked_ref->lock);
2598 			if (!list_empty(&locked_ref->ref_list) ||
2599 			    locked_ref->extent_op) {
2600 				spin_unlock(&locked_ref->lock);
2601 				spin_unlock(&delayed_refs->lock);
2602 				continue;
2603 			}
2604 			ref->in_tree = 0;
2605 			delayed_refs->num_heads--;
2606 			rb_erase(&locked_ref->href_node,
2607 				 &delayed_refs->href_root);
2608 			spin_unlock(&delayed_refs->lock);
2609 		} else {
2610 			actual_count++;
2611 			ref->in_tree = 0;
2612 			list_del(&ref->list);
2613 		}
2614 		atomic_dec(&delayed_refs->num_entries);
2615 
2616 		if (!btrfs_delayed_ref_is_head(ref)) {
2617 			/*
2618 			 * when we play the delayed ref, also correct the
2619 			 * ref_mod on head
2620 			 */
2621 			switch (ref->action) {
2622 			case BTRFS_ADD_DELAYED_REF:
2623 			case BTRFS_ADD_DELAYED_EXTENT:
2624 				locked_ref->node.ref_mod -= ref->ref_mod;
2625 				break;
2626 			case BTRFS_DROP_DELAYED_REF:
2627 				locked_ref->node.ref_mod += ref->ref_mod;
2628 				break;
2629 			default:
2630 				WARN_ON(1);
2631 			}
2632 		}
2633 		spin_unlock(&locked_ref->lock);
2634 
2635 		ret = run_one_delayed_ref(trans, root, ref, extent_op,
2636 					  must_insert_reserved);
2637 
2638 		btrfs_free_delayed_extent_op(extent_op);
2639 		if (ret) {
2640 			locked_ref->processing = 0;
2641 			btrfs_delayed_ref_unlock(locked_ref);
2642 			btrfs_put_delayed_ref(ref);
2643 			btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2644 			return ret;
2645 		}
2646 
2647 		/*
2648 		 * If this node is a head, that means all the refs in this head
2649 		 * have been dealt with, and we will pick the next head to deal
2650 		 * with, so we must unlock the head and drop it from the cluster
2651 		 * list before we release it.
2652 		 */
2653 		if (btrfs_delayed_ref_is_head(ref)) {
2654 			if (locked_ref->is_data &&
2655 			    locked_ref->total_ref_mod < 0) {
2656 				spin_lock(&delayed_refs->lock);
2657 				delayed_refs->pending_csums -= ref->num_bytes;
2658 				spin_unlock(&delayed_refs->lock);
2659 			}
2660 			btrfs_delayed_ref_unlock(locked_ref);
2661 			locked_ref = NULL;
2662 		}
2663 		btrfs_put_delayed_ref(ref);
2664 		count++;
2665 		cond_resched();
2666 	}
2667 
2668 	/*
2669 	 * We don't want to include ref heads since we can have empty ref heads
2670 	 * and those will drastically skew our runtime down since we just do
2671 	 * accounting, no actual extent tree updates.
2672 	 */
2673 	if (actual_count > 0) {
2674 		u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2675 		u64 avg;
2676 
2677 		/*
2678 		 * We weigh the current average higher than our current runtime
2679 		 * to avoid large swings in the average.
2680 		 */
2681 		spin_lock(&delayed_refs->lock);
2682 		avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2683 		fs_info->avg_delayed_ref_runtime = avg >> 2;	/* div by 4 */
2684 		spin_unlock(&delayed_refs->lock);
2685 	}
2686 	return 0;
2687 }
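
/*
 * The average maintained at the end of __btrfs_run_delayed_refs() above is a
 * simple exponentially weighted moving average:
 *
 *   avg_new = (3 * avg_old + runtime) / 4
 *
 * e.g. an old average of 80us and a 200us run give (240000 + 200000) / 4 =
 * 110000ns, so a single slow batch only moves the estimate a quarter of the
 * way toward the outlier.
 */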
2688 
2689 #ifdef SCRAMBLE_DELAYED_REFS
2690 /*
2691  * Normally delayed refs get processed in ascending bytenr order. This
2692  * correlates in most cases to the order added. To expose dependencies on this
2693  * order, we start to process the tree in the middle instead of the beginning
2694  */
2695 static u64 find_middle(struct rb_root *root)
2696 {
2697 	struct rb_node *n = root->rb_node;
2698 	struct btrfs_delayed_ref_node *entry;
2699 	int alt = 1;
2700 	u64 middle;
2701 	u64 first = 0, last = 0;
2702 
2703 	n = rb_first(root);
2704 	if (n) {
2705 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2706 		first = entry->bytenr;
2707 	}
2708 	n = rb_last(root);
2709 	if (n) {
2710 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2711 		last = entry->bytenr;
2712 	}
2713 	n = root->rb_node;
2714 
2715 	while (n) {
2716 		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2717 		WARN_ON(!entry->in_tree);
2718 
2719 		middle = entry->bytenr;
2720 
2721 		if (alt)
2722 			n = n->rb_left;
2723 		else
2724 			n = n->rb_right;
2725 
2726 		alt = 1 - alt;
2727 	}
2728 	return middle;
2729 }
2730 #endif
2731 
2732 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2733 {
2734 	u64 num_bytes;
2735 
2736 	num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2737 			     sizeof(struct btrfs_extent_inline_ref));
2738 	if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2739 		num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2740 
2741 	/*
2742 	 * We don't ever fill up leaves all the way so multiply by 2 just to be
2743 	 * closer to what we're really going to want to use.
2744 	 */
2745 	return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2746 }
2747 
2748 /*
2749  * Takes the number of bytes to be checksummed and figures out how many
2750  * would require to store the csums for that many bytes.
2751  */
2752 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
2753 {
2754 	u64 csum_size;
2755 	u64 num_csums_per_leaf;
2756 	u64 num_csums;
2757 
2758 	csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
2759 	num_csums_per_leaf = div64_u64(csum_size,
2760 			(u64)btrfs_super_csum_size(root->fs_info->super_copy));
2761 	num_csums = div64_u64(csum_bytes, root->sectorsize);
2762 	num_csums += num_csums_per_leaf - 1;
2763 	num_csums = div64_u64(num_csums, num_csums_per_leaf);
2764 	return num_csums;
2765 }
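
/*
 * Rough worked example for btrfs_csum_bytes_to_leaves(), assuming a 16K
 * nodesize (so roughly 16K of usable leaf data), 4 byte crc32c csums and a
 * 4K sectorsize: one leaf holds on the order of 4000 csum entries, so 1GiB
 * of data (262144 sectors) needs about 65 csum leaves.  The
 * "+ num_csums_per_leaf - 1" above is the usual round-up division idiom.
 */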
2766 
2767 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2768 				       struct btrfs_root *root)
2769 {
2770 	struct btrfs_block_rsv *global_rsv;
2771 	u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2772 	u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2773 	u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
2774 	u64 num_bytes, num_dirty_bgs_bytes;
2775 	int ret = 0;
2776 
2777 	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2778 	num_heads = heads_to_leaves(root, num_heads);
2779 	if (num_heads > 1)
2780 		num_bytes += (num_heads - 1) * root->nodesize;
2781 	num_bytes <<= 1;
2782 	num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
2783 	num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
2784 							     num_dirty_bgs);
2785 	global_rsv = &root->fs_info->global_block_rsv;
2786 
2787 	/*
2788 	 * If we can't allocate any more chunks, let's make sure we have _lots_ of
2789 	 * wiggle room since running delayed refs can create more delayed refs.
2790 	 */
2791 	if (global_rsv->space_info->full) {
2792 		num_dirty_bgs_bytes <<= 1;
2793 		num_bytes <<= 1;
2794 	}
2795 
2796 	spin_lock(&global_rsv->lock);
2797 	if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2798 		ret = 1;
2799 	spin_unlock(&global_rsv->lock);
2800 	return ret;
2801 }
2802 
2803 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2804 				       struct btrfs_root *root)
2805 {
2806 	struct btrfs_fs_info *fs_info = root->fs_info;
2807 	u64 num_entries =
2808 		atomic_read(&trans->transaction->delayed_refs.num_entries);
2809 	u64 avg_runtime;
2810 	u64 val;
2811 
2812 	smp_mb();
2813 	avg_runtime = fs_info->avg_delayed_ref_runtime;
2814 	val = num_entries * avg_runtime;
2815 	if (val >= NSEC_PER_SEC)
2816 		return 1;
2817 	if (val >= NSEC_PER_SEC / 2)
2818 		return 2;
2819 
2820 	return btrfs_check_space_for_delayed_refs(trans, root);
2821 }
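
/*
 * So btrfs_should_throttle_delayed_refs() has three outcomes, keyed off the
 * estimated time to drain the queue (entries * average ns per ref):
 *
 *   >= 1 second   -> 1 (throttle hard)
 *   >= 1/2 second -> 2 (throttle softly)
 *   otherwise     -> fall back to the global reservation check
 */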
2822 
2823 struct async_delayed_refs {
2824 	struct btrfs_root *root;
2825 	int count;
2826 	int error;
2827 	int sync;
2828 	struct completion wait;
2829 	struct btrfs_work work;
2830 };
2831 
2832 static void delayed_ref_async_start(struct btrfs_work *work)
2833 {
2834 	struct async_delayed_refs *async;
2835 	struct btrfs_trans_handle *trans;
2836 	int ret;
2837 
2838 	async = container_of(work, struct async_delayed_refs, work);
2839 
2840 	trans = btrfs_join_transaction(async->root);
2841 	if (IS_ERR(trans)) {
2842 		async->error = PTR_ERR(trans);
2843 		goto done;
2844 	}
2845 
2846 	/*
2847 	 * trans->sync means that when we call end_transaction, we won't
2848 	 * wait on delayed refs
2849 	 */
2850 	trans->sync = true;
2851 	ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2852 	if (ret)
2853 		async->error = ret;
2854 
2855 	ret = btrfs_end_transaction(trans, async->root);
2856 	if (ret && !async->error)
2857 		async->error = ret;
2858 done:
2859 	if (async->sync)
2860 		complete(&async->wait);
2861 	else
2862 		kfree(async);
2863 }
2864 
2865 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2866 				 unsigned long count, int wait)
2867 {
2868 	struct async_delayed_refs *async;
2869 	int ret;
2870 
2871 	async = kmalloc(sizeof(*async), GFP_NOFS);
2872 	if (!async)
2873 		return -ENOMEM;
2874 
2875 	async->root = root->fs_info->tree_root;
2876 	async->count = count;
2877 	async->error = 0;
2878 	if (wait)
2879 		async->sync = 1;
2880 	else
2881 		async->sync = 0;
2882 	init_completion(&async->wait);
2883 
2884 	btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2885 			delayed_ref_async_start, NULL, NULL);
2886 
2887 	btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2888 
2889 	if (wait) {
2890 		wait_for_completion(&async->wait);
2891 		ret = async->error;
2892 		kfree(async);
2893 		return ret;
2894 	}
2895 	return 0;
2896 }
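
/*
 * Usage sketch for btrfs_async_run_delayed_refs() above (illustrative; the
 * helper name is made up, and a count of 0 means "everything queued at the
 * start of the run", matching the btrfs_run_delayed_refs() semantics below):
 */
#if 0
static int flush_delayed_refs_example(struct btrfs_root *root)
{
	/* Queue the work and block until the helper thread has finished. */
	return btrfs_async_run_delayed_refs(root, 0, 1);
}
#endif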
2897 
2898 /*
2899  * this starts processing the delayed reference count updates and
2900  * extent insertions we have queued up so far.  count can be
2901  * 0, which means to process everything in the tree at the start
2902  * of the run (but not newly added entries), or it can be some target
2903  * number you'd like to process.
2904  *
2905  * Returns 0 on success or if called with an aborted transaction
2906  * Returns <0 on error and aborts the transaction
2907  */
2908 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2909 			   struct btrfs_root *root, unsigned long count)
2910 {
2911 	struct rb_node *node;
2912 	struct btrfs_delayed_ref_root *delayed_refs;
2913 	struct btrfs_delayed_ref_head *head;
2914 	int ret;
2915 	int run_all = count == (unsigned long)-1;
2916 	bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
2917 
2918 	/* We'll clean this up in btrfs_cleanup_transaction */
2919 	if (trans->aborted)
2920 		return 0;
2921 
2922 	if (root == root->fs_info->extent_root)
2923 		root = root->fs_info->tree_root;
2924 
2925 	delayed_refs = &trans->transaction->delayed_refs;
2926 	if (count == 0)
2927 		count = atomic_read(&delayed_refs->num_entries) * 2;
2928 
2929 again:
2930 #ifdef SCRAMBLE_DELAYED_REFS
2931 	delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2932 #endif
2933 	trans->can_flush_pending_bgs = false;
2934 	ret = __btrfs_run_delayed_refs(trans, root, count);
2935 	if (ret < 0) {
2936 		btrfs_abort_transaction(trans, root, ret);
2937 		return ret;
2938 	}
2939 
2940 	if (run_all) {
2941 		if (!list_empty(&trans->new_bgs))
2942 			btrfs_create_pending_block_groups(trans, root);
2943 
2944 		spin_lock(&delayed_refs->lock);
2945 		node = rb_first(&delayed_refs->href_root);
2946 		if (!node) {
2947 			spin_unlock(&delayed_refs->lock);
2948 			goto out;
2949 		}
2950 		count = (unsigned long)-1;
2951 
2952 		while (node) {
2953 			head = rb_entry(node, struct btrfs_delayed_ref_head,
2954 					href_node);
2955 			if (btrfs_delayed_ref_is_head(&head->node)) {
2956 				struct btrfs_delayed_ref_node *ref;
2957 
2958 				ref = &head->node;
2959 				atomic_inc(&ref->refs);
2960 
2961 				spin_unlock(&delayed_refs->lock);
2962 				/*
2963 				 * Mutex was contended, block until it's
2964 				 * released and try again
2965 				 */
2966 				mutex_lock(&head->mutex);
2967 				mutex_unlock(&head->mutex);
2968 
2969 				btrfs_put_delayed_ref(ref);
2970 				cond_resched();
2971 				goto again;
2972 			} else {
2973 				WARN_ON(1);
2974 			}
2975 			node = rb_next(node);
2976 		}
2977 		spin_unlock(&delayed_refs->lock);
2978 		cond_resched();
2979 		goto again;
2980 	}
2981 out:
2982 	assert_qgroups_uptodate(trans);
2983 	trans->can_flush_pending_bgs = can_flush_pending_bgs;
2984 	return 0;
2985 }
2986 
2987 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2988 				struct btrfs_root *root,
2989 				u64 bytenr, u64 num_bytes, u64 flags,
2990 				int level, int is_data)
2991 {
2992 	struct btrfs_delayed_extent_op *extent_op;
2993 	int ret;
2994 
2995 	extent_op = btrfs_alloc_delayed_extent_op();
2996 	if (!extent_op)
2997 		return -ENOMEM;
2998 
2999 	extent_op->flags_to_set = flags;
3000 	extent_op->update_flags = 1;
3001 	extent_op->update_key = 0;
3002 	extent_op->is_data = is_data ? 1 : 0;
3003 	extent_op->level = level;
3004 
3005 	ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
3006 					  num_bytes, extent_op);
3007 	if (ret)
3008 		btrfs_free_delayed_extent_op(extent_op);
3009 	return ret;
3010 }
3011 
3012 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
3013 				      struct btrfs_root *root,
3014 				      struct btrfs_path *path,
3015 				      u64 objectid, u64 offset, u64 bytenr)
3016 {
3017 	struct btrfs_delayed_ref_head *head;
3018 	struct btrfs_delayed_ref_node *ref;
3019 	struct btrfs_delayed_data_ref *data_ref;
3020 	struct btrfs_delayed_ref_root *delayed_refs;
3021 	int ret = 0;
3022 
3023 	delayed_refs = &trans->transaction->delayed_refs;
3024 	spin_lock(&delayed_refs->lock);
3025 	head = btrfs_find_delayed_ref_head(trans, bytenr);
3026 	if (!head) {
3027 		spin_unlock(&delayed_refs->lock);
3028 		return 0;
3029 	}
3030 
3031 	if (!mutex_trylock(&head->mutex)) {
3032 		atomic_inc(&head->node.refs);
3033 		spin_unlock(&delayed_refs->lock);
3034 
3035 		btrfs_release_path(path);
3036 
3037 		/*
3038 		 * Mutex was contended, block until it's released and let
3039 		 * caller try again
3040 		 */
3041 		mutex_lock(&head->mutex);
3042 		mutex_unlock(&head->mutex);
3043 		btrfs_put_delayed_ref(&head->node);
3044 		return -EAGAIN;
3045 	}
3046 	spin_unlock(&delayed_refs->lock);
3047 
3048 	spin_lock(&head->lock);
3049 	list_for_each_entry(ref, &head->ref_list, list) {
3050 		/* If it's a shared ref we know a cross reference exists */
3051 		if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
3052 			ret = 1;
3053 			break;
3054 		}
3055 
3056 		data_ref = btrfs_delayed_node_to_data_ref(ref);
3057 
3058 		/*
3059 		 * If our ref doesn't match the one we're currently looking at
3060 		 * then we have a cross reference.
3061 		 */
3062 		if (data_ref->root != root->root_key.objectid ||
3063 		    data_ref->objectid != objectid ||
3064 		    data_ref->offset != offset) {
3065 			ret = 1;
3066 			break;
3067 		}
3068 	}
3069 	spin_unlock(&head->lock);
3070 	mutex_unlock(&head->mutex);
3071 	return ret;
3072 }
3073 
3074 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
3075 					struct btrfs_root *root,
3076 					struct btrfs_path *path,
3077 					u64 objectid, u64 offset, u64 bytenr)
3078 {
3079 	struct btrfs_root *extent_root = root->fs_info->extent_root;
3080 	struct extent_buffer *leaf;
3081 	struct btrfs_extent_data_ref *ref;
3082 	struct btrfs_extent_inline_ref *iref;
3083 	struct btrfs_extent_item *ei;
3084 	struct btrfs_key key;
3085 	u32 item_size;
3086 	int ret;
3087 
3088 	key.objectid = bytenr;
3089 	key.offset = (u64)-1;
3090 	key.type = BTRFS_EXTENT_ITEM_KEY;
3091 
3092 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3093 	if (ret < 0)
3094 		goto out;
3095 	BUG_ON(ret == 0); /* Corruption */
3096 
3097 	ret = -ENOENT;
3098 	if (path->slots[0] == 0)
3099 		goto out;
3100 
3101 	path->slots[0]--;
3102 	leaf = path->nodes[0];
3103 	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3104 
3105 	if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
3106 		goto out;
3107 
3108 	ret = 1;
3109 	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3110 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3111 	if (item_size < sizeof(*ei)) {
3112 		WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3113 		goto out;
3114 	}
3115 #endif
3116 	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3117 
3118 	if (item_size != sizeof(*ei) +
3119 	    btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3120 		goto out;
3121 
3122 	if (btrfs_extent_generation(leaf, ei) <=
3123 	    btrfs_root_last_snapshot(&root->root_item))
3124 		goto out;
3125 
3126 	iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3127 	if (btrfs_extent_inline_ref_type(leaf, iref) !=
3128 	    BTRFS_EXTENT_DATA_REF_KEY)
3129 		goto out;
3130 
3131 	ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3132 	if (btrfs_extent_refs(leaf, ei) !=
3133 	    btrfs_extent_data_ref_count(leaf, ref) ||
3134 	    btrfs_extent_data_ref_root(leaf, ref) !=
3135 	    root->root_key.objectid ||
3136 	    btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3137 	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
3138 		goto out;
3139 
3140 	ret = 0;
3141 out:
3142 	return ret;
3143 }
3144 
3145 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3146 			  struct btrfs_root *root,
3147 			  u64 objectid, u64 offset, u64 bytenr)
3148 {
3149 	struct btrfs_path *path;
3150 	int ret;
3151 	int ret2;
3152 
3153 	path = btrfs_alloc_path();
3154 	if (!path)
3155 		return -ENOMEM;
3156 
3157 	do {
3158 		ret = check_committed_ref(trans, root, path, objectid,
3159 					  offset, bytenr);
3160 		if (ret && ret != -ENOENT)
3161 			goto out;
3162 
3163 		ret2 = check_delayed_ref(trans, root, path, objectid,
3164 					 offset, bytenr);
3165 	} while (ret2 == -EAGAIN);
3166 
3167 	if (ret2 && ret2 != -ENOENT) {
3168 		ret = ret2;
3169 		goto out;
3170 	}
3171 
3172 	if (ret != -ENOENT || ret2 != -ENOENT)
3173 		ret = 0;
3174 out:
3175 	btrfs_free_path(path);
3176 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3177 		WARN_ON(ret > 0);
3178 	return ret;
3179 }
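
/*
 * btrfs_cross_ref_exist() has to consult both worlds: check_committed_ref()
 * inspects refs already written to the extent tree, while check_delayed_ref()
 * inspects refs still queued in memory.  -EAGAIN from the delayed check means
 * the ref head mutex was contended, so both checks are redone from scratch; a
 * positive return from either means another root, inode or offset may also
 * reference the extent.
 */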
3180 
3181 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3182 			   struct btrfs_root *root,
3183 			   struct extent_buffer *buf,
3184 			   int full_backref, int inc)
3185 {
3186 	u64 bytenr;
3187 	u64 num_bytes;
3188 	u64 parent;
3189 	u64 ref_root;
3190 	u32 nritems;
3191 	struct btrfs_key key;
3192 	struct btrfs_file_extent_item *fi;
3193 	int i;
3194 	int level;
3195 	int ret = 0;
3196 	int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3197 			    u64, u64, u64, u64, u64, u64);
3198 
3199 
3200 	if (btrfs_test_is_dummy_root(root))
3201 		return 0;
3202 
3203 	ref_root = btrfs_header_owner(buf);
3204 	nritems = btrfs_header_nritems(buf);
3205 	level = btrfs_header_level(buf);
3206 
3207 	if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3208 		return 0;
3209 
3210 	if (inc)
3211 		process_func = btrfs_inc_extent_ref;
3212 	else
3213 		process_func = btrfs_free_extent;
3214 
3215 	if (full_backref)
3216 		parent = buf->start;
3217 	else
3218 		parent = 0;
3219 
3220 	for (i = 0; i < nritems; i++) {
3221 		if (level == 0) {
3222 			btrfs_item_key_to_cpu(buf, &key, i);
3223 			if (key.type != BTRFS_EXTENT_DATA_KEY)
3224 				continue;
3225 			fi = btrfs_item_ptr(buf, i,
3226 					    struct btrfs_file_extent_item);
3227 			if (btrfs_file_extent_type(buf, fi) ==
3228 			    BTRFS_FILE_EXTENT_INLINE)
3229 				continue;
3230 			bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3231 			if (bytenr == 0)
3232 				continue;
3233 
3234 			num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3235 			key.offset -= btrfs_file_extent_offset(buf, fi);
3236 			ret = process_func(trans, root, bytenr, num_bytes,
3237 					   parent, ref_root, key.objectid,
3238 					   key.offset);
3239 			if (ret)
3240 				goto fail;
3241 		} else {
3242 			bytenr = btrfs_node_blockptr(buf, i);
3243 			num_bytes = root->nodesize;
3244 			ret = process_func(trans, root, bytenr, num_bytes,
3245 					   parent, ref_root, level - 1, 0);
3246 			if (ret)
3247 				goto fail;
3248 		}
3249 	}
3250 	return 0;
3251 fail:
3252 	return ret;
3253 }
3254 
3255 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3256 		  struct extent_buffer *buf, int full_backref)
3257 {
3258 	return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3259 }
3260 
3261 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3262 		  struct extent_buffer *buf, int full_backref)
3263 {
3264 	return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3265 }
3266 
3267 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3268 				 struct btrfs_root *root,
3269 				 struct btrfs_path *path,
3270 				 struct btrfs_block_group_cache *cache)
3271 {
3272 	int ret;
3273 	struct btrfs_root *extent_root = root->fs_info->extent_root;
3274 	unsigned long bi;
3275 	struct extent_buffer *leaf;
3276 
3277 	ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3278 	if (ret) {
3279 		if (ret > 0)
3280 			ret = -ENOENT;
3281 		goto fail;
3282 	}
3283 
3284 	leaf = path->nodes[0];
3285 	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3286 	write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3287 	btrfs_mark_buffer_dirty(leaf);
3288 fail:
3289 	btrfs_release_path(path);
3290 	return ret;
3291 
3292 }
3293 
3294 static struct btrfs_block_group_cache *
3295 next_block_group(struct btrfs_root *root,
3296 		 struct btrfs_block_group_cache *cache)
3297 {
3298 	struct rb_node *node;
3299 
3300 	spin_lock(&root->fs_info->block_group_cache_lock);
3301 
3302 	/* If our block group was removed, we need a full search. */
3303 	if (RB_EMPTY_NODE(&cache->cache_node)) {
3304 		const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3305 
3306 		spin_unlock(&root->fs_info->block_group_cache_lock);
3307 		btrfs_put_block_group(cache);
3308 		cache = btrfs_lookup_first_block_group(root->fs_info,
3309 						       next_bytenr);
3310 		return cache;
3311 	}
3312 	node = rb_next(&cache->cache_node);
3313 	btrfs_put_block_group(cache);
3314 	if (node) {
3315 		cache = rb_entry(node, struct btrfs_block_group_cache,
3316 				 cache_node);
3317 		btrfs_get_block_group(cache);
3318 	} else
3319 		cache = NULL;
3320 	spin_unlock(&root->fs_info->block_group_cache_lock);
3321 	return cache;
3322 }
3323 
3324 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3325 			    struct btrfs_trans_handle *trans,
3326 			    struct btrfs_path *path)
3327 {
3328 	struct btrfs_root *root = block_group->fs_info->tree_root;
3329 	struct inode *inode = NULL;
3330 	u64 alloc_hint = 0;
3331 	int dcs = BTRFS_DC_ERROR;
3332 	u64 num_pages = 0;
3333 	int retries = 0;
3334 	int ret = 0;
3335 
3336 	/*
3337 	 * If this block group is smaller than 100 megs, don't bother caching the
3338 	 * block group.
3339 	 */
3340 	if (block_group->key.offset < (100 * 1024 * 1024)) {
3341 		spin_lock(&block_group->lock);
3342 		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3343 		spin_unlock(&block_group->lock);
3344 		return 0;
3345 	}
3346 
3347 	if (trans->aborted)
3348 		return 0;
3349 again:
3350 	inode = lookup_free_space_inode(root, block_group, path);
3351 	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3352 		ret = PTR_ERR(inode);
3353 		btrfs_release_path(path);
3354 		goto out;
3355 	}
3356 
3357 	if (IS_ERR(inode)) {
3358 		BUG_ON(retries);
3359 		retries++;
3360 
3361 		if (block_group->ro)
3362 			goto out_free;
3363 
3364 		ret = create_free_space_inode(root, trans, block_group, path);
3365 		if (ret)
3366 			goto out_free;
3367 		goto again;
3368 	}
3369 
3370 	/*
3371 	 * We want to set the generation to 0, that way if anything goes wrong
3372 	 * from here on out we know not to trust this cache when we load up next
3373 	 * time.
3374 	 */
3375 	BTRFS_I(inode)->generation = 0;
3376 	ret = btrfs_update_inode(trans, root, inode);
3377 	if (ret) {
3378 		/*
3379 		 * Theoretically we could recover from this by simply setting
3380 		 * the super cache generation to 0 so we know to invalidate the
3381 		 * cache, but then we'd have to keep track of the block groups
3382 		 * that fail this way so we know we _have_ to reset this cache
3383 		 * before the next commit or risk reading stale cache.  So to
3384 		 * limit our exposure to horrible edge cases, let's just abort
3385 		 * the transaction; this only happens in really bad situations
3386 		 * anyway.
3387 		 */
3388 		btrfs_abort_transaction(trans, root, ret);
3389 		goto out_put;
3390 	}
3391 	WARN_ON(ret);
3392 
3393 	/* We've already set up this transaction, go ahead and exit */
3394 	if (block_group->cache_generation == trans->transid &&
3395 	    i_size_read(inode)) {
3396 		dcs = BTRFS_DC_SETUP;
3397 		goto out_put;
3398 	}
3399 
3400 	if (i_size_read(inode) > 0) {
3401 		ret = btrfs_check_trunc_cache_free_space(root,
3402 					&root->fs_info->global_block_rsv);
3403 		if (ret)
3404 			goto out_put;
3405 
3406 		ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
3407 		if (ret)
3408 			goto out_put;
3409 	}
3410 
3411 	spin_lock(&block_group->lock);
3412 	if (block_group->cached != BTRFS_CACHE_FINISHED ||
3413 	    !btrfs_test_opt(root, SPACE_CACHE)) {
3414 		/*
3415 		 * Don't bother trying to write stuff out _if_
3416 		 * a) we're not cached, or
3417 		 * b) we're mounted with the nospace_cache option.
3418 		 */
3419 		dcs = BTRFS_DC_WRITTEN;
3420 		spin_unlock(&block_group->lock);
3421 		goto out_put;
3422 	}
3423 	spin_unlock(&block_group->lock);
3424 
3425 	/*
3426 	 * We hit an ENOSPC when setting up the cache in this transaction; just
3427 	 * skip doing the setup, as we've already cleared the cache so we're safe.
3428 	 */
3429 	if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
3430 		ret = -ENOSPC;
3431 		goto out_put;
3432 	}
3433 
3434 	/*
3435 	 * Try to preallocate enough space based on how big the block group is.
3436 	 * Keep in mind this has to include any pinned space which could end up
3437 	 * taking up quite a bit since it's not folded into the other space
3438 	 * cache.
3439 	 */
3440 	num_pages = div_u64(block_group->key.offset, 256 * 1024 * 1024);
3441 	if (!num_pages)
3442 		num_pages = 1;
3443 
3444 	num_pages *= 16;
3445 	num_pages *= PAGE_CACHE_SIZE;
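	/*
	 * Example (illustrative): for a 1GiB block group this works out to
	 * div_u64(1GiB, 256MiB) = 4, so 4 * 16 = 64 pages are preallocated,
	 * i.e. 256KiB with 4KiB pages.
	 */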
3446 
3447 	ret = btrfs_check_data_free_space(inode, 0, num_pages);
3448 	if (ret)
3449 		goto out_put;
3450 
3451 	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3452 					      num_pages, num_pages,
3453 					      &alloc_hint);
3454 	/*
3455 	 * Our cache requires contiguous chunks so that we don't modify a bunch
3456 	 * of metadata or split extents when writing the cache out, which means
3457 	 * we can hit ENOSPC if we are heavily fragmented in addition to just
3458 	 * normal out of space conditions.  So if we hit this, just skip setting
3459 	 * up any other block groups for this transaction; maybe we'll unpin enough
3460 	 * space the next time around.
3461 	 */
3462 	if (!ret)
3463 		dcs = BTRFS_DC_SETUP;
3464 	else if (ret == -ENOSPC)
3465 		set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
3466 	btrfs_free_reserved_data_space(inode, 0, num_pages);
3467 
3468 out_put:
3469 	iput(inode);
3470 out_free:
3471 	btrfs_release_path(path);
3472 out:
3473 	spin_lock(&block_group->lock);
3474 	if (!ret && dcs == BTRFS_DC_SETUP)
3475 		block_group->cache_generation = trans->transid;
3476 	block_group->disk_cache_state = dcs;
3477 	spin_unlock(&block_group->lock);
3478 
3479 	return ret;
3480 }
3481 
3482 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3483 			    struct btrfs_root *root)
3484 {
3485 	struct btrfs_block_group_cache *cache, *tmp;
3486 	struct btrfs_transaction *cur_trans = trans->transaction;
3487 	struct btrfs_path *path;
3488 
3489 	if (list_empty(&cur_trans->dirty_bgs) ||
3490 	    !btrfs_test_opt(root, SPACE_CACHE))
3491 		return 0;
3492 
3493 	path = btrfs_alloc_path();
3494 	if (!path)
3495 		return -ENOMEM;
3496 
3497 	/* Could add new block groups, use _safe just in case */
3498 	list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3499 				 dirty_list) {
3500 		if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3501 			cache_save_setup(cache, trans, path);
3502 	}
3503 
3504 	btrfs_free_path(path);
3505 	return 0;
3506 }
3507 
3508 /*
3509  * transaction commit does final block group cache writeback during a
3510  * critical section where nothing is allowed to change the FS.  This is
3511  * required in order for the cache to actually match the block group,
3512  * but can introduce a lot of latency into the commit.
3513  *
3514  * So, btrfs_start_dirty_block_groups is here to kick off block group
3515  * cache IO.  There's a chance we'll have to redo some of it if the
3516  * block group changes again during the commit, but it greatly reduces
3517  * the commit latency by getting rid of the easy block groups while
3518  * we're still allowing others to join the commit.
3519  */
3520 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
3521 				   struct btrfs_root *root)
3522 {
3523 	struct btrfs_block_group_cache *cache;
3524 	struct btrfs_transaction *cur_trans = trans->transaction;
3525 	int ret = 0;
3526 	int should_put;
3527 	struct btrfs_path *path = NULL;
3528 	LIST_HEAD(dirty);
3529 	struct list_head *io = &cur_trans->io_bgs;
3530 	int num_started = 0;
3531 	int loops = 0;
3532 
3533 	spin_lock(&cur_trans->dirty_bgs_lock);
3534 	if (list_empty(&cur_trans->dirty_bgs)) {
3535 		spin_unlock(&cur_trans->dirty_bgs_lock);
3536 		return 0;
3537 	}
3538 	list_splice_init(&cur_trans->dirty_bgs, &dirty);
3539 	spin_unlock(&cur_trans->dirty_bgs_lock);
3540 
3541 again:
3542 	/*
3543 	 * make sure all the block groups on our dirty list actually
3544 	 * exist
3545 	 */
3546 	btrfs_create_pending_block_groups(trans, root);
3547 
3548 	if (!path) {
3549 		path = btrfs_alloc_path();
3550 		if (!path)
3551 			return -ENOMEM;
3552 	}
3553 
3554 	/*
3555 	 * cache_write_mutex is here only to save us from balance or the automatic
3556 	 * removal of empty block groups deleting this block group while we are
3557 	 * writing out the cache.
3558 	 */
3559 	mutex_lock(&trans->transaction->cache_write_mutex);
3560 	while (!list_empty(&dirty)) {
3561 		cache = list_first_entry(&dirty,
3562 					 struct btrfs_block_group_cache,
3563 					 dirty_list);
3564 		/*
3565 		 * this can happen if something re-dirties a block
3566 		 * group that is already under IO.  Just wait for it to
3567 		 * finish and then do it all again
3568 		 */
3569 		if (!list_empty(&cache->io_list)) {
3570 			list_del_init(&cache->io_list);
3571 			btrfs_wait_cache_io(root, trans, cache,
3572 					    &cache->io_ctl, path,
3573 					    cache->key.objectid);
3574 			btrfs_put_block_group(cache);
3575 		}
3576 
3577 
3578 		/*
3579 		 * btrfs_wait_cache_io uses the cache->dirty_list to decide
3580 		 * if it should update the cache_state.  Don't delete
3581 		 * until after we wait.
3582 		 *
3583 		 * Since we're not running in the commit critical section
3584 		 * we need the dirty_bgs_lock to protect from update_block_group
3585 		 */
3586 		spin_lock(&cur_trans->dirty_bgs_lock);
3587 		list_del_init(&cache->dirty_list);
3588 		spin_unlock(&cur_trans->dirty_bgs_lock);
3589 
3590 		should_put = 1;
3591 
3592 		cache_save_setup(cache, trans, path);
3593 
3594 		if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3595 			cache->io_ctl.inode = NULL;
3596 			ret = btrfs_write_out_cache(root, trans, cache, path);
3597 			if (ret == 0 && cache->io_ctl.inode) {
3598 				num_started++;
3599 				should_put = 0;
3600 
3601 				/*
3602 				 * the cache_write_mutex is protecting
3603 				 * the io_list
3604 				 */
3605 				list_add_tail(&cache->io_list, io);
3606 			} else {
3607 				/*
3608 				 * if we failed to write the cache, the
3609 				 * generation will be bad and life goes on
3610 				 */
3611 				ret = 0;
3612 			}
3613 		}
3614 		if (!ret) {
3615 			ret = write_one_cache_group(trans, root, path, cache);
3616 			/*
3617 			 * Our block group might still be attached to the list
3618 			 * of new block groups in the transaction handle of some
3619 			 * other task (struct btrfs_trans_handle->new_bgs). This
3620 			 * means its block group item isn't yet in the extent
3621 			 * tree. If this happens ignore the error, as we will
3622 			 * try again later in the critical section of the
3623 			 * transaction commit.
3624 			 */
3625 			if (ret == -ENOENT) {
3626 				ret = 0;
3627 				spin_lock(&cur_trans->dirty_bgs_lock);
3628 				if (list_empty(&cache->dirty_list)) {
3629 					list_add_tail(&cache->dirty_list,
3630 						      &cur_trans->dirty_bgs);
3631 					btrfs_get_block_group(cache);
3632 				}
3633 				spin_unlock(&cur_trans->dirty_bgs_lock);
3634 			} else if (ret) {
3635 				btrfs_abort_transaction(trans, root, ret);
3636 			}
3637 		}
3638 
3639 		/* if it's not on the io list, we need to put the block group */
3640 		if (should_put)
3641 			btrfs_put_block_group(cache);
3642 
3643 		if (ret)
3644 			break;
3645 
3646 		/*
3647 		 * Avoid blocking other tasks for too long. It might even save
3648 		 * us from writing caches for block groups that are going to be
3649 		 * removed.
3650 		 */
3651 		mutex_unlock(&trans->transaction->cache_write_mutex);
3652 		mutex_lock(&trans->transaction->cache_write_mutex);
3653 	}
3654 	mutex_unlock(&trans->transaction->cache_write_mutex);
3655 
3656 	/*
3657 	 * go through delayed refs for all the stuff we've just kicked off
3658 	 * and then loop back (just once)
3659 	 */
3660 	ret = btrfs_run_delayed_refs(trans, root, 0);
3661 	if (!ret && loops == 0) {
3662 		loops++;
3663 		spin_lock(&cur_trans->dirty_bgs_lock);
3664 		list_splice_init(&cur_trans->dirty_bgs, &dirty);
3665 		/*
3666 		 * dirty_bgs_lock protects us from concurrent block group
3667 		 * deletes too (not just cache_write_mutex).
3668 		 */
3669 		if (!list_empty(&dirty)) {
3670 			spin_unlock(&cur_trans->dirty_bgs_lock);
3671 			goto again;
3672 		}
3673 		spin_unlock(&cur_trans->dirty_bgs_lock);
3674 	}
3675 
3676 	btrfs_free_path(path);
3677 	return ret;
3678 }
3679 
3680 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3681 				   struct btrfs_root *root)
3682 {
3683 	struct btrfs_block_group_cache *cache;
3684 	struct btrfs_transaction *cur_trans = trans->transaction;
3685 	int ret = 0;
3686 	int should_put;
3687 	struct btrfs_path *path;
3688 	struct list_head *io = &cur_trans->io_bgs;
3689 	int num_started = 0;
3690 
3691 	path = btrfs_alloc_path();
3692 	if (!path)
3693 		return -ENOMEM;
3694 
3695 	/*
3696 	 * We don't need the lock here since we are protected by the transaction
3697 	 * commit.  We want to do the cache_save_setup first and then run the
3698 	 * delayed refs to make sure we have the best chance at doing this all
3699 	 * in one shot.
3700 	 */
3701 	while (!list_empty(&cur_trans->dirty_bgs)) {
3702 		cache = list_first_entry(&cur_trans->dirty_bgs,
3703 					 struct btrfs_block_group_cache,
3704 					 dirty_list);
3705 
3706 		/*
3707 		 * this can happen if cache_save_setup re-dirties a block
3708 		 * group that is already under IO.  Just wait for it to
3709 		 * finish and then do it all again
3710 		 */
3711 		if (!list_empty(&cache->io_list)) {
3712 			list_del_init(&cache->io_list);
3713 			btrfs_wait_cache_io(root, trans, cache,
3714 					    &cache->io_ctl, path,
3715 					    cache->key.objectid);
3716 			btrfs_put_block_group(cache);
3717 		}
3718 
3719 		/*
3720 		 * don't remove from the dirty list until after we've waited
3721 		 * on any pending IO
3722 		 */
3723 		list_del_init(&cache->dirty_list);
3724 		should_put = 1;
3725 
3726 		cache_save_setup(cache, trans, path);
3727 
3728 		if (!ret)
3729 			ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
3730 
3731 		if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3732 			cache->io_ctl.inode = NULL;
3733 			ret = btrfs_write_out_cache(root, trans, cache, path);
3734 			if (ret == 0 && cache->io_ctl.inode) {
3735 				num_started++;
3736 				should_put = 0;
3737 				list_add_tail(&cache->io_list, io);
3738 			} else {
3739 				/*
3740 				 * if we failed to write the cache, the
3741 				 * generation will be bad and life goes on
3742 				 */
3743 				ret = 0;
3744 			}
3745 		}
3746 		if (!ret) {
3747 			ret = write_one_cache_group(trans, root, path, cache);
3748 			if (ret)
3749 				btrfs_abort_transaction(trans, root, ret);
3750 		}
3751 
3752 		/* if it's not on the io list, we need to put the block group */
3753 		if (should_put)
3754 			btrfs_put_block_group(cache);
3755 	}
3756 
3757 	while (!list_empty(io)) {
3758 		cache = list_first_entry(io, struct btrfs_block_group_cache,
3759 					 io_list);
3760 		list_del_init(&cache->io_list);
3761 		btrfs_wait_cache_io(root, trans, cache,
3762 				    &cache->io_ctl, path, cache->key.objectid);
3763 		btrfs_put_block_group(cache);
3764 	}
3765 
3766 	btrfs_free_path(path);
3767 	return ret;
3768 }
3769 
3770 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3771 {
3772 	struct btrfs_block_group_cache *block_group;
3773 	int readonly = 0;
3774 
3775 	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3776 	if (!block_group || block_group->ro)
3777 		readonly = 1;
3778 	if (block_group)
3779 		btrfs_put_block_group(block_group);
3780 	return readonly;
3781 }
3782 
3783 static const char *alloc_name(u64 flags)
3784 {
3785 	switch (flags) {
3786 	case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3787 		return "mixed";
3788 	case BTRFS_BLOCK_GROUP_METADATA:
3789 		return "metadata";
3790 	case BTRFS_BLOCK_GROUP_DATA:
3791 		return "data";
3792 	case BTRFS_BLOCK_GROUP_SYSTEM:
3793 		return "system";
3794 	default:
3795 		WARN_ON(1);
3796 		return "invalid-combination";
3797 	}
3798 }
3799 
3800 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3801 			     u64 total_bytes, u64 bytes_used,
3802 			     struct btrfs_space_info **space_info)
3803 {
3804 	struct btrfs_space_info *found;
3805 	int i;
3806 	int factor;
3807 	int ret;
3808 
3809 	if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3810 		     BTRFS_BLOCK_GROUP_RAID10))
3811 		factor = 2;
3812 	else
3813 		factor = 1;
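	/*
	 * Example (illustrative): with RAID1 or DUP every byte is stored
	 * twice, so 1GiB of usable space consumes 2GiB on disk, hence the
	 * disk_total/disk_used fields below are scaled by factor == 2.
	 */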
3814 
3815 	found = __find_space_info(info, flags);
3816 	if (found) {
3817 		spin_lock(&found->lock);
3818 		found->total_bytes += total_bytes;
3819 		found->disk_total += total_bytes * factor;
3820 		found->bytes_used += bytes_used;
3821 		found->disk_used += bytes_used * factor;
3822 		if (total_bytes > 0)
3823 			found->full = 0;
3824 		spin_unlock(&found->lock);
3825 		*space_info = found;
3826 		return 0;
3827 	}
3828 	found = kzalloc(sizeof(*found), GFP_NOFS);
3829 	if (!found)
3830 		return -ENOMEM;
3831 
3832 	ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3833 	if (ret) {
3834 		kfree(found);
3835 		return ret;
3836 	}
3837 
3838 	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3839 		INIT_LIST_HEAD(&found->block_groups[i]);
3840 	init_rwsem(&found->groups_sem);
3841 	spin_lock_init(&found->lock);
3842 	found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3843 	found->total_bytes = total_bytes;
3844 	found->disk_total = total_bytes * factor;
3845 	found->bytes_used = bytes_used;
3846 	found->disk_used = bytes_used * factor;
3847 	found->bytes_pinned = 0;
3848 	found->bytes_reserved = 0;
3849 	found->bytes_readonly = 0;
3850 	found->bytes_may_use = 0;
3851 	found->full = 0;
3852 	found->max_extent_size = 0;
3853 	found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3854 	found->chunk_alloc = 0;
3855 	found->flush = 0;
3856 	init_waitqueue_head(&found->wait);
3857 	INIT_LIST_HEAD(&found->ro_bgs);
3858 
3859 	ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3860 				    info->space_info_kobj, "%s",
3861 				    alloc_name(found->flags));
3862 	if (ret) {
3863 		percpu_counter_destroy(&found->total_bytes_pinned);
3864 		kfree(found);
3865 		return ret;
3866 	}
3867 
3868 	*space_info = found;
3869 	list_add_rcu(&found->list, &info->space_info);
3870 	if (flags & BTRFS_BLOCK_GROUP_DATA)
3871 		info->data_sinfo = found;
3872 
3873 	return ret;
3874 }
3875 
3876 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3877 {
3878 	u64 extra_flags = chunk_to_extended(flags) &
3879 				BTRFS_EXTENDED_PROFILE_MASK;
3880 
3881 	write_seqlock(&fs_info->profiles_lock);
3882 	if (flags & BTRFS_BLOCK_GROUP_DATA)
3883 		fs_info->avail_data_alloc_bits |= extra_flags;
3884 	if (flags & BTRFS_BLOCK_GROUP_METADATA)
3885 		fs_info->avail_metadata_alloc_bits |= extra_flags;
3886 	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3887 		fs_info->avail_system_alloc_bits |= extra_flags;
3888 	write_sequnlock(&fs_info->profiles_lock);
3889 }
3890 
3891 /*
3892  * returns target flags in extended format or 0 if restripe for this
3893  * chunk_type is not in progress
3894  *
3895  * should be called with either volume_mutex or balance_lock held
3896  */
3897 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3898 {
3899 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3900 	u64 target = 0;
3901 
3902 	if (!bctl)
3903 		return 0;
3904 
3905 	if (flags & BTRFS_BLOCK_GROUP_DATA &&
3906 	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3907 		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3908 	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3909 		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3910 		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3911 	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3912 		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3913 		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3914 	}
3915 
3916 	return target;
3917 }
3918 
3919 /*
3920  * @flags: available profiles in extended format (see ctree.h)
3921  *
3922  * Returns reduced profile in chunk format.  If profile changing is in
3923  * progress (either running or paused) picks the target profile (if it's
3924  * already available), otherwise falls back to plain reducing.
3925  */
3926 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3927 {
3928 	u64 num_devices = root->fs_info->fs_devices->rw_devices;
3929 	u64 target;
3930 	u64 raid_type;
3931 	u64 allowed = 0;
3932 
3933 	/*
3934 	 * see if restripe for this chunk_type is in progress, if so
3935 	 * try to reduce to the target profile
3936 	 */
3937 	spin_lock(&root->fs_info->balance_lock);
3938 	target = get_restripe_target(root->fs_info, flags);
3939 	if (target) {
3940 		/* pick target profile only if it's already available */
3941 		if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3942 			spin_unlock(&root->fs_info->balance_lock);
3943 			return extended_to_chunk(target);
3944 		}
3945 	}
3946 	spin_unlock(&root->fs_info->balance_lock);
3947 
3948 	/* First, mask out the RAID levels which aren't possible */
3949 	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3950 		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
3951 			allowed |= btrfs_raid_group[raid_type];
3952 	}
3953 	allowed &= flags;
3954 
3955 	if (allowed & BTRFS_BLOCK_GROUP_RAID6)
3956 		allowed = BTRFS_BLOCK_GROUP_RAID6;
3957 	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
3958 		allowed = BTRFS_BLOCK_GROUP_RAID5;
3959 	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
3960 		allowed = BTRFS_BLOCK_GROUP_RAID10;
3961 	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
3962 		allowed = BTRFS_BLOCK_GROUP_RAID1;
3963 	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
3964 		allowed = BTRFS_BLOCK_GROUP_RAID0;
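	/*
	 * Example (illustrative): if both RAID1 and RAID0 survive the mask
	 * above, the chain reduces to RAID1, i.e. the more redundant profile
	 * wins.
	 */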
3965 
3966 	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
3967 
3968 	return extended_to_chunk(flags | allowed);
3969 }
3970 
3971 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
3972 {
3973 	unsigned seq;
3974 	u64 flags;
3975 
3976 	do {
3977 		flags = orig_flags;
3978 		seq = read_seqbegin(&root->fs_info->profiles_lock);
3979 
3980 		if (flags & BTRFS_BLOCK_GROUP_DATA)
3981 			flags |= root->fs_info->avail_data_alloc_bits;
3982 		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3983 			flags |= root->fs_info->avail_system_alloc_bits;
3984 		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3985 			flags |= root->fs_info->avail_metadata_alloc_bits;
3986 	} while (read_seqretry(&root->fs_info->profiles_lock, seq));
3987 
3988 	return btrfs_reduce_alloc_profile(root, flags);
3989 }
3990 
3991 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3992 {
3993 	u64 flags;
3994 	u64 ret;
3995 
3996 	if (data)
3997 		flags = BTRFS_BLOCK_GROUP_DATA;
3998 	else if (root == root->fs_info->chunk_root)
3999 		flags = BTRFS_BLOCK_GROUP_SYSTEM;
4000 	else
4001 		flags = BTRFS_BLOCK_GROUP_METADATA;
4002 
4003 	ret = get_alloc_profile(root, flags);
4004 	return ret;
4005 }
4006 
4007 int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
4008 {
4009 	struct btrfs_space_info *data_sinfo;
4010 	struct btrfs_root *root = BTRFS_I(inode)->root;
4011 	struct btrfs_fs_info *fs_info = root->fs_info;
4012 	u64 used;
4013 	int ret = 0;
4014 	int need_commit = 2;
4015 	int have_pinned_space;
4016 
4017 	/* make sure bytes are sectorsize aligned */
4018 	bytes = ALIGN(bytes, root->sectorsize);
4019 
4020 	if (btrfs_is_free_space_inode(inode)) {
4021 		need_commit = 0;
4022 		ASSERT(current->journal_info);
4023 	}
4024 
4025 	data_sinfo = fs_info->data_sinfo;
4026 	if (!data_sinfo)
4027 		goto alloc;
4028 
4029 again:
4030 	/* make sure we have enough space to handle the data first */
4031 	spin_lock(&data_sinfo->lock);
4032 	used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
4033 		data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
4034 		data_sinfo->bytes_may_use;
4035 
4036 	if (used + bytes > data_sinfo->total_bytes) {
4037 		struct btrfs_trans_handle *trans;
4038 
4039 		/*
4040 		 * if we don't have enough free bytes in this space then we need
4041 		 * to alloc a new chunk.
4042 		 */
4043 		if (!data_sinfo->full) {
4044 			u64 alloc_target;
4045 
4046 			data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
4047 			spin_unlock(&data_sinfo->lock);
4048 alloc:
4049 			alloc_target = btrfs_get_alloc_profile(root, 1);
4050 			/*
4051 			 * It is ugly that we don't call nolock join
4052 			 * transaction for the free space inode case here.
4053 			 * But it is safe because we only do the data space
4054 			 * reservation for the free space cache in the
4055 			 * transaction context; the common join transaction
4056 			 * just increases the counter of the current transaction
4057 			 * handle and doesn't try to acquire the trans_lock of
4058 			 * the fs.
4059 			 */
4060 			trans = btrfs_join_transaction(root);
4061 			if (IS_ERR(trans))
4062 				return PTR_ERR(trans);
4063 
4064 			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4065 					     alloc_target,
4066 					     CHUNK_ALLOC_NO_FORCE);
4067 			btrfs_end_transaction(trans, root);
4068 			if (ret < 0) {
4069 				if (ret != -ENOSPC)
4070 					return ret;
4071 				else {
4072 					have_pinned_space = 1;
4073 					goto commit_trans;
4074 				}
4075 			}
4076 
4077 			if (!data_sinfo)
4078 				data_sinfo = fs_info->data_sinfo;
4079 
4080 			goto again;
4081 		}
4082 
4083 		/*
4084 		 * If we don't have enough pinned space to deal with this
4085 		 * allocation, and no chunk was removed in the current transaction,
4086 		 * don't bother committing the transaction.
4087 		 */
4088 		have_pinned_space = percpu_counter_compare(
4089 			&data_sinfo->total_bytes_pinned,
4090 			used + bytes - data_sinfo->total_bytes);
4091 		spin_unlock(&data_sinfo->lock);
4092 
4093 		/* commit the current transaction and try again */
4094 commit_trans:
4095 		if (need_commit &&
4096 		    !atomic_read(&root->fs_info->open_ioctl_trans)) {
4097 			need_commit--;
4098 
4099 			if (need_commit > 0) {
4100 				btrfs_start_delalloc_roots(fs_info, 0, -1);
4101 				btrfs_wait_ordered_roots(fs_info, -1);
4102 			}
4103 
4104 			trans = btrfs_join_transaction(root);
4105 			if (IS_ERR(trans))
4106 				return PTR_ERR(trans);
4107 			if (have_pinned_space >= 0 ||
4108 			    test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
4109 				     &trans->transaction->flags) ||
4110 			    need_commit > 0) {
4111 				ret = btrfs_commit_transaction(trans, root);
4112 				if (ret)
4113 					return ret;
4114 				/*
4115 				 * The cleaner kthread might still be doing iput
4116 				 * operations. Wait for it to finish so that
4117 				 * more space is released.
4118 				 */
4119 				mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
4120 				mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
4121 				goto again;
4122 			} else {
4123 				btrfs_end_transaction(trans, root);
4124 			}
4125 		}
4126 
4127 		trace_btrfs_space_reservation(root->fs_info,
4128 					      "space_info:enospc",
4129 					      data_sinfo->flags, bytes, 1);
4130 		return -ENOSPC;
4131 	}
4132 	data_sinfo->bytes_may_use += bytes;
4133 	trace_btrfs_space_reservation(root->fs_info, "space_info",
4134 				      data_sinfo->flags, bytes, 1);
4135 	spin_unlock(&data_sinfo->lock);
4136 
4137 	return 0;
4138 }
4139 
4140 /*
4141  * New check_data_free_space() with the ability for precise data reservation.
4142  * It will replace the old btrfs_check_data_free_space(); for ease of patch
4143  * splitting, we add the new function first and then switch callers over.
4144  */
4145 int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
4146 {
4147 	struct btrfs_root *root = BTRFS_I(inode)->root;
4148 	int ret;
4149 
4150 	/* align the range */
4151 	len = round_up(start + len, root->sectorsize) -
4152 	      round_down(start, root->sectorsize);
4153 	start = round_down(start, root->sectorsize);
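	/*
	 * Example (illustrative): with a 4KiB sectorsize, start=3000 and
	 * len=3000 are widened to the aligned range start=0, len=8192.
	 */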
4154 
4155 	ret = btrfs_alloc_data_chunk_ondemand(inode, len);
4156 	if (ret < 0)
4157 		return ret;
4158 
4159 	/*
4160 	 * Use the new btrfs_qgroup_reserve_data to reserve precise data space.
4161 	 *
4162 	 * TODO: Find a good method to avoid reserving data space for NOCOW
4163 	 * ranges, but without impacting performance in the quota-disabled case.
4164 	 */
4165 	ret = btrfs_qgroup_reserve_data(inode, start, len);
4166 	return ret;
4167 }
4168 
4169 /*
4170  * Called if we need to clear a data reservation for this inode,
4171  * normally in an error case.
4172  *
4173  * This one will *NOT* use the accurate qgroup reserved space API; it is only
4174  * for cases where we can't sleep and are sure it won't affect the qgroup
4175  * reserved space, like clear_bit_hook().
4176  */
4177 void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
4178 					    u64 len)
4179 {
4180 	struct btrfs_root *root = BTRFS_I(inode)->root;
4181 	struct btrfs_space_info *data_sinfo;
4182 
4183 	/* Make sure the range is aligned to sectorsize */
4184 	len = round_up(start + len, root->sectorsize) -
4185 	      round_down(start, root->sectorsize);
4186 	start = round_down(start, root->sectorsize);
4187 
4188 	data_sinfo = root->fs_info->data_sinfo;
4189 	spin_lock(&data_sinfo->lock);
4190 	if (WARN_ON(data_sinfo->bytes_may_use < len))
4191 		data_sinfo->bytes_may_use = 0;
4192 	else
4193 		data_sinfo->bytes_may_use -= len;
4194 	trace_btrfs_space_reservation(root->fs_info, "space_info",
4195 				      data_sinfo->flags, len, 0);
4196 	spin_unlock(&data_sinfo->lock);
4197 }
4198 
4199 /*
4200  * Called if we need to clear a data reservation for this inode,
4201  * normally in an error case.
4202  *
4203  * This one will handle the per-inode data rsv map for the accurate reserved
4204  * space framework.
4205  */
4206 void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
4207 {
4208 	btrfs_free_reserved_data_space_noquota(inode, start, len);
4209 	btrfs_qgroup_free_data(inode, start, len);
4210 }
4211 
4212 static void force_metadata_allocation(struct btrfs_fs_info *info)
4213 {
4214 	struct list_head *head = &info->space_info;
4215 	struct btrfs_space_info *found;
4216 
4217 	rcu_read_lock();
4218 	list_for_each_entry_rcu(found, head, list) {
4219 		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
4220 			found->force_alloc = CHUNK_ALLOC_FORCE;
4221 	}
4222 	rcu_read_unlock();
4223 }
4224 
4225 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
4226 {
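	/* Twice the reserve size; the doubling is conservative headroom. */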
4227 	return (global->size << 1);
4228 }
4229 
4230 static int should_alloc_chunk(struct btrfs_root *root,
4231 			      struct btrfs_space_info *sinfo, int force)
4232 {
4233 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4234 	u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
4235 	u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
4236 	u64 thresh;
4237 
4238 	if (force == CHUNK_ALLOC_FORCE)
4239 		return 1;
4240 
4241 	/*
4242 	 * We need to take into account the global rsv because for all intents
4243 	 * and purposes it's used space.  Don't worry about locking the
4244 	 * global_rsv, it doesn't change except when the transaction commits.
4245 	 */
4246 	if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
4247 		num_allocated += calc_global_rsv_need_space(global_rsv);
4248 
4249 	/*
4250 	 * in limited mode, we want to have some free space up to
4251 	 * about 1% of the FS size.
4252 	 */
4253 	if (force == CHUNK_ALLOC_LIMITED) {
4254 		thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
4255 		thresh = max_t(u64, 64 * 1024 * 1024,
4256 			       div_factor_fine(thresh, 1));
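		/*
		 * Example (illustrative): on a 1TiB filesystem this is roughly
		 * 10GiB; the 64MiB floor only matters below about 6.4GiB of
		 * total space.
		 */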
4257 
4258 		if (num_bytes - num_allocated < thresh)
4259 			return 1;
4260 	}
4261 
4262 	if (num_allocated + 2 * 1024 * 1024 < div_factor(num_bytes, 8))
4263 		return 0;
4264 	return 1;
4265 }
4266 
4267 static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
4268 {
4269 	u64 num_dev;
4270 
4271 	if (type & (BTRFS_BLOCK_GROUP_RAID10 |
4272 		    BTRFS_BLOCK_GROUP_RAID0 |
4273 		    BTRFS_BLOCK_GROUP_RAID5 |
4274 		    BTRFS_BLOCK_GROUP_RAID6))
4275 		num_dev = root->fs_info->fs_devices->rw_devices;
4276 	else if (type & BTRFS_BLOCK_GROUP_RAID1)
4277 		num_dev = 2;
4278 	else
4279 		num_dev = 1;	/* DUP or single */
4280 
4281 	return num_dev;
4282 }
4283 
4284 /*
4285  * Reserve space in the system space info necessary for updating device
4286  * items and adding or removing a chunk item, allocating a new system chunk
4287  * first if there is not enough space left.
4288  */
4289 void check_system_chunk(struct btrfs_trans_handle *trans,
4290 			struct btrfs_root *root,
4291 			u64 type)
4292 {
4293 	struct btrfs_space_info *info;
4294 	u64 left;
4295 	u64 thresh;
4296 	int ret = 0;
4297 	u64 num_devs;
4298 
4299 	/*
4300 	 * Needed because we can end up allocating a system chunk and need an
4301 	 * atomic and race-free space reservation in the chunk block reserve.
4302 	 */
4303 	ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));
4304 
4305 	info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4306 	spin_lock(&info->lock);
4307 	left = info->total_bytes - info->bytes_used - info->bytes_pinned -
4308 		info->bytes_reserved - info->bytes_readonly -
4309 		info->bytes_may_use;
4310 	spin_unlock(&info->lock);
4311 
4312 	num_devs = get_profile_num_devs(root, type);
4313 
4314 	/* num_devs device items to update and 1 chunk item to add or remove */
4315 	thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
4316 		btrfs_calc_trans_metadata_size(root, 1);
4317 
4318 	if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
4319 		btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
4320 			left, thresh, type);
4321 		dump_space_info(info, 0, 0);
4322 	}
4323 
4324 	if (left < thresh) {
4325 		u64 flags;
4326 
4327 		flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
4328 		/*
4329 		 * Ignore failure to create system chunk. We might end up not
4330 		 * needing it, as we might not need to COW all nodes/leafs from
4331 		 * the paths we visit in the chunk tree (they were already COWed
4332 		 * or created in the current transaction for example).
4333 		 */
4334 		ret = btrfs_alloc_chunk(trans, root, flags);
4335 	}
4336 
4337 	if (!ret) {
4338 		ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
4339 					  &root->fs_info->chunk_block_rsv,
4340 					  thresh, BTRFS_RESERVE_NO_FLUSH);
4341 		if (!ret)
4342 			trans->chunk_bytes_reserved += thresh;
4343 	}
4344 }
4345 
4346 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
4347 			  struct btrfs_root *extent_root, u64 flags, int force)
4348 {
4349 	struct btrfs_space_info *space_info;
4350 	struct btrfs_fs_info *fs_info = extent_root->fs_info;
4351 	int wait_for_alloc = 0;
4352 	int ret = 0;
4353 
4354 	/* Don't re-enter if we're already allocating a chunk */
4355 	if (trans->allocating_chunk)
4356 		return -ENOSPC;
4357 
4358 	space_info = __find_space_info(extent_root->fs_info, flags);
4359 	if (!space_info) {
4360 		ret = update_space_info(extent_root->fs_info, flags,
4361 					0, 0, &space_info);
4362 		BUG_ON(ret); /* -ENOMEM */
4363 	}
4364 	BUG_ON(!space_info); /* Logic error */
4365 
4366 again:
4367 	spin_lock(&space_info->lock);
4368 	if (force < space_info->force_alloc)
4369 		force = space_info->force_alloc;
4370 	if (space_info->full) {
4371 		if (should_alloc_chunk(extent_root, space_info, force))
4372 			ret = -ENOSPC;
4373 		else
4374 			ret = 0;
4375 		spin_unlock(&space_info->lock);
4376 		return ret;
4377 	}
4378 
4379 	if (!should_alloc_chunk(extent_root, space_info, force)) {
4380 		spin_unlock(&space_info->lock);
4381 		return 0;
4382 	} else if (space_info->chunk_alloc) {
4383 		wait_for_alloc = 1;
4384 	} else {
4385 		space_info->chunk_alloc = 1;
4386 	}
4387 
4388 	spin_unlock(&space_info->lock);
4389 
4390 	mutex_lock(&fs_info->chunk_mutex);
4391 
4392 	/*
4393 	 * The chunk_mutex is held throughout the entirety of a chunk
4394 	 * allocation, so once we've acquired the chunk_mutex we know that the
4395 	 * other guy is done and we need to recheck and see if we should
4396 	 * allocate.
4397 	 */
4398 	if (wait_for_alloc) {
4399 		mutex_unlock(&fs_info->chunk_mutex);
4400 		wait_for_alloc = 0;
4401 		cond_resched();
4402 		goto again;
4403 	}
4404 
4405 	trans->allocating_chunk = true;
4406 
4407 	/*
4408 	 * If we have mixed data/metadata chunks we want to make sure we keep
4409 	 * allocating mixed chunks instead of individual chunks.
4410 	 */
4411 	if (btrfs_mixed_space_info(space_info))
4412 		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4413 
4414 	/*
4415 	 * if we're doing a data chunk, go ahead and make sure that
4416 	 * we keep a reasonable number of metadata chunks allocated in the
4417 	 * FS as well.
4418 	 */
4419 	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4420 		fs_info->data_chunk_allocations++;
4421 		if (!(fs_info->data_chunk_allocations %
4422 		      fs_info->metadata_ratio))
4423 			force_metadata_allocation(fs_info);
4424 	}
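	/*
	 * Example (illustrative): with metadata_ratio=8, every 8th data chunk
	 * allocation also forces a metadata chunk allocation.
	 */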
4425 
4426 	/*
4427 	 * Check if we have enough space in SYSTEM chunk because we may need
4428 	 * to update devices.
4429 	 */
4430 	check_system_chunk(trans, extent_root, flags);
4431 
4432 	ret = btrfs_alloc_chunk(trans, extent_root, flags);
4433 	trans->allocating_chunk = false;
4434 
4435 	spin_lock(&space_info->lock);
4436 	if (ret < 0 && ret != -ENOSPC)
4437 		goto out;
4438 	if (ret)
4439 		space_info->full = 1;
4440 	else
4441 		ret = 1;
4442 
4443 	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4444 out:
4445 	space_info->chunk_alloc = 0;
4446 	spin_unlock(&space_info->lock);
4447 	mutex_unlock(&fs_info->chunk_mutex);
4448 	/*
4449 	 * When we allocate a new chunk we reserve space in the chunk block
4450 	 * reserve to make sure we can COW nodes/leafs in the chunk tree or
4451 	 * add new nodes/leafs to it if we end up needing to do it when
4452 	 * inserting the chunk item and updating device items as part of the
4453 	 * second phase of chunk allocation, performed by
4454 	 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4455 	 * large number of new block groups to create in our transaction
4456 	 * handle's new_bgs list to avoid exhausting the chunk block reserve
4457 	 * in extreme cases - like having a single transaction create many new
4458 	 * block groups when starting to write out the free space caches of all
4459 	 * the block groups that were made dirty during the lifetime of the
4460 	 * transaction.
4461 	 */
4462 	if (trans->can_flush_pending_bgs &&
4463 	    trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
4464 		btrfs_create_pending_block_groups(trans, trans->root);
4465 		btrfs_trans_release_chunk_metadata(trans);
4466 	}
4467 	return ret;
4468 }
4469 
4470 static int can_overcommit(struct btrfs_root *root,
4471 			  struct btrfs_space_info *space_info, u64 bytes,
4472 			  enum btrfs_reserve_flush_enum flush)
4473 {
4474 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4475 	u64 profile = btrfs_get_alloc_profile(root, 0);
4476 	u64 space_size;
4477 	u64 avail;
4478 	u64 used;
4479 
4480 	used = space_info->bytes_used + space_info->bytes_reserved +
4481 		space_info->bytes_pinned + space_info->bytes_readonly;
4482 
4483 	/*
4484 	 * We only want to allow over committing if we have lots of actual space
4485 	 * free, but if we don't have enough space to handle the global reserve
4486 	 * space then we could end up having a real enospc problem when trying
4487 	 * to allocate a chunk or some other such important allocation.
4488 	 */
4489 	spin_lock(&global_rsv->lock);
4490 	space_size = calc_global_rsv_need_space(global_rsv);
4491 	spin_unlock(&global_rsv->lock);
4492 	if (used + space_size >= space_info->total_bytes)
4493 		return 0;
4494 
4495 	used += space_info->bytes_may_use;
4496 
4497 	spin_lock(&root->fs_info->free_chunk_lock);
4498 	avail = root->fs_info->free_chunk_space;
4499 	spin_unlock(&root->fs_info->free_chunk_lock);
4500 
4501 	/*
4502 	 * If we have dup, raid1 or raid10 then only half of the free
4503 	 * space is actually usable.  For raid56, the space info used
4504 	 * doesn't include the parity drive, so we don't have to
4505 	 * change the math
4506 	 */
4507 	if (profile & (BTRFS_BLOCK_GROUP_DUP |
4508 		       BTRFS_BLOCK_GROUP_RAID1 |
4509 		       BTRFS_BLOCK_GROUP_RAID10))
4510 		avail >>= 1;
4511 
4512 	/*
4513 	 * If we aren't flushing all things, let us overcommit up to
4514 	 * half of the space.  If we can flush, don't let us overcommit
4515 	 * too much; let it overcommit up to 1/8 of the space.
4516 	 */
4517 	if (flush == BTRFS_RESERVE_FLUSH_ALL)
4518 		avail >>= 3;
4519 	else
4520 		avail >>= 1;
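	/*
	 * Example (illustrative): with 8GiB of unallocated space, a FLUSH_ALL
	 * caller may overcommit by up to 1GiB, any other caller by up to 4GiB.
	 */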
4521 
4522 	if (used + bytes < space_info->total_bytes + avail)
4523 		return 1;
4524 	return 0;
4525 }
4526 
4527 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4528 					 unsigned long nr_pages, int nr_items)
4529 {
4530 	struct super_block *sb = root->fs_info->sb;
4531 
4532 	if (down_read_trylock(&sb->s_umount)) {
4533 		writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4534 		up_read(&sb->s_umount);
4535 	} else {
4536 		/*
4537 		 * We needn't worry about the filesystem going from r/w to r/o
4538 		 * even though we don't acquire the ->s_umount mutex, because the
4539 		 * filesystem should guarantee the delalloc inode list is empty
4540 		 * after the filesystem becomes read-only (all dirty pages are
4541 		 * written to disk).
4542 		 */
4543 		btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4544 		if (!current->journal_info)
4545 			btrfs_wait_ordered_roots(root->fs_info, nr_items);
4546 	}
4547 }
4548 
4549 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4550 {
4551 	u64 bytes;
4552 	int nr;
4553 
4554 	bytes = btrfs_calc_trans_metadata_size(root, 1);
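	/*
	 * Example (illustrative, assuming 16KiB nodes): one item reserves on
	 * the order of 256KiB of metadata, so to_reclaim = 1MiB gives nr = 4.
	 */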
4555 	nr = (int)div64_u64(to_reclaim, bytes);
4556 	if (!nr)
4557 		nr = 1;
4558 	return nr;
4559 }
4560 
4561 #define EXTENT_SIZE_PER_ITEM	(256 * 1024)
4562 
4563 /*
4564  * shrink metadata reservation for delalloc
4565  */
4566 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4567 			    bool wait_ordered)
4568 {
4569 	struct btrfs_block_rsv *block_rsv;
4570 	struct btrfs_space_info *space_info;
4571 	struct btrfs_trans_handle *trans;
4572 	u64 delalloc_bytes;
4573 	u64 max_reclaim;
4574 	long time_left;
4575 	unsigned long nr_pages;
4576 	int loops;
4577 	int items;
4578 	enum btrfs_reserve_flush_enum flush;
4579 
4580 	/* Calc the number of items we need to flush for this space reservation */
4581 	items = calc_reclaim_items_nr(root, to_reclaim);
4582 	to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4583 
4584 	trans = (struct btrfs_trans_handle *)current->journal_info;
4585 	block_rsv = &root->fs_info->delalloc_block_rsv;
4586 	space_info = block_rsv->space_info;
4587 
4588 	delalloc_bytes = percpu_counter_sum_positive(
4589 						&root->fs_info->delalloc_bytes);
4590 	if (delalloc_bytes == 0) {
4591 		if (trans)
4592 			return;
4593 		if (wait_ordered)
4594 			btrfs_wait_ordered_roots(root->fs_info, items);
4595 		return;
4596 	}
4597 
4598 	loops = 0;
4599 	while (delalloc_bytes && loops < 3) {
4600 		max_reclaim = min(delalloc_bytes, to_reclaim);
4601 		nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
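		/* e.g. with 4KiB pages, a 1MiB max_reclaim means 256 pages */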
4602 		btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4603 		/*
4604 		 * We need to wait for the async pages to actually start before
4605 		 * we do anything.
4606 		 */
4607 		max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4608 		if (!max_reclaim)
4609 			goto skip_async;
4610 
4611 		if (max_reclaim <= nr_pages)
4612 			max_reclaim = 0;
4613 		else
4614 			max_reclaim -= nr_pages;
4615 
4616 		wait_event(root->fs_info->async_submit_wait,
4617 			   atomic_read(&root->fs_info->async_delalloc_pages) <=
4618 			   (int)max_reclaim);
4619 skip_async:
4620 		if (!trans)
4621 			flush = BTRFS_RESERVE_FLUSH_ALL;
4622 		else
4623 			flush = BTRFS_RESERVE_NO_FLUSH;
4624 		spin_lock(&space_info->lock);
4625 		if (can_overcommit(root, space_info, orig, flush)) {
4626 			spin_unlock(&space_info->lock);
4627 			break;
4628 		}
4629 		spin_unlock(&space_info->lock);
4630 
4631 		loops++;
4632 		if (wait_ordered && !trans) {
4633 			btrfs_wait_ordered_roots(root->fs_info, items);
4634 		} else {
4635 			time_left = schedule_timeout_killable(1);
4636 			if (time_left)
4637 				break;
4638 		}
4639 		delalloc_bytes = percpu_counter_sum_positive(
4640 						&root->fs_info->delalloc_bytes);
4641 	}
4642 }
4643 
4644 /**
4645  * may_commit_transaction - possibly commit the transaction if it's ok to
4646  * @root - the root we're allocating for
4647  * @bytes - the number of bytes we want to reserve
4648  * @force - force the commit
4649  *
4650  * This will check to make sure that committing the transaction will actually
4651  * get us somewhere and then commit the transaction if it does.  Otherwise it
4652  * will return -ENOSPC.
4653  */
4654 static int may_commit_transaction(struct btrfs_root *root,
4655 				  struct btrfs_space_info *space_info,
4656 				  u64 bytes, int force)
4657 {
4658 	struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4659 	struct btrfs_trans_handle *trans;
4660 
4661 	trans = (struct btrfs_trans_handle *)current->journal_info;
4662 	if (trans)
4663 		return -EAGAIN;
4664 
4665 	if (force)
4666 		goto commit;
4667 
4668 	/* See if there is enough pinned space to make this reservation */
4669 	if (percpu_counter_compare(&space_info->total_bytes_pinned,
4670 				   bytes) >= 0)
4671 		goto commit;
4672 
4673 	/*
4674 	 * See if there is some space in the delayed insertion reservation for
4675 	 * this reservation.
4676 	 */
4677 	if (space_info != delayed_rsv->space_info)
4678 		return -ENOSPC;
4679 
4680 	spin_lock(&delayed_rsv->lock);
4681 	if (percpu_counter_compare(&space_info->total_bytes_pinned,
4682 				   bytes - delayed_rsv->size) >= 0) {
4683 		spin_unlock(&delayed_rsv->lock);
4684 		return -ENOSPC;
4685 	}
4686 	spin_unlock(&delayed_rsv->lock);
4687 
4688 commit:
4689 	trans = btrfs_join_transaction(root);
4690 	if (IS_ERR(trans))
4691 		return -ENOSPC;
4692 
4693 	return btrfs_commit_transaction(trans, root);
4694 }
4695 
4696 enum flush_state {
4697 	FLUSH_DELAYED_ITEMS_NR	=	1,
4698 	FLUSH_DELAYED_ITEMS	=	2,
4699 	FLUSH_DELALLOC		=	3,
4700 	FLUSH_DELALLOC_WAIT	=	4,
4701 	ALLOC_CHUNK		=	5,
4702 	COMMIT_TRANS		=	6,
4703 };
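/*
 * The states above are attempted in ascending order by the reservation code;
 * each step frees space more aggressively than the last, ending with a full
 * transaction commit.
 */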
4704 
4705 static int flush_space(struct btrfs_root *root,
4706 		       struct btrfs_space_info *space_info, u64 num_bytes,
4707 		       u64 orig_bytes, int state)
4708 {
4709 	struct btrfs_trans_handle *trans;
4710 	int nr;
4711 	int ret = 0;
4712 
4713 	switch (state) {
4714 	case FLUSH_DELAYED_ITEMS_NR:
4715 	case FLUSH_DELAYED_ITEMS:
4716 		if (state == FLUSH_DELAYED_ITEMS_NR)
4717 			nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4718 		else
4719 			nr = -1;
4720 
4721 		trans = btrfs_join_transaction(root);
4722 		if (IS_ERR(trans)) {
4723 			ret = PTR_ERR(trans);
4724 			break;
4725 		}
4726 		ret = btrfs_run_delayed_items_nr(trans, root, nr);
4727 		btrfs_end_transaction(trans, root);
4728 		break;
4729 	case FLUSH_DELALLOC:
4730 	case FLUSH_DELALLOC_WAIT:
4731 		shrink_delalloc(root, num_bytes * 2, orig_bytes,
4732 				state == FLUSH_DELALLOC_WAIT);
4733 		break;
4734 	case ALLOC_CHUNK:
4735 		trans = btrfs_join_transaction(root);
4736 		if (IS_ERR(trans)) {
4737 			ret = PTR_ERR(trans);
4738 			break;
4739 		}
4740 		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4741 				     btrfs_get_alloc_profile(root, 0),
4742 				     CHUNK_ALLOC_NO_FORCE);
4743 		btrfs_end_transaction(trans, root);
4744 		if (ret == -ENOSPC)
4745 			ret = 0;
4746 		break;
4747 	case COMMIT_TRANS:
4748 		ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4749 		break;
4750 	default:
4751 		ret = -ENOSPC;
4752 		break;
4753 	}
4754 
4755 	return ret;
4756 }
4757 
4758 static inline u64
4759 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4760 				 struct btrfs_space_info *space_info)
4761 {
4762 	u64 used;
4763 	u64 expected;
4764 	u64 to_reclaim;
4765 
4766 	to_reclaim = min_t(u64, num_online_cpus() * 1024 * 1024,
4767 				16 * 1024 * 1024);
4768 	spin_lock(&space_info->lock);
4769 	if (can_overcommit(root, space_info, to_reclaim,
4770 			   BTRFS_RESERVE_FLUSH_ALL)) {
4771 		to_reclaim = 0;
4772 		goto out;
4773 	}
4774 
4775 	used = space_info->bytes_used + space_info->bytes_reserved +
4776 	       space_info->bytes_pinned + space_info->bytes_readonly +
4777 	       space_info->bytes_may_use;
4778 	if (can_overcommit(root, space_info, 1024 * 1024,
4779 			   BTRFS_RESERVE_FLUSH_ALL))
4780 		expected = div_factor_fine(space_info->total_bytes, 95);
4781 	else
4782 		expected = div_factor_fine(space_info->total_bytes, 90);
4783 
4784 	if (used > expected)
4785 		to_reclaim = used - expected;
4786 	else
4787 		to_reclaim = 0;
4788 	to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4789 				     space_info->bytes_reserved);
4790 out:
4791 	spin_unlock(&space_info->lock);
4792 
4793 	return to_reclaim;
4794 }
4795 
4796 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4797 					struct btrfs_fs_info *fs_info, u64 used)
4798 {
4799 	u64 thresh = div_factor_fine(space_info->total_bytes, 98);
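	/* thresh is 98% of the space, e.g. ~9.8GiB for a 10GiB space_info */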
4800 
4801 	/* If we're just plain full then async reclaim just slows us down. */
4802 	if (space_info->bytes_used >= thresh)
4803 		return 0;
4804 
4805 	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
4806 		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4807 }
4808 
4809 static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4810 				       struct btrfs_fs_info *fs_info,
4811 				       int flush_state)
4812 {
4813 	u64 used;
4814 
4815 	spin_lock(&space_info->lock);
4816 	/*
4817 	 * We've run out of space and haven't gotten any free space via
4818 	 * flush_space, so don't bother doing async reclaim.
4819 	 */
4820 	if (flush_state > COMMIT_TRANS && space_info->full) {
4821 		spin_unlock(&space_info->lock);
4822 		return 0;
4823 	}
4824 
4825 	used = space_info->bytes_used + space_info->bytes_reserved +
4826 	       space_info->bytes_pinned + space_info->bytes_readonly +
4827 	       space_info->bytes_may_use;
4828 	if (need_do_async_reclaim(space_info, fs_info, used)) {
4829 		spin_unlock(&space_info->lock);
4830 		return 1;
4831 	}
4832 	spin_unlock(&space_info->lock);
4833 
4834 	return 0;
4835 }
4836 
4837 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4838 {
4839 	struct btrfs_fs_info *fs_info;
4840 	struct btrfs_space_info *space_info;
4841 	u64 to_reclaim;
4842 	int flush_state;
4843 
4844 	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4845 	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4846 
4847 	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4848 						      space_info);
4849 	if (!to_reclaim)
4850 		return;
4851 
4852 	flush_state = FLUSH_DELAYED_ITEMS_NR;
4853 	do {
4854 		flush_space(fs_info->fs_root, space_info, to_reclaim,
4855 			    to_reclaim, flush_state);
4856 		flush_state++;
4857 		if (!btrfs_need_do_async_reclaim(space_info, fs_info,
4858 						 flush_state))
4859 			return;
4860 	} while (flush_state < COMMIT_TRANS);
4861 }
4862 
4863 void btrfs_init_async_reclaim_work(struct work_struct *work)
4864 {
4865 	INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4866 }
4867 
4868 /**
4869  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4870  * @root - the root we're allocating for
4871  * @block_rsv - the block_rsv we're allocating for
4872  * @orig_bytes - the number of bytes we want
4873  * @flush - whether or not we can flush to make our reservation
4874  *
4875  * This will reserve orig_bytes number of bytes from the space info associated
4876  * with the block_rsv.  If there is not enough space it will make an attempt to
4877  * flush out space to make room.  It will do this by flushing delalloc if
4878  * possible or committing the transaction.  If flush is 0 then no attempts to
4879  * regain reservations will be made and this will fail if there is not enough
4880  * space already.
4881  */
4882 static int reserve_metadata_bytes(struct btrfs_root *root,
4883 				  struct btrfs_block_rsv *block_rsv,
4884 				  u64 orig_bytes,
4885 				  enum btrfs_reserve_flush_enum flush)
4886 {
4887 	struct btrfs_space_info *space_info = block_rsv->space_info;
4888 	u64 used;
4889 	u64 num_bytes = orig_bytes;
4890 	int flush_state = FLUSH_DELAYED_ITEMS_NR;
4891 	int ret = 0;
4892 	bool flushing = false;
4893 
4894 again:
4895 	ret = 0;
4896 	spin_lock(&space_info->lock);
4897 	/*
4898 	 * We only want to wait if somebody other than us is flushing and we
4899 	 * are actually allowed to flush all things.
4900 	 */
4901 	while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4902 	       space_info->flush) {
4903 		spin_unlock(&space_info->lock);
4904 		/*
4905 		 * If we have a trans handle we can't wait because the flusher
4906 		 * may have to commit the transaction, which would mean we would
4907 		 * deadlock since we are waiting for the flusher to finish, but
4908 		 * hold the current transaction open.
4909 		 */
4910 		if (current->journal_info)
4911 			return -EAGAIN;
4912 		ret = wait_event_killable(space_info->wait, !space_info->flush);
4913 		/* Must have been killed, return */
4914 		if (ret)
4915 			return -EINTR;
4916 
4917 		spin_lock(&space_info->lock);
4918 	}
4919 
4920 	ret = -ENOSPC;
4921 	used = space_info->bytes_used + space_info->bytes_reserved +
4922 		space_info->bytes_pinned + space_info->bytes_readonly +
4923 		space_info->bytes_may_use;
4924 
4925 	/*
4926 	 * The idea here is that if we've not already over-reserved the space
4927 	 * info, we can go ahead and save our reservation first and then start
4928 	 * flushing if we need to.  Otherwise, if we've already overcommitted,
4929 	 * let's start flushing stuff first and then come back and try to make
4930 	 * our reservation.
4931 	 */
4932 	if (used <= space_info->total_bytes) {
4933 		if (used + orig_bytes <= space_info->total_bytes) {
4934 			space_info->bytes_may_use += orig_bytes;
4935 			trace_btrfs_space_reservation(root->fs_info,
4936 				"space_info", space_info->flags, orig_bytes, 1);
4937 			ret = 0;
4938 		} else {
4939 			/*
4940 			 * Ok set num_bytes to orig_bytes since we aren't
4941 			 * overcommitted, this way we only try and reclaim what
4942 			 * we need.
4943 			 */
4944 			num_bytes = orig_bytes;
4945 		}
4946 	} else {
4947 		/*
4948 		 * Ok we're over committed, set num_bytes to the overcommitted
4949 		 * amount plus the amount of bytes that we need for this
4950 		 * reservation.
4951 		 */
4952 		num_bytes = used - space_info->total_bytes +
4953 			(orig_bytes * 2);
4954 	}
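	/*
	 * Example (illustrative): with total_bytes = 10GiB, used = 11GiB and
	 * orig_bytes = 1GiB, num_bytes becomes 11 - 10 + 2 * 1 = 3GiB, so the
	 * flushing below also covers the existing overcommit.
	 */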
4955 
4956 	if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4957 		space_info->bytes_may_use += orig_bytes;
4958 		trace_btrfs_space_reservation(root->fs_info, "space_info",
4959 					      space_info->flags, orig_bytes,
4960 					      1);
4961 		ret = 0;
4962 	}
4963 
4964 	/*
4965 	 * Couldn't make our reservation, save our place so while we're trying
4966 	 * to reclaim space we can actually use it instead of somebody else
4967 	 * stealing it from us.
4968 	 *
4969 	 * We make the other tasks wait for the flush only when we can flush
4970 	 * all things.
4971 	 */
4972 	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
4973 		flushing = true;
4974 		space_info->flush = 1;
4975 	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
4976 		used += orig_bytes;
4977 		/*
4978 		 * We will do the space reservation dance during log replay,
4979 		 * which means we won't have fs_info->fs_root set, so don't do
4980 		 * the async reclaim as we will panic.
4981 		 */
4982 		if (!root->fs_info->log_root_recovering &&
4983 		    need_do_async_reclaim(space_info, root->fs_info, used) &&
4984 		    !work_busy(&root->fs_info->async_reclaim_work))
4985 			queue_work(system_unbound_wq,
4986 				   &root->fs_info->async_reclaim_work);
4987 	}
4988 	spin_unlock(&space_info->lock);
4989 
4990 	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
4991 		goto out;
4992 
4993 	ret = flush_space(root, space_info, num_bytes, orig_bytes,
4994 			  flush_state);
4995 	flush_state++;
4996 
4997 	/*
4998 	 * If we are FLUSH_LIMIT, we can not flush delalloc, or the deadlock
4999 	 * would happen. So skip delalloc flush.
5000 	 */
5001 	if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
5002 	    (flush_state == FLUSH_DELALLOC ||
5003 	     flush_state == FLUSH_DELALLOC_WAIT))
5004 		flush_state = ALLOC_CHUNK;
5005 
5006 	if (!ret)
5007 		goto again;
5008 	else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
5009 		 flush_state < COMMIT_TRANS)
5010 		goto again;
5011 	else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
5012 		 flush_state <= COMMIT_TRANS)
5013 		goto again;
5014 
5015 out:
5016 	if (ret == -ENOSPC &&
5017 	    unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
5018 		struct btrfs_block_rsv *global_rsv =
5019 			&root->fs_info->global_block_rsv;
5020 
5021 		if (block_rsv != global_rsv &&
5022 		    !block_rsv_use_bytes(global_rsv, orig_bytes))
5023 			ret = 0;
5024 	}
5025 	if (ret == -ENOSPC)
5026 		trace_btrfs_space_reservation(root->fs_info,
5027 					      "space_info:enospc",
5028 					      space_info->flags, orig_bytes, 1);
5029 	if (flushing) {
5030 		spin_lock(&space_info->lock);
5031 		space_info->flush = 0;
5032 		wake_up_all(&space_info->wait);
5033 		spin_unlock(&space_info->lock);
5034 	}
5035 	return ret;
5036 }
5037 
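/*
 * Worked example for the reclaim sizing above, with hypothetical numbers:
 * if total_bytes = 10GiB, used = 11GiB and orig_bytes = 16MiB, we are
 * overcommitted and the flush machinery is asked to reclaim
 *
 *	num_bytes = used - total_bytes + orig_bytes * 2
 *	          = 1GiB + 32MiB
 *
 * i.e. enough to get back under the limit plus twice this reservation,
 * leaving some headroom for concurrent reservers.
 */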
5038 static struct btrfs_block_rsv *get_block_rsv(
5039 					const struct btrfs_trans_handle *trans,
5040 					const struct btrfs_root *root)
5041 {
5042 	struct btrfs_block_rsv *block_rsv = NULL;
5043 
5044 	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
5045 	    (root == root->fs_info->csum_root && trans->adding_csums) ||
5046 	     (root == root->fs_info->uuid_root))
5047 		block_rsv = trans->block_rsv;
5048 
5049 	if (!block_rsv)
5050 		block_rsv = root->block_rsv;
5051 
5052 	if (!block_rsv)
5053 		block_rsv = &root->fs_info->empty_block_rsv;
5054 
5055 	return block_rsv;
5056 }
5057 
5058 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
5059 			       u64 num_bytes)
5060 {
5061 	int ret = -ENOSPC;
5062 	spin_lock(&block_rsv->lock);
5063 	if (block_rsv->reserved >= num_bytes) {
5064 		block_rsv->reserved -= num_bytes;
5065 		if (block_rsv->reserved < block_rsv->size)
5066 			block_rsv->full = 0;
5067 		ret = 0;
5068 	}
5069 	spin_unlock(&block_rsv->lock);
5070 	return ret;
5071 }
5072 
5073 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
5074 				u64 num_bytes, int update_size)
5075 {
5076 	spin_lock(&block_rsv->lock);
5077 	block_rsv->reserved += num_bytes;
5078 	if (update_size)
5079 		block_rsv->size += num_bytes;
5080 	else if (block_rsv->reserved >= block_rsv->size)
5081 		block_rsv->full = 1;
5082 	spin_unlock(&block_rsv->lock);
5083 }
5084 
5085 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
5086 			     struct btrfs_block_rsv *dest, u64 num_bytes,
5087 			     int min_factor)
5088 {
5089 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5090 	u64 min_bytes;
5091 
5092 	if (global_rsv->space_info != dest->space_info)
5093 		return -ENOSPC;
5094 
5095 	spin_lock(&global_rsv->lock);
5096 	min_bytes = div_factor(global_rsv->size, min_factor);
5097 	if (global_rsv->reserved < min_bytes + num_bytes) {
5098 		spin_unlock(&global_rsv->lock);
5099 		return -ENOSPC;
5100 	}
5101 	global_rsv->reserved -= num_bytes;
5102 	if (global_rsv->reserved < global_rsv->size)
5103 		global_rsv->full = 0;
5104 	spin_unlock(&global_rsv->lock);
5105 
5106 	block_rsv_add_bytes(dest, num_bytes, 1);
5107 	return 0;
5108 }
5109 
5110 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
5111 				    struct btrfs_block_rsv *block_rsv,
5112 				    struct btrfs_block_rsv *dest, u64 num_bytes)
5113 {
5114 	struct btrfs_space_info *space_info = block_rsv->space_info;
5115 
5116 	spin_lock(&block_rsv->lock);
5117 	if (num_bytes == (u64)-1)
5118 		num_bytes = block_rsv->size;
5119 	block_rsv->size -= num_bytes;
5120 	if (block_rsv->reserved >= block_rsv->size) {
5121 		num_bytes = block_rsv->reserved - block_rsv->size;
5122 		block_rsv->reserved = block_rsv->size;
5123 		block_rsv->full = 1;
5124 	} else {
5125 		num_bytes = 0;
5126 	}
5127 	spin_unlock(&block_rsv->lock);
5128 
5129 	if (num_bytes > 0) {
5130 		if (dest) {
5131 			spin_lock(&dest->lock);
5132 			if (!dest->full) {
5133 				u64 bytes_to_add;
5134 
5135 				bytes_to_add = dest->size - dest->reserved;
5136 				bytes_to_add = min(num_bytes, bytes_to_add);
5137 				dest->reserved += bytes_to_add;
5138 				if (dest->reserved >= dest->size)
5139 					dest->full = 1;
5140 				num_bytes -= bytes_to_add;
5141 			}
5142 			spin_unlock(&dest->lock);
5143 		}
5144 		if (num_bytes) {
5145 			spin_lock(&space_info->lock);
5146 			space_info->bytes_may_use -= num_bytes;
5147 			trace_btrfs_space_reservation(fs_info, "space_info",
5148 					space_info->flags, num_bytes, 0);
5149 			spin_unlock(&space_info->lock);
5150 		}
5151 	}
5152 }
5153 
5154 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
5155 				   struct btrfs_block_rsv *dst, u64 num_bytes)
5156 {
5157 	int ret;
5158 
5159 	ret = block_rsv_use_bytes(src, num_bytes);
5160 	if (ret)
5161 		return ret;
5162 
5163 	block_rsv_add_bytes(dst, num_bytes, 1);
5164 	return 0;
5165 }
5166 
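/*
 * A sketch of how the helpers above compose (hypothetical values): a
 * migration of 64KiB either fully succeeds or leaves both reserves
 * untouched, because block_rsv_use_bytes() only debits src when it has
 * at least 64KiB reserved, and only then is dst credited.
 *
 *	if (block_rsv_migrate_bytes(src, dst, 64 * 1024) == -ENOSPC)
 *		... src was short, dst is unchanged ...
 */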
5167 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
5168 {
5169 	memset(rsv, 0, sizeof(*rsv));
5170 	spin_lock_init(&rsv->lock);
5171 	rsv->type = type;
5172 }
5173 
5174 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
5175 					      unsigned short type)
5176 {
5177 	struct btrfs_block_rsv *block_rsv;
5178 	struct btrfs_fs_info *fs_info = root->fs_info;
5179 
5180 	block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
5181 	if (!block_rsv)
5182 		return NULL;
5183 
5184 	btrfs_init_block_rsv(block_rsv, type);
5185 	block_rsv->space_info = __find_space_info(fs_info,
5186 						  BTRFS_BLOCK_GROUP_METADATA);
5187 	return block_rsv;
5188 }
5189 
5190 void btrfs_free_block_rsv(struct btrfs_root *root,
5191 			  struct btrfs_block_rsv *rsv)
5192 {
5193 	if (!rsv)
5194 		return;
5195 	btrfs_block_rsv_release(root, rsv, (u64)-1);
5196 	kfree(rsv);
5197 }
5198 
5199 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
5200 {
5201 	kfree(rsv);
5202 }
5203 
5204 int btrfs_block_rsv_add(struct btrfs_root *root,
5205 			struct btrfs_block_rsv *block_rsv, u64 num_bytes,
5206 			enum btrfs_reserve_flush_enum flush)
5207 {
5208 	int ret;
5209 
5210 	if (num_bytes == 0)
5211 		return 0;
5212 
5213 	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5214 	if (!ret) {
5215 		block_rsv_add_bytes(block_rsv, num_bytes, 1);
5216 		return 0;
5217 	}
5218 
5219 	return ret;
5220 }
5221 
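/*
 * A minimal usage sketch for btrfs_block_rsv_add() (hypothetical caller,
 * not part of this file): reserve enough space for two tree items up
 * front and give everything back if the operation is abandoned.
 *
 *	u64 bytes = btrfs_calc_trans_metadata_size(root, 2);
 *	int ret;
 *
 *	ret = btrfs_block_rsv_add(root, rsv, bytes, BTRFS_RESERVE_FLUSH_ALL);
 *	if (ret)
 *		return ret;
 *	... do the work that consumes the reservation ...
 *	btrfs_block_rsv_release(root, rsv, bytes);
 */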
5222 int btrfs_block_rsv_check(struct btrfs_root *root,
5223 			  struct btrfs_block_rsv *block_rsv, int min_factor)
5224 {
5225 	u64 num_bytes = 0;
5226 	int ret = -ENOSPC;
5227 
5228 	if (!block_rsv)
5229 		return 0;
5230 
5231 	spin_lock(&block_rsv->lock);
5232 	num_bytes = div_factor(block_rsv->size, min_factor);
5233 	if (block_rsv->reserved >= num_bytes)
5234 		ret = 0;
5235 	spin_unlock(&block_rsv->lock);
5236 
5237 	return ret;
5238 }
5239 
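/*
 * Example: div_factor() scales by tenths, so a min_factor of 8 asks
 * whether at least 80% of the reserve is still backed.  With a
 * hypothetical rsv of size = 10MiB and reserved = 7MiB,
 * btrfs_block_rsv_check(root, rsv, 8) returns -ENOSPC.
 */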
5240 int btrfs_block_rsv_refill(struct btrfs_root *root,
5241 			   struct btrfs_block_rsv *block_rsv, u64 min_reserved,
5242 			   enum btrfs_reserve_flush_enum flush)
5243 {
5244 	u64 num_bytes = 0;
5245 	int ret = -ENOSPC;
5246 
5247 	if (!block_rsv)
5248 		return 0;
5249 
5250 	spin_lock(&block_rsv->lock);
5251 	num_bytes = min_reserved;
5252 	if (block_rsv->reserved >= num_bytes)
5253 		ret = 0;
5254 	else
5255 		num_bytes -= block_rsv->reserved;
5256 	spin_unlock(&block_rsv->lock);
5257 
5258 	if (!ret)
5259 		return 0;
5260 
5261 	ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5262 	if (!ret) {
5263 		block_rsv_add_bytes(block_rsv, num_bytes, 0);
5264 		return 0;
5265 	}
5266 
5267 	return ret;
5268 }
5269 
5270 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
5271 			    struct btrfs_block_rsv *dst_rsv,
5272 			    u64 num_bytes)
5273 {
5274 	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5275 }
5276 
5277 void btrfs_block_rsv_release(struct btrfs_root *root,
5278 			     struct btrfs_block_rsv *block_rsv,
5279 			     u64 num_bytes)
5280 {
5281 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5282 	if (global_rsv == block_rsv ||
5283 	    block_rsv->space_info != global_rsv->space_info)
5284 		global_rsv = NULL;
5285 	block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
5286 				num_bytes);
5287 }
5288 
5289 /*
5290  * helper to calculate size of global block reservation.
5291  * the desired value is sum of space used by extent tree,
5292  * checksum tree and root tree
5293  */
5294 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
5295 {
5296 	struct btrfs_space_info *sinfo;
5297 	u64 num_bytes;
5298 	u64 meta_used;
5299 	u64 data_used;
5300 	int csum_size = btrfs_super_csum_size(fs_info->super_copy);
5301 
5302 	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
5303 	spin_lock(&sinfo->lock);
5304 	data_used = sinfo->bytes_used;
5305 	spin_unlock(&sinfo->lock);
5306 
5307 	sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5308 	spin_lock(&sinfo->lock);
5309 	if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
5310 		data_used = 0;
5311 	meta_used = sinfo->bytes_used;
5312 	spin_unlock(&sinfo->lock);
5313 
5314 	num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
5315 		    csum_size * 2;
5316 	num_bytes += div_u64(data_used + meta_used, 50);
5317 
5318 	if (num_bytes * 3 > meta_used)
5319 		num_bytes = div_u64(meta_used, 3);
5320 
5321 	return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
5322 }
5323 
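/*
 * Worked example with hypothetical numbers: 100GiB of data used, 1GiB of
 * metadata used, 4KiB blocks and 4-byte crc32c csums give
 *
 *	num_bytes  = (100GiB / 4KiB) * 4 * 2 = 200MiB    (csum items, x2)
 *	num_bytes += (100GiB + 1GiB) / 50   ~= 2GiB      (2% of space used)
 *
 * Since that exceeds a third of the 1GiB of metadata actually in use, it
 * is clamped to meta_used / 3 before being rounded up to a multiple of
 * nodesize << 10.
 */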
5324 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
5325 {
5326 	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
5327 	struct btrfs_space_info *sinfo = block_rsv->space_info;
5328 	u64 num_bytes;
5329 
5330 	num_bytes = calc_global_metadata_size(fs_info);
5331 
5332 	spin_lock(&sinfo->lock);
5333 	spin_lock(&block_rsv->lock);
5334 
5335 	block_rsv->size = min_t(u64, num_bytes, 512 * 1024 * 1024);
5336 
5337 	num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
5338 		    sinfo->bytes_reserved + sinfo->bytes_readonly +
5339 		    sinfo->bytes_may_use;
5340 
5341 	if (sinfo->total_bytes > num_bytes) {
5342 		num_bytes = sinfo->total_bytes - num_bytes;
5343 		block_rsv->reserved += num_bytes;
5344 		sinfo->bytes_may_use += num_bytes;
5345 		trace_btrfs_space_reservation(fs_info, "space_info",
5346 				      sinfo->flags, num_bytes, 1);
5347 	}
5348 
5349 	if (block_rsv->reserved >= block_rsv->size) {
5350 		num_bytes = block_rsv->reserved - block_rsv->size;
5351 		sinfo->bytes_may_use -= num_bytes;
5352 		trace_btrfs_space_reservation(fs_info, "space_info",
5353 				      sinfo->flags, num_bytes, 0);
5354 		block_rsv->reserved = block_rsv->size;
5355 		block_rsv->full = 1;
5356 	}
5357 
5358 	spin_unlock(&block_rsv->lock);
5359 	spin_unlock(&sinfo->lock);
5360 }
5361 
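/*
 * The global reserve thus follows the estimate above, capped at 512MiB,
 * and is topped up from whatever slack the metadata space info has: the
 * slack is credited to block_rsv->reserved and accounted as
 * bytes_may_use, then anything beyond block_rsv->size is immediately
 * given back.
 */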
5362 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
5363 {
5364 	struct btrfs_space_info *space_info;
5365 
5366 	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
5367 	fs_info->chunk_block_rsv.space_info = space_info;
5368 
5369 	space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5370 	fs_info->global_block_rsv.space_info = space_info;
5371 	fs_info->delalloc_block_rsv.space_info = space_info;
5372 	fs_info->trans_block_rsv.space_info = space_info;
5373 	fs_info->empty_block_rsv.space_info = space_info;
5374 	fs_info->delayed_block_rsv.space_info = space_info;
5375 
5376 	fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
5377 	fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
5378 	fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
5379 	fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
5380 	if (fs_info->quota_root)
5381 		fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
5382 	fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
5383 
5384 	update_global_block_rsv(fs_info);
5385 }
5386 
5387 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
5388 {
5389 	block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
5390 				(u64)-1);
5391 	WARN_ON(fs_info->delalloc_block_rsv.size > 0);
5392 	WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
5393 	WARN_ON(fs_info->trans_block_rsv.size > 0);
5394 	WARN_ON(fs_info->trans_block_rsv.reserved > 0);
5395 	WARN_ON(fs_info->chunk_block_rsv.size > 0);
5396 	WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
5397 	WARN_ON(fs_info->delayed_block_rsv.size > 0);
5398 	WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
5399 }
5400 
5401 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
5402 				  struct btrfs_root *root)
5403 {
5404 	if (!trans->block_rsv)
5405 		return;
5406 
5407 	if (!trans->bytes_reserved)
5408 		return;
5409 
5410 	trace_btrfs_space_reservation(root->fs_info, "transaction",
5411 				      trans->transid, trans->bytes_reserved, 0);
5412 	btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
5413 	trans->bytes_reserved = 0;
5414 }
5415 
5416 /*
5417  * To be called after all the new block groups attached to the transaction
5418  * handle have been created (btrfs_create_pending_block_groups()).
5419  */
5420 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5421 {
5422 	struct btrfs_fs_info *fs_info = trans->root->fs_info;
5423 
5424 	if (!trans->chunk_bytes_reserved)
5425 		return;
5426 
5427 	WARN_ON_ONCE(!list_empty(&trans->new_bgs));
5428 
5429 	block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
5430 				trans->chunk_bytes_reserved);
5431 	trans->chunk_bytes_reserved = 0;
5432 }
5433 
5434 /* Can only return 0 or -ENOSPC */
5435 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
5436 				  struct inode *inode)
5437 {
5438 	struct btrfs_root *root = BTRFS_I(inode)->root;
5439 	struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
5440 	struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
5441 
5442 	/*
5443 	 * We need to hold space in order to delete our orphan item once we've
5444 	 * added it, so this takes the reservation so we can release it later
5445 	 * when we are truly done with the orphan item.
5446 	 */
5447 	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5448 	trace_btrfs_space_reservation(root->fs_info, "orphan",
5449 				      btrfs_ino(inode), num_bytes, 1);
5450 	return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5451 }
5452 
5453 void btrfs_orphan_release_metadata(struct inode *inode)
5454 {
5455 	struct btrfs_root *root = BTRFS_I(inode)->root;
5456 	u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5457 	trace_btrfs_space_reservation(root->fs_info, "orphan",
5458 				      btrfs_ino(inode), num_bytes, 0);
5459 	btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
5460 }
5461 
5462 /*
5463  * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
5464  * root: the root of the parent directory
5465  * rsv: block reservation
5466  * items: the number of items that we need to reserve for
5467  * qgroup_reserved: used to return the reserved size in qgroup
5468  *
5469  * This function is used to reserve the space for snapshot/subvolume
5470  * creation and deletion. Those operations differ from the common
5471  * file/directory operations: they change two fs/file trees and the
5472  * root tree, and the number of items that the qgroup reserves
5473  * differs from the free space reservation. So we can not use
5474  * the space reservation mechanism in start_transaction().
5475  */
5476 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5477 				     struct btrfs_block_rsv *rsv,
5478 				     int items,
5479 				     u64 *qgroup_reserved,
5480 				     bool use_global_rsv)
5481 {
5482 	u64 num_bytes;
5483 	int ret;
5484 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5485 
5486 	if (root->fs_info->quota_enabled) {
5487 		/* One for parent inode, two for dir entries */
5488 		num_bytes = 3 * root->nodesize;
5489 		ret = btrfs_qgroup_reserve_meta(root, num_bytes);
5490 		if (ret)
5491 			return ret;
5492 	} else {
5493 		num_bytes = 0;
5494 	}
5495 
5496 	*qgroup_reserved = num_bytes;
5497 
5498 	num_bytes = btrfs_calc_trans_metadata_size(root, items);
5499 	rsv->space_info = __find_space_info(root->fs_info,
5500 					    BTRFS_BLOCK_GROUP_METADATA);
5501 	ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5502 				  BTRFS_RESERVE_FLUSH_ALL);
5503 
5504 	if (ret == -ENOSPC && use_global_rsv)
5505 		ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
5506 
5507 	if (ret && *qgroup_reserved)
5508 		btrfs_qgroup_free_meta(root, *qgroup_reserved);
5509 
5510 	return ret;
5511 }
5512 
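/*
 * Example with the default 16KiB nodesize: the qgroup part of a snapshot
 * reservation above is 3 * 16KiB = 48KiB (one node for the parent inode,
 * two for the directory entries), independent of the item-based space
 * reserved via btrfs_calc_trans_metadata_size().
 */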
5513 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5514 				      struct btrfs_block_rsv *rsv,
5515 				      u64 qgroup_reserved)
5516 {
5517 	btrfs_block_rsv_release(root, rsv, (u64)-1);
5518 }
5519 
5520 /**
5521  * drop_outstanding_extent - drop an outstanding extent
5522  * @inode: the inode we're dropping the extent for
5523  * @num_bytes: the number of bytes we're releasing.
5524  *
5525  * This is called when we are freeing up an outstanding extent, either called
5526  * after an error or after an extent is written.  This will return the number of
5527  * reserved extents that need to be freed.  This must be called with
5528  * BTRFS_I(inode)->lock held.
5529  */
5530 static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5531 {
5532 	unsigned drop_inode_space = 0;
5533 	unsigned dropped_extents = 0;
5534 	unsigned num_extents = 0;
5535 
5536 	num_extents = (unsigned)div64_u64(num_bytes +
5537 					  BTRFS_MAX_EXTENT_SIZE - 1,
5538 					  BTRFS_MAX_EXTENT_SIZE);
5539 	ASSERT(num_extents);
5540 	ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
5541 	BTRFS_I(inode)->outstanding_extents -= num_extents;
5542 
5543 	if (BTRFS_I(inode)->outstanding_extents == 0 &&
5544 	    test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5545 			       &BTRFS_I(inode)->runtime_flags))
5546 		drop_inode_space = 1;
5547 
5548 	/*
5549 	 * If we have at least as many outstanding extents as we have
5550 	 * reserved then we need to leave the reserved extents count alone.
5551 	 */
5552 	if (BTRFS_I(inode)->outstanding_extents >=
5553 	    BTRFS_I(inode)->reserved_extents)
5554 		return drop_inode_space;
5555 
5556 	dropped_extents = BTRFS_I(inode)->reserved_extents -
5557 		BTRFS_I(inode)->outstanding_extents;
5558 	BTRFS_I(inode)->reserved_extents -= dropped_extents;
5559 	return dropped_extents + drop_inode_space;
5560 }
5561 
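/*
 * Example of the arithmetic above, with hypothetical numbers: with
 * BTRFS_MAX_EXTENT_SIZE at 128MiB, completing IO on a 300MiB range
 * drops DIV_ROUND_UP(300MiB, 128MiB) = 3 outstanding extents; how many
 * reserved extents (plus possibly the inode-update reservation) are
 * actually returned depends on what is still outstanding afterwards.
 */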
5562 /**
5563  * calc_csum_metadata_size - return the amount of metadata space that must be
5564  *	reserved/freed for the given bytes.
5565  * @inode: the inode we're manipulating
5566  * @num_bytes: the number of bytes in question
5567  * @reserve: 1 if we are reserving space, 0 if we are freeing space
5568  *
5569  * This adjusts the number of csum_bytes in the inode and then returns the
5570  * correct amount of metadata that must either be reserved or freed.  We
5571  * calculate how many checksums we can fit into one leaf and then divide the
5572  * number of bytes that will need to be checksummed by this value to figure out
5573  * how many checksums will be required.  If we are adding bytes then the number
5574  * may go up and we will return the number of additional bytes that must be
5575  * reserved.  If it is going down we will return the number of bytes that must
5576  * be freed.
5577  *
5578  * This must be called with BTRFS_I(inode)->lock held.
5579  */
5580 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5581 				   int reserve)
5582 {
5583 	struct btrfs_root *root = BTRFS_I(inode)->root;
5584 	u64 old_csums, num_csums;
5585 
5586 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5587 	    BTRFS_I(inode)->csum_bytes == 0)
5588 		return 0;
5589 
5590 	old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5591 	if (reserve)
5592 		BTRFS_I(inode)->csum_bytes += num_bytes;
5593 	else
5594 		BTRFS_I(inode)->csum_bytes -= num_bytes;
5595 	num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5596 
5597 	/* No change, no need to reserve more */
5598 	if (old_csums == num_csums)
5599 		return 0;
5600 
5601 	if (reserve)
5602 		return btrfs_calc_trans_metadata_size(root,
5603 						      num_csums - old_csums);
5604 
5605 	return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5606 }
5607 
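/*
 * Example with hypothetical numbers: if one leaf holds csums for 8MiB
 * of data and csum_bytes grows from 1MiB to 9MiB, old_csums = 1 and
 * num_csums = 2, so one extra leaf worth of space,
 * btrfs_calc_trans_metadata_size(root, 1), must be reserved.
 */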
5608 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5609 {
5610 	struct btrfs_root *root = BTRFS_I(inode)->root;
5611 	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5612 	u64 to_reserve = 0;
5613 	u64 csum_bytes;
5614 	unsigned nr_extents = 0;
5615 	int extra_reserve = 0;
5616 	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5617 	int ret = 0;
5618 	bool delalloc_lock = true;
5619 	u64 to_free = 0;
5620 	unsigned dropped;
5621 
5622 	/* If we are a free space inode we need to not flush since we will be in
5623 	 * the middle of a transaction commit.  We also don't need the delalloc
5624 	 * mutex since we won't race with anybody.  We need this mostly to make
5625 	 * lockdep shut its filthy mouth.
5626 	 */
5627 	if (btrfs_is_free_space_inode(inode)) {
5628 		flush = BTRFS_RESERVE_NO_FLUSH;
5629 		delalloc_lock = false;
5630 	}
5631 
5632 	if (flush != BTRFS_RESERVE_NO_FLUSH &&
5633 	    btrfs_transaction_in_commit(root->fs_info))
5634 		schedule_timeout(1);
5635 
5636 	if (delalloc_lock)
5637 		mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5638 
5639 	num_bytes = ALIGN(num_bytes, root->sectorsize);
5640 
5641 	spin_lock(&BTRFS_I(inode)->lock);
5642 	nr_extents = (unsigned)div64_u64(num_bytes +
5643 					 BTRFS_MAX_EXTENT_SIZE - 1,
5644 					 BTRFS_MAX_EXTENT_SIZE);
5645 	BTRFS_I(inode)->outstanding_extents += nr_extents;
5646 	nr_extents = 0;
5647 
5648 	if (BTRFS_I(inode)->outstanding_extents >
5649 	    BTRFS_I(inode)->reserved_extents)
5650 		nr_extents = BTRFS_I(inode)->outstanding_extents -
5651 			BTRFS_I(inode)->reserved_extents;
5652 
5653 	/*
5654 	 * Add an item to reserve for updating the inode when we complete the
5655 	 * delalloc io.
5656 	 */
5657 	if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5658 		      &BTRFS_I(inode)->runtime_flags)) {
5659 		nr_extents++;
5660 		extra_reserve = 1;
5661 	}
5662 
5663 	to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5664 	to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5665 	csum_bytes = BTRFS_I(inode)->csum_bytes;
5666 	spin_unlock(&BTRFS_I(inode)->lock);
5667 
5668 	if (root->fs_info->quota_enabled) {
5669 		ret = btrfs_qgroup_reserve_meta(root,
5670 				nr_extents * root->nodesize);
5671 		if (ret)
5672 			goto out_fail;
5673 	}
5674 
5675 	ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5676 	if (unlikely(ret)) {
5677 		btrfs_qgroup_free_meta(root, nr_extents * root->nodesize);
5678 		goto out_fail;
5679 	}
5680 
5681 	spin_lock(&BTRFS_I(inode)->lock);
5682 	if (extra_reserve) {
5683 		set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5684 			&BTRFS_I(inode)->runtime_flags);
5685 		nr_extents--;
5686 	}
5687 	BTRFS_I(inode)->reserved_extents += nr_extents;
5688 	spin_unlock(&BTRFS_I(inode)->lock);
5689 
5690 	if (delalloc_lock)
5691 		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5692 
5693 	if (to_reserve)
5694 		trace_btrfs_space_reservation(root->fs_info, "delalloc",
5695 					      btrfs_ino(inode), to_reserve, 1);
5696 	block_rsv_add_bytes(block_rsv, to_reserve, 1);
5697 
5698 	return 0;
5699 
5700 out_fail:
5701 	spin_lock(&BTRFS_I(inode)->lock);
5702 	dropped = drop_outstanding_extent(inode, num_bytes);
5703 	/*
5704 	 * If the inode's csum_bytes is the same as the original
5705 	 * csum_bytes then we know we haven't raced with any free()ers
5706 	 * so we can just reduce our inode's csum bytes and carry on.
5707 	 */
5708 	if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5709 		calc_csum_metadata_size(inode, num_bytes, 0);
5710 	} else {
5711 		u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5712 		u64 bytes;
5713 
5714 		/*
5715 		 * This is tricky, but first we need to figure out how much we
5716 		 * freed from any free-ers that occurred during this
5717 		 * reservation, so we reset ->csum_bytes to the csum_bytes
5718 		 * before we dropped our lock, and then call the free for the
5719 		 * number of bytes that were freed while we were trying our
5720 		 * reservation.
5721 		 */
5722 		bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5723 		BTRFS_I(inode)->csum_bytes = csum_bytes;
5724 		to_free = calc_csum_metadata_size(inode, bytes, 0);
5725 
5726 
5727 		/*
5728 		 * Now we need to see how much we would have freed had we not
5729 		 * been making this reservation and our ->csum_bytes were not
5730 		 * artificially inflated.
5731 		 */
5732 		BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5733 		bytes = csum_bytes - orig_csum_bytes;
5734 		bytes = calc_csum_metadata_size(inode, bytes, 0);
5735 
5736 		/*
5737 		 * Now reset ->csum_bytes to what it should be.  If bytes is
5738 		 * more than to_free then we would have freed more space had we
5739 		 * not had an artificially high ->csum_bytes, so we need to free
5740 		 * the remainder.  If bytes is the same or less then we don't
5741 		 * need to do anything, the other free-ers did the correct
5742 		 * thing.
5743 		 */
5744 		BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5745 		if (bytes > to_free)
5746 			to_free = bytes - to_free;
5747 		else
5748 			to_free = 0;
5749 	}
5750 	spin_unlock(&BTRFS_I(inode)->lock);
5751 	if (dropped)
5752 		to_free += btrfs_calc_trans_metadata_size(root, dropped);
5753 
5754 	if (to_free) {
5755 		btrfs_block_rsv_release(root, block_rsv, to_free);
5756 		trace_btrfs_space_reservation(root->fs_info, "delalloc",
5757 					      btrfs_ino(inode), to_free, 0);
5758 	}
5759 	if (delalloc_lock)
5760 		mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5761 	return ret;
5762 }
5763 
5764 /**
5765  * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5766  * @inode: the inode to release the reservation for
5767  * @num_bytes: the number of bytes we're releasing
5768  *
5769  * This will release the metadata reservation for an inode.  This can be called
5770  * once we complete IO for a given set of bytes to release their metadata
5771  * reservations.
5772  */
5773 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5774 {
5775 	struct btrfs_root *root = BTRFS_I(inode)->root;
5776 	u64 to_free = 0;
5777 	unsigned dropped;
5778 
5779 	num_bytes = ALIGN(num_bytes, root->sectorsize);
5780 	spin_lock(&BTRFS_I(inode)->lock);
5781 	dropped = drop_outstanding_extent(inode, num_bytes);
5782 
5783 	if (num_bytes)
5784 		to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5785 	spin_unlock(&BTRFS_I(inode)->lock);
5786 	if (dropped > 0)
5787 		to_free += btrfs_calc_trans_metadata_size(root, dropped);
5788 
5789 	if (btrfs_test_is_dummy_root(root))
5790 		return;
5791 
5792 	trace_btrfs_space_reservation(root->fs_info, "delalloc",
5793 				      btrfs_ino(inode), to_free, 0);
5794 
5795 	btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5796 				to_free);
5797 }
5798 
5799 /**
5800  * btrfs_delalloc_reserve_space - reserve data and metadata space for
5801  * delalloc
5802  * @inode: inode we're writing to
5803  * @start: start range we are writing to
5804  * @len: the length of the range we are writing to
5805  *
5806  * TODO: This function will finally replace old btrfs_delalloc_reserve_space()
5807  *
5808  * This will do the following things
5809  *
5810  * o reserve space in data space info for num bytes
5811  *   and reserve precious corresponding qgroup space
5812  *   (Done in check_data_free_space)
5813  *
5814  * o reserve space for metadata space, based on the number of outstanding
5815  *   extents and how much csums will be needed
5816  *   also reserve metadata space in a per root over-reserve method.
5817  * o add to the inodes->delalloc_bytes
5818  * o add it to the fs_info's delalloc inodes list.
5819  *   (Above 3 all done in delalloc_reserve_metadata)
5820  *
5821  * Return 0 for success
5822  * Return <0 for error (-ENOSPC or -EDQUOT)
5823  */
5824 int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len)
5825 {
5826 	int ret;
5827 
5828 	ret = btrfs_check_data_free_space(inode, start, len);
5829 	if (ret < 0)
5830 		return ret;
5831 	ret = btrfs_delalloc_reserve_metadata(inode, len);
5832 	if (ret < 0)
5833 		btrfs_free_reserved_data_space(inode, start, len);
5834 	return ret;
5835 }
5836 
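/*
 * A minimal sketch of the intended pairing (hypothetical write path;
 * do_the_write() is a made-up helper):
 *
 *	ret = btrfs_delalloc_reserve_space(inode, pos, count);
 *	if (ret)
 *		return ret;
 *	ret = do_the_write(inode, pos, count);
 *	if (ret)
 *		btrfs_delalloc_release_space(inode, pos, count);
 */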
5837 /**
5838  * btrfs_delalloc_release_space - release data and metadata space for delalloc
5839  * @inode: inode we're releasing space for
5840  * @start: start position of the space already reserved
5841  * @len: the len of the space already reserved
5842  *
5843  * This must be matched with a call to btrfs_delalloc_reserve_space.  This is
5844  * called in the case that we don't need the metadata AND data reservations
5845  * anymore, e.g. when there is an error or we insert an inline extent.
5846  *
5847  * This function will release the metadata space that was not used and will
5848  * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5849  * list if there are no delalloc bytes left.
5850  * Also it will handle the qgroup reserved space.
5851  */
5852 void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len)
5853 {
5854 	btrfs_delalloc_release_metadata(inode, len);
5855 	btrfs_free_reserved_data_space(inode, start, len);
5856 }
5857 
5858 static int update_block_group(struct btrfs_trans_handle *trans,
5859 			      struct btrfs_root *root, u64 bytenr,
5860 			      u64 num_bytes, int alloc)
5861 {
5862 	struct btrfs_block_group_cache *cache = NULL;
5863 	struct btrfs_fs_info *info = root->fs_info;
5864 	u64 total = num_bytes;
5865 	u64 old_val;
5866 	u64 byte_in_group;
5867 	int factor;
5868 
5869 	/* block accounting for super block */
5870 	spin_lock(&info->delalloc_root_lock);
5871 	old_val = btrfs_super_bytes_used(info->super_copy);
5872 	if (alloc)
5873 		old_val += num_bytes;
5874 	else
5875 		old_val -= num_bytes;
5876 	btrfs_set_super_bytes_used(info->super_copy, old_val);
5877 	spin_unlock(&info->delalloc_root_lock);
5878 
5879 	while (total) {
5880 		cache = btrfs_lookup_block_group(info, bytenr);
5881 		if (!cache)
5882 			return -ENOENT;
5883 		if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5884 				    BTRFS_BLOCK_GROUP_RAID1 |
5885 				    BTRFS_BLOCK_GROUP_RAID10))
5886 			factor = 2;
5887 		else
5888 			factor = 1;
5889 		/*
5890 		 * If this block group has free space cache written out, we
5891 		 * need to make sure to load it if we are removing space.  This
5892 		 * is because we need the unpinning stage to actually add the
5893 		 * space back to the block group, otherwise we will leak space.
5894 		 */
5895 		if (!alloc && cache->cached == BTRFS_CACHE_NO)
5896 			cache_block_group(cache, 1);
5897 
5898 		byte_in_group = bytenr - cache->key.objectid;
5899 		WARN_ON(byte_in_group > cache->key.offset);
5900 
5901 		spin_lock(&cache->space_info->lock);
5902 		spin_lock(&cache->lock);
5903 
5904 		if (btrfs_test_opt(root, SPACE_CACHE) &&
5905 		    cache->disk_cache_state < BTRFS_DC_CLEAR)
5906 			cache->disk_cache_state = BTRFS_DC_CLEAR;
5907 
5908 		old_val = btrfs_block_group_used(&cache->item);
5909 		num_bytes = min(total, cache->key.offset - byte_in_group);
5910 		if (alloc) {
5911 			old_val += num_bytes;
5912 			btrfs_set_block_group_used(&cache->item, old_val);
5913 			cache->reserved -= num_bytes;
5914 			cache->space_info->bytes_reserved -= num_bytes;
5915 			cache->space_info->bytes_used += num_bytes;
5916 			cache->space_info->disk_used += num_bytes * factor;
5917 			spin_unlock(&cache->lock);
5918 			spin_unlock(&cache->space_info->lock);
5919 		} else {
5920 			old_val -= num_bytes;
5921 			btrfs_set_block_group_used(&cache->item, old_val);
5922 			cache->pinned += num_bytes;
5923 			cache->space_info->bytes_pinned += num_bytes;
5924 			cache->space_info->bytes_used -= num_bytes;
5925 			cache->space_info->disk_used -= num_bytes * factor;
5926 			spin_unlock(&cache->lock);
5927 			spin_unlock(&cache->space_info->lock);
5928 
5929 			set_extent_dirty(info->pinned_extents,
5930 					 bytenr, bytenr + num_bytes - 1,
5931 					 GFP_NOFS | __GFP_NOFAIL);
5932 		}
5933 
5934 		spin_lock(&trans->transaction->dirty_bgs_lock);
5935 		if (list_empty(&cache->dirty_list)) {
5936 			list_add_tail(&cache->dirty_list,
5937 				      &trans->transaction->dirty_bgs);
5938 			trans->transaction->num_dirty_bgs++;
5939 			btrfs_get_block_group(cache);
5940 		}
5941 		spin_unlock(&trans->transaction->dirty_bgs_lock);
5942 
5943 		/*
5944 		 * No longer have used bytes in this block group, queue it for
5945 		 * deletion. We do this after adding the block group to the
5946 		 * dirty list to avoid races between cleaner kthread and space
5947 		 * cache writeout.
5948 		 */
5949 		if (!alloc && old_val == 0) {
5950 			spin_lock(&info->unused_bgs_lock);
5951 			if (list_empty(&cache->bg_list)) {
5952 				btrfs_get_block_group(cache);
5953 				list_add_tail(&cache->bg_list,
5954 					      &info->unused_bgs);
5955 			}
5956 			spin_unlock(&info->unused_bgs_lock);
5957 		}
5958 
5959 		btrfs_put_block_group(cache);
5960 		total -= num_bytes;
5961 		bytenr += num_bytes;
5962 	}
5963 	return 0;
5964 }
5965 
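/*
 * The factor above accounts for on-disk redundancy: freeing a 1MiB
 * extent from a RAID1 or DUP block group, for example, reduces
 * bytes_used by 1MiB but disk_used by 2MiB, since both copies are
 * released.
 */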
5966 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
5967 {
5968 	struct btrfs_block_group_cache *cache;
5969 	u64 bytenr;
5970 
5971 	spin_lock(&root->fs_info->block_group_cache_lock);
5972 	bytenr = root->fs_info->first_logical_byte;
5973 	spin_unlock(&root->fs_info->block_group_cache_lock);
5974 
5975 	if (bytenr < (u64)-1)
5976 		return bytenr;
5977 
5978 	cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
5979 	if (!cache)
5980 		return 0;
5981 
5982 	bytenr = cache->key.objectid;
5983 	btrfs_put_block_group(cache);
5984 
5985 	return bytenr;
5986 }
5987 
5988 static int pin_down_extent(struct btrfs_root *root,
5989 			   struct btrfs_block_group_cache *cache,
5990 			   u64 bytenr, u64 num_bytes, int reserved)
5991 {
5992 	spin_lock(&cache->space_info->lock);
5993 	spin_lock(&cache->lock);
5994 	cache->pinned += num_bytes;
5995 	cache->space_info->bytes_pinned += num_bytes;
5996 	if (reserved) {
5997 		cache->reserved -= num_bytes;
5998 		cache->space_info->bytes_reserved -= num_bytes;
5999 	}
6000 	spin_unlock(&cache->lock);
6001 	spin_unlock(&cache->space_info->lock);
6002 
6003 	set_extent_dirty(root->fs_info->pinned_extents, bytenr,
6004 			 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
6005 	if (reserved)
6006 		trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
6007 	return 0;
6008 }
6009 
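/*
 * Example: pinning a node that was just COWed (reserved == 1) moves its
 * bytes from the reserved counters to the pinned counters; they only
 * become free space again once the transaction that freed them commits
 * and unpin_extent_range() runs.
 */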
6010 /*
6011  * this function must be called within transaction
6012  */
6013 int btrfs_pin_extent(struct btrfs_root *root,
6014 		     u64 bytenr, u64 num_bytes, int reserved)
6015 {
6016 	struct btrfs_block_group_cache *cache;
6017 
6018 	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
6019 	BUG_ON(!cache); /* Logic error */
6020 
6021 	pin_down_extent(root, cache, bytenr, num_bytes, reserved);
6022 
6023 	btrfs_put_block_group(cache);
6024 	return 0;
6025 }
6026 
6027 /*
6028  * this function must be called within transaction
6029  */
6030 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
6031 				    u64 bytenr, u64 num_bytes)
6032 {
6033 	struct btrfs_block_group_cache *cache;
6034 	int ret;
6035 
6036 	cache = btrfs_lookup_block_group(root->fs_info, bytenr);
6037 	if (!cache)
6038 		return -EINVAL;
6039 
6040 	/*
6041 	 * pull in the free space cache (if any) so that our pin
6042 	 * removes the free space from the cache.  We have load_only set
6043 	 * to one because the slow code to read in the free extents does check
6044 	 * the pinned extents.
6045 	 */
6046 	cache_block_group(cache, 1);
6047 
6048 	pin_down_extent(root, cache, bytenr, num_bytes, 0);
6049 
6050 	/* remove us from the free space cache (if we're there at all) */
6051 	ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
6052 	btrfs_put_block_group(cache);
6053 	return ret;
6054 }
6055 
6056 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
6057 {
6058 	int ret;
6059 	struct btrfs_block_group_cache *block_group;
6060 	struct btrfs_caching_control *caching_ctl;
6061 
6062 	block_group = btrfs_lookup_block_group(root->fs_info, start);
6063 	if (!block_group)
6064 		return -EINVAL;
6065 
6066 	cache_block_group(block_group, 0);
6067 	caching_ctl = get_caching_control(block_group);
6068 
6069 	if (!caching_ctl) {
6070 		/* Logic error */
6071 		BUG_ON(!block_group_cache_done(block_group));
6072 		ret = btrfs_remove_free_space(block_group, start, num_bytes);
6073 	} else {
6074 		mutex_lock(&caching_ctl->mutex);
6075 
6076 		if (start >= caching_ctl->progress) {
6077 			ret = add_excluded_extent(root, start, num_bytes);
6078 		} else if (start + num_bytes <= caching_ctl->progress) {
6079 			ret = btrfs_remove_free_space(block_group,
6080 						      start, num_bytes);
6081 		} else {
6082 			num_bytes = caching_ctl->progress - start;
6083 			ret = btrfs_remove_free_space(block_group,
6084 						      start, num_bytes);
6085 			if (ret)
6086 				goto out_lock;
6087 
6088 			num_bytes = (start + num_bytes) -
6089 				caching_ctl->progress;
6090 			start = caching_ctl->progress;
6091 			ret = add_excluded_extent(root, start, num_bytes);
6092 		}
6093 out_lock:
6094 		mutex_unlock(&caching_ctl->mutex);
6095 		put_caching_control(caching_ctl);
6096 	}
6097 	btrfs_put_block_group(block_group);
6098 	return ret;
6099 }
6100 
6101 int btrfs_exclude_logged_extents(struct btrfs_root *log,
6102 				 struct extent_buffer *eb)
6103 {
6104 	struct btrfs_file_extent_item *item;
6105 	struct btrfs_key key;
6106 	int found_type;
6107 	int i;
6108 
6109 	if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
6110 		return 0;
6111 
6112 	for (i = 0; i < btrfs_header_nritems(eb); i++) {
6113 		btrfs_item_key_to_cpu(eb, &key, i);
6114 		if (key.type != BTRFS_EXTENT_DATA_KEY)
6115 			continue;
6116 		item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
6117 		found_type = btrfs_file_extent_type(eb, item);
6118 		if (found_type == BTRFS_FILE_EXTENT_INLINE)
6119 			continue;
6120 		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
6121 			continue;
6122 		key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
6123 		key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
6124 		__exclude_logged_extent(log, key.objectid, key.offset);
6125 	}
6126 
6127 	return 0;
6128 }
6129 
6130 /**
6131  * btrfs_update_reserved_bytes - update the block_group and space info counters
6132  * @cache:	The cache we are manipulating
6133  * @num_bytes:	The number of bytes in question
6134  * @reserve:	One of the reservation enums
6135  * @delalloc:   The blocks are allocated for the delalloc write
6136  *
6137  * This is called by the allocator when it reserves space, or by somebody who is
6138  * freeing space that was never actually used on disk.  For example if you
6139  * reserve some space for a new leaf in transaction A and before transaction A
6140  * commits you free that leaf, you call this with reserve set to 0 in order to
6141  * clear the reservation.
6142  *
6143  * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
6144  * ENOSPC accounting.  For data we handle the reservation through clearing the
6145  * delalloc bits in the io_tree.  We have to do this since we could end up
6146  * allocating less disk space for the amount of data we have reserved in the
6147  * case of compression.
6148  *
6149  * If this is a reservation and the block group has become read only we cannot
6150  * make the reservation and return -EAGAIN, otherwise this function always
6151  * succeeds.
6152  */
6153 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
6154 				       u64 num_bytes, int reserve, int delalloc)
6155 {
6156 	struct btrfs_space_info *space_info = cache->space_info;
6157 	int ret = 0;
6158 
6159 	spin_lock(&space_info->lock);
6160 	spin_lock(&cache->lock);
6161 	if (reserve != RESERVE_FREE) {
6162 		if (cache->ro) {
6163 			ret = -EAGAIN;
6164 		} else {
6165 			cache->reserved += num_bytes;
6166 			space_info->bytes_reserved += num_bytes;
6167 			if (reserve == RESERVE_ALLOC) {
6168 				trace_btrfs_space_reservation(cache->fs_info,
6169 						"space_info", space_info->flags,
6170 						num_bytes, 0);
6171 				space_info->bytes_may_use -= num_bytes;
6172 			}
6173 
6174 			if (delalloc)
6175 				cache->delalloc_bytes += num_bytes;
6176 		}
6177 	} else {
6178 		if (cache->ro)
6179 			space_info->bytes_readonly += num_bytes;
6180 		cache->reserved -= num_bytes;
6181 		space_info->bytes_reserved -= num_bytes;
6182 
6183 		if (delalloc)
6184 			cache->delalloc_bytes -= num_bytes;
6185 	}
6186 	spin_unlock(&cache->lock);
6187 	spin_unlock(&space_info->lock);
6188 	return ret;
6189 }
6190 
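/*
 * Example: when the allocator reserves 1MiB of metadata it calls this
 * with RESERVE_ALLOC, adding 1MiB to cache->reserved and
 * space_info->bytes_reserved and subtracting the same 1MiB from
 * bytes_may_use, converting the earlier in-memory reservation into one
 * backed by this block group.
 */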
6191 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
6192 				struct btrfs_root *root)
6193 {
6194 	struct btrfs_fs_info *fs_info = root->fs_info;
6195 	struct btrfs_caching_control *next;
6196 	struct btrfs_caching_control *caching_ctl;
6197 	struct btrfs_block_group_cache *cache;
6198 
6199 	down_write(&fs_info->commit_root_sem);
6200 
6201 	list_for_each_entry_safe(caching_ctl, next,
6202 				 &fs_info->caching_block_groups, list) {
6203 		cache = caching_ctl->block_group;
6204 		if (block_group_cache_done(cache)) {
6205 			cache->last_byte_to_unpin = (u64)-1;
6206 			list_del_init(&caching_ctl->list);
6207 			put_caching_control(caching_ctl);
6208 		} else {
6209 			cache->last_byte_to_unpin = caching_ctl->progress;
6210 		}
6211 	}
6212 
6213 	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6214 		fs_info->pinned_extents = &fs_info->freed_extents[1];
6215 	else
6216 		fs_info->pinned_extents = &fs_info->freed_extents[0];
6217 
6218 	up_write(&fs_info->commit_root_sem);
6219 
6220 	update_global_block_rsv(fs_info);
6221 }
6222 
6223 /*
6224  * Returns the free cluster for the given space info and sets empty_cluster to
6225  * what it should be based on the mount options.
6226  */
6227 static struct btrfs_free_cluster *
6228 fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
6229 		   u64 *empty_cluster)
6230 {
6231 	struct btrfs_free_cluster *ret = NULL;
6232 	bool ssd = btrfs_test_opt(root, SSD);
6233 
6234 	*empty_cluster = 0;
6235 	if (btrfs_mixed_space_info(space_info))
6236 		return ret;
6237 
6238 	if (ssd)
6239 		*empty_cluster = 2 * 1024 * 1024;
6240 	if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
6241 		ret = &root->fs_info->meta_alloc_cluster;
6242 		if (!ssd)
6243 			*empty_cluster = 64 * 1024;
6244 	} else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && ssd) {
6245 		ret = &root->fs_info->data_alloc_cluster;
6246 	}
6247 
6248 	return ret;
6249 }
6250 
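/*
 * E.g. a metadata space info on rotational storage gets the
 * meta_alloc_cluster with a 64KiB empty_cluster target, while with the
 * ssd mount option both metadata and data clusters aim for 2MiB.
 */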
6251 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
6252 			      const bool return_free_space)
6253 {
6254 	struct btrfs_fs_info *fs_info = root->fs_info;
6255 	struct btrfs_block_group_cache *cache = NULL;
6256 	struct btrfs_space_info *space_info;
6257 	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
6258 	struct btrfs_free_cluster *cluster = NULL;
6259 	u64 len;
6260 	u64 total_unpinned = 0;
6261 	u64 empty_cluster = 0;
6262 	bool readonly;
6263 
6264 	while (start <= end) {
6265 		readonly = false;
6266 		if (!cache ||
6267 		    start >= cache->key.objectid + cache->key.offset) {
6268 			if (cache)
6269 				btrfs_put_block_group(cache);
6270 			total_unpinned = 0;
6271 			cache = btrfs_lookup_block_group(fs_info, start);
6272 			BUG_ON(!cache); /* Logic error */
6273 
6274 			cluster = fetch_cluster_info(root,
6275 						     cache->space_info,
6276 						     &empty_cluster);
6277 			empty_cluster <<= 1;
6278 		}
6279 
6280 		len = cache->key.objectid + cache->key.offset - start;
6281 		len = min(len, end + 1 - start);
6282 
6283 		if (start < cache->last_byte_to_unpin) {
6284 			len = min(len, cache->last_byte_to_unpin - start);
6285 			if (return_free_space)
6286 				btrfs_add_free_space(cache, start, len);
6287 		}
6288 
6289 		start += len;
6290 		total_unpinned += len;
6291 		space_info = cache->space_info;
6292 
6293 		/*
6294 		 * If this space cluster has been marked as fragmented and we've
6295 		 * unpinned enough in this block group to potentially allow a
6296 		 * cluster to be created inside of it go ahead and clear the
6297 		 * fragmented check.
6298 		 */
6299 		if (cluster && cluster->fragmented &&
6300 		    total_unpinned > empty_cluster) {
6301 			spin_lock(&cluster->lock);
6302 			cluster->fragmented = 0;
6303 			spin_unlock(&cluster->lock);
6304 		}
6305 
6306 		spin_lock(&space_info->lock);
6307 		spin_lock(&cache->lock);
6308 		cache->pinned -= len;
6309 		space_info->bytes_pinned -= len;
6310 		space_info->max_extent_size = 0;
6311 		percpu_counter_add(&space_info->total_bytes_pinned, -len);
6312 		if (cache->ro) {
6313 			space_info->bytes_readonly += len;
6314 			readonly = true;
6315 		}
6316 		spin_unlock(&cache->lock);
6317 		if (!readonly && global_rsv->space_info == space_info) {
6318 			spin_lock(&global_rsv->lock);
6319 			if (!global_rsv->full) {
6320 				len = min(len, global_rsv->size -
6321 					  global_rsv->reserved);
6322 				global_rsv->reserved += len;
6323 				space_info->bytes_may_use += len;
6324 				if (global_rsv->reserved >= global_rsv->size)
6325 					global_rsv->full = 1;
6326 			}
6327 			spin_unlock(&global_rsv->lock);
6328 		}
6329 		spin_unlock(&space_info->lock);
6330 	}
6331 
6332 	if (cache)
6333 		btrfs_put_block_group(cache);
6334 	return 0;
6335 }
6336 
6337 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
6338 			       struct btrfs_root *root)
6339 {
6340 	struct btrfs_fs_info *fs_info = root->fs_info;
6341 	struct btrfs_block_group_cache *block_group, *tmp;
6342 	struct list_head *deleted_bgs;
6343 	struct extent_io_tree *unpin;
6344 	u64 start;
6345 	u64 end;
6346 	int ret;
6347 
6348 	if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6349 		unpin = &fs_info->freed_extents[1];
6350 	else
6351 		unpin = &fs_info->freed_extents[0];
6352 
6353 	while (!trans->aborted) {
6354 		mutex_lock(&fs_info->unused_bg_unpin_mutex);
6355 		ret = find_first_extent_bit(unpin, 0, &start, &end,
6356 					    EXTENT_DIRTY, NULL);
6357 		if (ret) {
6358 			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6359 			break;
6360 		}
6361 
6362 		if (btrfs_test_opt(root, DISCARD))
6363 			ret = btrfs_discard_extent(root, start,
6364 						   end + 1 - start, NULL);
6365 
6366 		clear_extent_dirty(unpin, start, end, GFP_NOFS);
6367 		unpin_extent_range(root, start, end, true);
6368 		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6369 		cond_resched();
6370 	}
6371 
6372 	/*
6373 	 * Transaction is finished.  We don't need the lock anymore.  We
6374 	 * do need to clean up the block groups in case of a transaction
6375 	 * abort.
6376 	 */
6377 	deleted_bgs = &trans->transaction->deleted_bgs;
6378 	list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
6379 		u64 trimmed = 0;
6380 
6381 		ret = -EROFS;
6382 		if (!trans->aborted)
6383 			ret = btrfs_discard_extent(root,
6384 						   block_group->key.objectid,
6385 						   block_group->key.offset,
6386 						   &trimmed);
6387 
6388 		list_del_init(&block_group->bg_list);
6389 		btrfs_put_block_group_trimming(block_group);
6390 		btrfs_put_block_group(block_group);
6391 
6392 		if (ret) {
6393 			const char *errstr = btrfs_decode_error(ret);
6394 			btrfs_warn(fs_info,
6395 				   "Discard failed while removing blockgroup: errno=%d %s\n",
6396 				   ret, errstr);
6397 		}
6398 	}
6399 
6400 	return 0;
6401 }
6402 
6403 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
6404 			     u64 owner, u64 root_objectid)
6405 {
6406 	struct btrfs_space_info *space_info;
6407 	u64 flags;
6408 
6409 	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6410 		if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
6411 			flags = BTRFS_BLOCK_GROUP_SYSTEM;
6412 		else
6413 			flags = BTRFS_BLOCK_GROUP_METADATA;
6414 	} else {
6415 		flags = BTRFS_BLOCK_GROUP_DATA;
6416 	}
6417 
6418 	space_info = __find_space_info(fs_info, flags);
6419 	BUG_ON(!space_info); /* Logic bug */
6420 	percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
6421 }
6422 
6423 
6424 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6425 				struct btrfs_root *root,
6426 				struct btrfs_delayed_ref_node *node, u64 parent,
6427 				u64 root_objectid, u64 owner_objectid,
6428 				u64 owner_offset, int refs_to_drop,
6429 				struct btrfs_delayed_extent_op *extent_op)
6430 {
6431 	struct btrfs_key key;
6432 	struct btrfs_path *path;
6433 	struct btrfs_fs_info *info = root->fs_info;
6434 	struct btrfs_root *extent_root = info->extent_root;
6435 	struct extent_buffer *leaf;
6436 	struct btrfs_extent_item *ei;
6437 	struct btrfs_extent_inline_ref *iref;
6438 	int ret;
6439 	int is_data;
6440 	int extent_slot = 0;
6441 	int found_extent = 0;
6442 	int num_to_del = 1;
6443 	u32 item_size;
6444 	u64 refs;
6445 	u64 bytenr = node->bytenr;
6446 	u64 num_bytes = node->num_bytes;
6447 	int last_ref = 0;
6448 	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6449 						 SKINNY_METADATA);
6450 
6451 	path = btrfs_alloc_path();
6452 	if (!path)
6453 		return -ENOMEM;
6454 
6455 	path->reada = 1;
6456 	path->leave_spinning = 1;
6457 
6458 	is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
6459 	BUG_ON(!is_data && refs_to_drop != 1);
6460 
6461 	if (is_data)
6462 		skinny_metadata = 0;
6463 
6464 	ret = lookup_extent_backref(trans, extent_root, path, &iref,
6465 				    bytenr, num_bytes, parent,
6466 				    root_objectid, owner_objectid,
6467 				    owner_offset);
6468 	if (ret == 0) {
6469 		extent_slot = path->slots[0];
6470 		while (extent_slot >= 0) {
6471 			btrfs_item_key_to_cpu(path->nodes[0], &key,
6472 					      extent_slot);
6473 			if (key.objectid != bytenr)
6474 				break;
6475 			if (key.type == BTRFS_EXTENT_ITEM_KEY &&
6476 			    key.offset == num_bytes) {
6477 				found_extent = 1;
6478 				break;
6479 			}
6480 			if (key.type == BTRFS_METADATA_ITEM_KEY &&
6481 			    key.offset == owner_objectid) {
6482 				found_extent = 1;
6483 				break;
6484 			}
6485 			if (path->slots[0] - extent_slot > 5)
6486 				break;
6487 			extent_slot--;
6488 		}
6489 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6490 		item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
6491 		if (found_extent && item_size < sizeof(*ei))
6492 			found_extent = 0;
6493 #endif
6494 		if (!found_extent) {
6495 			BUG_ON(iref);
6496 			ret = remove_extent_backref(trans, extent_root, path,
6497 						    NULL, refs_to_drop,
6498 						    is_data, &last_ref);
6499 			if (ret) {
6500 				btrfs_abort_transaction(trans, extent_root, ret);
6501 				goto out;
6502 			}
6503 			btrfs_release_path(path);
6504 			path->leave_spinning = 1;
6505 
6506 			key.objectid = bytenr;
6507 			key.type = BTRFS_EXTENT_ITEM_KEY;
6508 			key.offset = num_bytes;
6509 
6510 			if (!is_data && skinny_metadata) {
6511 				key.type = BTRFS_METADATA_ITEM_KEY;
6512 				key.offset = owner_objectid;
6513 			}
6514 
6515 			ret = btrfs_search_slot(trans, extent_root,
6516 						&key, path, -1, 1);
6517 			if (ret > 0 && skinny_metadata && path->slots[0]) {
6518 				/*
6519 				 * Couldn't find our skinny metadata item,
6520 				 * see if we have ye olde extent item.
6521 				 */
6522 				path->slots[0]--;
6523 				btrfs_item_key_to_cpu(path->nodes[0], &key,
6524 						      path->slots[0]);
6525 				if (key.objectid == bytenr &&
6526 				    key.type == BTRFS_EXTENT_ITEM_KEY &&
6527 				    key.offset == num_bytes)
6528 					ret = 0;
6529 			}
6530 
6531 			if (ret > 0 && skinny_metadata) {
6532 				skinny_metadata = false;
6533 				key.objectid = bytenr;
6534 				key.type = BTRFS_EXTENT_ITEM_KEY;
6535 				key.offset = num_bytes;
6536 				btrfs_release_path(path);
6537 				ret = btrfs_search_slot(trans, extent_root,
6538 							&key, path, -1, 1);
6539 			}
6540 
6541 			if (ret) {
6542 				btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6543 					ret, bytenr);
6544 				if (ret > 0)
6545 					btrfs_print_leaf(extent_root,
6546 							 path->nodes[0]);
6547 			}
6548 			if (ret < 0) {
6549 				btrfs_abort_transaction(trans, extent_root, ret);
6550 				goto out;
6551 			}
6552 			extent_slot = path->slots[0];
6553 		}
6554 	} else if (WARN_ON(ret == -ENOENT)) {
6555 		btrfs_print_leaf(extent_root, path->nodes[0]);
6556 		btrfs_err(info,
6557 			"unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
6558 			bytenr, parent, root_objectid, owner_objectid,
6559 			owner_offset);
6560 		btrfs_abort_transaction(trans, extent_root, ret);
6561 		goto out;
6562 	} else {
6563 		btrfs_abort_transaction(trans, extent_root, ret);
6564 		goto out;
6565 	}
6566 
6567 	leaf = path->nodes[0];
6568 	item_size = btrfs_item_size_nr(leaf, extent_slot);
6569 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6570 	if (item_size < sizeof(*ei)) {
6571 		BUG_ON(found_extent || extent_slot != path->slots[0]);
6572 		ret = convert_extent_item_v0(trans, extent_root, path,
6573 					     owner_objectid, 0);
6574 		if (ret < 0) {
6575 			btrfs_abort_transaction(trans, extent_root, ret);
6576 			goto out;
6577 		}
6578 
6579 		btrfs_release_path(path);
6580 		path->leave_spinning = 1;
6581 
6582 		key.objectid = bytenr;
6583 		key.type = BTRFS_EXTENT_ITEM_KEY;
6584 		key.offset = num_bytes;
6585 
6586 		ret = btrfs_search_slot(trans, extent_root, &key, path,
6587 					-1, 1);
6588 		if (ret) {
6589 			btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6590 				ret, bytenr);
6591 			btrfs_print_leaf(extent_root, path->nodes[0]);
6592 		}
6593 		if (ret < 0) {
6594 			btrfs_abort_transaction(trans, extent_root, ret);
6595 			goto out;
6596 		}
6597 
6598 		extent_slot = path->slots[0];
6599 		leaf = path->nodes[0];
6600 		item_size = btrfs_item_size_nr(leaf, extent_slot);
6601 	}
6602 #endif
6603 	BUG_ON(item_size < sizeof(*ei));
6604 	ei = btrfs_item_ptr(leaf, extent_slot,
6605 			    struct btrfs_extent_item);
6606 	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6607 	    key.type == BTRFS_EXTENT_ITEM_KEY) {
6608 		struct btrfs_tree_block_info *bi;
6609 		BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6610 		bi = (struct btrfs_tree_block_info *)(ei + 1);
6611 		WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6612 	}
6613 
6614 	refs = btrfs_extent_refs(leaf, ei);
6615 	if (refs < refs_to_drop) {
6616 		btrfs_err(info, "trying to drop %d refs but we only have %Lu "
6617 			  "for bytenr %Lu", refs_to_drop, refs, bytenr);
6618 		ret = -EINVAL;
6619 		btrfs_abort_transaction(trans, extent_root, ret);
6620 		goto out;
6621 	}
6622 	refs -= refs_to_drop;
6623 
6624 	if (refs > 0) {
6625 		if (extent_op)
6626 			__run_delayed_extent_op(extent_op, leaf, ei);
6627 		/*
6628 		 * In the case of an inline back ref, the reference count
6629 		 * will be updated by remove_extent_backref
6630 		 */
6631 		if (iref) {
6632 			BUG_ON(!found_extent);
6633 		} else {
6634 			btrfs_set_extent_refs(leaf, ei, refs);
6635 			btrfs_mark_buffer_dirty(leaf);
6636 		}
6637 		if (found_extent) {
6638 			ret = remove_extent_backref(trans, extent_root, path,
6639 						    iref, refs_to_drop,
6640 						    is_data, &last_ref);
6641 			if (ret) {
6642 				btrfs_abort_transaction(trans, extent_root, ret);
6643 				goto out;
6644 			}
6645 		}
6646 		add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6647 				 root_objectid);
6648 	} else {
6649 		if (found_extent) {
6650 			BUG_ON(is_data && refs_to_drop !=
6651 			       extent_data_ref_count(path, iref));
6652 			if (iref) {
6653 				BUG_ON(path->slots[0] != extent_slot);
6654 			} else {
6655 				BUG_ON(path->slots[0] != extent_slot + 1);
6656 				path->slots[0] = extent_slot;
6657 				num_to_del = 2;
6658 			}
6659 		}
6660 
6661 		last_ref = 1;
6662 		ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6663 				      num_to_del);
6664 		if (ret) {
6665 			btrfs_abort_transaction(trans, extent_root, ret);
6666 			goto out;
6667 		}
6668 		btrfs_release_path(path);
6669 
6670 		if (is_data) {
6671 			ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6672 			if (ret) {
6673 				btrfs_abort_transaction(trans, extent_root, ret);
6674 				goto out;
6675 			}
6676 		}
6677 
6678 		ret = update_block_group(trans, root, bytenr, num_bytes, 0);
6679 		if (ret) {
6680 			btrfs_abort_transaction(trans, extent_root, ret);
6681 			goto out;
6682 		}
6683 	}
6684 	btrfs_release_path(path);
6685 
6686 out:
6687 	btrfs_free_path(path);
6688 	return ret;
6689 }
6690 
6691 /*
6692  * when we free a block, it is possible (and likely) that we free the last
6693  * delayed ref for that extent as well.  This searches the delayed ref tree for
6694  * a given extent, and if there are no other delayed refs to be processed, it
6695  * removes it from the tree.
6696  */
6697 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6698 				      struct btrfs_root *root, u64 bytenr)
6699 {
6700 	struct btrfs_delayed_ref_head *head;
6701 	struct btrfs_delayed_ref_root *delayed_refs;
6702 	int ret = 0;
6703 
6704 	delayed_refs = &trans->transaction->delayed_refs;
6705 	spin_lock(&delayed_refs->lock);
6706 	head = btrfs_find_delayed_ref_head(trans, bytenr);
6707 	if (!head)
6708 		goto out_delayed_unlock;
6709 
6710 	spin_lock(&head->lock);
6711 	if (!list_empty(&head->ref_list))
6712 		goto out;
6713 
6714 	if (head->extent_op) {
6715 		if (!head->must_insert_reserved)
6716 			goto out;
6717 		btrfs_free_delayed_extent_op(head->extent_op);
6718 		head->extent_op = NULL;
6719 	}
6720 
6721 	/*
6722 	 * waiting for the lock here would deadlock.  If someone else has it
6723 	 * locked they are already in the process of dropping it anyway
6724 	 */
6725 	if (!mutex_trylock(&head->mutex))
6726 		goto out;
6727 
6728 	/*
6729 	 * at this point we have a head with no other entries.  Go
6730 	 * ahead and process it.
6731 	 */
6732 	head->node.in_tree = 0;
6733 	rb_erase(&head->href_node, &delayed_refs->href_root);
6734 
6735 	atomic_dec(&delayed_refs->num_entries);
6736 
6737 	/*
6738 	 * we don't take a ref on the node because we're removing it from the
6739 	 * tree, so we just steal the ref the tree was holding.
6740 	 */
6741 	delayed_refs->num_heads--;
6742 	if (head->processing == 0)
6743 		delayed_refs->num_heads_ready--;
6744 	head->processing = 0;
6745 	spin_unlock(&head->lock);
6746 	spin_unlock(&delayed_refs->lock);
6747 
6748 	BUG_ON(head->extent_op);
6749 	if (head->must_insert_reserved)
6750 		ret = 1;
6751 
6752 	mutex_unlock(&head->mutex);
6753 	btrfs_put_delayed_ref(&head->node);
6754 	return ret;
6755 out:
6756 	spin_unlock(&head->lock);
6757 
6758 out_delayed_unlock:
6759 	spin_unlock(&delayed_refs->lock);
6760 	return 0;
6761 }
6762 
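/*
 * Free a tree block.  For non-log trees this queues a delayed ref drop.
 * If this was the last reference and the block was COWed in the current
 * transaction and never written to disk, its space is handed straight
 * back to the free space cache instead of staying pinned until the
 * transaction commits.
 */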
6763 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6764 			   struct btrfs_root *root,
6765 			   struct extent_buffer *buf,
6766 			   u64 parent, int last_ref)
6767 {
6768 	int pin = 1;
6769 	int ret;
6770 
6771 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6772 		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6773 					buf->start, buf->len,
6774 					parent, root->root_key.objectid,
6775 					btrfs_header_level(buf),
6776 					BTRFS_DROP_DELAYED_REF, NULL);
6777 		BUG_ON(ret); /* -ENOMEM */
6778 	}
6779 
6780 	if (!last_ref)
6781 		return;
6782 
6783 	if (btrfs_header_generation(buf) == trans->transid) {
6784 		struct btrfs_block_group_cache *cache;
6785 
6786 		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6787 			ret = check_ref_cleanup(trans, root, buf->start);
6788 			if (!ret)
6789 				goto out;
6790 		}
6791 
6792 		cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6793 
6794 		if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6795 			pin_down_extent(root, cache, buf->start, buf->len, 1);
6796 			btrfs_put_block_group(cache);
6797 			goto out;
6798 		}
6799 
6800 		WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6801 
6802 		btrfs_add_free_space(cache, buf->start, buf->len);
6803 		btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6804 		btrfs_put_block_group(cache);
6805 		trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6806 		pin = 0;
6807 	}
6808 out:
6809 	if (pin)
6810 		add_pinned_bytes(root->fs_info, buf->len,
6811 				 btrfs_header_level(buf),
6812 				 root->root_key.objectid);
6813 
6814 	/*
6815 	 * Deleting the buffer, clear the corrupt flag since it doesn't matter
6816 	 * anymore.
6817 	 */
6818 	clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6819 }
6820 
6821 /* Can return -ENOMEM */
6822 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6823 		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6824 		      u64 owner, u64 offset)
6825 {
6826 	int ret;
6827 	struct btrfs_fs_info *fs_info = root->fs_info;
6828 
6829 	if (btrfs_test_is_dummy_root(root))
6830 		return 0;
6831 
6832 	add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6833 
6834 	/*
6835 	 * tree log blocks never actually go into the extent allocation
6836 	 * tree, just update pinning info and exit early.
6837 	 */
6838 	if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6839 		WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6840 		/* unlocks the pinned mutex */
6841 		btrfs_pin_extent(root, bytenr, num_bytes, 1);
6842 		ret = 0;
6843 	} else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6844 		ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6845 					num_bytes,
6846 					parent, root_objectid, (int)owner,
6847 					BTRFS_DROP_DELAYED_REF, NULL);
6848 	} else {
6849 		ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6850 						num_bytes,
6851 						parent, root_objectid, owner,
6852 						offset, 0,
6853 						BTRFS_DROP_DELAYED_REF, NULL);
6854 	}
6855 	return ret;
6856 }
6857 
6858 /*
6859  * when we wait for progress in the block group caching, it's because
6860  * our allocation attempt failed at least once.  So, we must sleep
6861  * and let some progress happen before we try again.
6862  *
6863  * This function will sleep at least once waiting for new free space to
6864  * show up, and then it will check the block group free space numbers
6865  * for our min num_bytes.  Another option is to have it go ahead
6866  * and look in the rbtree for a free extent of a given size, but this
6867  * is a good start.
6868  *
6869  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6870  * any of the information in this block group.
6871  */
6872 static noinline void
6873 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6874 				u64 num_bytes)
6875 {
6876 	struct btrfs_caching_control *caching_ctl;
6877 
6878 	caching_ctl = get_caching_control(cache);
6879 	if (!caching_ctl)
6880 		return;
6881 
6882 	wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6883 		   (cache->free_space_ctl->free_space >= num_bytes));
6884 
6885 	put_caching_control(caching_ctl);
6886 }
6887 
6888 static noinline int
6889 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6890 {
6891 	struct btrfs_caching_control *caching_ctl;
6892 	int ret = 0;
6893 
6894 	caching_ctl = get_caching_control(cache);
6895 	if (!caching_ctl)
6896 		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6897 
6898 	wait_event(caching_ctl->wait, block_group_cache_done(cache));
6899 	if (cache->cached == BTRFS_CACHE_ERROR)
6900 		ret = -EIO;
6901 	put_caching_control(caching_ctl);
6902 	return ret;
6903 }
6904 
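/*
 * Map block group flags to a BTRFS_RAID_* index, used to pick the right
 * list in space_info->block_groups.  E.g. flags with
 * BTRFS_BLOCK_GROUP_RAID1 set map to BTRFS_RAID_RAID1; flags with no
 * RAID bits fall through to BTRFS_RAID_SINGLE.
 */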
6905 int __get_raid_index(u64 flags)
6906 {
6907 	if (flags & BTRFS_BLOCK_GROUP_RAID10)
6908 		return BTRFS_RAID_RAID10;
6909 	else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6910 		return BTRFS_RAID_RAID1;
6911 	else if (flags & BTRFS_BLOCK_GROUP_DUP)
6912 		return BTRFS_RAID_DUP;
6913 	else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6914 		return BTRFS_RAID_RAID0;
6915 	else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6916 		return BTRFS_RAID_RAID5;
6917 	else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6918 		return BTRFS_RAID_RAID6;
6919 
6920 	return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6921 }
6922 
6923 int get_block_group_index(struct btrfs_block_group_cache *cache)
6924 {
6925 	return __get_raid_index(cache->flags);
6926 }
6927 
6928 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6929 	[BTRFS_RAID_RAID10]	= "raid10",
6930 	[BTRFS_RAID_RAID1]	= "raid1",
6931 	[BTRFS_RAID_DUP]	= "dup",
6932 	[BTRFS_RAID_RAID0]	= "raid0",
6933 	[BTRFS_RAID_SINGLE]	= "single",
6934 	[BTRFS_RAID_RAID5]	= "raid5",
6935 	[BTRFS_RAID_RAID6]	= "raid6",
6936 };
6937 
6938 static const char *get_raid_name(enum btrfs_raid_types type)
6939 {
6940 	if (type >= BTRFS_NR_RAID_TYPES)
6941 		return NULL;
6942 
6943 	return btrfs_raid_type_names[type];
6944 }
6945 
6946 enum btrfs_loop_type {
6947 	LOOP_CACHING_NOWAIT = 0,
6948 	LOOP_CACHING_WAIT = 1,
6949 	LOOP_ALLOC_CHUNK = 2,
6950 	LOOP_NO_EMPTY_SIZE = 3,
6951 };
6952 
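/*
 * The helpers below take the block group's data_rwsem in read mode when
 * the allocation is for delalloc, so the reservation cannot race with
 * anyone holding the semaphore for writing.  btrfs_grab_block_group()
 * additionally takes a reference on the block group itself.
 */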
6953 static inline void
6954 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
6955 		       int delalloc)
6956 {
6957 	if (delalloc)
6958 		down_read(&cache->data_rwsem);
6959 }
6960 
6961 static inline void
6962 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
6963 		       int delalloc)
6964 {
6965 	btrfs_get_block_group(cache);
6966 	if (delalloc)
6967 		down_read(&cache->data_rwsem);
6968 }
6969 
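/*
 * Look up the block group currently backing @cluster, returning it with
 * cluster->refill_lock held.  If it differs from @block_group, a
 * reference is taken and, for delalloc allocations, its data_rwsem is
 * read-locked.  Since we can't block on the rwsem while holding the
 * refill_lock, we drop the spinlock, take the rwsem, and retry; if the
 * cluster switched block groups in the meantime, we undo and start over.
 */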
6970 static struct btrfs_block_group_cache *
6971 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
6972 		   struct btrfs_free_cluster *cluster,
6973 		   int delalloc)
6974 {
6975 	struct btrfs_block_group_cache *used_bg;
6976 	bool locked = false;
6977 again:
6978 	spin_lock(&cluster->refill_lock);
6979 	if (locked) {
6980 		if (used_bg == cluster->block_group)
6981 			return used_bg;
6982 
6983 		up_read(&used_bg->data_rwsem);
6984 		btrfs_put_block_group(used_bg);
6985 	}
6986 
6987 	used_bg = cluster->block_group;
6988 	if (!used_bg)
6989 		return NULL;
6990 
6991 	if (used_bg == block_group)
6992 		return used_bg;
6993 
6994 	btrfs_get_block_group(used_bg);
6995 
6996 	if (!delalloc)
6997 		return used_bg;
6998 
6999 	if (down_read_trylock(&used_bg->data_rwsem))
7000 		return used_bg;
7001 
7002 	spin_unlock(&cluster->refill_lock);
7003 	down_read(&used_bg->data_rwsem);
7004 	locked = true;
7005 	goto again;
7006 }
7007 
7008 static inline void
7009 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
7010 			 int delalloc)
7011 {
7012 	if (delalloc)
7013 		up_read(&cache->data_rwsem);
7014 	btrfs_put_block_group(cache);
7015 }
7016 
7017 /*
7018  * walks the btree of allocated extents and finds a hole of a given size.
7019  * The key ins is changed to record the hole:
7020  * ins->objectid == start position
7021  * ins->flags = BTRFS_EXTENT_ITEM_KEY
7022  * ins->offset == the size of the hole.
7023  * Any available blocks before search_start are skipped.
7024  *
7025  * If there is no suitable free space, we will record the max size of
7026  * the free space extent currently.
7027  */
7028 static noinline int find_free_extent(struct btrfs_root *orig_root,
7029 				     u64 num_bytes, u64 empty_size,
7030 				     u64 hint_byte, struct btrfs_key *ins,
7031 				     u64 flags, int delalloc)
7032 {
7033 	int ret = 0;
7034 	struct btrfs_root *root = orig_root->fs_info->extent_root;
7035 	struct btrfs_free_cluster *last_ptr = NULL;
7036 	struct btrfs_block_group_cache *block_group = NULL;
7037 	u64 search_start = 0;
7038 	u64 max_extent_size = 0;
7039 	u64 empty_cluster = 0;
7040 	struct btrfs_space_info *space_info;
7041 	int loop = 0;
7042 	int index = __get_raid_index(flags);
7043 	int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
7044 		RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
7045 	bool failed_cluster_refill = false;
7046 	bool failed_alloc = false;
7047 	bool use_cluster = true;
7048 	bool have_caching_bg = false;
7049 	bool orig_have_caching_bg = false;
7050 	bool full_search = false;
7051 
7052 	WARN_ON(num_bytes < root->sectorsize);
7053 	ins->type = BTRFS_EXTENT_ITEM_KEY;
7054 	ins->objectid = 0;
7055 	ins->offset = 0;
7056 
7057 	trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
7058 
7059 	space_info = __find_space_info(root->fs_info, flags);
7060 	if (!space_info) {
7061 		btrfs_err(root->fs_info, "No space info for %llu", flags);
7062 		return -ENOSPC;
7063 	}
7064 
7065 	/*
7066 	 * If our free space is heavily fragmented we may not be able to make
7067 	 * big contiguous allocations, so instead of doing the expensive search
7068 	 * for free space, simply return ENOSPC with our max_extent_size so we
7069 	 * can go ahead and search for a more manageable chunk.
7070 	 *
7071 	 * If our max_extent_size is large enough for our allocation simply
7072 	 * disable clustering since we will likely not be able to find enough
7073 	 * space to create a cluster and induce latency trying.
7074 	 */
7075 	if (unlikely(space_info->max_extent_size)) {
7076 		spin_lock(&space_info->lock);
7077 		if (space_info->max_extent_size &&
7078 		    num_bytes > space_info->max_extent_size) {
7079 			ins->offset = space_info->max_extent_size;
7080 			spin_unlock(&space_info->lock);
7081 			return -ENOSPC;
7082 		} else if (space_info->max_extent_size) {
7083 			use_cluster = false;
7084 		}
7085 		spin_unlock(&space_info->lock);
7086 	}
7087 
7088 	last_ptr = fetch_cluster_info(orig_root, space_info, &empty_cluster);
7089 	if (last_ptr) {
7090 		spin_lock(&last_ptr->lock);
7091 		if (last_ptr->block_group)
7092 			hint_byte = last_ptr->window_start;
7093 		if (last_ptr->fragmented) {
7094 			/*
7095 			 * We still set window_start so we can keep track of the
7096 			 * last place we found an allocation to try and save
7097 			 * some time.
7098 			 */
7099 			hint_byte = last_ptr->window_start;
7100 			use_cluster = false;
7101 		}
7102 		spin_unlock(&last_ptr->lock);
7103 	}
7104 
7105 	search_start = max(search_start, first_logical_byte(root, 0));
7106 	search_start = max(search_start, hint_byte);
7107 	if (search_start == hint_byte) {
7108 		block_group = btrfs_lookup_block_group(root->fs_info,
7109 						       search_start);
7110 		/*
7111 		 * we don't want to use the block group if it doesn't match our
7112 		 * allocation bits, or if its not cached.
7113 		 *
7114 		 * However if we are re-searching with an ideal block group
7115 		 * picked out then we don't care that the block group is cached.
7116 		 */
7117 		if (block_group && block_group_bits(block_group, flags) &&
7118 		    block_group->cached != BTRFS_CACHE_NO) {
7119 			down_read(&space_info->groups_sem);
7120 			if (list_empty(&block_group->list) ||
7121 			    block_group->ro) {
7122 				/*
7123 				 * someone is removing this block group,
7124 				 * we can't jump into the have_block_group
7125 				 * target because our list pointers are not
7126 				 * valid
7127 				 */
7128 				btrfs_put_block_group(block_group);
7129 				up_read(&space_info->groups_sem);
7130 			} else {
7131 				index = get_block_group_index(block_group);
7132 				btrfs_lock_block_group(block_group, delalloc);
7133 				goto have_block_group;
7134 			}
7135 		} else if (block_group) {
7136 			btrfs_put_block_group(block_group);
7137 		}
7138 	}
7139 search:
7140 	have_caching_bg = false;
7141 	if (index == 0 || index == __get_raid_index(flags))
7142 		full_search = true;
7143 	down_read(&space_info->groups_sem);
7144 	list_for_each_entry(block_group, &space_info->block_groups[index],
7145 			    list) {
7146 		u64 offset;
7147 		int cached;
7148 
7149 		btrfs_grab_block_group(block_group, delalloc);
7150 		search_start = block_group->key.objectid;
7151 
7152 		/*
7153 		 * this can happen if we end up cycling through all the
7154 		 * raid types, but we want to make sure we only allocate
7155 		 * for the proper type.
7156 		 */
7157 		if (!block_group_bits(block_group, flags)) {
7158 			u64 extra = BTRFS_BLOCK_GROUP_DUP |
7159 				BTRFS_BLOCK_GROUP_RAID1 |
7160 				BTRFS_BLOCK_GROUP_RAID5 |
7161 				BTRFS_BLOCK_GROUP_RAID6 |
7162 				BTRFS_BLOCK_GROUP_RAID10;
7163 
7164 			/*
7165 			 * if they asked for extra copies and this block group
7166 			 * doesn't provide them, bail.  This does allow us to
7167 			 * fill raid0 from raid1.
7168 			 */
7169 			if ((flags & extra) && !(block_group->flags & extra))
7170 				goto loop;
7171 
7172 			/*
7173 			 * This block group has different flags than we want.
7174 			 * It's possible that we have MIXED_GROUP flag but no
7175 			 * block group is mixed.  Just skip such block group.
7176 			 */
7177 			btrfs_release_block_group(block_group, delalloc);
7178 			continue;
7179 		}
7180 
7181 have_block_group:
7182 		cached = block_group_cache_done(block_group);
7183 		if (unlikely(!cached)) {
7184 			have_caching_bg = true;
7185 			ret = cache_block_group(block_group, 0);
7186 			BUG_ON(ret < 0);
7187 			ret = 0;
7188 		}
7189 
7190 		if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
7191 			goto loop;
7192 		if (unlikely(block_group->ro))
7193 			goto loop;
7194 
7195 		/*
7196 		 * Ok we want to try and use the cluster allocator, so
7197 		 * let's look there
7198 		 */
7199 		if (last_ptr && use_cluster) {
7200 			struct btrfs_block_group_cache *used_block_group;
7201 			unsigned long aligned_cluster;
7202 			/*
7203 			 * the refill lock keeps out other
7204 			 * people trying to start a new cluster
7205 			 */
7206 			used_block_group = btrfs_lock_cluster(block_group,
7207 							      last_ptr,
7208 							      delalloc);
7209 			if (!used_block_group)
7210 				goto refill_cluster;
7211 
7212 			if (used_block_group != block_group &&
7213 			    (used_block_group->ro ||
7214 			     !block_group_bits(used_block_group, flags)))
7215 				goto release_cluster;
7216 
7217 			offset = btrfs_alloc_from_cluster(used_block_group,
7218 						last_ptr,
7219 						num_bytes,
7220 						used_block_group->key.objectid,
7221 						&max_extent_size);
7222 			if (offset) {
7223 				/* we have a block, we're done */
7224 				spin_unlock(&last_ptr->refill_lock);
7225 				trace_btrfs_reserve_extent_cluster(root,
7226 						used_block_group,
7227 						search_start, num_bytes);
7228 				if (used_block_group != block_group) {
7229 					btrfs_release_block_group(block_group,
7230 								  delalloc);
7231 					block_group = used_block_group;
7232 				}
7233 				goto checks;
7234 			}
7235 
7236 			WARN_ON(last_ptr->block_group != used_block_group);
7237 release_cluster:
7238 			/* If we are on LOOP_NO_EMPTY_SIZE, we can't
7239 			 * set up a new cluster, so let's just skip it
7240 			 * and let the allocator find whatever block
7241 			 * it can find.  If we reach this point, we
7242 			 * will have tried the cluster allocator
7243 			 * plenty of times and not have found
7244 			 * anything, so we are likely way too
7245 			 * fragmented for the clustering stuff to find
7246 			 * anything.
7247 			 *
7248 			 * However, if the cluster is taken from the
7249 			 * current block group, release the cluster
7250 			 * first, so that we stand a better chance of
7251 			 * succeeding in the unclustered
7252 			 * allocation.  */
7253 			if (loop >= LOOP_NO_EMPTY_SIZE &&
7254 			    used_block_group != block_group) {
7255 				spin_unlock(&last_ptr->refill_lock);
7256 				btrfs_release_block_group(used_block_group,
7257 							  delalloc);
7258 				goto unclustered_alloc;
7259 			}
7260 
7261 			/*
7262 			 * this cluster didn't work out, free it and
7263 			 * start over
7264 			 */
7265 			btrfs_return_cluster_to_free_space(NULL, last_ptr);
7266 
7267 			if (used_block_group != block_group)
7268 				btrfs_release_block_group(used_block_group,
7269 							  delalloc);
7270 refill_cluster:
7271 			if (loop >= LOOP_NO_EMPTY_SIZE) {
7272 				spin_unlock(&last_ptr->refill_lock);
7273 				goto unclustered_alloc;
7274 			}
7275 
7276 			aligned_cluster = max_t(unsigned long,
7277 						empty_cluster + empty_size,
7278 					      block_group->full_stripe_len);
7279 
7280 			/* allocate a cluster in this block group */
7281 			ret = btrfs_find_space_cluster(root, block_group,
7282 						       last_ptr, search_start,
7283 						       num_bytes,
7284 						       aligned_cluster);
7285 			if (ret == 0) {
7286 				/*
7287 				 * now pull our allocation out of this
7288 				 * cluster
7289 				 */
7290 				offset = btrfs_alloc_from_cluster(block_group,
7291 							last_ptr,
7292 							num_bytes,
7293 							search_start,
7294 							&max_extent_size);
7295 				if (offset) {
7296 					/* we found one, proceed */
7297 					spin_unlock(&last_ptr->refill_lock);
7298 					trace_btrfs_reserve_extent_cluster(root,
7299 						block_group, search_start,
7300 						num_bytes);
7301 					goto checks;
7302 				}
7303 			} else if (!cached && loop > LOOP_CACHING_NOWAIT
7304 				   && !failed_cluster_refill) {
7305 				spin_unlock(&last_ptr->refill_lock);
7306 
7307 				failed_cluster_refill = true;
7308 				wait_block_group_cache_progress(block_group,
7309 				       num_bytes + empty_cluster + empty_size);
7310 				goto have_block_group;
7311 			}
7312 
7313 			/*
7314 			 * at this point we either didn't find a cluster
7315 			 * or we weren't able to allocate a block from our
7316 			 * cluster.  Free the cluster we've been trying
7317 			 * to use, and go to the next block group
7318 			 */
7319 			btrfs_return_cluster_to_free_space(NULL, last_ptr);
7320 			spin_unlock(&last_ptr->refill_lock);
7321 			goto loop;
7322 		}
7323 
7324 unclustered_alloc:
7325 		/*
7326 		 * We are doing an unclustered alloc, set the fragmented flag so
7327 		 * we don't bother trying to setup a cluster again until we get
7328 		 * more space.
7329 		 */
7330 		if (unlikely(last_ptr)) {
7331 			spin_lock(&last_ptr->lock);
7332 			last_ptr->fragmented = 1;
7333 			spin_unlock(&last_ptr->lock);
7334 		}
7335 		spin_lock(&block_group->free_space_ctl->tree_lock);
7336 		if (cached &&
7337 		    block_group->free_space_ctl->free_space <
7338 		    num_bytes + empty_cluster + empty_size) {
7339 			if (block_group->free_space_ctl->free_space >
7340 			    max_extent_size)
7341 				max_extent_size =
7342 					block_group->free_space_ctl->free_space;
7343 			spin_unlock(&block_group->free_space_ctl->tree_lock);
7344 			goto loop;
7345 		}
7346 		spin_unlock(&block_group->free_space_ctl->tree_lock);
7347 
7348 		offset = btrfs_find_space_for_alloc(block_group, search_start,
7349 						    num_bytes, empty_size,
7350 						    &max_extent_size);
7351 		/*
7352 		 * If we didn't find a chunk, and we haven't failed on this
7353 		 * block group before, and this block group is in the middle of
7354 		 * caching and we are ok with waiting, then go ahead and wait
7355 		 * for progress to be made, and set failed_alloc to true.
7356 		 *
7357 		 * If failed_alloc is true then we've already waited on this
7358 		 * block group once and should move on to the next block group.
7359 		 */
7360 		if (!offset && !failed_alloc && !cached &&
7361 		    loop > LOOP_CACHING_NOWAIT) {
7362 			wait_block_group_cache_progress(block_group,
7363 						num_bytes + empty_size);
7364 			failed_alloc = true;
7365 			goto have_block_group;
7366 		} else if (!offset) {
7367 			goto loop;
7368 		}
7369 checks:
7370 		search_start = ALIGN(offset, root->stripesize);
7371 
7372 		/* move on to the next group */
7373 		if (search_start + num_bytes >
7374 		    block_group->key.objectid + block_group->key.offset) {
7375 			btrfs_add_free_space(block_group, offset, num_bytes);
7376 			goto loop;
7377 		}
7378 
7379 		if (offset < search_start)
7380 			btrfs_add_free_space(block_group, offset,
7381 					     search_start - offset);
7382 		BUG_ON(offset > search_start);
7383 
7384 		ret = btrfs_update_reserved_bytes(block_group, num_bytes,
7385 						  alloc_type, delalloc);
7386 		if (ret == -EAGAIN) {
7387 			btrfs_add_free_space(block_group, offset, num_bytes);
7388 			goto loop;
7389 		}
7390 
7391 		/* we are all good, let's return */
7392 		ins->objectid = search_start;
7393 		ins->offset = num_bytes;
7394 
7395 		trace_btrfs_reserve_extent(orig_root, block_group,
7396 					   search_start, num_bytes);
7397 		btrfs_release_block_group(block_group, delalloc);
7398 		break;
7399 loop:
7400 		failed_cluster_refill = false;
7401 		failed_alloc = false;
7402 		BUG_ON(index != get_block_group_index(block_group));
7403 		btrfs_release_block_group(block_group, delalloc);
7404 	}
7405 	up_read(&space_info->groups_sem);
7406 
7407 	if ((loop == LOOP_CACHING_NOWAIT) && have_caching_bg
7408 		&& !orig_have_caching_bg)
7409 		orig_have_caching_bg = true;
7410 
7411 	if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
7412 		goto search;
7413 
7414 	if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
7415 		goto search;
7416 
7417 	/*
7418 	 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
7419 	 *			caching kthreads as we move along
7420 	 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
7421 	 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
7422 	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
7423 	 *			again
7424 	 */
7425 	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
7426 		index = 0;
7427 		if (loop == LOOP_CACHING_NOWAIT) {
7428 			/*
7429 			 * We want to skip the LOOP_CACHING_WAIT step if we
7430 			 * don't have any uncached bgs and we've already done a
7431 			 * full search through.
7432 			 */
7433 			if (orig_have_caching_bg || !full_search)
7434 				loop = LOOP_CACHING_WAIT;
7435 			else
7436 				loop = LOOP_ALLOC_CHUNK;
7437 		} else {
7438 			loop++;
7439 		}
7440 
7441 		if (loop == LOOP_ALLOC_CHUNK) {
7442 			struct btrfs_trans_handle *trans;
7443 			int exist = 0;
7444 
7445 			trans = current->journal_info;
7446 			if (trans)
7447 				exist = 1;
7448 			else
7449 				trans = btrfs_join_transaction(root);
7450 
7451 			if (IS_ERR(trans)) {
7452 				ret = PTR_ERR(trans);
7453 				goto out;
7454 			}
7455 
7456 			ret = do_chunk_alloc(trans, root, flags,
7457 					     CHUNK_ALLOC_FORCE);
7458 
7459 			/*
7460 			 * If we can't allocate a new chunk we've already looped
7461 			 * through at least once, move on to the NO_EMPTY_SIZE
7462 			 * case.
7463 			 */
7464 			if (ret == -ENOSPC)
7465 				loop = LOOP_NO_EMPTY_SIZE;
7466 
7467 			/*
7468 			 * Do not bail out on ENOSPC since we
7469 			 * can do more things.
7470 			 */
7471 			if (ret < 0 && ret != -ENOSPC)
7472 				btrfs_abort_transaction(trans,
7473 							root, ret);
7474 			else
7475 				ret = 0;
7476 			if (!exist)
7477 				btrfs_end_transaction(trans, root);
7478 			if (ret)
7479 				goto out;
7480 		}
7481 
7482 		if (loop == LOOP_NO_EMPTY_SIZE) {
7483 			/*
7484 			 * Don't loop again if we already have no empty_size and
7485 			 * no empty_cluster.
7486 			 */
7487 			if (empty_size == 0 &&
7488 			    empty_cluster == 0) {
7489 				ret = -ENOSPC;
7490 				goto out;
7491 			}
7492 			empty_size = 0;
7493 			empty_cluster = 0;
7494 		}
7495 
7496 		goto search;
7497 	} else if (!ins->objectid) {
7498 		ret = -ENOSPC;
7499 	} else if (ins->objectid) {
7500 		if (!use_cluster && last_ptr) {
7501 			spin_lock(&last_ptr->lock);
7502 			last_ptr->window_start = ins->objectid;
7503 			spin_unlock(&last_ptr->lock);
7504 		}
7505 		ret = 0;
7506 	}
7507 out:
7508 	if (ret == -ENOSPC) {
7509 		spin_lock(&space_info->lock);
7510 		space_info->max_extent_size = max_extent_size;
7511 		spin_unlock(&space_info->lock);
7512 		ins->offset = max_extent_size;
7513 	}
7514 	return ret;
7515 }
7516 
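/*
 * Dump the counters of a space_info and, optionally, every block group
 * it contains.  Used for ENOSPC debugging (the enospc_debug mount
 * option).
 */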
7517 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
7518 			    int dump_block_groups)
7519 {
7520 	struct btrfs_block_group_cache *cache;
7521 	int index = 0;
7522 
7523 	spin_lock(&info->lock);
7524 	printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
7525 	       info->flags,
7526 	       info->total_bytes - info->bytes_used - info->bytes_pinned -
7527 	       info->bytes_reserved - info->bytes_readonly,
7528 	       (info->full) ? "" : "not ");
7529 	printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
7530 	       "reserved=%llu, may_use=%llu, readonly=%llu\n",
7531 	       info->total_bytes, info->bytes_used, info->bytes_pinned,
7532 	       info->bytes_reserved, info->bytes_may_use,
7533 	       info->bytes_readonly);
7534 	spin_unlock(&info->lock);
7535 
7536 	if (!dump_block_groups)
7537 		return;
7538 
7539 	down_read(&info->groups_sem);
7540 again:
7541 	list_for_each_entry(cache, &info->block_groups[index], list) {
7542 		spin_lock(&cache->lock);
7543 		printk(KERN_INFO "BTRFS: "
7544 			   "block group %llu has %llu bytes, "
7545 			   "%llu used %llu pinned %llu reserved %s\n",
7546 		       cache->key.objectid, cache->key.offset,
7547 		       btrfs_block_group_used(&cache->item), cache->pinned,
7548 		       cache->reserved, cache->ro ? "[readonly]" : "");
7549 		btrfs_dump_free_space(cache, bytes);
7550 		spin_unlock(&cache->lock);
7551 	}
7552 	if (++index < BTRFS_NR_RAID_TYPES)
7553 		goto again;
7554 	up_read(&info->groups_sem);
7555 }
7556 
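/*
 * Try to reserve an extent of num_bytes.  On ENOSPC the request is
 * halved (clamped to the largest free chunk find_free_extent reported)
 * and retried, never going below min_alloc_size.  E.g. a failing 1MiB
 * request may be retried at 512K, 256K, ... until it either succeeds
 * or fails at min_alloc_size itself.
 */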
7557 int btrfs_reserve_extent(struct btrfs_root *root,
7558 			 u64 num_bytes, u64 min_alloc_size,
7559 			 u64 empty_size, u64 hint_byte,
7560 			 struct btrfs_key *ins, int is_data, int delalloc)
7561 {
7562 	bool final_tried = num_bytes == min_alloc_size;
7563 	u64 flags;
7564 	int ret;
7565 
7566 	flags = btrfs_get_alloc_profile(root, is_data);
7567 again:
7568 	WARN_ON(num_bytes < root->sectorsize);
7569 	ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
7570 			       flags, delalloc);
7571 
7572 	if (ret == -ENOSPC) {
7573 		if (!final_tried && ins->offset) {
7574 			num_bytes = min(num_bytes >> 1, ins->offset);
7575 			num_bytes = round_down(num_bytes, root->sectorsize);
7576 			num_bytes = max(num_bytes, min_alloc_size);
7577 			if (num_bytes == min_alloc_size)
7578 				final_tried = true;
7579 			goto again;
7580 		} else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7581 			struct btrfs_space_info *sinfo;
7582 
7583 			sinfo = __find_space_info(root->fs_info, flags);
7584 			btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
7585 				flags, num_bytes);
7586 			if (sinfo)
7587 				dump_space_info(sinfo, num_bytes, 1);
7588 		}
7589 	}
7590 
7591 	return ret;
7592 }
7593 
7594 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
7595 					u64 start, u64 len,
7596 					int pin, int delalloc)
7597 {
7598 	struct btrfs_block_group_cache *cache;
7599 	int ret = 0;
7600 
7601 	cache = btrfs_lookup_block_group(root->fs_info, start);
7602 	if (!cache) {
7603 		btrfs_err(root->fs_info, "Unable to find block group for %llu",
7604 			start);
7605 		return -ENOSPC;
7606 	}
7607 
7608 	if (pin)
7609 		pin_down_extent(root, cache, start, len, 1);
7610 	else {
7611 		if (btrfs_test_opt(root, DISCARD))
7612 			ret = btrfs_discard_extent(root, start, len, NULL);
7613 		btrfs_add_free_space(cache, start, len);
7614 		btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
7615 	}
7616 
7617 	btrfs_put_block_group(cache);
7618 
7619 	trace_btrfs_reserved_extent_free(root, start, len);
7620 
7621 	return ret;
7622 }
7623 
7624 int btrfs_free_reserved_extent(struct btrfs_root *root,
7625 			       u64 start, u64 len, int delalloc)
7626 {
7627 	return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
7628 }
7629 
7630 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
7631 				       u64 start, u64 len)
7632 {
7633 	return __btrfs_free_reserved_extent(root, start, len, 1, 0);
7634 }
7635 
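/*
 * Insert the extent item for a reserved data extent together with its
 * inline backref (a shared data ref when parent is set, otherwise a
 * keyed extent data ref) and account the bytes in the block group.
 */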
7636 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7637 				      struct btrfs_root *root,
7638 				      u64 parent, u64 root_objectid,
7639 				      u64 flags, u64 owner, u64 offset,
7640 				      struct btrfs_key *ins, int ref_mod)
7641 {
7642 	int ret;
7643 	struct btrfs_fs_info *fs_info = root->fs_info;
7644 	struct btrfs_extent_item *extent_item;
7645 	struct btrfs_extent_inline_ref *iref;
7646 	struct btrfs_path *path;
7647 	struct extent_buffer *leaf;
7648 	int type;
7649 	u32 size;
7650 
7651 	if (parent > 0)
7652 		type = BTRFS_SHARED_DATA_REF_KEY;
7653 	else
7654 		type = BTRFS_EXTENT_DATA_REF_KEY;
7655 
7656 	size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7657 
7658 	path = btrfs_alloc_path();
7659 	if (!path)
7660 		return -ENOMEM;
7661 
7662 	path->leave_spinning = 1;
7663 	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7664 				      ins, size);
7665 	if (ret) {
7666 		btrfs_free_path(path);
7667 		return ret;
7668 	}
7669 
7670 	leaf = path->nodes[0];
7671 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
7672 				     struct btrfs_extent_item);
7673 	btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7674 	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7675 	btrfs_set_extent_flags(leaf, extent_item,
7676 			       flags | BTRFS_EXTENT_FLAG_DATA);
7677 
7678 	iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7679 	btrfs_set_extent_inline_ref_type(leaf, iref, type);
7680 	if (parent > 0) {
7681 		struct btrfs_shared_data_ref *ref;
7682 		ref = (struct btrfs_shared_data_ref *)(iref + 1);
7683 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7684 		btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7685 	} else {
7686 		struct btrfs_extent_data_ref *ref;
7687 		ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7688 		btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7689 		btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7690 		btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7691 		btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7692 	}
7693 
7694 	btrfs_mark_buffer_dirty(path->nodes[0]);
7695 	btrfs_free_path(path);
7696 
7697 	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
7698 	if (ret) { /* -ENOENT, logic error */
7699 		btrfs_err(fs_info, "update block group failed for %llu %llu",
7700 			ins->objectid, ins->offset);
7701 		BUG();
7702 	}
7703 	trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
7704 	return ret;
7705 }
7706 
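/*
 * Tree block counterpart of alloc_reserved_file_extent().  With the
 * SKINNY_METADATA incompat flag set, the btrfs_tree_block_info (key and
 * level) is omitted from the item; otherwise it precedes the inline
 * ref.  On failure the reserved extent is pinned back down before
 * returning.
 */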
7707 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7708 				     struct btrfs_root *root,
7709 				     u64 parent, u64 root_objectid,
7710 				     u64 flags, struct btrfs_disk_key *key,
7711 				     int level, struct btrfs_key *ins)
7712 {
7713 	int ret;
7714 	struct btrfs_fs_info *fs_info = root->fs_info;
7715 	struct btrfs_extent_item *extent_item;
7716 	struct btrfs_tree_block_info *block_info;
7717 	struct btrfs_extent_inline_ref *iref;
7718 	struct btrfs_path *path;
7719 	struct extent_buffer *leaf;
7720 	u32 size = sizeof(*extent_item) + sizeof(*iref);
7721 	u64 num_bytes = ins->offset;
7722 	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7723 						 SKINNY_METADATA);
7724 
7725 	if (!skinny_metadata)
7726 		size += sizeof(*block_info);
7727 
7728 	path = btrfs_alloc_path();
7729 	if (!path) {
7730 		btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7731 						   root->nodesize);
7732 		return -ENOMEM;
7733 	}
7734 
7735 	path->leave_spinning = 1;
7736 	ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7737 				      ins, size);
7738 	if (ret) {
7739 		btrfs_free_path(path);
7740 		btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7741 						   root->nodesize);
7742 		return ret;
7743 	}
7744 
7745 	leaf = path->nodes[0];
7746 	extent_item = btrfs_item_ptr(leaf, path->slots[0],
7747 				     struct btrfs_extent_item);
7748 	btrfs_set_extent_refs(leaf, extent_item, 1);
7749 	btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7750 	btrfs_set_extent_flags(leaf, extent_item,
7751 			       flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
7752 
7753 	if (skinny_metadata) {
7754 		iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7755 		num_bytes = root->nodesize;
7756 	} else {
7757 		block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
7758 		btrfs_set_tree_block_key(leaf, block_info, key);
7759 		btrfs_set_tree_block_level(leaf, block_info, level);
7760 		iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7761 	}
7762 
7763 	if (parent > 0) {
7764 		BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
7765 		btrfs_set_extent_inline_ref_type(leaf, iref,
7766 						 BTRFS_SHARED_BLOCK_REF_KEY);
7767 		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7768 	} else {
7769 		btrfs_set_extent_inline_ref_type(leaf, iref,
7770 						 BTRFS_TREE_BLOCK_REF_KEY);
7771 		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
7772 	}
7773 
7774 	btrfs_mark_buffer_dirty(leaf);
7775 	btrfs_free_path(path);
7776 
7777 	ret = update_block_group(trans, root, ins->objectid, root->nodesize,
7778 				 1);
7779 	if (ret) { /* -ENOENT, logic error */
7780 		btrfs_err(fs_info, "update block group failed for %llu %llu",
7781 			ins->objectid, ins->offset);
7782 		BUG();
7783 	}
7784 
7785 	trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
7786 	return ret;
7787 }
7788 
7789 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7790 				     struct btrfs_root *root,
7791 				     u64 root_objectid, u64 owner,
7792 				     u64 offset, u64 ram_bytes,
7793 				     struct btrfs_key *ins)
7794 {
7795 	int ret;
7796 
7797 	BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7798 
7799 	ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7800 					 ins->offset, 0,
7801 					 root_objectid, owner, offset,
7802 					 ram_bytes, BTRFS_ADD_DELAYED_EXTENT,
7803 					 NULL);
7804 	return ret;
7805 }
7806 
7807 /*
7808  * this is used by the tree logging recovery code.  It records that
7809  * an extent has been allocated and makes sure to clear the free
7810  * space cache bits as well
7811  */
7812 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7813 				   struct btrfs_root *root,
7814 				   u64 root_objectid, u64 owner, u64 offset,
7815 				   struct btrfs_key *ins)
7816 {
7817 	int ret;
7818 	struct btrfs_block_group_cache *block_group;
7819 
7820 	/*
7821 	 * Mixed block groups will exclude before processing the log so we only
7822 	 * need to do the exclude dance if this fs isn't mixed.
7823 	 */
7824 	if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7825 		ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7826 		if (ret)
7827 			return ret;
7828 	}
7829 
7830 	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7831 	if (!block_group)
7832 		return -EINVAL;
7833 
7834 	ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7835 					  RESERVE_ALLOC_NO_ACCOUNT, 0);
7836 	BUG_ON(ret); /* logic error */
7837 	ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7838 					 0, owner, offset, ins, 1);
7839 	btrfs_put_block_group(block_group);
7840 	return ret;
7841 }
7842 
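/*
 * Initialize a freshly allocated tree block: set its generation, take
 * the tree lock, clear stale state, and mark the range dirty in either
 * the transaction's dirty_pages or, for log trees, dirty_log_pages
 * (the dirty/new bits distinguish the two in-flight log transactions).
 */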
7843 static struct extent_buffer *
7844 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7845 		      u64 bytenr, int level)
7846 {
7847 	struct extent_buffer *buf;
7848 
7849 	buf = btrfs_find_create_tree_block(root, bytenr);
7850 	if (!buf)
7851 		return ERR_PTR(-ENOMEM);
7852 
7853 	/*
7854 	 * Extra safety check in case the extent tree is corrupted and extent
7855 	 * allocator chooses to use a tree block which is already used and
7856 	 * locked.
7857 	 */
7858 	if (buf->lock_owner == current->pid) {
7859 		btrfs_err_rl(root->fs_info,
7860 "tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected",
7861 			buf->start, btrfs_header_owner(buf), current->pid);
7862 		free_extent_buffer(buf);
7863 		return ERR_PTR(-EUCLEAN);
7864 	}
7865 
7866 	btrfs_set_header_generation(buf, trans->transid);
7867 	btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
7868 	btrfs_tree_lock(buf);
7869 	clean_tree_block(trans, root->fs_info, buf);
7870 	clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7871 
7872 	btrfs_set_lock_blocking(buf);
7873 	btrfs_set_buffer_uptodate(buf);
7874 
7875 	if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7876 		buf->log_index = root->log_transid % 2;
7877 		/*
7878 		 * we allow two log transactions at a time, use different
7879 		 * EXTENT bit to differentiate dirty pages.
7880 		 */
7881 		if (buf->log_index == 0)
7882 			set_extent_dirty(&root->dirty_log_pages, buf->start,
7883 					buf->start + buf->len - 1, GFP_NOFS);
7884 		else
7885 			set_extent_new(&root->dirty_log_pages, buf->start,
7886 					buf->start + buf->len - 1, GFP_NOFS);
7887 	} else {
7888 		buf->log_index = -1;
7889 		set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7890 			 buf->start + buf->len - 1, GFP_NOFS);
7891 	}
7892 	trans->dirty = true;
7893 	/* this returns a buffer locked for blocking */
7894 	return buf;
7895 }
7896 
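/*
 * Pick the block reserve to charge a new tree block to and consume
 * blocksize bytes from it.  If the reserve is exhausted we retry after
 * refreshing the global reserve, then try a NO_FLUSH metadata
 * reservation, and finally fall back to stealing from the global
 * reserve when the space info matches.
 */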
7897 static struct btrfs_block_rsv *
7898 use_block_rsv(struct btrfs_trans_handle *trans,
7899 	      struct btrfs_root *root, u32 blocksize)
7900 {
7901 	struct btrfs_block_rsv *block_rsv;
7902 	struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7903 	int ret;
7904 	bool global_updated = false;
7905 
7906 	block_rsv = get_block_rsv(trans, root);
7907 
7908 	if (unlikely(block_rsv->size == 0))
7909 		goto try_reserve;
7910 again:
7911 	ret = block_rsv_use_bytes(block_rsv, blocksize);
7912 	if (!ret)
7913 		return block_rsv;
7914 
7915 	if (block_rsv->failfast)
7916 		return ERR_PTR(ret);
7917 
7918 	if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7919 		global_updated = true;
7920 		update_global_block_rsv(root->fs_info);
7921 		goto again;
7922 	}
7923 
7924 	if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7925 		static DEFINE_RATELIMIT_STATE(_rs,
7926 				DEFAULT_RATELIMIT_INTERVAL * 10,
7927 				/*DEFAULT_RATELIMIT_BURST*/ 1);
7928 		if (__ratelimit(&_rs))
7929 			WARN(1, KERN_DEBUG
7930 				"BTRFS: block rsv returned %d\n", ret);
7931 	}
7932 try_reserve:
7933 	ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7934 				     BTRFS_RESERVE_NO_FLUSH);
7935 	if (!ret)
7936 		return block_rsv;
7937 	/*
7938 	 * If we couldn't reserve metadata bytes try and use some from
7939 	 * the global reserve if its space type is the same as the global
7940 	 * reservation.
7941 	 */
7942 	if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7943 	    block_rsv->space_info == global_rsv->space_info) {
7944 		ret = block_rsv_use_bytes(global_rsv, blocksize);
7945 		if (!ret)
7946 			return global_rsv;
7947 	}
7948 	return ERR_PTR(ret);
7949 }
7950 
7951 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7952 			    struct btrfs_block_rsv *block_rsv, u32 blocksize)
7953 {
7954 	block_rsv_add_bytes(block_rsv, blocksize, 0);
7955 	block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7956 }
7957 
7958 /*
7959  * finds a free extent and does all the dirty work required for allocation.
7960  * Returns the tree buffer or an ERR_PTR on error.
7961  */
7962 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7963 					struct btrfs_root *root,
7964 					u64 parent, u64 root_objectid,
7965 					struct btrfs_disk_key *key, int level,
7966 					u64 hint, u64 empty_size)
7967 {
7968 	struct btrfs_key ins;
7969 	struct btrfs_block_rsv *block_rsv;
7970 	struct extent_buffer *buf;
7971 	struct btrfs_delayed_extent_op *extent_op;
7972 	u64 flags = 0;
7973 	int ret;
7974 	u32 blocksize = root->nodesize;
7975 	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7976 						 SKINNY_METADATA);
7977 
7978 	if (btrfs_test_is_dummy_root(root)) {
7979 		buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
7980 					    level);
7981 		if (!IS_ERR(buf))
7982 			root->alloc_bytenr += blocksize;
7983 		return buf;
7984 	}
7985 
7986 	block_rsv = use_block_rsv(trans, root, blocksize);
7987 	if (IS_ERR(block_rsv))
7988 		return ERR_CAST(block_rsv);
7989 
7990 	ret = btrfs_reserve_extent(root, blocksize, blocksize,
7991 				   empty_size, hint, &ins, 0, 0);
7992 	if (ret)
7993 		goto out_unuse;
7994 
7995 	buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
7996 	if (IS_ERR(buf)) {
7997 		ret = PTR_ERR(buf);
7998 		goto out_free_reserved;
7999 	}
8000 
8001 	if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
8002 		if (parent == 0)
8003 			parent = ins.objectid;
8004 		flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
8005 	} else
8006 		BUG_ON(parent > 0);
8007 
8008 	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
8009 		extent_op = btrfs_alloc_delayed_extent_op();
8010 		if (!extent_op) {
8011 			ret = -ENOMEM;
8012 			goto out_free_buf;
8013 		}
8014 		if (key)
8015 			memcpy(&extent_op->key, key, sizeof(extent_op->key));
8016 		else
8017 			memset(&extent_op->key, 0, sizeof(extent_op->key));
8018 		extent_op->flags_to_set = flags;
8019 		if (skinny_metadata)
8020 			extent_op->update_key = 0;
8021 		else
8022 			extent_op->update_key = 1;
8023 		extent_op->update_flags = 1;
8024 		extent_op->is_data = 0;
8025 		extent_op->level = level;
8026 
8027 		ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
8028 						 ins.objectid, ins.offset,
8029 						 parent, root_objectid, level,
8030 						 BTRFS_ADD_DELAYED_EXTENT,
8031 						 extent_op);
8032 		if (ret)
8033 			goto out_free_delayed;
8034 	}
8035 	return buf;
8036 
8037 out_free_delayed:
8038 	btrfs_free_delayed_extent_op(extent_op);
8039 out_free_buf:
8040 	free_extent_buffer(buf);
8041 out_free_reserved:
8042 	btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
8043 out_unuse:
8044 	unuse_block_rsv(root->fs_info, block_rsv, blocksize);
8045 	return ERR_PTR(ret);
8046 }
8047 
8048 struct walk_control {
8049 	u64 refs[BTRFS_MAX_LEVEL];
8050 	u64 flags[BTRFS_MAX_LEVEL];
8051 	struct btrfs_key update_progress;
8052 	int stage;
8053 	int level;
8054 	int shared_level;
8055 	int update_ref;
8056 	int keep_locks;
8057 	int reada_slot;
8058 	int reada_count;
8059 	int for_reloc;
8060 };
8061 
8062 #define DROP_REFERENCE	1
8063 #define UPDATE_BACKREF	2
8064 
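/*
 * Issue readahead for the children of the node we are about to walk
 * down into.  The readahead window (wc->reada_count) grows while we
 * sweep forward and shrinks when the walk restarts behind it, and
 * blocks the walk will skip anyway (shared subtrees, old generations)
 * are not read.
 */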
8065 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
8066 				     struct btrfs_root *root,
8067 				     struct walk_control *wc,
8068 				     struct btrfs_path *path)
8069 {
8070 	u64 bytenr;
8071 	u64 generation;
8072 	u64 refs;
8073 	u64 flags;
8074 	u32 nritems;
8075 	u32 blocksize;
8076 	struct btrfs_key key;
8077 	struct extent_buffer *eb;
8078 	int ret;
8079 	int slot;
8080 	int nread = 0;
8081 
8082 	if (path->slots[wc->level] < wc->reada_slot) {
8083 		wc->reada_count = wc->reada_count * 2 / 3;
8084 		wc->reada_count = max(wc->reada_count, 2);
8085 	} else {
8086 		wc->reada_count = wc->reada_count * 3 / 2;
8087 		wc->reada_count = min_t(int, wc->reada_count,
8088 					BTRFS_NODEPTRS_PER_BLOCK(root));
8089 	}
8090 
8091 	eb = path->nodes[wc->level];
8092 	nritems = btrfs_header_nritems(eb);
8093 	blocksize = root->nodesize;
8094 
8095 	for (slot = path->slots[wc->level]; slot < nritems; slot++) {
8096 		if (nread >= wc->reada_count)
8097 			break;
8098 
8099 		cond_resched();
8100 		bytenr = btrfs_node_blockptr(eb, slot);
8101 		generation = btrfs_node_ptr_generation(eb, slot);
8102 
8103 		if (slot == path->slots[wc->level])
8104 			goto reada;
8105 
8106 		if (wc->stage == UPDATE_BACKREF &&
8107 		    generation <= root->root_key.offset)
8108 			continue;
8109 
8110 		/* We don't lock the tree block, it's OK to be racy here */
8111 		ret = btrfs_lookup_extent_info(trans, root, bytenr,
8112 					       wc->level - 1, 1, &refs,
8113 					       &flags);
8114 		/* We don't care about errors in readahead. */
8115 		if (ret < 0)
8116 			continue;
8117 		BUG_ON(refs == 0);
8118 
8119 		if (wc->stage == DROP_REFERENCE) {
8120 			if (refs == 1)
8121 				goto reada;
8122 
8123 			if (wc->level == 1 &&
8124 			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8125 				continue;
8126 			if (!wc->update_ref ||
8127 			    generation <= root->root_key.offset)
8128 				continue;
8129 			btrfs_node_key_to_cpu(eb, &key, slot);
8130 			ret = btrfs_comp_cpu_keys(&key,
8131 						  &wc->update_progress);
8132 			if (ret < 0)
8133 				continue;
8134 		} else {
8135 			if (wc->level == 1 &&
8136 			    (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8137 				continue;
8138 		}
8139 reada:
8140 		readahead_tree_block(root, bytenr);
8141 		nread++;
8142 	}
8143 	wc->reada_slot = slot;
8144 }
8145 
8146 /*
8147  * These may not be seen by the usual inc/dec ref code so we have to
8148  * add them here.
8149  */
8150 static int record_one_subtree_extent(struct btrfs_trans_handle *trans,
8151 				     struct btrfs_root *root, u64 bytenr,
8152 				     u64 num_bytes)
8153 {
8154 	struct btrfs_qgroup_extent_record *qrecord;
8155 	struct btrfs_delayed_ref_root *delayed_refs;
8156 
8157 	qrecord = kmalloc(sizeof(*qrecord), GFP_NOFS);
8158 	if (!qrecord)
8159 		return -ENOMEM;
8160 
8161 	qrecord->bytenr = bytenr;
8162 	qrecord->num_bytes = num_bytes;
8163 	qrecord->old_roots = NULL;
8164 
8165 	delayed_refs = &trans->transaction->delayed_refs;
8166 	spin_lock(&delayed_refs->lock);
8167 	if (btrfs_qgroup_insert_dirty_extent(delayed_refs, qrecord))
8168 		kfree(qrecord);
8169 	spin_unlock(&delayed_refs->lock);
8170 
8171 	return 0;
8172 }
8173 
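/*
 * Queue a qgroup extent record for every regular file extent in the
 * leaf so the quota code can account them.  Inline extents and holes
 * (disk_bytenr == 0) carry no separately allocated bytes and are
 * skipped, as is everything when quotas are disabled.
 */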
8174 static int account_leaf_items(struct btrfs_trans_handle *trans,
8175 			      struct btrfs_root *root,
8176 			      struct extent_buffer *eb)
8177 {
8178 	int nr = btrfs_header_nritems(eb);
8179 	int i, extent_type, ret;
8180 	struct btrfs_key key;
8181 	struct btrfs_file_extent_item *fi;
8182 	u64 bytenr, num_bytes;
8183 
8184 	/* We can be called directly from walk_up_proc() */
8185 	if (!root->fs_info->quota_enabled)
8186 		return 0;
8187 
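	/*
	 * Walk every item in the leaf; only regular/prealloc file extents
	 * occupy disk space.  Inline extents live inside the leaf itself
	 * and holes have a zero disk_bytenr, so both are skipped.
	 */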
8188 	for (i = 0; i < nr; i++) {
8189 		btrfs_item_key_to_cpu(eb, &key, i);
8190 
8191 		if (key.type != BTRFS_EXTENT_DATA_KEY)
8192 			continue;
8193 
8194 		fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
8195 		/* filter out non qgroup-accountable extents */
8196 		extent_type = btrfs_file_extent_type(eb, fi);
8197 
8198 		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
8199 			continue;
8200 
8201 		bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
8202 		if (!bytenr)
8203 			continue;
8204 
8205 		num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
8206 
8207 		ret = record_one_subtree_extent(trans, root, bytenr, num_bytes);
8208 		if (ret)
8209 			return ret;
8210 	}
8211 	return 0;
8212 }
8213 
8214 /*
8215  * Walk up the tree from the bottom, freeing leaves and any interior
8216  * nodes which have had all slots visited. If a node (leaf or
8217  * interior) is freed, the node above it will have its slot
8218  * incremented. The root node will never be freed.
8219  *
8220  * At the end of this function, we should have a path which has all
8221  * slots incremented to the next position for a search. If we need to
8222  * read a new node it will be NULL and the node above it will have the
8223  * correct slot selected for a later read.
8224  *
8225  * If we increment the root node's slot counter past the number of
8226  * elements, 1 is returned to signal completion of the search.
8227  */
8228 static int adjust_slots_upwards(struct btrfs_root *root,
8229 				struct btrfs_path *path, int root_level)
8230 {
8231 	int level = 0;
8232 	int nr, slot;
8233 	struct extent_buffer *eb;
8234 
8235 	if (root_level == 0)
8236 		return 1;
8237 
8238 	while (level <= root_level) {
8239 		eb = path->nodes[level];
8240 		nr = btrfs_header_nritems(eb);
8241 		path->slots[level]++;
8242 		slot = path->slots[level];
8243 		if (slot >= nr || level == 0) {
8244 			/*
8245 			 * Don't free the root -  we will detect this
8246 			 * condition after our loop and return a
8247 			 * positive value for caller to stop walking the tree.
8248 			 */
8249 			if (level != root_level) {
8250 				btrfs_tree_unlock_rw(eb, path->locks[level]);
8251 				path->locks[level] = 0;
8252 
8253 				free_extent_buffer(eb);
8254 				path->nodes[level] = NULL;
8255 				path->slots[level] = 0;
8256 			}
8257 		} else {
8258 			/*
8259 			 * We have a valid slot to walk back down
8260 			 * from. Stop here so caller can process these
8261 			 * new nodes.
8262 			 */
8263 			break;
8264 		}
8265 
8266 		level++;
8267 	}
8268 
8269 	eb = path->nodes[root_level];
8270 	if (path->slots[root_level] >= btrfs_header_nritems(eb))
8271 		return 1;
8272 
8273 	return 0;
8274 }
8275 
8276 /*
8277  * root_eb is the subtree root and is locked before this function is called.
8278  */
8279 static int account_shared_subtree(struct btrfs_trans_handle *trans,
8280 				  struct btrfs_root *root,
8281 				  struct extent_buffer *root_eb,
8282 				  u64 root_gen,
8283 				  int root_level)
8284 {
8285 	int ret = 0;
8286 	int level;
8287 	struct extent_buffer *eb = root_eb;
8288 	struct btrfs_path *path = NULL;
8289 
8290 	BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
8291 	BUG_ON(root_eb == NULL);
8292 
8293 	if (!root->fs_info->quota_enabled)
8294 		return 0;
8295 
8296 	if (!extent_buffer_uptodate(root_eb)) {
8297 		ret = btrfs_read_buffer(root_eb, root_gen);
8298 		if (ret)
8299 			goto out;
8300 	}
8301 
8302 	if (root_level == 0) {
8303 		ret = account_leaf_items(trans, root, root_eb);
8304 		goto out;
8305 	}
8306 
8307 	path = btrfs_alloc_path();
8308 	if (!path)
8309 		return -ENOMEM;
8310 
8311 	/*
8312 	 * Walk down the tree.  Missing extent blocks are filled in as
8313 	 * we go. Metadata is accounted every time we read a new
8314 	 * extent block.
8315 	 *
8316 	 * When we reach a leaf, we account for file extent items in it,
8317 	 * walk back up the tree (adjusting slot pointers as we go)
8318 	 * and restart the search process.
8319 	 */
8320 	extent_buffer_get(root_eb); /* For path */
8321 	path->nodes[root_level] = root_eb;
8322 	path->slots[root_level] = 0;
8323 	path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
8324 walk_down:
8325 	level = root_level;
8326 	while (level >= 0) {
8327 		if (path->nodes[level] == NULL) {
8328 			int parent_slot;
8329 			u64 child_gen;
8330 			u64 child_bytenr;
8331 
8332 			/* We need to get child blockptr/gen from
8333 			 * parent before we can read it. */
8334 			eb = path->nodes[level + 1];
8335 			parent_slot = path->slots[level + 1];
8336 			child_bytenr = btrfs_node_blockptr(eb, parent_slot);
8337 			child_gen = btrfs_node_ptr_generation(eb, parent_slot);
8338 
8339 			eb = read_tree_block(root, child_bytenr, child_gen);
8340 			if (IS_ERR(eb)) {
8341 				ret = PTR_ERR(eb);
8342 				goto out;
8343 			} else if (!extent_buffer_uptodate(eb)) {
8344 				free_extent_buffer(eb);
8345 				ret = -EIO;
8346 				goto out;
8347 			}
8348 
8349 			path->nodes[level] = eb;
8350 			path->slots[level] = 0;
8351 
8352 			btrfs_tree_read_lock(eb);
8353 			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
8354 			path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
8355 
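			/*
			 * Metadata is accounted once per node, right after
			 * it is first read; a blocking read lock is enough
			 * because the accounting walk never modifies the
			 * subtree.
			 */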
8356 			ret = record_one_subtree_extent(trans, root, child_bytenr,
8357 							root->nodesize);
8358 			if (ret)
8359 				goto out;
8360 		}
8361 
8362 		if (level == 0) {
8363 			ret = account_leaf_items(trans, root, path->nodes[level]);
8364 			if (ret)
8365 				goto out;
8366 
8367 			/* Nonzero return here means we completed our search */
8368 			ret = adjust_slots_upwards(root, path, root_level);
8369 			if (ret)
8370 				break;
8371 
8372 			/* Restart search with new slots */
8373 			goto walk_down;
8374 		}
8375 
8376 		level--;
8377 	}
8378 
8379 	ret = 0;
8380 out:
8381 	btrfs_free_path(path);
8382 
8383 	return ret;
8384 }
8385 
8386 /*
8387  * helper to process tree block while walking down the tree.
8388  *
8389  * when wc->stage == UPDATE_BACKREF, this function updates
8390  * back refs for pointers in the block.
8391  *
8392  * NOTE: return value 1 means we should stop walking down.
8393  */
8394 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
8395 				   struct btrfs_root *root,
8396 				   struct btrfs_path *path,
8397 				   struct walk_control *wc, int lookup_info)
8398 {
8399 	int level = wc->level;
8400 	struct extent_buffer *eb = path->nodes[level];
8401 	u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8402 	int ret;
8403 
8404 	if (wc->stage == UPDATE_BACKREF &&
8405 	    btrfs_header_owner(eb) != root->root_key.objectid)
8406 		return 1;
8407 
8408 	/*
8409 	 * when the reference count of a tree block is 1, it won't increase
8410 	 * again. once the full backref flag is set, we never clear it.
8411 	 */
8412 	if (lookup_info &&
8413 	    ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
8414 	     (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
8415 		BUG_ON(!path->locks[level]);
8416 		ret = btrfs_lookup_extent_info(trans, root,
8417 					       eb->start, level, 1,
8418 					       &wc->refs[level],
8419 					       &wc->flags[level]);
8420 		BUG_ON(ret == -ENOMEM);
8421 		if (ret)
8422 			return ret;
8423 		BUG_ON(wc->refs[level] == 0);
8424 	}
8425 
8426 	if (wc->stage == DROP_REFERENCE) {
8427 		if (wc->refs[level] > 1)
8428 			return 1;
8429 
8430 		if (path->locks[level] && !wc->keep_locks) {
8431 			btrfs_tree_unlock_rw(eb, path->locks[level]);
8432 			path->locks[level] = 0;
8433 		}
8434 		return 0;
8435 	}
8436 
8437 	/* wc->stage == UPDATE_BACKREF */
8438 	if (!(wc->flags[level] & flag)) {
8439 		BUG_ON(!path->locks[level]);
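		/*
		 * Convert this block's implicit refs on its children from
		 * owner-keyed to shared (full backref) style: add the
		 * full-backref refs first, drop the old owner-keyed ones,
		 * then record the flag on the extent item.
		 */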
8440 		ret = btrfs_inc_ref(trans, root, eb, 1);
8441 		BUG_ON(ret); /* -ENOMEM */
8442 		ret = btrfs_dec_ref(trans, root, eb, 0);
8443 		BUG_ON(ret); /* -ENOMEM */
8444 		ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
8445 						  eb->len, flag,
8446 						  btrfs_header_level(eb), 0);
8447 		BUG_ON(ret); /* -ENOMEM */
8448 		wc->flags[level] |= flag;
8449 	}
8450 
8451 	/*
8452 	 * the block is shared by multiple trees, so it's not good to
8453 	 * keep the tree lock
8454 	 */
8455 	if (path->locks[level] && level > 0) {
8456 		btrfs_tree_unlock_rw(eb, path->locks[level]);
8457 		path->locks[level] = 0;
8458 	}
8459 	return 0;
8460 }
8461 
8462 /*
8463  * helper to process tree block pointer.
8464  *
8465  * when wc->stage == DROP_REFERENCE, this function checks
8466  * reference count of the block pointed to. if the block
8467  * is shared and we need to update back refs for the subtree
8468  * rooted at the block, this function changes wc->stage to
8469  * UPDATE_BACKREF. if the block is shared and there is no
8470  * need to update backrefs, this function drops the reference
8471  * to the block.
8472  *
8473  * NOTE: return value 1 means we should stop walking down.
8474  */
8475 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8476 				 struct btrfs_root *root,
8477 				 struct btrfs_path *path,
8478 				 struct walk_control *wc, int *lookup_info)
8479 {
8480 	u64 bytenr;
8481 	u64 generation;
8482 	u64 parent;
8483 	u32 blocksize;
8484 	struct btrfs_key key;
8485 	struct extent_buffer *next;
8486 	int level = wc->level;
8487 	int reada = 0;
8488 	int ret = 0;
8489 	bool need_account = false;
8490 
8491 	generation = btrfs_node_ptr_generation(path->nodes[level],
8492 					       path->slots[level]);
8493 	/*
8494 	 * if the lower level block was created before the snapshot
8495 	 * was created, we know there is no need to update back refs
8496 	 * for the subtree
8497 	 */
8498 	if (wc->stage == UPDATE_BACKREF &&
8499 	    generation <= root->root_key.offset) {
8500 		*lookup_info = 1;
8501 		return 1;
8502 	}
8503 
8504 	bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
8505 	blocksize = root->nodesize;
8506 
8507 	next = btrfs_find_tree_block(root->fs_info, bytenr);
8508 	if (!next) {
8509 		next = btrfs_find_create_tree_block(root, bytenr);
8510 		if (!next)
8511 			return -ENOMEM;
8512 		btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
8513 					       level - 1);
8514 		reada = 1;
8515 	}
8516 	btrfs_tree_lock(next);
8517 	btrfs_set_lock_blocking(next);
8518 
8519 	ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
8520 				       &wc->refs[level - 1],
8521 				       &wc->flags[level - 1]);
8522 	if (ret < 0)
8523 		goto out_unlock;
8524 
8525 	if (unlikely(wc->refs[level - 1] == 0)) {
8526 		btrfs_err(root->fs_info, "Missing references.");
8527 		ret = -EIO;
8528 		goto out_unlock;
8529 	}
8530 	*lookup_info = 0;
8531 
8532 	if (wc->stage == DROP_REFERENCE) {
8533 		if (wc->refs[level - 1] > 1) {
8534 			need_account = true;
8535 			if (level == 1 &&
8536 			    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8537 				goto skip;
8538 
8539 			if (!wc->update_ref ||
8540 			    generation <= root->root_key.offset)
8541 				goto skip;
8542 
8543 			btrfs_node_key_to_cpu(path->nodes[level], &key,
8544 					      path->slots[level]);
8545 			ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
8546 			if (ret < 0)
8547 				goto skip;
8548 
8549 			wc->stage = UPDATE_BACKREF;
8550 			wc->shared_level = level - 1;
8551 		}
8552 	} else {
8553 		if (level == 1 &&
8554 		    (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8555 			goto skip;
8556 	}
8557 
8558 	if (!btrfs_buffer_uptodate(next, generation, 0)) {
8559 		btrfs_tree_unlock(next);
8560 		free_extent_buffer(next);
8561 		next = NULL;
8562 		*lookup_info = 1;
8563 	}
8564 
8565 	if (!next) {
8566 		if (reada && level == 1)
8567 			reada_walk_down(trans, root, wc, path);
8568 		next = read_tree_block(root, bytenr, generation);
8569 		if (IS_ERR(next)) {
8570 			return PTR_ERR(next);
8571 		} else if (!extent_buffer_uptodate(next)) {
8572 			free_extent_buffer(next);
8573 			return -EIO;
8574 		}
8575 		btrfs_tree_lock(next);
8576 		btrfs_set_lock_blocking(next);
8577 	}
8578 
8579 	level--;
8580 	ASSERT(level == btrfs_header_level(next));
8581 	if (level != btrfs_header_level(next)) {
8582 		btrfs_err(root->fs_info, "mismatched level");
8583 		ret = -EIO;
8584 		goto out_unlock;
8585 	}
8586 	path->nodes[level] = next;
8587 	path->slots[level] = 0;
8588 	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8589 	wc->level = level;
8590 	if (wc->level == 1)
8591 		wc->reada_slot = 0;
8592 	return 0;
8593 skip:
8594 	wc->refs[level - 1] = 0;
8595 	wc->flags[level - 1] = 0;
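	/*
	 * We are not descending into this child.  In the DROP_REFERENCE
	 * stage that means dropping our single reference on it (after
	 * accounting the still-shared subtree for qgroups if needed);
	 * otherwise we simply advance to the next slot.
	 */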
8596 	if (wc->stage == DROP_REFERENCE) {
8597 		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
8598 			parent = path->nodes[level]->start;
8599 		} else {
8600 			ASSERT(root->root_key.objectid ==
8601 			       btrfs_header_owner(path->nodes[level]));
8602 			if (root->root_key.objectid !=
8603 			    btrfs_header_owner(path->nodes[level])) {
8604 				btrfs_err(root->fs_info,
8605 						"mismatched block owner");
8606 				ret = -EIO;
8607 				goto out_unlock;
8608 			}
8609 			parent = 0;
8610 		}
8611 
8612 		if (need_account) {
8613 			ret = account_shared_subtree(trans, root, next,
8614 						     generation, level - 1);
8615 			if (ret) {
8616 				btrfs_err_rl(root->fs_info,
8617 					"Error "
8618 					"%d accounting shared subtree. Quota "
8619 					"is out of sync, rescan required.",
8620 					ret);
8621 			}
8622 		}
8623 		ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
8624 				root->root_key.objectid, level - 1, 0);
8625 		if (ret)
8626 			goto out_unlock;
8627 	}
8628 
8629 	*lookup_info = 1;
8630 	ret = 1;
8631 
8632 out_unlock:
8633 	btrfs_tree_unlock(next);
8634 	free_extent_buffer(next);
8635 
8636 	return ret;
8637 }
8638 
8639 /*
8640  * helper to process tree block while walking up the tree.
8641  *
8642  * when wc->stage == DROP_REFERENCE, this function drops
8643  * reference count on the block.
8644  *
8645  * when wc->stage == UPDATE_BACKREF, this function changes
8646  * wc->stage back to DROP_REFERENCE if we changed wc->stage
8647  * to UPDATE_BACKREF previously while processing the block.
8648  *
8649  * NOTE: return value 1 means we should stop walking up.
8650  */
8651 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
8652 				 struct btrfs_root *root,
8653 				 struct btrfs_path *path,
8654 				 struct walk_control *wc)
8655 {
8656 	int ret;
8657 	int level = wc->level;
8658 	struct extent_buffer *eb = path->nodes[level];
8659 	u64 parent = 0;
8660 
8661 	if (wc->stage == UPDATE_BACKREF) {
8662 		BUG_ON(wc->shared_level < level);
8663 		if (level < wc->shared_level)
8664 			goto out;
8665 
8666 		ret = find_next_key(path, level + 1, &wc->update_progress);
8667 		if (ret > 0)
8668 			wc->update_ref = 0;
8669 
8670 		wc->stage = DROP_REFERENCE;
8671 		wc->shared_level = -1;
8672 		path->slots[level] = 0;
8673 
8674 		/*
8675 		 * check reference count again if the block isn't locked.
8676 		 * we should start walking down the tree again if reference
8677 		 * count is one.
8678 		 */
8679 		if (!path->locks[level]) {
8680 			BUG_ON(level == 0);
8681 			btrfs_tree_lock(eb);
8682 			btrfs_set_lock_blocking(eb);
8683 			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8684 
8685 			ret = btrfs_lookup_extent_info(trans, root,
8686 						       eb->start, level, 1,
8687 						       &wc->refs[level],
8688 						       &wc->flags[level]);
8689 			if (ret < 0) {
8690 				btrfs_tree_unlock_rw(eb, path->locks[level]);
8691 				path->locks[level] = 0;
8692 				return ret;
8693 			}
8694 			BUG_ON(wc->refs[level] == 0);
8695 			if (wc->refs[level] == 1) {
8696 				btrfs_tree_unlock_rw(eb, path->locks[level]);
8697 				path->locks[level] = 0;
8698 				return 1;
8699 			}
8700 		}
8701 	}
8702 
8703 	/* wc->stage == DROP_REFERENCE */
8704 	BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8705 
8706 	if (wc->refs[level] == 1) {
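		/*
		 * Ours is the last reference: drop the implicit refs this
		 * block holds on its children.  With FULL_BACKREF set the
		 * child refs are keyed to this block, otherwise to the
		 * owning root.
		 */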
8707 		if (level == 0) {
8708 			if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8709 				ret = btrfs_dec_ref(trans, root, eb, 1);
8710 			else
8711 				ret = btrfs_dec_ref(trans, root, eb, 0);
8712 			BUG_ON(ret); /* -ENOMEM */
8713 			ret = account_leaf_items(trans, root, eb);
8714 			if (ret) {
8715 				btrfs_err_rl(root->fs_info,
8716 					"error "
8717 					"%d accounting leaf items. Quota "
8718 					"is out of sync, rescan required.",
8719 					ret);
8720 			}
8721 		}
8722 		/* make block locked assertion in clean_tree_block happy */
8723 		if (!path->locks[level] &&
8724 		    btrfs_header_generation(eb) == trans->transid) {
8725 			btrfs_tree_lock(eb);
8726 			btrfs_set_lock_blocking(eb);
8727 			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8728 		}
8729 		clean_tree_block(trans, root->fs_info, eb);
8730 	}
8731 
8732 	if (eb == root->node) {
8733 		if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8734 			parent = eb->start;
8735 		else if (root->root_key.objectid != btrfs_header_owner(eb))
8736 			goto owner_mismatch;
8737 	} else {
8738 		if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8739 			parent = path->nodes[level + 1]->start;
8740 		else if (root->root_key.objectid !=
8741 			 btrfs_header_owner(path->nodes[level + 1]))
8742 			goto owner_mismatch;
8743 	}
8744 
8745 	btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8746 out:
8747 	wc->refs[level] = 0;
8748 	wc->flags[level] = 0;
8749 	return 0;
8750 
8751 owner_mismatch:
8752 	btrfs_err_rl(root->fs_info, "unexpected tree owner, have %llu expect %llu",
8753 		     btrfs_header_owner(eb), root->root_key.objectid);
8754 	return -EUCLEAN;
8755 }
8756 
8757 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8758 				   struct btrfs_root *root,
8759 				   struct btrfs_path *path,
8760 				   struct walk_control *wc)
8761 {
8762 	int level = wc->level;
8763 	int lookup_info = 1;
8764 	int ret;
8765 
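	/*
	 * Descend one level per iteration.  A positive return from
	 * do_walk_down means the child was skipped (e.g. a shared
	 * subtree), so we just advance to the next slot at this level.
	 */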
8766 	while (level >= 0) {
8767 		ret = walk_down_proc(trans, root, path, wc, lookup_info);
8768 		if (ret > 0)
8769 			break;
8770 
8771 		if (level == 0)
8772 			break;
8773 
8774 		if (path->slots[level] >=
8775 		    btrfs_header_nritems(path->nodes[level]))
8776 			break;
8777 
8778 		ret = do_walk_down(trans, root, path, wc, &lookup_info);
8779 		if (ret > 0) {
8780 			path->slots[level]++;
8781 			continue;
8782 		} else if (ret < 0)
8783 			return ret;
8784 		level = wc->level;
8785 	}
8786 	return 0;
8787 }
8788 
8789 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8790 				 struct btrfs_root *root,
8791 				 struct btrfs_path *path,
8792 				 struct walk_control *wc, int max_level)
8793 {
8794 	int level = wc->level;
8795 	int ret;
8796 
8797 	path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8798 	while (level < max_level && path->nodes[level]) {
8799 		wc->level = level;
8800 		if (path->slots[level] + 1 <
8801 		    btrfs_header_nritems(path->nodes[level])) {
8802 			path->slots[level]++;
8803 			return 0;
8804 		} else {
8805 			ret = walk_up_proc(trans, root, path, wc);
8806 			if (ret > 0)
8807 				return 0;
8808 			if (ret < 0)
8809 				return ret;
8810 
8811 			if (path->locks[level]) {
8812 				btrfs_tree_unlock_rw(path->nodes[level],
8813 						     path->locks[level]);
8814 				path->locks[level] = 0;
8815 			}
8816 			free_extent_buffer(path->nodes[level]);
8817 			path->nodes[level] = NULL;
8818 			level++;
8819 		}
8820 	}
8821 	return 1;
8822 }
8823 
8824 /*
8825  * drop a subvolume tree.
8826  *
8827  * this function traverses the tree, freeing any blocks that are only
8828  * referenced by the tree.
8829  *
8830  * when a shared tree block is found, this function decreases its
8831  * reference count by one. if update_ref is true, this function
8832  * also makes sure backrefs for the shared block and all lower level
8833  * blocks are properly updated.
8834  *
8835  * If called with for_reloc == 0, may exit early with -EAGAIN
8836  */
8837 int btrfs_drop_snapshot(struct btrfs_root *root,
8838 			 struct btrfs_block_rsv *block_rsv, int update_ref,
8839 			 int for_reloc)
8840 {
8841 	struct btrfs_path *path;
8842 	struct btrfs_trans_handle *trans;
8843 	struct btrfs_root *tree_root = root->fs_info->tree_root;
8844 	struct btrfs_root_item *root_item = &root->root_item;
8845 	struct walk_control *wc;
8846 	struct btrfs_key key;
8847 	int err = 0;
8848 	int ret;
8849 	int level;
8850 	bool root_dropped = false;
8851 
8852 	btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
8853 
8854 	path = btrfs_alloc_path();
8855 	if (!path) {
8856 		err = -ENOMEM;
8857 		goto out;
8858 	}
8859 
8860 	wc = kzalloc(sizeof(*wc), GFP_NOFS);
8861 	if (!wc) {
8862 		btrfs_free_path(path);
8863 		err = -ENOMEM;
8864 		goto out;
8865 	}
8866 
8867 	trans = btrfs_start_transaction(tree_root, 0);
8868 	if (IS_ERR(trans)) {
8869 		err = PTR_ERR(trans);
8870 		goto out_free;
8871 	}
8872 
8873 	if (block_rsv)
8874 		trans->block_rsv = block_rsv;
8875 
8876 	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
8877 		level = btrfs_header_level(root->node);
8878 		path->nodes[level] = btrfs_lock_root_node(root);
8879 		btrfs_set_lock_blocking(path->nodes[level]);
8880 		path->slots[level] = 0;
8881 		path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8882 		memset(&wc->update_progress, 0,
8883 		       sizeof(wc->update_progress));
8884 	} else {
8885 		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
8886 		memcpy(&wc->update_progress, &key,
8887 		       sizeof(wc->update_progress));
8888 
8889 		level = root_item->drop_level;
8890 		BUG_ON(level == 0);
8891 		path->lowest_level = level;
8892 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8893 		path->lowest_level = 0;
8894 		if (ret < 0) {
8895 			err = ret;
8896 			goto out_end_trans;
8897 		}
8898 		WARN_ON(ret > 0);
8899 
8900 		/*
8901 		 * unlock our path, this is safe because only this
8902 		 * function is allowed to delete this snapshot
8903 		 */
8904 		btrfs_unlock_up_safe(path, 0);
8905 
8906 		level = btrfs_header_level(root->node);
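		/*
		 * Re-lock the path from the root down to the saved
		 * drop_level, re-reading refs/flags at each node, so the
		 * walk resumes exactly where the previous transaction
		 * left off.
		 */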
8907 		while (1) {
8908 			btrfs_tree_lock(path->nodes[level]);
8909 			btrfs_set_lock_blocking(path->nodes[level]);
8910 			path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8911 
8912 			ret = btrfs_lookup_extent_info(trans, root,
8913 						path->nodes[level]->start,
8914 						level, 1, &wc->refs[level],
8915 						&wc->flags[level]);
8916 			if (ret < 0) {
8917 				err = ret;
8918 				goto out_end_trans;
8919 			}
8920 			BUG_ON(wc->refs[level] == 0);
8921 
8922 			if (level == root_item->drop_level)
8923 				break;
8924 
8925 			btrfs_tree_unlock(path->nodes[level]);
8926 			path->locks[level] = 0;
8927 			WARN_ON(wc->refs[level] != 1);
8928 			level--;
8929 		}
8930 	}
8931 
8932 	wc->level = level;
8933 	wc->shared_level = -1;
8934 	wc->stage = DROP_REFERENCE;
8935 	wc->update_ref = update_ref;
8936 	wc->keep_locks = 0;
8937 	wc->for_reloc = for_reloc;
8938 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8939 
8940 	while (1) {
8941 
8942 		ret = walk_down_tree(trans, root, path, wc);
8943 		if (ret < 0) {
8944 			err = ret;
8945 			break;
8946 		}
8947 
8948 		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
8949 		if (ret < 0) {
8950 			err = ret;
8951 			break;
8952 		}
8953 
8954 		if (ret > 0) {
8955 			BUG_ON(wc->stage != DROP_REFERENCE);
8956 			break;
8957 		}
8958 
8959 		if (wc->stage == DROP_REFERENCE) {
8960 			level = wc->level;
8961 			btrfs_node_key(path->nodes[level],
8962 				       &root_item->drop_progress,
8963 				       path->slots[level]);
8964 			root_item->drop_level = level;
8965 		}
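		/*
		 * drop_progress/drop_level now name the next key to
		 * process, so the walk can be checkpointed: the root item
		 * is committed and the transaction restarted below, and a
		 * later cleaner pass (or mount) resumes from that key.
		 */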
8966 
8967 		BUG_ON(wc->level == 0);
8968 		if (btrfs_should_end_transaction(trans, tree_root) ||
8969 		    (!for_reloc && btrfs_need_cleaner_sleep(root))) {
8970 			ret = btrfs_update_root(trans, tree_root,
8971 						&root->root_key,
8972 						root_item);
8973 			if (ret) {
8974 				btrfs_abort_transaction(trans, tree_root, ret);
8975 				err = ret;
8976 				goto out_end_trans;
8977 			}
8978 
8979 			btrfs_end_transaction_throttle(trans, tree_root);
8980 			if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
8981 				pr_debug("BTRFS: drop snapshot early exit\n");
8982 				err = -EAGAIN;
8983 				goto out_free;
8984 			}
8985 
8986 			trans = btrfs_start_transaction(tree_root, 0);
8987 			if (IS_ERR(trans)) {
8988 				err = PTR_ERR(trans);
8989 				goto out_free;
8990 			}
8991 			if (block_rsv)
8992 				trans->block_rsv = block_rsv;
8993 		}
8994 	}
8995 	btrfs_release_path(path);
8996 	if (err)
8997 		goto out_end_trans;
8998 
8999 	ret = btrfs_del_root(trans, tree_root, &root->root_key);
9000 	if (ret) {
9001 		btrfs_abort_transaction(trans, tree_root, ret);
9002 		goto out_end_trans;
9003 	}
9004 
9005 	if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
9006 		ret = btrfs_find_root(tree_root, &root->root_key, path,
9007 				      NULL, NULL);
9008 		if (ret < 0) {
9009 			btrfs_abort_transaction(trans, tree_root, ret);
9010 			err = ret;
9011 			goto out_end_trans;
9012 		} else if (ret > 0) {
9013 			/* if we fail to delete the orphan item this time
9014 			 * around, it'll get picked up the next time.
9015 			 *
9016 			 * The most common failure here is just -ENOENT.
9017 			 */
9018 			btrfs_del_orphan_item(trans, tree_root,
9019 					      root->root_key.objectid);
9020 		}
9021 	}
9022 
9023 	if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
9024 		btrfs_add_dropped_root(trans, root);
9025 	} else {
9026 		free_extent_buffer(root->node);
9027 		free_extent_buffer(root->commit_root);
9028 		btrfs_put_fs_root(root);
9029 	}
9030 	root_dropped = true;
9031 out_end_trans:
9032 	btrfs_end_transaction_throttle(trans, tree_root);
9033 out_free:
9034 	kfree(wc);
9035 	btrfs_free_path(path);
9036 out:
9037 	/*
9038 	 * So if we need to stop dropping the snapshot for whatever reason we
9039 	 * need to make sure to add it back to the dead root list so that we
9040 	 * keep trying to do the work later.  This also cleans up roots if we
9041 	 * don't have it in the radix (like when we recover after a power fail
9042 	 * or unmount) so we don't leak memory.
9043 	 */
9044 	if (!for_reloc && root_dropped == false)
9045 		btrfs_add_dead_root(root);
9046 	if (err && err != -EAGAIN)
9047 		btrfs_std_error(root->fs_info, err, NULL);
9048 	return err;
9049 }
9050 
9051 /*
9052  * drop subtree rooted at tree block 'node'.
9053  *
9054  * NOTE: this function will unlock and release tree block 'node'
9055  * only used by relocation code
9056  */
9057 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
9058 			struct btrfs_root *root,
9059 			struct extent_buffer *node,
9060 			struct extent_buffer *parent)
9061 {
9062 	struct btrfs_path *path;
9063 	struct walk_control *wc;
9064 	int level;
9065 	int parent_level;
9066 	int ret = 0;
9067 	int wret;
9068 
9069 	BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
9070 
9071 	path = btrfs_alloc_path();
9072 	if (!path)
9073 		return -ENOMEM;
9074 
9075 	wc = kzalloc(sizeof(*wc), GFP_NOFS);
9076 	if (!wc) {
9077 		btrfs_free_path(path);
9078 		return -ENOMEM;
9079 	}
9080 
9081 	btrfs_assert_tree_locked(parent);
9082 	parent_level = btrfs_header_level(parent);
9083 	extent_buffer_get(parent);
9084 	path->nodes[parent_level] = parent;
9085 	path->slots[parent_level] = btrfs_header_nritems(parent);
9086 
9087 	btrfs_assert_tree_locked(node);
9088 	level = btrfs_header_level(node);
9089 	path->nodes[level] = node;
9090 	path->slots[level] = 0;
9091 	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9092 
9093 	wc->refs[parent_level] = 1;
9094 	wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
9095 	wc->level = level;
9096 	wc->shared_level = -1;
9097 	wc->stage = DROP_REFERENCE;
9098 	wc->update_ref = 0;
9099 	wc->keep_locks = 1;
9100 	wc->for_reloc = 1;
9101 	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
9102 
9103 	while (1) {
9104 		wret = walk_down_tree(trans, root, path, wc);
9105 		if (wret < 0) {
9106 			ret = wret;
9107 			break;
9108 		}
9109 
9110 		wret = walk_up_tree(trans, root, path, wc, parent_level);
9111 		if (wret < 0)
9112 			ret = wret;
9113 		if (wret != 0)
9114 			break;
9115 	}
9116 
9117 	kfree(wc);
9118 	btrfs_free_path(path);
9119 	return ret;
9120 }
9121 
9122 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
9123 {
9124 	u64 num_devices;
9125 	u64 stripped;
9126 
9127 	/*
9128 	 * if restripe for this chunk_type is on pick target profile and
9129 	 * return, otherwise do the usual balance
9130 	 */
9131 	stripped = get_restripe_target(root->fs_info, flags);
9132 	if (stripped)
9133 		return extended_to_chunk(stripped);
9134 
9135 	num_devices = root->fs_info->fs_devices->rw_devices;
9136 
9137 	stripped = BTRFS_BLOCK_GROUP_RAID0 |
9138 		BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
9139 		BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
9140 
9141 	if (num_devices == 1) {
9142 		stripped |= BTRFS_BLOCK_GROUP_DUP;
9143 		stripped = flags & ~stripped;
9144 
9145 		/* turn raid0 into single device chunks */
9146 		if (flags & BTRFS_BLOCK_GROUP_RAID0)
9147 			return stripped;
9148 
9149 		/* turn mirroring into duplication */
9150 		if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
9151 			     BTRFS_BLOCK_GROUP_RAID10))
9152 			return stripped | BTRFS_BLOCK_GROUP_DUP;
9153 	} else {
9154 		/* they already had raid on here, just return */
9155 		if (flags & stripped)
9156 			return flags;
9157 
9158 		stripped |= BTRFS_BLOCK_GROUP_DUP;
9159 		stripped = flags & ~stripped;
9160 
9161 		/* switch duplicated blocks with raid1 */
9162 		if (flags & BTRFS_BLOCK_GROUP_DUP)
9163 			return stripped | BTRFS_BLOCK_GROUP_RAID1;
9164 
9165 		/* this is drive concat, leave it alone */
9166 	}
9167 
9168 	return flags;
9169 }
9170 
9171 static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
9172 {
9173 	struct btrfs_space_info *sinfo = cache->space_info;
9174 	u64 num_bytes;
9175 	u64 min_allocable_bytes;
9176 	int ret = -ENOSPC;
9177 
9178 	/*
9179 	 * We need some metadata space and system metadata space for
9180 	 * allocating chunks in some corner cases, until we force the
9181 	 * block group to be read-only.
9182 	 */
9183 	if ((sinfo->flags &
9184 	     (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
9185 	    !force)
9186 		min_allocable_bytes = 1 * 1024 * 1024;
9187 	else
9188 		min_allocable_bytes = 0;
9189 
9190 	spin_lock(&sinfo->lock);
9191 	spin_lock(&cache->lock);
9192 
9193 	if (cache->ro) {
9194 		cache->ro++;
9195 		ret = 0;
9196 		goto out;
9197 	}
9198 
9199 	num_bytes = cache->key.offset - cache->reserved - cache->pinned -
9200 		    cache->bytes_super - btrfs_block_group_used(&cache->item);
9201 
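	/*
	 * num_bytes is the space in this block group that is not yet
	 * used or reserved; making the group read-only moves it into
	 * bytes_readonly, so only proceed if the rest of the space_info
	 * can still cover outstanding reservations plus
	 * min_allocable_bytes.
	 */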
9202 	if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
9203 	    sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
9204 	    min_allocable_bytes <= sinfo->total_bytes) {
9205 		sinfo->bytes_readonly += num_bytes;
9206 		cache->ro++;
9207 		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
9208 		ret = 0;
9209 	}
9210 out:
9211 	spin_unlock(&cache->lock);
9212 	spin_unlock(&sinfo->lock);
9213 	return ret;
9214 }
9215 
9216 int btrfs_inc_block_group_ro(struct btrfs_root *root,
9217 			     struct btrfs_block_group_cache *cache)
9218 
9219 {
9220 	struct btrfs_trans_handle *trans;
9221 	u64 alloc_flags;
9222 	int ret;
9223 
9224 again:
9225 	trans = btrfs_join_transaction(root);
9226 	if (IS_ERR(trans))
9227 		return PTR_ERR(trans);
9228 
9229 	/*
9230 	 * we're not allowed to set block groups readonly after the dirty
9231 	 * block groups cache has started writing.  If it already started,
9232 	 * back off and let this transaction commit
9233 	 */
9234 	mutex_lock(&root->fs_info->ro_block_group_mutex);
9235 	if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
9236 		u64 transid = trans->transid;
9237 
9238 		mutex_unlock(&root->fs_info->ro_block_group_mutex);
9239 		btrfs_end_transaction(trans, root);
9240 
9241 		ret = btrfs_wait_for_commit(root, transid);
9242 		if (ret)
9243 			return ret;
9244 		goto again;
9245 	}
9246 
9247 	/*
9248 	 * if we are changing raid levels, try to allocate a corresponding
9249 	 * block group with the new raid level.
9250 	 */
9251 	alloc_flags = update_block_group_flags(root, cache->flags);
9252 	if (alloc_flags != cache->flags) {
9253 		ret = do_chunk_alloc(trans, root, alloc_flags,
9254 				     CHUNK_ALLOC_FORCE);
9255 		/*
9256 		 * ENOSPC is allowed here, we may have enough space
9257 		 * already allocated at the new raid level to
9258 		 * carry on
9259 		 */
9260 		if (ret == -ENOSPC)
9261 			ret = 0;
9262 		if (ret < 0)
9263 			goto out;
9264 	}
9265 
9266 	ret = inc_block_group_ro(cache, 0);
9267 	if (!ret)
9268 		goto out;
9269 	alloc_flags = get_alloc_profile(root, cache->space_info->flags);
9270 	ret = do_chunk_alloc(trans, root, alloc_flags,
9271 			     CHUNK_ALLOC_FORCE);
9272 	if (ret < 0)
9273 		goto out;
9274 	ret = inc_block_group_ro(cache, 0);
9275 out:
9276 	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
9277 		alloc_flags = update_block_group_flags(root, cache->flags);
9278 		lock_chunks(root->fs_info->chunk_root);
9279 		check_system_chunk(trans, root, alloc_flags);
9280 		unlock_chunks(root->fs_info->chunk_root);
9281 	}
9282 	mutex_unlock(&root->fs_info->ro_block_group_mutex);
9283 
9284 	btrfs_end_transaction(trans, root);
9285 	return ret;
9286 }
9287 
9288 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
9289 			    struct btrfs_root *root, u64 type)
9290 {
9291 	u64 alloc_flags = get_alloc_profile(root, type);
9292 	return do_chunk_alloc(trans, root, alloc_flags,
9293 			      CHUNK_ALLOC_FORCE);
9294 }
9295 
9296 /*
9297  * helper to account the unused space of all the readonly block groups in the
9298  * space_info. takes mirrors into account.
9299  */
9300 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
9301 {
9302 	struct btrfs_block_group_cache *block_group;
9303 	u64 free_bytes = 0;
9304 	int factor;
9305 
9306 	/* It's df, we don't care if it's racy */
9307 	if (list_empty(&sinfo->ro_bgs))
9308 		return 0;
9309 
9310 	spin_lock(&sinfo->lock);
9311 	list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
9312 		spin_lock(&block_group->lock);
9313 
9314 		if (!block_group->ro) {
9315 			spin_unlock(&block_group->lock);
9316 			continue;
9317 		}
9318 
9319 		if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
9320 					  BTRFS_BLOCK_GROUP_RAID10 |
9321 					  BTRFS_BLOCK_GROUP_DUP))
9322 			factor = 2;
9323 		else
9324 			factor = 1;
9325 
9326 		free_bytes += (block_group->key.offset -
9327 			       btrfs_block_group_used(&block_group->item)) *
9328 			       factor;
9329 
9330 		spin_unlock(&block_group->lock);
9331 	}
9332 	spin_unlock(&sinfo->lock);
9333 
9334 	return free_bytes;
9335 }
9336 
9337 void btrfs_dec_block_group_ro(struct btrfs_root *root,
9338 			      struct btrfs_block_group_cache *cache)
9339 {
9340 	struct btrfs_space_info *sinfo = cache->space_info;
9341 	u64 num_bytes;
9342 
9343 	BUG_ON(!cache->ro);
9344 
9345 	spin_lock(&sinfo->lock);
9346 	spin_lock(&cache->lock);
9347 	if (!--cache->ro) {
9348 		num_bytes = cache->key.offset - cache->reserved -
9349 			    cache->pinned - cache->bytes_super -
9350 			    btrfs_block_group_used(&cache->item);
9351 		sinfo->bytes_readonly -= num_bytes;
9352 		list_del_init(&cache->ro_list);
9353 	}
9354 	spin_unlock(&cache->lock);
9355 	spin_unlock(&sinfo->lock);
9356 }
9357 
9358 /*
9359  * checks to see if it's even possible to relocate this block group.
9360  *
9361  * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
9362  * ok to go ahead and try.
9363  */
9364 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
9365 {
9366 	struct btrfs_block_group_cache *block_group;
9367 	struct btrfs_space_info *space_info;
9368 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
9369 	struct btrfs_device *device;
9370 	struct btrfs_trans_handle *trans;
9371 	u64 min_free;
9372 	u64 dev_min = 1;
9373 	u64 dev_nr = 0;
9374 	u64 target;
9375 	int index;
9376 	int full = 0;
9377 	int ret = 0;
9378 
9379 	block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
9380 
9381 	/* odd, couldn't find the block group, leave it alone */
9382 	if (!block_group)
9383 		return -1;
9384 
9385 	min_free = btrfs_block_group_used(&block_group->item);
9386 
9387 	/* no bytes used, we're good */
9388 	if (!min_free)
9389 		goto out;
9390 
9391 	space_info = block_group->space_info;
9392 	spin_lock(&space_info->lock);
9393 
9394 	full = space_info->full;
9395 
9396 	/*
9397 	 * if this is the last block group we have in this space, we can't
9398 	 * relocate it unless we're able to allocate a new chunk below.
9399 	 *
9400 	 * Otherwise, we need to make sure we have room in the space to handle
9401 	 * all of the extents from this block group.  If we can, we're good
9402 	 */
9403 	if ((space_info->total_bytes != block_group->key.offset) &&
9404 	    (space_info->bytes_used + space_info->bytes_reserved +
9405 	     space_info->bytes_pinned + space_info->bytes_readonly +
9406 	     min_free < space_info->total_bytes)) {
9407 		spin_unlock(&space_info->lock);
9408 		goto out;
9409 	}
9410 	spin_unlock(&space_info->lock);
9411 
9412 	/*
9413 	 * ok we don't have enough space, but maybe we have free space on our
9414 	 * devices to allocate new chunks for relocation, so loop through our
9415 	 * alloc devices and guess if we have enough space.  if this block
9416 	 * group is going to be restriped, run checks against the target
9417 	 * profile instead of the current one.
9418 	 */
9419 	ret = -1;
9420 
9421 	/*
9422 	 * index:
9423 	 *      0: raid10
9424 	 *      1: raid1
9425 	 *      2: dup
9426 	 *      3: raid0
9427 	 *      4: single
9428 	 */
9429 	target = get_restripe_target(root->fs_info, block_group->flags);
9430 	if (target) {
9431 		index = __get_raid_index(extended_to_chunk(target));
9432 	} else {
9433 		/*
9434 		 * this is just a balance, so if we were marked as full
9435 		 * we know there is no space for a new chunk
9436 		 */
9437 		if (full)
9438 			goto out;
9439 
9440 		index = get_block_group_index(block_group);
9441 	}
9442 
9443 	if (index == BTRFS_RAID_RAID10) {
9444 		dev_min = 4;
9445 		/* Divide by 2 */
9446 		min_free >>= 1;
9447 	} else if (index == BTRFS_RAID_RAID1) {
9448 		dev_min = 2;
9449 	} else if (index == BTRFS_RAID_DUP) {
9450 		/* Multiply by 2 */
9451 		min_free <<= 1;
9452 	} else if (index == BTRFS_RAID_RAID0) {
9453 		dev_min = fs_devices->rw_devices;
9454 		min_free = div64_u64(min_free, dev_min);
9455 	}
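	/*
	 * Rough per-profile sizing: raid10 needs at least 4 devices and
	 * stripes each mirror, so per-device demand is min_free / 2;
	 * raid1 needs 2 devices; dup puts both copies on one device,
	 * doubling the demand there; raid0 spreads min_free across all
	 * rw devices.
	 */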
9456 
9457 	/* We need to do this so that we can look at pending chunks */
9458 	trans = btrfs_join_transaction(root);
9459 	if (IS_ERR(trans)) {
9460 		ret = PTR_ERR(trans);
9461 		goto out;
9462 	}
9463 
9464 	mutex_lock(&root->fs_info->chunk_mutex);
9465 	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
9466 		u64 dev_offset;
9467 
9468 		/*
9469 		 * check to make sure we can actually find a chunk with enough
9470 		 * space to fit our block group in.
9471 		 */
9472 		if (device->total_bytes > device->bytes_used + min_free &&
9473 		    !device->is_tgtdev_for_dev_replace) {
9474 			ret = find_free_dev_extent(trans, device, min_free,
9475 						   &dev_offset, NULL);
9476 			if (!ret)
9477 				dev_nr++;
9478 
9479 			if (dev_nr >= dev_min)
9480 				break;
9481 
9482 			ret = -1;
9483 		}
9484 	}
9485 	mutex_unlock(&root->fs_info->chunk_mutex);
9486 	btrfs_end_transaction(trans, root);
9487 out:
9488 	btrfs_put_block_group(block_group);
9489 	return ret;
9490 }
9491 
9492 static int find_first_block_group(struct btrfs_root *root,
9493 		struct btrfs_path *path, struct btrfs_key *key)
9494 {
9495 	int ret = 0;
9496 	struct btrfs_key found_key;
9497 	struct extent_buffer *leaf;
9498 	struct btrfs_block_group_item bg;
9499 	u64 flags;
9500 	int slot;
9501 
9502 	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
9503 	if (ret < 0)
9504 		goto out;
9505 
9506 	while (1) {
9507 		slot = path->slots[0];
9508 		leaf = path->nodes[0];
9509 		if (slot >= btrfs_header_nritems(leaf)) {
9510 			ret = btrfs_next_leaf(root, path);
9511 			if (ret == 0)
9512 				continue;
9513 			if (ret < 0)
9514 				goto out;
9515 			break;
9516 		}
9517 		btrfs_item_key_to_cpu(leaf, &found_key, slot);
9518 
9519 		if (found_key.objectid >= key->objectid &&
9520 		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
9521 			struct extent_map_tree *em_tree;
9522 			struct extent_map *em;
9523 
9524 			em_tree = &root->fs_info->mapping_tree.map_tree;
9525 			read_lock(&em_tree->lock);
9526 			em = lookup_extent_mapping(em_tree, found_key.objectid,
9527 						   found_key.offset);
9528 			read_unlock(&em_tree->lock);
9529 			if (!em) {
9530 				btrfs_err(root->fs_info,
9531 			"logical %llu len %llu found bg but no related chunk",
9532 					  found_key.objectid, found_key.offset);
9533 				ret = -ENOENT;
9534 			} else if (em->start != found_key.objectid ||
9535 				   em->len != found_key.offset) {
9536 				btrfs_err(root->fs_info,
9537 		"block group %llu len %llu mismatch with chunk %llu len %llu",
9538 					  found_key.objectid, found_key.offset,
9539 					  em->start, em->len);
9540 				ret = -EUCLEAN;
9541 			} else {
9542 				read_extent_buffer(leaf, &bg,
9543 					btrfs_item_ptr_offset(leaf, slot),
9544 					sizeof(bg));
9545 				flags = btrfs_block_group_flags(&bg) &
9546 					BTRFS_BLOCK_GROUP_TYPE_MASK;
9547 
9548 				if (flags != (em->map_lookup->type &
9549 					      BTRFS_BLOCK_GROUP_TYPE_MASK)) {
9550 					btrfs_err(root->fs_info,
9551 "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
9552 						found_key.objectid,
9553 						found_key.offset, flags,
9554 						(BTRFS_BLOCK_GROUP_TYPE_MASK &
9555 						 em->map_lookup->type));
9556 					ret = -EUCLEAN;
9557 				} else {
9558 					ret = 0;
9559 				}
9560 			}
9561 			free_extent_map(em);
9562 			goto out;
9563 		}
9564 		path->slots[0]++;
9565 	}
9566 out:
9567 	return ret;
9568 }
9569 
9570 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
9571 {
9572 	struct btrfs_block_group_cache *block_group;
9573 	u64 last = 0;
9574 
9575 	while (1) {
9576 		struct inode *inode;
9577 
9578 		block_group = btrfs_lookup_first_block_group(info, last);
9579 		while (block_group) {
9580 			wait_block_group_cache_done(block_group);
9581 			spin_lock(&block_group->lock);
9582 			if (block_group->iref)
9583 				break;
9584 			spin_unlock(&block_group->lock);
9585 			block_group = next_block_group(info->tree_root,
9586 						       block_group);
9587 		}
9588 		if (!block_group) {
9589 			if (last == 0)
9590 				break;
9591 			last = 0;
9592 			continue;
9593 		}
9594 
9595 		inode = block_group->inode;
9596 		block_group->iref = 0;
9597 		block_group->inode = NULL;
9598 		spin_unlock(&block_group->lock);
9599 		iput(inode);
9600 		last = block_group->key.objectid + block_group->key.offset;
9601 		btrfs_put_block_group(block_group);
9602 	}
9603 }
9604 
9605 int btrfs_free_block_groups(struct btrfs_fs_info *info)
9606 {
9607 	struct btrfs_block_group_cache *block_group;
9608 	struct btrfs_space_info *space_info;
9609 	struct btrfs_caching_control *caching_ctl;
9610 	struct rb_node *n;
9611 
9612 	down_write(&info->commit_root_sem);
9613 	while (!list_empty(&info->caching_block_groups)) {
9614 		caching_ctl = list_entry(info->caching_block_groups.next,
9615 					 struct btrfs_caching_control, list);
9616 		list_del(&caching_ctl->list);
9617 		put_caching_control(caching_ctl);
9618 	}
9619 	up_write(&info->commit_root_sem);
9620 
9621 	spin_lock(&info->unused_bgs_lock);
9622 	while (!list_empty(&info->unused_bgs)) {
9623 		block_group = list_first_entry(&info->unused_bgs,
9624 					       struct btrfs_block_group_cache,
9625 					       bg_list);
9626 		list_del_init(&block_group->bg_list);
9627 		btrfs_put_block_group(block_group);
9628 	}
9629 	spin_unlock(&info->unused_bgs_lock);
9630 
9631 	spin_lock(&info->block_group_cache_lock);
9632 	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
9633 		block_group = rb_entry(n, struct btrfs_block_group_cache,
9634 				       cache_node);
9635 		rb_erase(&block_group->cache_node,
9636 			 &info->block_group_cache_tree);
9637 		RB_CLEAR_NODE(&block_group->cache_node);
9638 		spin_unlock(&info->block_group_cache_lock);
9639 
9640 		down_write(&block_group->space_info->groups_sem);
9641 		list_del(&block_group->list);
9642 		up_write(&block_group->space_info->groups_sem);
9643 
9644 		if (block_group->cached == BTRFS_CACHE_STARTED)
9645 			wait_block_group_cache_done(block_group);
9646 
9647 		/*
9648 		 * We haven't cached this block group, which means we could
9649 		 * possibly have excluded extents on this block group.
9650 		 */
9651 		if (block_group->cached == BTRFS_CACHE_NO ||
9652 		    block_group->cached == BTRFS_CACHE_ERROR)
9653 			free_excluded_extents(info->extent_root, block_group);
9654 
9655 		btrfs_remove_free_space_cache(block_group);
9656 		btrfs_put_block_group(block_group);
9657 
9658 		spin_lock(&info->block_group_cache_lock);
9659 	}
9660 	spin_unlock(&info->block_group_cache_lock);
9661 
9662 	/* now that all the block groups are freed, go through and
9663 	 * free all the space_info structs.  This is only called during
9664 	 * the final stages of unmount, and so we know nobody is
9665 	 * using them.  We call synchronize_rcu() once before we start,
9666 	 * just to be on the safe side.
9667 	 */
9668 	synchronize_rcu();
9669 
9670 	release_global_block_rsv(info);
9671 
9672 	while (!list_empty(&info->space_info)) {
9673 		int i;
9674 
9675 		space_info = list_entry(info->space_info.next,
9676 					struct btrfs_space_info,
9677 					list);
9678 		if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
9679 			if (WARN_ON(space_info->bytes_pinned > 0 ||
9680 			    space_info->bytes_reserved > 0 ||
9681 			    space_info->bytes_may_use > 0)) {
9682 				dump_space_info(space_info, 0, 0);
9683 			}
9684 		}
9685 		list_del(&space_info->list);
9686 		for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
9687 			struct kobject *kobj;
9688 			kobj = space_info->block_group_kobjs[i];
9689 			space_info->block_group_kobjs[i] = NULL;
9690 			if (kobj) {
9691 				kobject_del(kobj);
9692 				kobject_put(kobj);
9693 			}
9694 		}
9695 		kobject_del(&space_info->kobj);
9696 		kobject_put(&space_info->kobj);
9697 	}
9698 	return 0;
9699 }
9700 
9701 static void __link_block_group(struct btrfs_space_info *space_info,
9702 			       struct btrfs_block_group_cache *cache)
9703 {
9704 	int index = get_block_group_index(cache);
9705 	bool first = false;
9706 
9707 	down_write(&space_info->groups_sem);
9708 	if (list_empty(&space_info->block_groups[index]))
9709 		first = true;
9710 	list_add_tail(&cache->list, &space_info->block_groups[index]);
9711 	up_write(&space_info->groups_sem);
9712 
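	/*
	 * The first block group of a given raid type in this space_info
	 * also creates the sysfs object for that raid level; later
	 * groups only join the list.
	 */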
9713 	if (first) {
9714 		struct raid_kobject *rkobj;
9715 		int ret;
9716 
9717 		rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
9718 		if (!rkobj)
9719 			goto out_err;
9720 		rkobj->raid_type = index;
9721 		kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
9722 		ret = kobject_add(&rkobj->kobj, &space_info->kobj,
9723 				  "%s", get_raid_name(index));
9724 		if (ret) {
9725 			kobject_put(&rkobj->kobj);
9726 			goto out_err;
9727 		}
9728 		space_info->block_group_kobjs[index] = &rkobj->kobj;
9729 	}
9730 
9731 	return;
9732 out_err:
9733 	pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
9734 }
9735 
9736 static struct btrfs_block_group_cache *
9737 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
9738 {
9739 	struct btrfs_block_group_cache *cache;
9740 
9741 	cache = kzalloc(sizeof(*cache), GFP_NOFS);
9742 	if (!cache)
9743 		return NULL;
9744 
9745 	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
9746 					GFP_NOFS);
9747 	if (!cache->free_space_ctl) {
9748 		kfree(cache);
9749 		return NULL;
9750 	}
9751 
9752 	cache->key.objectid = start;
9753 	cache->key.offset = size;
9754 	cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9755 
9756 	cache->sectorsize = root->sectorsize;
9757 	cache->fs_info = root->fs_info;
9758 	cache->full_stripe_len = btrfs_full_stripe_len(root,
9759 					       &root->fs_info->mapping_tree,
9760 					       start);
9761 	atomic_set(&cache->count, 1);
9762 	spin_lock_init(&cache->lock);
9763 	init_rwsem(&cache->data_rwsem);
9764 	INIT_LIST_HEAD(&cache->list);
9765 	INIT_LIST_HEAD(&cache->cluster_list);
9766 	INIT_LIST_HEAD(&cache->bg_list);
9767 	INIT_LIST_HEAD(&cache->ro_list);
9768 	INIT_LIST_HEAD(&cache->dirty_list);
9769 	INIT_LIST_HEAD(&cache->io_list);
9770 	btrfs_init_free_space_ctl(cache);
9771 	atomic_set(&cache->trimming, 0);
9772 
9773 	return cache;
9774 }
9775 
9776 
9777 /*
9778  * Iterate all chunks and verify that each of them has the corresponding block
9779  * group
9780  */
9781 static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
9782 {
9783 	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
9784 	struct extent_map *em;
9785 	struct btrfs_block_group_cache *bg;
9786 	u64 start = 0;
9787 	int ret = 0;
9788 
9789 	while (1) {
9790 		read_lock(&map_tree->map_tree.lock);
9791 		/*
9792 		 * lookup_extent_mapping will return the first extent map
9793 		 * intersecting the range, so setting @len to 1 is enough to
9794 		 * get the first chunk.
9795 		 */
9796 		em = lookup_extent_mapping(&map_tree->map_tree, start, 1);
9797 		read_unlock(&map_tree->map_tree.lock);
9798 		if (!em)
9799 			break;
9800 
9801 		bg = btrfs_lookup_block_group(fs_info, em->start);
9802 		if (!bg) {
9803 			btrfs_err(fs_info,
9804 	"chunk start=%llu len=%llu doesn't have corresponding block group",
9805 				     em->start, em->len);
9806 			ret = -EUCLEAN;
9807 			free_extent_map(em);
9808 			break;
9809 		}
9810 		if (bg->key.objectid != em->start ||
9811 		    bg->key.offset != em->len ||
9812 		    (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
9813 		    (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
9814 			btrfs_err(fs_info,
9815 "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
9816 				em->start, em->len,
9817 				em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
9818 				bg->key.objectid, bg->key.offset,
9819 				bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
9820 			ret = -EUCLEAN;
9821 			free_extent_map(em);
9822 			btrfs_put_block_group(bg);
9823 			break;
9824 		}
9825 		start = em->start + em->len;
9826 		free_extent_map(em);
9827 		btrfs_put_block_group(bg);
9828 	}
9829 	return ret;
9830 }
9831 
9832 int btrfs_read_block_groups(struct btrfs_root *root)
9833 {
9834 	struct btrfs_path *path;
9835 	int ret;
9836 	struct btrfs_block_group_cache *cache;
9837 	struct btrfs_fs_info *info = root->fs_info;
9838 	struct btrfs_space_info *space_info;
9839 	struct btrfs_key key;
9840 	struct btrfs_key found_key;
9841 	struct extent_buffer *leaf;
9842 	int need_clear = 0;
9843 	u64 cache_gen;
9844 	u64 feature;
9845 	int mixed;
9846 
9847 	feature = btrfs_super_incompat_flags(info->super_copy);
9848 	mixed = !!(feature & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS);
9849 
9850 	root = info->extent_root;
9851 	key.objectid = 0;
9852 	key.offset = 0;
9853 	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9854 	path = btrfs_alloc_path();
9855 	if (!path)
9856 		return -ENOMEM;
9857 	path->reada = 1;
9858 
9859 	cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
9860 	if (btrfs_test_opt(root, SPACE_CACHE) &&
9861 	    btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
9862 		need_clear = 1;
9863 	if (btrfs_test_opt(root, CLEAR_CACHE))
9864 		need_clear = 1;
9865 
9866 	while (1) {
9867 		ret = find_first_block_group(root, path, &key);
9868 		if (ret > 0)
9869 			break;
9870 		if (ret != 0)
9871 			goto error;
9872 
9873 		leaf = path->nodes[0];
9874 		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9875 
9876 		cache = btrfs_create_block_group_cache(root, found_key.objectid,
9877 						       found_key.offset);
9878 		if (!cache) {
9879 			ret = -ENOMEM;
9880 			goto error;
9881 		}
9882 
9883 		if (need_clear) {
9884 			/*
9885 			 * When we mount with old space cache, we need to
9886 			 * set BTRFS_DC_CLEAR and set dirty flag.
9887 			 *
9888 			 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9889 			 *    truncate the old free space cache inode and
9890 			 *    setup a new one.
9891 			 * b) Setting 'dirty flag' makes sure that we flush
9892 			 *    the new space cache info onto disk.
9893 			 */
9894 			if (btrfs_test_opt(root, SPACE_CACHE))
9895 				cache->disk_cache_state = BTRFS_DC_CLEAR;
9896 		}
9897 
9898 		read_extent_buffer(leaf, &cache->item,
9899 				   btrfs_item_ptr_offset(leaf, path->slots[0]),
9900 				   sizeof(cache->item));
9901 		cache->flags = btrfs_block_group_flags(&cache->item);
9902 		if (!mixed &&
9903 		    ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
9904 		    (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
9905 			btrfs_err(info,
9906 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
9907 				  cache->key.objectid);
9908 			btrfs_put_block_group(cache);
9909 			ret = -EINVAL;
9910 			goto error;
9911 		}
9912 
9913 		key.objectid = found_key.objectid + found_key.offset;
9914 		btrfs_release_path(path);
9915 
9916 		/*
9917 		 * We need to exclude the super stripes now so that the space
9918 		 * info has super bytes accounted for, otherwise we'll think
9919 		 * we have more space than we actually do.
9920 		 */
9921 		ret = exclude_super_stripes(root, cache);
9922 		if (ret) {
9923 			/*
9924 			 * We may have excluded something, so call this just in
9925 			 * case.
9926 			 */
9927 			free_excluded_extents(root, cache);
9928 			btrfs_put_block_group(cache);
9929 			goto error;
9930 		}
9931 
9932 		/*
9933 		 * check for two cases, either we are full, and therefore
9934 		 * don't need to bother with the caching work since we won't
9935 		 * find any space, or we are empty, and we can just add all
9936 		 * the space in and be done with it.  This saves us a lot of
9937 		 * time, particularly in the full case.
9938 		 */
		if (found_key.offset == btrfs_block_group_used(&cache->item)) {
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			free_excluded_extents(root, cache);
		} else if (btrfs_block_group_used(&cache->item) == 0) {
			cache->last_byte_to_unpin = (u64)-1;
			cache->cached = BTRFS_CACHE_FINISHED;
			add_new_free_space(cache, root->fs_info,
					   found_key.objectid,
					   found_key.objectid +
					   found_key.offset);
			free_excluded_extents(root, cache);
		}

		ret = btrfs_add_block_group_cache(root->fs_info, cache);
		if (ret) {
			btrfs_remove_free_space_cache(cache);
			btrfs_put_block_group(cache);
			goto error;
		}

		ret = update_space_info(info, cache->flags, found_key.offset,
					btrfs_block_group_used(&cache->item),
					&space_info);
		if (ret) {
			btrfs_remove_free_space_cache(cache);
			spin_lock(&info->block_group_cache_lock);
			rb_erase(&cache->cache_node,
				 &info->block_group_cache_tree);
			RB_CLEAR_NODE(&cache->cache_node);
			spin_unlock(&info->block_group_cache_lock);
			btrfs_put_block_group(cache);
			goto error;
		}

		cache->space_info = space_info;
		spin_lock(&cache->space_info->lock);
		cache->space_info->bytes_readonly += cache->bytes_super;
		spin_unlock(&cache->space_info->lock);

		__link_block_group(space_info, cache);

		set_avail_alloc_bits(root->fs_info, cache->flags);
		if (btrfs_chunk_readonly(root, cache->key.objectid)) {
			inc_block_group_ro(cache, 1);
		} else if (btrfs_block_group_used(&cache->item) == 0) {
			spin_lock(&info->unused_bgs_lock);
			/* Should always be true but just in case. */
			if (list_empty(&cache->bg_list)) {
				btrfs_get_block_group(cache);
				list_add_tail(&cache->bg_list,
					      &info->unused_bgs);
			}
			spin_unlock(&info->unused_bgs_lock);
		}
	}

	list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
		if (!(get_alloc_profile(root, space_info->flags) &
		      (BTRFS_BLOCK_GROUP_RAID10 |
		       BTRFS_BLOCK_GROUP_RAID1 |
		       BTRFS_BLOCK_GROUP_RAID5 |
		       BTRFS_BLOCK_GROUP_RAID6 |
		       BTRFS_BLOCK_GROUP_DUP)))
			continue;
		/*
		 * Avoid allocating from un-mirrored block groups if there
		 * are mirrored block groups.
		 */
		list_for_each_entry(cache,
				&space_info->block_groups[BTRFS_RAID_RAID0],
				list)
			inc_block_group_ro(cache, 1);
		list_for_each_entry(cache,
				&space_info->block_groups[BTRFS_RAID_SINGLE],
				list)
			inc_block_group_ro(cache, 1);
	}

	init_global_block_rsv(info);
	ret = check_chunk_block_group_mappings(info);
error:
	btrfs_free_path(path);
	return ret;
}

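/*
 * Insert the block group items for all block groups created in the current
 * transaction: walk trans->new_bgs, write each group's item into the extent
 * tree and finish the corresponding chunk allocation.  can_flush_pending_bgs
 * is cleared for the duration of the loop and restored afterwards.
 */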
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct btrfs_block_group_item item;
	struct btrfs_key key;
	int ret = 0;
	bool can_flush_pending_bgs = trans->can_flush_pending_bgs;

	trans->can_flush_pending_bgs = false;
	while (!list_empty(&trans->new_bgs)) {
		block_group = list_first_entry(&trans->new_bgs,
					       struct btrfs_block_group_cache,
					       bg_list);
		if (ret)
			goto next;

		spin_lock(&block_group->lock);
		memcpy(&item, &block_group->item, sizeof(item));
		memcpy(&key, &block_group->key, sizeof(key));
		spin_unlock(&block_group->lock);

		ret = btrfs_insert_item(trans, extent_root, &key, &item,
					sizeof(item));
		if (ret)
			btrfs_abort_transaction(trans, extent_root, ret);
		ret = btrfs_finish_chunk_alloc(trans, extent_root,
					       key.objectid, key.offset);
		if (ret)
			btrfs_abort_transaction(trans, extent_root, ret);
next:
		list_del_init(&block_group->bg_list);
	}
	trans->can_flush_pending_bgs = can_flush_pending_bgs;
}

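/*
 * Create a new block group cache object for the chunk at [chunk_offset,
 * chunk_offset + size), wire it into its space info and the block group
 * rbtree, and queue it on trans->new_bgs so its item gets inserted by
 * btrfs_create_pending_block_groups() later in the transaction.
 */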
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;

	extent_root = root->fs_info->extent_root;

	btrfs_set_log_full_commit(root->fs_info, trans);

	cache = btrfs_create_block_group_cache(root, chunk_offset, size);
	if (!cache)
		return -ENOMEM;

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	btrfs_set_block_group_flags(&cache->item, type);

	cache->flags = type;
	cache->last_byte_to_unpin = (u64)-1;
	cache->cached = BTRFS_CACHE_FINISHED;
	ret = exclude_super_stripes(root, cache);
	if (ret) {
		/*
		 * We may have excluded something, so call this just in
		 * case.
		 */
		free_excluded_extents(root, cache);
		btrfs_put_block_group(cache);
		return ret;
	}

	add_new_free_space(cache, root->fs_info, chunk_offset,
			   chunk_offset + size);

	free_excluded_extents(root, cache);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(root, cache)) {
		u64 new_bytes_used = size - bytes_used;

		bytes_used += new_bytes_used >> 1;
		fragment_free_space(root, cache);
	}
#endif
	/*
	 * Call to ensure the corresponding space_info object is created and
	 * assigned to our block group, but don't update its counters just yet.
	 * We want our bg to be added to the rbtree with its ->space_info set.
	 */
	ret = update_space_info(root->fs_info, cache->flags, 0, 0,
				&cache->space_info);
	if (ret) {
		btrfs_remove_free_space_cache(cache);
		btrfs_put_block_group(cache);
		return ret;
	}

	ret = btrfs_add_block_group_cache(root->fs_info, cache);
	if (ret) {
		btrfs_remove_free_space_cache(cache);
		btrfs_put_block_group(cache);
		return ret;
	}

	/*
	 * Now that our block group has its ->space_info set and is inserted in
	 * the rbtree, update the space info's counters.
	 */
	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);
	if (ret) {
		btrfs_remove_free_space_cache(cache);
		spin_lock(&root->fs_info->block_group_cache_lock);
		rb_erase(&cache->cache_node,
			 &root->fs_info->block_group_cache_tree);
		RB_CLEAR_NODE(&cache->cache_node);
		spin_unlock(&root->fs_info->block_group_cache_lock);
		btrfs_put_block_group(cache);
		return ret;
	}
	update_global_block_rsv(root->fs_info);

	spin_lock(&cache->space_info->lock);
	cache->space_info->bytes_readonly += cache->bytes_super;
	spin_unlock(&cache->space_info->lock);

	__link_block_group(cache->space_info, cache);

	list_add_tail(&cache->bg_list, &trans->new_bgs);

	set_avail_alloc_bits(extent_root->fs_info, type);

	return 0;
}

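/*
 * Drop the extended profile bits for @flags from the per-type availability
 * masks, so the allocator stops considering that profile available once the
 * last block group of the given kind is gone.
 */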
static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
{
	u64 extra_flags = chunk_to_extended(flags) &
				BTRFS_EXTENDED_PROFILE_MASK;

	write_seqlock(&fs_info->profiles_lock);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		fs_info->avail_data_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_METADATA)
		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		fs_info->avail_system_alloc_bits &= ~extra_flags;
	write_sequnlock(&fs_info->profiles_lock);
}

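/*
 * Remove the (read-only, empty) block group starting at @group_start: delete
 * its free space cache inode, unlink it from all lists and trees, update the
 * space info counters, and finally delete its item from the extent tree.
 * The caller must already hold a transaction.
 */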
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 group_start,
			     struct extent_map *em)
{
	struct btrfs_path *path;
	struct btrfs_block_group_cache *block_group;
	struct btrfs_free_cluster *cluster;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_key key;
	struct inode *inode;
	struct kobject *kobj = NULL;
	int ret;
	int index;
	int factor;
	struct btrfs_caching_control *caching_ctl = NULL;
	bool remove_em;

	root = root->fs_info->extent_root;

	block_group = btrfs_lookup_block_group(root->fs_info, group_start);
	BUG_ON(!block_group);
	BUG_ON(!block_group->ro);

	/*
	 * Free the reserved super bytes from this block group before
	 * removing it.
	 */
	free_excluded_extents(root, block_group);

	memcpy(&key, &block_group->key, sizeof(key));
	index = get_block_group_index(block_group);
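	/*
	 * DUP, RAID1 and RAID10 keep two copies of every block, so the
	 * on-disk footprint of this group is twice its logical size.
	 */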
	if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
				  BTRFS_BLOCK_GROUP_RAID1 |
				  BTRFS_BLOCK_GROUP_RAID10))
		factor = 2;
	else
		factor = 1;

	/* make sure this block group isn't part of an allocation cluster */
	cluster = &root->fs_info->data_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	/*
	 * make sure this block group isn't part of a metadata
	 * allocation cluster
	 */
	cluster = &root->fs_info->meta_alloc_cluster;
	spin_lock(&cluster->refill_lock);
	btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&cluster->refill_lock);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * get the inode first so any iput calls done for the io_list
	 * aren't the final iput (no unlinks allowed now)
	 */
	inode = lookup_free_space_inode(tree_root, block_group, path);

	mutex_lock(&trans->transaction->cache_write_mutex);
	/*
	 * make sure our free space cache IO is done before removing the
	 * free space inode
	 */
	spin_lock(&trans->transaction->dirty_bgs_lock);
	if (!list_empty(&block_group->io_list)) {
		list_del_init(&block_group->io_list);

		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);

		spin_unlock(&trans->transaction->dirty_bgs_lock);
		btrfs_wait_cache_io(root, trans, block_group,
				    &block_group->io_ctl, path,
				    block_group->key.objectid);
		btrfs_put_block_group(block_group);
		spin_lock(&trans->transaction->dirty_bgs_lock);
	}

	if (!list_empty(&block_group->dirty_list)) {
		list_del_init(&block_group->dirty_list);
		btrfs_put_block_group(block_group);
	}
	spin_unlock(&trans->transaction->dirty_bgs_lock);
	mutex_unlock(&trans->transaction->cache_write_mutex);

	if (!IS_ERR(inode)) {
		ret = btrfs_orphan_add(trans, inode);
		if (ret) {
			btrfs_add_delayed_iput(inode);
			goto out;
		}
		clear_nlink(inode);
		/* One for the block groups ref */
		spin_lock(&block_group->lock);
		if (block_group->iref) {
			block_group->iref = 0;
			block_group->inode = NULL;
			spin_unlock(&block_group->lock);
			iput(inode);
		} else {
			spin_unlock(&block_group->lock);
		}
		/* One for our lookup ref */
		btrfs_add_delayed_iput(inode);
	}

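	/*
	 * The free space cache item for this group lives in the tree of tree
	 * roots, keyed as (BTRFS_FREE_SPACE_OBJECTID, 0, group start).
	 */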
	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = block_group->key.objectid;
	key.type = 0;

	ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0)
		btrfs_release_path(path);
	if (ret == 0) {
		ret = btrfs_del_item(trans, tree_root, path);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	spin_lock(&root->fs_info->block_group_cache_lock);
	rb_erase(&block_group->cache_node,
		 &root->fs_info->block_group_cache_tree);
	RB_CLEAR_NODE(&block_group->cache_node);

	if (root->fs_info->first_logical_byte == block_group->key.objectid)
		root->fs_info->first_logical_byte = (u64)-1;
	spin_unlock(&root->fs_info->block_group_cache_lock);

	down_write(&block_group->space_info->groups_sem);
	/*
	 * we must use list_del_init so people can check to see if they
	 * are still on the list after taking the semaphore
	 */
	list_del_init(&block_group->list);
	if (list_empty(&block_group->space_info->block_groups[index])) {
		kobj = block_group->space_info->block_group_kobjs[index];
		block_group->space_info->block_group_kobjs[index] = NULL;
		clear_avail_alloc_bits(root->fs_info, block_group->flags);
	}
	up_write(&block_group->space_info->groups_sem);
	if (kobj) {
		kobject_del(kobj);
		kobject_put(kobj);
	}

	if (block_group->has_caching_ctl)
		caching_ctl = get_caching_control(block_group);
	if (block_group->cached == BTRFS_CACHE_STARTED)
		wait_block_group_cache_done(block_group);
	if (block_group->has_caching_ctl) {
		down_write(&root->fs_info->commit_root_sem);
		if (!caching_ctl) {
			struct btrfs_caching_control *ctl;

			list_for_each_entry(ctl,
				    &root->fs_info->caching_block_groups, list)
				if (ctl->block_group == block_group) {
					caching_ctl = ctl;
					atomic_inc(&caching_ctl->count);
					break;
				}
		}
		if (caching_ctl)
			list_del_init(&caching_ctl->list);
		up_write(&root->fs_info->commit_root_sem);
		if (caching_ctl) {
			/* Once for the caching bgs list and once for us. */
			put_caching_control(caching_ctl);
			put_caching_control(caching_ctl);
		}
	}

	spin_lock(&trans->transaction->dirty_bgs_lock);
	WARN_ON(!list_empty(&block_group->dirty_list));
	WARN_ON(!list_empty(&block_group->io_list));
	spin_unlock(&trans->transaction->dirty_bgs_lock);
	btrfs_remove_free_space_cache(block_group);

	spin_lock(&block_group->space_info->lock);
	list_del_init(&block_group->ro_list);

	if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
		WARN_ON(block_group->space_info->total_bytes
			< block_group->key.offset);
		WARN_ON(block_group->space_info->bytes_readonly
			< block_group->key.offset);
		WARN_ON(block_group->space_info->disk_total
			< block_group->key.offset * factor);
	}
	block_group->space_info->total_bytes -= block_group->key.offset;
	block_group->space_info->bytes_readonly -= block_group->key.offset;
	block_group->space_info->disk_total -= block_group->key.offset * factor;

	spin_unlock(&block_group->space_info->lock);

	memcpy(&key, &block_group->key, sizeof(key));

	lock_chunks(root);
	if (!list_empty(&em->list)) {
		/* We're in the transaction->pending_chunks list. */
		free_extent_map(em);
	}
	spin_lock(&block_group->lock);
	block_group->removed = 1;
	/*
	 * At this point trimming can't start on this block group, because we
	 * removed the block group from the tree fs_info->block_group_cache_tree
	 * so no one can find it anymore, and even if someone already got this
	 * block group before we removed it from the rbtree, they have already
	 * incremented block_group->trimming - if they didn't, they won't find
	 * any free space entries because we already removed them all when we
	 * called btrfs_remove_free_space_cache().
	 *
	 * And we must not remove the extent map from the fs_info->mapping_tree
	 * to prevent the same logical address range and physical device space
	 * ranges from being reused for a new block group. This is because our
	 * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
	 * completely transactionless, so while it is trimming a range the
	 * currently running transaction might finish and a new one start,
	 * allowing for new block groups to be created that can reuse the same
	 * physical device locations unless we take this special care.
	 *
	 * There may also be an implicit trim operation if the file system
	 * is mounted with -odiscard. The same protections must remain
	 * in place until the extents have been discarded completely when
	 * the transaction commit has completed.
	 */
	remove_em = (atomic_read(&block_group->trimming) == 0);
	/*
	 * Make sure a trimmer task always sees the em in the pinned_chunks list
	 * if it sees block_group->removed == 1 (needs to lock block_group->lock
	 * before checking block_group->removed).
	 */
	if (!remove_em) {
		/*
		 * Our em might be in trans->transaction->pending_chunks which
		 * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
		 * and so is the fs_info->pinned_chunks list.
		 *
		 * So at this point we must be holding the chunk_mutex to avoid
		 * any races with chunk allocation (more specifically at
		 * volumes.c:contains_pending_extent()), to ensure it always
		 * sees the em, either in the pending_chunks list or in the
		 * pinned_chunks list.
		 */
		list_move_tail(&em->list, &root->fs_info->pinned_chunks);
	}
	spin_unlock(&block_group->lock);

	if (remove_em) {
		struct extent_map_tree *em_tree;

		em_tree = &root->fs_info->mapping_tree.map_tree;
		write_lock(&em_tree->lock);
		/*
		 * The em might be in the pending_chunks list, so make sure the
		 * chunk mutex is locked, since remove_extent_mapping() will
		 * delete us from that list.
		 */
		remove_extent_mapping(em_tree, em);
		write_unlock(&em_tree->lock);
		/* once for the tree */
		free_extent_map(em);
	}

	unlock_chunks(root);

	btrfs_put_block_group(block_group);
	btrfs_put_block_group(block_group);

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0)
		goto out;

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	return ret;
}

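/*
 * Start a transaction with enough metadata units reserved (3 plus one per
 * stripe; see the reservation breakdown in the comment below) to remove the
 * block group for the chunk at @chunk_offset.
 */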
struct btrfs_trans_handle *
btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
				     const u64 chunk_offset)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	unsigned int num_items;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);
	ASSERT(em && em->start == chunk_offset);

	/*
	 * We need to reserve 3 + N units from the metadata space info in order
	 * to remove a block group (done at btrfs_remove_chunk() and at
	 * btrfs_remove_block_group()), which are used for:
	 *
	 * 1 unit for adding the free space inode's orphan (located in the tree
	 * of tree roots).
	 * 1 unit for deleting the block group item (located in the extent
	 * tree).
	 * 1 unit for deleting the free space item (located in tree of tree
	 * roots).
	 * N units for deleting N device extent items corresponding to each
	 * stripe (located in the device tree).
	 *
	 * In order to remove a block group we also need to reserve units in the
	 * system space info in order to update the chunk tree (update one or
	 * more device items and remove one chunk item), but this is done at
	 * btrfs_remove_chunk() through a call to check_system_chunk().
	 */
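	/* e.g. a two-stripe RAID1 chunk needs 3 + 2 = 5 metadata units */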
	map = em->map_lookup;
	num_items = 3 + map->num_stripes;
	free_extent_map(em);

	return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
							   num_items, 1);
}

/*
 * Process the unused_bgs list and remove any that don't have any allocated
 * space inside of them.
 */
void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_space_info *space_info;
	struct btrfs_root *root = fs_info->extent_root;
	struct btrfs_trans_handle *trans;
	int ret = 0;

	if (!fs_info->open)
		return;

	spin_lock(&fs_info->unused_bgs_lock);
	while (!list_empty(&fs_info->unused_bgs)) {
		u64 start, end;
		int trimming;

		block_group = list_first_entry(&fs_info->unused_bgs,
					       struct btrfs_block_group_cache,
					       bg_list);
		list_del_init(&block_group->bg_list);

		space_info = block_group->space_info;

		if (ret || btrfs_mixed_space_info(space_info)) {
			btrfs_put_block_group(block_group);
			continue;
		}
		spin_unlock(&fs_info->unused_bgs_lock);

		mutex_lock(&fs_info->delete_unused_bgs_mutex);

		/* Don't want to race with allocators so take the groups_sem */
		down_write(&space_info->groups_sem);
		spin_lock(&block_group->lock);
		if (block_group->reserved || block_group->pinned ||
		    btrfs_block_group_used(&block_group->item) ||
		    block_group->ro ||
		    list_is_singular(&block_group->list)) {
			/*
			 * We want to bail if we made new allocations or have
			 * outstanding allocations in this block group.  We do
			 * the ro check in case balance is currently acting on
			 * this block group.
			 */
			spin_unlock(&block_group->lock);
			up_write(&space_info->groups_sem);
			goto next;
		}
		spin_unlock(&block_group->lock);

		/* We don't want to force the issue, only flip if it's ok. */
		ret = inc_block_group_ro(block_group, 0);
		up_write(&space_info->groups_sem);
		if (ret < 0) {
			ret = 0;
			goto next;
		}

		/*
		 * Want to do this before we do anything else so we can recover
		 * properly if we fail to join the transaction.
		 */
		trans = btrfs_start_trans_remove_block_group(fs_info,
						     block_group->key.objectid);
		if (IS_ERR(trans)) {
			btrfs_dec_block_group_ro(root, block_group);
			ret = PTR_ERR(trans);
			goto next;
		}

		/*
		 * We could have pending pinned extents for this block group,
		 * just delete them, we don't care about them anymore.
		 */
		start = block_group->key.objectid;
		end = start + block_group->key.offset - 1;
		/*
		 * Hold the unused_bg_unpin_mutex lock to avoid racing with
		 * btrfs_finish_extent_commit(). If we are at transaction N,
		 * another task might be running finish_extent_commit() for the
		 * previous transaction N - 1, and have seen a range belonging
		 * to the block group in freed_extents[] before we were able to
		 * clear the whole block group range from freed_extents[]. This
		 * means that task can look up the block group after we
		 * unpinned it from freed_extents[] and removed it, leading to
		 * a BUG_ON() at btrfs_unpin_extent_range().
		 */
		mutex_lock(&fs_info->unused_bg_unpin_mutex);
		ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
				  EXTENT_DIRTY, GFP_NOFS);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			btrfs_dec_block_group_ro(root, block_group);
			goto end_trans;
		}
		ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
				  EXTENT_DIRTY, GFP_NOFS);
		if (ret) {
			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
			btrfs_dec_block_group_ro(root, block_group);
			goto end_trans;
		}
		mutex_unlock(&fs_info->unused_bg_unpin_mutex);

		/* Reset pinned so btrfs_put_block_group doesn't complain */
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);

		space_info->bytes_pinned -= block_group->pinned;
		space_info->bytes_readonly += block_group->pinned;
		percpu_counter_add(&space_info->total_bytes_pinned,
				   -block_group->pinned);
		block_group->pinned = 0;

		spin_unlock(&block_group->lock);
		spin_unlock(&space_info->lock);

		/* DISCARD can flip during remount */
		trimming = btrfs_test_opt(root, DISCARD);

		/* Implicit trim during transaction commit. */
		if (trimming)
			btrfs_get_block_group_trimming(block_group);

		/*
		 * btrfs_remove_chunk() will abort the transaction if things go
		 * horribly wrong.
		 */
		ret = btrfs_remove_chunk(trans, root,
					 block_group->key.objectid);

		if (ret) {
			if (trimming)
				btrfs_put_block_group_trimming(block_group);
			goto end_trans;
		}

		/*
		 * If we're not mounted with -odiscard, we can just forget
		 * about this block group. Otherwise we'll need to wait
		 * until transaction commit to do the actual discard.
		 */
		if (trimming) {
			spin_lock(&fs_info->unused_bgs_lock);
			/*
			 * A concurrent scrub might have added us to the list
			 * fs_info->unused_bgs, so use a list_move operation
			 * to add the block group to the deleted_bgs list.
			 */
			list_move(&block_group->bg_list,
				  &trans->transaction->deleted_bgs);
			spin_unlock(&fs_info->unused_bgs_lock);
			btrfs_get_block_group(block_group);
		}
end_trans:
		btrfs_end_transaction(trans, root);
next:
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		btrfs_put_block_group(block_group);
		spin_lock(&fs_info->unused_bgs_lock);
	}
	spin_unlock(&fs_info->unused_bgs_lock);
}
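
/*
 * Illustrative (hypothetical) caller: the cleaner kthread is expected to
 * invoke this periodically once no other cleanup work is pending, e.g.:
 *
 *	btrfs_delete_unused_bgs(root->fs_info);
 */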
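/*
 * Create the initial space infos at mount time: SYSTEM, plus either a
 * combined METADATA|DATA space info on mixed filesystems or separate
 * METADATA and DATA ones.
 */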
int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_space_info *space_info;
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return -EINVAL;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = update_space_info(fs_info, flags, 0, 0, &space_info);
	}
out:
	return ret;
}

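/*
 * Error-path variant of unpinning: the last argument (false here) skips
 * returning the range to the free space cache.
 */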
int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
{
	return unpin_extent_range(root, start, end, false);
}

/*
 * It used to be that old block groups would be left around forever.
 * Iterating over them would be enough to trim unused space.  Since we
 * now automatically remove them, we also need to iterate over unallocated
 * space.
 *
 * We don't want a transaction for this since the discard may take a
 * substantial amount of time.  We don't require that a transaction be
 * running, but we do need to take a running transaction into account
 * to ensure that we're not discarding chunks that were released in
 * the current transaction.
 *
 * Holding the chunks lock will prevent other threads from allocating
 * or releasing chunks, but it won't prevent a running transaction
 * from committing and releasing the memory that the pending chunks
 * list head uses.  For that, we need to take a reference to the
 * transaction.
 */
static int btrfs_trim_free_extents(struct btrfs_device *device,
				   struct fstrim_range *range, u64 *trimmed)
{
	u64 start = range->start, len = 0;
	int ret;

	*trimmed = 0;

	/* Discard not supported = nothing to do. */
	if (!blk_queue_discard(bdev_get_queue(device->bdev)))
		return 0;

	/* Not writeable = nothing to do. */
	if (!device->writeable)
		return 0;

	/* No free space = nothing to do. */
	if (device->total_bytes <= device->bytes_used)
		return 0;

	ret = 0;

	while (1) {
		struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
		struct btrfs_transaction *trans;
		u64 bytes;

		ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
		if (ret)
			return ret;

		down_read(&fs_info->commit_root_sem);

		spin_lock(&fs_info->trans_lock);
		trans = fs_info->running_transaction;
		if (trans)
			atomic_inc(&trans->use_count);
		spin_unlock(&fs_info->trans_lock);

		ret = find_free_dev_extent_start(trans, device, range->minlen,
						 start, &start, &len);
		if (trans)
			btrfs_put_transaction(trans);

		if (ret) {
			up_read(&fs_info->commit_root_sem);
			mutex_unlock(&fs_info->chunk_mutex);
			if (ret == -ENOSPC)
				ret = 0;
			break;
		}

		/* If we are out of the passed range, break */
		if (start > range->start + range->len - 1) {
			up_read(&fs_info->commit_root_sem);
			mutex_unlock(&fs_info->chunk_mutex);
			ret = 0;
			break;
		}

		start = max(range->start, start);
		len = min(range->len, len);

		ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
		up_read(&fs_info->commit_root_sem);
		mutex_unlock(&fs_info->chunk_mutex);

		if (ret)
			break;

		start += len;
		*trimmed += bytes;

		/* We've trimmed enough */
		if (*trimmed >= range->len)
			break;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}

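/*
 * Trim the filesystem: walk all block groups overlapping the requested range
 * and trim their free space, then trim the unallocated space on every device.
 * On return, range->len is set to the number of bytes actually trimmed.
 *
 * Hypothetical caller sketch (the FITRIM ioctl path is expected to look
 * roughly like this; everything outside this file is an assumption):
 *
 *	struct fstrim_range range;
 *
 *	if (copy_from_user(&range, arg, sizeof(range)))
 *		return -EFAULT;
 *	ret = btrfs_trim_fs(root, &range);
 *	if (ret)
 *		return ret;
 *	if (copy_to_user(arg, &range, sizeof(range)))
 *		return -EFAULT;
 */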
int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_group_cache *cache = NULL;
	struct btrfs_device *device;
	struct list_head *devices;
	u64 group_trimmed;
	u64 start;
	u64 end;
	u64 trimmed = 0;
	int ret = 0;

	cache = btrfs_lookup_first_block_group(fs_info, range->start);
	while (cache) {
		if (cache->key.objectid >= (range->start + range->len)) {
			btrfs_put_block_group(cache);
			break;
		}

		start = max(range->start, cache->key.objectid);
		end = min(range->start + range->len,
				cache->key.objectid + cache->key.offset);

		if (end - start >= range->minlen) {
			if (!block_group_cache_done(cache)) {
				ret = cache_block_group(cache, 0);
				if (ret) {
					btrfs_put_block_group(cache);
					break;
				}
				ret = wait_block_group_cache_done(cache);
				if (ret) {
					btrfs_put_block_group(cache);
					break;
				}
			}
			ret = btrfs_trim_block_group(cache,
						     &group_trimmed,
						     start,
						     end,
						     range->minlen);

			trimmed += group_trimmed;
			if (ret) {
				btrfs_put_block_group(cache);
				break;
			}
		}

		cache = next_block_group(fs_info->tree_root, cache);
	}

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	devices = &root->fs_info->fs_devices->devices;
	list_for_each_entry(device, devices, dev_list) {
		ret = btrfs_trim_free_extents(device, range, &group_trimmed);
		if (ret)
			break;

		trimmed += group_trimmed;
	}
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);

	range->len = trimmed;
	return ret;
}

/*
 * btrfs_{start,end}_write_no_snapshoting() are similar to
 * mnt_{want,drop}_write(): they are used to prevent some tasks from writing
 * data into the page cache through nocow before the subvolume is snapshotted,
 * but flush the data onto disk after the snapshot creation, or to prevent
 * operations while snapshotting is ongoing that would make the snapshot
 * inconsistent (writes followed by expanding truncates for example).
 */
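/*
 * Illustrative usage in a nocow write path (a sketch, not a verbatim caller
 * from this tree):
 *
 *	if (btrfs_start_write_no_snapshoting(root)) {
 *		... perform the nocow write ...
 *		btrfs_end_write_no_snapshoting(root);
 *	} else {
 *		... fall back to the cow path ...
 *	}
 */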
void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
{
	percpu_counter_dec(&root->subv_writers->counter);
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(&root->subv_writers->wait))
		wake_up(&root->subv_writers->wait);
}

int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
{
	if (atomic_read(&root->will_be_snapshoted))
		return 0;

	percpu_counter_inc(&root->subv_writers->counter);
	/*
	 * Make sure counter is updated before we check for snapshot creation.
	 */
	smp_mb();
	if (atomic_read(&root->will_be_snapshoted)) {
		btrfs_end_write_no_snapshoting(root);
		return 0;
	}
	return 1;
}