1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include <linux/list_sort.h>
4 #include "misc.h"
5 #include "ctree.h"
6 #include "block-group.h"
7 #include "space-info.h"
8 #include "disk-io.h"
9 #include "free-space-cache.h"
10 #include "free-space-tree.h"
11 #include "volumes.h"
12 #include "transaction.h"
13 #include "ref-verify.h"
14 #include "sysfs.h"
15 #include "tree-log.h"
16 #include "delalloc-space.h"
17 #include "discard.h"
18 #include "raid56.h"
19 #include "zoned.h"
20 
21 /*
22  * Return target flags in extended format or 0 if restripe for this chunk_type
23  * is not in progress
24  *
25  * Should be called with balance_lock held
26  */
27 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
28 {
29 	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
30 	u64 target = 0;
31 
32 	if (!bctl)
33 		return 0;
34 
35 	if (flags & BTRFS_BLOCK_GROUP_DATA &&
36 	    bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
37 		target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
38 	} else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
39 		   bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
40 		target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
41 	} else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
42 		   bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
43 		target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
44 	}
45 
46 	return target;
47 }
48 
49 /*
50  * @flags: available profiles in extended format (see ctree.h)
51  *
52  * Return the reduced profile in chunk format.  If a profile change is in
53  * progress (either running or paused), pick the target profile (if it's
54  * already available), otherwise fall back to plain reducing.
55  */
56 static u64 btrfs_reduce_alloc_profile(struct btrfs_fs_info *fs_info, u64 flags)
57 {
58 	u64 num_devices = fs_info->fs_devices->rw_devices;
59 	u64 target;
60 	u64 raid_type;
61 	u64 allowed = 0;
62 
63 	/*
64 	 * See if restripe for this chunk_type is in progress, if so try to
65 	 * reduce to the target profile
66 	 */
67 	spin_lock(&fs_info->balance_lock);
68 	target = get_restripe_target(fs_info, flags);
69 	if (target) {
70 		spin_unlock(&fs_info->balance_lock);
71 		return extended_to_chunk(target);
72 	}
73 	spin_unlock(&fs_info->balance_lock);
74 
75 	/* First, mask out the RAID levels which aren't possible */
76 	for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
77 		if (num_devices >= btrfs_raid_array[raid_type].devs_min)
78 			allowed |= btrfs_raid_array[raid_type].bg_flag;
79 	}
80 	allowed &= flags;
81 
82 	/* Select the highest-redundancy RAID level. */
83 	if (allowed & BTRFS_BLOCK_GROUP_RAID1C4)
84 		allowed = BTRFS_BLOCK_GROUP_RAID1C4;
85 	else if (allowed & BTRFS_BLOCK_GROUP_RAID6)
86 		allowed = BTRFS_BLOCK_GROUP_RAID6;
87 	else if (allowed & BTRFS_BLOCK_GROUP_RAID1C3)
88 		allowed = BTRFS_BLOCK_GROUP_RAID1C3;
89 	else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
90 		allowed = BTRFS_BLOCK_GROUP_RAID5;
91 	else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
92 		allowed = BTRFS_BLOCK_GROUP_RAID10;
93 	else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
94 		allowed = BTRFS_BLOCK_GROUP_RAID1;
95 	else if (allowed & BTRFS_BLOCK_GROUP_DUP)
96 		allowed = BTRFS_BLOCK_GROUP_DUP;
97 	else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
98 		allowed = BTRFS_BLOCK_GROUP_RAID0;
99 
100 	flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
101 
102 	return extended_to_chunk(flags | allowed);
103 }
104 
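/*
 * Return the allocation profile to use for the block group type selected by
 * @orig_flags (data, metadata or system), based on the currently available
 * profile bits.  The seqlock loop retries the read if a concurrent profile
 * update races with us.
 */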
105 u64 btrfs_get_alloc_profile(struct btrfs_fs_info *fs_info, u64 orig_flags)
106 {
107 	unsigned seq;
108 	u64 flags;
109 
110 	do {
111 		flags = orig_flags;
112 		seq = read_seqbegin(&fs_info->profiles_lock);
113 
114 		if (flags & BTRFS_BLOCK_GROUP_DATA)
115 			flags |= fs_info->avail_data_alloc_bits;
116 		else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
117 			flags |= fs_info->avail_system_alloc_bits;
118 		else if (flags & BTRFS_BLOCK_GROUP_METADATA)
119 			flags |= fs_info->avail_metadata_alloc_bits;
120 	} while (read_seqretry(&fs_info->profiles_lock, seq));
121 
122 	return btrfs_reduce_alloc_profile(fs_info, flags);
123 }
124 
125 void btrfs_get_block_group(struct btrfs_block_group *cache)
126 {
127 	refcount_inc(&cache->refs);
128 }
129 
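/*
 * Drop a reference on a block group.  When the last reference is dropped,
 * the block group must not have any pinned or reserved bytes left (except
 * for the log cleanup error case noted below) and it is freed together with
 * its free space control structure and physical map.
 */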
130 void btrfs_put_block_group(struct btrfs_block_group *cache)
131 {
132 	if (refcount_dec_and_test(&cache->refs)) {
133 		WARN_ON(cache->pinned > 0);
134 		/*
135 		 * If there was a failure to cleanup a log tree, very likely due
136 		 * to an IO failure on a writeback attempt of one or more of its
137 		 * extent buffers, we could not do proper (and cheap) unaccounting
138 		 * of their reserved space, so don't warn on reserved > 0 in that
139 		 * case.
140 		 */
141 		if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) ||
142 		    !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info))
143 			WARN_ON(cache->reserved > 0);
144 
145 		/*
146 		 * A block_group shouldn't be on the discard_list anymore.
147 		 * Remove the block_group from the discard_list to prevent us
148 		 * from causing a panic due to NULL pointer dereference.
149 		 */
150 		if (WARN_ON(!list_empty(&cache->discard_list)))
151 			btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
152 						  cache);
153 
154 		/*
155 		 * If the tree is not empty, someone is still holding the
156 		 * full_stripe_lock mutex, which can only be released by its
157 		 * caller, and that caller will hit a use-after-free when it
158 		 * tries to release the full stripe lock.
159 		 *
160 		 * There is no better way to resolve this, so just warn.
161 		 */
162 		WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
163 		kfree(cache->free_space_ctl);
164 		kfree(cache->physical_map);
165 		kfree(cache);
166 	}
167 }
168 
169 /*
170  * This adds the block group to the fs_info rb tree for the block group cache
171  */
172 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
173 				       struct btrfs_block_group *block_group)
174 {
175 	struct rb_node **p;
176 	struct rb_node *parent = NULL;
177 	struct btrfs_block_group *cache;
178 	bool leftmost = true;
179 
180 	ASSERT(block_group->length != 0);
181 
182 	write_lock(&info->block_group_cache_lock);
183 	p = &info->block_group_cache_tree.rb_root.rb_node;
184 
185 	while (*p) {
186 		parent = *p;
187 		cache = rb_entry(parent, struct btrfs_block_group, cache_node);
188 		if (block_group->start < cache->start) {
189 			p = &(*p)->rb_left;
190 		} else if (block_group->start > cache->start) {
191 			p = &(*p)->rb_right;
192 			leftmost = false;
193 		} else {
194 			write_unlock(&info->block_group_cache_lock);
195 			return -EEXIST;
196 		}
197 	}
198 
199 	rb_link_node(&block_group->cache_node, parent, p);
200 	rb_insert_color_cached(&block_group->cache_node,
201 			       &info->block_group_cache_tree, leftmost);
202 
203 	write_unlock(&info->block_group_cache_lock);
204 
205 	return 0;
206 }
207 
208 /*
209  * This will return the block group at or after bytenr if contains is 0, else
210  * it will return the block group that contains the bytenr
211  */
212 static struct btrfs_block_group *block_group_cache_tree_search(
213 		struct btrfs_fs_info *info, u64 bytenr, int contains)
214 {
215 	struct btrfs_block_group *cache, *ret = NULL;
216 	struct rb_node *n;
217 	u64 end, start;
218 
219 	read_lock(&info->block_group_cache_lock);
220 	n = info->block_group_cache_tree.rb_root.rb_node;
221 
222 	while (n) {
223 		cache = rb_entry(n, struct btrfs_block_group, cache_node);
224 		end = cache->start + cache->length - 1;
225 		start = cache->start;
226 
227 		if (bytenr < start) {
228 			if (!contains && (!ret || start < ret->start))
229 				ret = cache;
230 			n = n->rb_left;
231 		} else if (bytenr > start) {
232 			if (contains && bytenr <= end) {
233 				ret = cache;
234 				break;
235 			}
236 			n = n->rb_right;
237 		} else {
238 			ret = cache;
239 			break;
240 		}
241 	}
242 	if (ret)
243 		btrfs_get_block_group(ret);
244 	read_unlock(&info->block_group_cache_lock);
245 
246 	return ret;
247 }
248 
249 /*
250  * Return the block group that starts at or after bytenr
251  */
252 struct btrfs_block_group *btrfs_lookup_first_block_group(
253 		struct btrfs_fs_info *info, u64 bytenr)
254 {
255 	return block_group_cache_tree_search(info, bytenr, 0);
256 }
257 
258 /*
259  * Return the block group that contains the given bytenr
260  */
261 struct btrfs_block_group *btrfs_lookup_block_group(
262 		struct btrfs_fs_info *info, u64 bytenr)
263 {
264 	return block_group_cache_tree_search(info, bytenr, 1);
265 }
266 
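/*
 * Return the block group that logically follows @cache, dropping the caller's
 * reference on @cache and taking a reference on the returned block group.  If
 * @cache was removed from the rbtree in the meantime, fall back to a full
 * lookup starting at the end of @cache.
 */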
267 struct btrfs_block_group *btrfs_next_block_group(
268 		struct btrfs_block_group *cache)
269 {
270 	struct btrfs_fs_info *fs_info = cache->fs_info;
271 	struct rb_node *node;
272 
273 	read_lock(&fs_info->block_group_cache_lock);
274 
275 	/* If our block group was removed, we need a full search. */
276 	if (RB_EMPTY_NODE(&cache->cache_node)) {
277 		const u64 next_bytenr = cache->start + cache->length;
278 
279 		read_unlock(&fs_info->block_group_cache_lock);
280 		btrfs_put_block_group(cache);
281 		return btrfs_lookup_first_block_group(fs_info, next_bytenr);
282 	}
283 	node = rb_next(&cache->cache_node);
284 	btrfs_put_block_group(cache);
285 	if (node) {
286 		cache = rb_entry(node, struct btrfs_block_group, cache_node);
287 		btrfs_get_block_group(cache);
288 	} else
289 		cache = NULL;
290 	read_unlock(&fs_info->block_group_cache_lock);
291 	return cache;
292 }
293 
294 /**
295  * Check if we can do a NOCOW write for a given extent.
296  *
297  * @fs_info:       The filesystem information object.
298  * @bytenr:        Logical start address of the extent.
299  *
300  * Check if we can do a NOCOW write for the given extent, and increment the
301  * number of NOCOW writers in the block group that contains the extent, as long
302  * as the block group exists and it's currently not in read-only mode.
303  *
304  * Returns: A non-NULL block group pointer if we can do a NOCOW write, the caller
305  *          is responsible for calling btrfs_dec_nocow_writers() later.
306  *
307  *          Or NULL if we cannot do a NOCOW write.
308  */
309 struct btrfs_block_group *btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info,
310 						  u64 bytenr)
311 {
312 	struct btrfs_block_group *bg;
313 	bool can_nocow = true;
314 
315 	bg = btrfs_lookup_block_group(fs_info, bytenr);
316 	if (!bg)
317 		return NULL;
318 
319 	spin_lock(&bg->lock);
320 	if (bg->ro)
321 		can_nocow = false;
322 	else
323 		atomic_inc(&bg->nocow_writers);
324 	spin_unlock(&bg->lock);
325 
326 	if (!can_nocow) {
327 		btrfs_put_block_group(bg);
328 		return NULL;
329 	}
330 
331 	/* No put on block group, done by btrfs_dec_nocow_writers(). */
332 	return bg;
333 }
334 
335 /**
336  * Decrement the number of NOCOW writers in a block group.
337  *
338  * @bg:       The block group.
339  *
340  * This is meant to be called after a previous call to btrfs_inc_nocow_writers(),
341  * and on the block group returned by that call. Typically this is called after
342  * creating an ordered extent for a NOCOW write, to prevent races with scrub and
343  * relocation.
344  *
345  * After this call, the caller should not use the block group anymore. If it wants
346  * to use it, then it should get a reference on it before calling this function.
347  */
348 void btrfs_dec_nocow_writers(struct btrfs_block_group *bg)
349 {
350 	if (atomic_dec_and_test(&bg->nocow_writers))
351 		wake_up_var(&bg->nocow_writers);
352 
353 	/* For the lookup done by a previous call to btrfs_inc_nocow_writers(). */
354 	btrfs_put_block_group(bg);
355 }
356 
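/* Wait until there are no more NOCOW writers in the block group. */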
357 void btrfs_wait_nocow_writers(struct btrfs_block_group *bg)
358 {
359 	wait_var_event(&bg->nocow_writers, !atomic_read(&bg->nocow_writers));
360 }
361 
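/*
 * Drop one reservation from the block group containing @start and wake up
 * anyone waiting in btrfs_wait_block_group_reservations().
 */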
362 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
363 					const u64 start)
364 {
365 	struct btrfs_block_group *bg;
366 
367 	bg = btrfs_lookup_block_group(fs_info, start);
368 	ASSERT(bg);
369 	if (atomic_dec_and_test(&bg->reservations))
370 		wake_up_var(&bg->reservations);
371 	btrfs_put_block_group(bg);
372 }
373 
374 void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg)
375 {
376 	struct btrfs_space_info *space_info = bg->space_info;
377 
378 	ASSERT(bg->ro);
379 
380 	if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA))
381 		return;
382 
383 	/*
384 	 * Our block group is read only but before we set it to read only,
385 	 * some task might have had allocated an extent from it already, but it
386 	 * has not yet created a respective ordered extent (and added it to a
387 	 * root's list of ordered extents).
388 	 * Therefore wait for any task currently allocating extents, since the
389 	 * block group's reservations counter is incremented while a read lock
390 	 * on the groups' semaphore is held and decremented after releasing
391 	 * the read access on that semaphore and creating the ordered extent.
392 	 */
393 	down_write(&space_info->groups_sem);
394 	up_write(&space_info->groups_sem);
395 
396 	wait_var_event(&bg->reservations, !atomic_read(&bg->reservations));
397 }
398 
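/*
 * Return the caching control of a block group with an extra reference taken,
 * or NULL if the block group is not currently being cached.
 */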
399 struct btrfs_caching_control *btrfs_get_caching_control(
400 		struct btrfs_block_group *cache)
401 {
402 	struct btrfs_caching_control *ctl;
403 
404 	spin_lock(&cache->lock);
405 	if (!cache->caching_ctl) {
406 		spin_unlock(&cache->lock);
407 		return NULL;
408 	}
409 
410 	ctl = cache->caching_ctl;
411 	refcount_inc(&ctl->count);
412 	spin_unlock(&cache->lock);
413 	return ctl;
414 }
415 
416 void btrfs_put_caching_control(struct btrfs_caching_control *ctl)
417 {
418 	if (refcount_dec_and_test(&ctl->count))
419 		kfree(ctl);
420 }
421 
422 /*
423  * When we wait for progress in the block group caching, it's because our
424  * allocation attempt failed at least once.  So, we must sleep and let some
425  * progress happen before we try again.
426  *
427  * This function will sleep at least once waiting for new free space to show
428  * up, and then it will check the block group free space numbers for our min
429  * num_bytes.  Another option is to have it go ahead and look in the rbtree for
430  * a free extent of a given size, but this is a good start.
431  *
432  * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
433  * any of the information in this block group.
434  */
435 void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
436 					   u64 num_bytes)
437 {
438 	struct btrfs_caching_control *caching_ctl;
439 	int progress;
440 
441 	caching_ctl = btrfs_get_caching_control(cache);
442 	if (!caching_ctl)
443 		return;
444 
445 	/*
446 	 * We've already failed to allocate from this block group, so even if
447 	 * there's enough space in the block group it isn't contiguous enough to
448 	 * allow for an allocation, so wait for at least the next wakeup tick,
449 	 * or for the thing to be done.
450 	 */
451 	progress = atomic_read(&caching_ctl->progress);
452 
453 	wait_event(caching_ctl->wait, btrfs_block_group_done(cache) ||
454 		   (progress != atomic_read(&caching_ctl->progress) &&
455 		    (cache->free_space_ctl->free_space >= num_bytes)));
456 
457 	btrfs_put_caching_control(caching_ctl);
458 }
459 
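/*
 * Wait until caching of the block group has finished, returning -EIO if the
 * caching ended with an error.
 */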
460 static int btrfs_caching_ctl_wait_done(struct btrfs_block_group *cache,
461 				       struct btrfs_caching_control *caching_ctl)
462 {
463 	wait_event(caching_ctl->wait, btrfs_block_group_done(cache));
464 	return cache->cached == BTRFS_CACHE_ERROR ? -EIO : 0;
465 }
466 
467 static int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache)
468 {
469 	struct btrfs_caching_control *caching_ctl;
470 	int ret;
471 
472 	caching_ctl = btrfs_get_caching_control(cache);
473 	if (!caching_ctl)
474 		return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
475 	ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
476 	btrfs_put_caching_control(caching_ctl);
477 	return ret;
478 }
479 
480 #ifdef CONFIG_BTRFS_DEBUG
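/*
 * Debug helper: artificially fragment a block group's free space by removing
 * every other chunk (nodesize for metadata, sectorsize for data) from its
 * free space cache.
 */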
481 static void fragment_free_space(struct btrfs_block_group *block_group)
482 {
483 	struct btrfs_fs_info *fs_info = block_group->fs_info;
484 	u64 start = block_group->start;
485 	u64 len = block_group->length;
486 	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
487 		fs_info->nodesize : fs_info->sectorsize;
488 	u64 step = chunk << 1;
489 
490 	while (len > chunk) {
491 		btrfs_remove_free_space(block_group, start, chunk);
492 		start += step;
493 		if (len < step)
494 			len = 0;
495 		else
496 			len -= step;
497 	}
498 }
499 #endif
500 
501 /*
502  * This is only called by btrfs_cache_block_group, since we could have freed
503  * extents we need to check the pinned_extents for any extents that can't be
504  * used yet since their free space will be released as soon as the transaction
505  * commits.
506  */
507 int add_new_free_space(struct btrfs_block_group *block_group, u64 start, u64 end,
508 		       u64 *total_added_ret)
509 {
510 	struct btrfs_fs_info *info = block_group->fs_info;
511 	u64 extent_start, extent_end, size;
512 	int ret;
513 
514 	if (total_added_ret)
515 		*total_added_ret = 0;
516 
517 	while (start < end) {
518 		ret = find_first_extent_bit(&info->excluded_extents, start,
519 					    &extent_start, &extent_end,
520 					    EXTENT_DIRTY | EXTENT_UPTODATE,
521 					    NULL);
522 		if (ret)
523 			break;
524 
525 		if (extent_start <= start) {
526 			start = extent_end + 1;
527 		} else if (extent_start > start && extent_start < end) {
528 			size = extent_start - start;
529 			ret = btrfs_add_free_space_async_trimmed(block_group,
530 								 start, size);
531 			if (ret)
532 				return ret;
533 			if (total_added_ret)
534 				*total_added_ret += size;
535 			start = extent_end + 1;
536 		} else {
537 			break;
538 		}
539 	}
540 
541 	if (start < end) {
542 		size = end - start;
543 		ret = btrfs_add_free_space_async_trimmed(block_group, start,
544 							 size);
545 		if (ret)
546 			return ret;
547 		if (total_added_ret)
548 			*total_added_ret += size;
549 	}
550 
551 	return 0;
552 }
553 
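/*
 * Slow caching path: walk the extent tree (using the commit root, without
 * locking) and add the gaps between allocated extents to the block group's
 * free space cache, waking up waiters as progress is made.
 */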
554 static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
555 {
556 	struct btrfs_block_group *block_group = caching_ctl->block_group;
557 	struct btrfs_fs_info *fs_info = block_group->fs_info;
558 	struct btrfs_root *extent_root;
559 	struct btrfs_path *path;
560 	struct extent_buffer *leaf;
561 	struct btrfs_key key;
562 	u64 total_found = 0;
563 	u64 last = 0;
564 	u32 nritems;
565 	int ret;
566 	bool wakeup = true;
567 
568 	path = btrfs_alloc_path();
569 	if (!path)
570 		return -ENOMEM;
571 
572 	last = max_t(u64, block_group->start, BTRFS_SUPER_INFO_OFFSET);
573 	extent_root = btrfs_extent_root(fs_info, last);
574 
575 #ifdef CONFIG_BTRFS_DEBUG
576 	/*
577 	 * If we're fragmenting we don't want to make anybody think we can
578 	 * allocate from this block group until we've had a chance to fragment
579 	 * the free space.
580 	 */
581 	if (btrfs_should_fragment_free_space(block_group))
582 		wakeup = false;
583 #endif
584 	/*
585 	 * We don't want to deadlock with somebody trying to allocate a new
586 	 * extent for the extent root while also trying to search the extent
587 	 * root to add free space.  So we skip locking and search the commit
588 	 * root, since it's read-only.
589 	 */
590 	path->skip_locking = 1;
591 	path->search_commit_root = 1;
592 	path->reada = READA_FORWARD;
593 
594 	key.objectid = last;
595 	key.offset = 0;
596 	key.type = BTRFS_EXTENT_ITEM_KEY;
597 
598 next:
599 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
600 	if (ret < 0)
601 		goto out;
602 
603 	leaf = path->nodes[0];
604 	nritems = btrfs_header_nritems(leaf);
605 
606 	while (1) {
607 		if (btrfs_fs_closing(fs_info) > 1) {
608 			last = (u64)-1;
609 			break;
610 		}
611 
612 		if (path->slots[0] < nritems) {
613 			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
614 		} else {
615 			ret = btrfs_find_next_key(extent_root, path, &key, 0, 0);
616 			if (ret)
617 				break;
618 
619 			if (need_resched() ||
620 			    rwsem_is_contended(&fs_info->commit_root_sem)) {
621 				btrfs_release_path(path);
622 				up_read(&fs_info->commit_root_sem);
623 				mutex_unlock(&caching_ctl->mutex);
624 				cond_resched();
625 				mutex_lock(&caching_ctl->mutex);
626 				down_read(&fs_info->commit_root_sem);
627 				goto next;
628 			}
629 
630 			ret = btrfs_next_leaf(extent_root, path);
631 			if (ret < 0)
632 				goto out;
633 			if (ret)
634 				break;
635 			leaf = path->nodes[0];
636 			nritems = btrfs_header_nritems(leaf);
637 			continue;
638 		}
639 
640 		if (key.objectid < last) {
641 			key.objectid = last;
642 			key.offset = 0;
643 			key.type = BTRFS_EXTENT_ITEM_KEY;
644 			btrfs_release_path(path);
645 			goto next;
646 		}
647 
648 		if (key.objectid < block_group->start) {
649 			path->slots[0]++;
650 			continue;
651 		}
652 
653 		if (key.objectid >= block_group->start + block_group->length)
654 			break;
655 
656 		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
657 		    key.type == BTRFS_METADATA_ITEM_KEY) {
658 			u64 space_added;
659 
660 			ret = add_new_free_space(block_group, last, key.objectid,
661 						 &space_added);
662 			if (ret)
663 				goto out;
664 			total_found += space_added;
665 			if (key.type == BTRFS_METADATA_ITEM_KEY)
666 				last = key.objectid +
667 					fs_info->nodesize;
668 			else
669 				last = key.objectid + key.offset;
670 
671 			if (total_found > CACHING_CTL_WAKE_UP) {
672 				total_found = 0;
673 				if (wakeup) {
674 					atomic_inc(&caching_ctl->progress);
675 					wake_up(&caching_ctl->wait);
676 				}
677 			}
678 		}
679 		path->slots[0]++;
680 	}
681 
682 	ret = add_new_free_space(block_group, last,
683 				 block_group->start + block_group->length,
684 				 NULL);
685 out:
686 	btrfs_free_path(path);
687 	return ret;
688 }
689 
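/*
 * Worker that populates a block group's free space information, either from
 * the on-disk free space cache/tree or by scanning the extent tree, and then
 * marks the block group as cached (or as errored).
 */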
690 static noinline void caching_thread(struct btrfs_work *work)
691 {
692 	struct btrfs_block_group *block_group;
693 	struct btrfs_fs_info *fs_info;
694 	struct btrfs_caching_control *caching_ctl;
695 	int ret;
696 
697 	caching_ctl = container_of(work, struct btrfs_caching_control, work);
698 	block_group = caching_ctl->block_group;
699 	fs_info = block_group->fs_info;
700 
701 	mutex_lock(&caching_ctl->mutex);
702 	down_read(&fs_info->commit_root_sem);
703 
704 	if (btrfs_test_opt(fs_info, SPACE_CACHE)) {
705 		ret = load_free_space_cache(block_group);
706 		if (ret == 1) {
707 			ret = 0;
708 			goto done;
709 		}
710 
711 		/*
712 		 * We failed to load the space cache, set ourselves to
713 		 * CACHE_STARTED and carry on.
714 		 */
715 		spin_lock(&block_group->lock);
716 		block_group->cached = BTRFS_CACHE_STARTED;
717 		spin_unlock(&block_group->lock);
718 		wake_up(&caching_ctl->wait);
719 	}
720 
721 	/*
722 	 * If we are in the transaction that populated the free space tree we
723 	 * can't actually cache from the free space tree as our commit root and
724 	 * real root are the same, so we could change the contents of the blocks
725 	 * while caching.  Instead do the slow caching in this case, and after
726 	 * the transaction has committed we will be safe.
727 	 */
728 	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) &&
729 	    !(test_bit(BTRFS_FS_FREE_SPACE_TREE_UNTRUSTED, &fs_info->flags)))
730 		ret = load_free_space_tree(caching_ctl);
731 	else
732 		ret = load_extent_tree_free(caching_ctl);
733 done:
734 	spin_lock(&block_group->lock);
735 	block_group->caching_ctl = NULL;
736 	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
737 	spin_unlock(&block_group->lock);
738 
739 #ifdef CONFIG_BTRFS_DEBUG
740 	if (btrfs_should_fragment_free_space(block_group)) {
741 		u64 bytes_used;
742 
743 		spin_lock(&block_group->space_info->lock);
744 		spin_lock(&block_group->lock);
745 		bytes_used = block_group->length - block_group->used;
746 		block_group->space_info->bytes_used += bytes_used >> 1;
747 		spin_unlock(&block_group->lock);
748 		spin_unlock(&block_group->space_info->lock);
749 		fragment_free_space(block_group);
750 	}
751 #endif
752 
753 	up_read(&fs_info->commit_root_sem);
754 	btrfs_free_excluded_extents(block_group);
755 	mutex_unlock(&caching_ctl->mutex);
756 
757 	wake_up(&caching_ctl->wait);
758 
759 	btrfs_put_caching_control(caching_ctl);
760 	btrfs_put_block_group(block_group);
761 }
762 
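/*
 * Start caching a block group's free space if that hasn't started yet and,
 * if @wait is true, wait for the caching to finish.  Zoned filesystems do
 * not use the free space cache, so this is a no-op for them.
 */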
763 int btrfs_cache_block_group(struct btrfs_block_group *cache, bool wait)
764 {
765 	struct btrfs_fs_info *fs_info = cache->fs_info;
766 	struct btrfs_caching_control *caching_ctl = NULL;
767 	int ret = 0;
768 
769 	/* Allocator for zoned filesystems does not use the cache at all */
770 	if (btrfs_is_zoned(fs_info))
771 		return 0;
772 
773 	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
774 	if (!caching_ctl)
775 		return -ENOMEM;
776 
777 	INIT_LIST_HEAD(&caching_ctl->list);
778 	mutex_init(&caching_ctl->mutex);
779 	init_waitqueue_head(&caching_ctl->wait);
780 	caching_ctl->block_group = cache;
781 	refcount_set(&caching_ctl->count, 2);
782 	atomic_set(&caching_ctl->progress, 0);
783 	btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL);
784 
785 	spin_lock(&cache->lock);
786 	if (cache->cached != BTRFS_CACHE_NO) {
787 		kfree(caching_ctl);
788 
789 		caching_ctl = cache->caching_ctl;
790 		if (caching_ctl)
791 			refcount_inc(&caching_ctl->count);
792 		spin_unlock(&cache->lock);
793 		goto out;
794 	}
795 	WARN_ON(cache->caching_ctl);
796 	cache->caching_ctl = caching_ctl;
797 	cache->cached = BTRFS_CACHE_STARTED;
798 	spin_unlock(&cache->lock);
799 
800 	write_lock(&fs_info->block_group_cache_lock);
801 	refcount_inc(&caching_ctl->count);
802 	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
803 	write_unlock(&fs_info->block_group_cache_lock);
804 
805 	btrfs_get_block_group(cache);
806 
807 	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);
808 out:
809 	if (wait && caching_ctl)
810 		ret = btrfs_caching_ctl_wait_done(cache, caching_ctl);
811 	if (caching_ctl)
812 		btrfs_put_caching_control(caching_ctl);
813 
814 	return ret;
815 }
816 
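/*
 * Clear the extended profile bits of @flags from the per-type available
 * allocation bits, under the profiles seqlock.
 */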
817 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
818 {
819 	u64 extra_flags = chunk_to_extended(flags) &
820 				BTRFS_EXTENDED_PROFILE_MASK;
821 
822 	write_seqlock(&fs_info->profiles_lock);
823 	if (flags & BTRFS_BLOCK_GROUP_DATA)
824 		fs_info->avail_data_alloc_bits &= ~extra_flags;
825 	if (flags & BTRFS_BLOCK_GROUP_METADATA)
826 		fs_info->avail_metadata_alloc_bits &= ~extra_flags;
827 	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
828 		fs_info->avail_system_alloc_bits &= ~extra_flags;
829 	write_sequnlock(&fs_info->profiles_lock);
830 }
831 
832 /*
833  * Clear incompat bits for the following feature(s):
834  *
835  * - RAID56 - in case there's neither RAID5 nor RAID6 profile block group
836  *            in the whole filesystem
837  *
838  * - RAID1C34 - same as above for RAID1C3 and RAID1C4 block groups
839  */
840 static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
841 {
842 	bool found_raid56 = false;
843 	bool found_raid1c34 = false;
844 
845 	if ((flags & BTRFS_BLOCK_GROUP_RAID56_MASK) ||
846 	    (flags & BTRFS_BLOCK_GROUP_RAID1C3) ||
847 	    (flags & BTRFS_BLOCK_GROUP_RAID1C4)) {
848 		struct list_head *head = &fs_info->space_info;
849 		struct btrfs_space_info *sinfo;
850 
851 		list_for_each_entry_rcu(sinfo, head, list) {
852 			down_read(&sinfo->groups_sem);
853 			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID5]))
854 				found_raid56 = true;
855 			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID6]))
856 				found_raid56 = true;
857 			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C3]))
858 				found_raid1c34 = true;
859 			if (!list_empty(&sinfo->block_groups[BTRFS_RAID_RAID1C4]))
860 				found_raid1c34 = true;
861 			up_read(&sinfo->groups_sem);
862 		}
863 		if (!found_raid56)
864 			btrfs_clear_fs_incompat(fs_info, RAID56);
865 		if (!found_raid1c34)
866 			btrfs_clear_fs_incompat(fs_info, RAID1C34);
867 	}
868 }
869 
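/*
 * Delete the on-disk item of @block_group from the tree that stores block
 * group items.
 */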
870 static int remove_block_group_item(struct btrfs_trans_handle *trans,
871 				   struct btrfs_path *path,
872 				   struct btrfs_block_group *block_group)
873 {
874 	struct btrfs_fs_info *fs_info = trans->fs_info;
875 	struct btrfs_root *root;
876 	struct btrfs_key key;
877 	int ret;
878 
879 	root = btrfs_block_group_root(fs_info);
880 	key.objectid = block_group->start;
881 	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
882 	key.offset = block_group->length;
883 
884 	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
885 	if (ret > 0)
886 		ret = -ENOENT;
887 	if (ret < 0)
888 		return ret;
889 
890 	ret = btrfs_del_item(trans, root, path);
891 	return ret;
892 }
893 
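/*
 * Remove a block group: detach it from allocation clusters and the dirty/io
 * lists, remove its free space cache/tree entries and its on-disk item,
 * update the space_info counters and, unless the block group is frozen,
 * drop its extent map from the mapping tree.
 */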
894 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
895 			     u64 group_start, struct extent_map *em)
896 {
897 	struct btrfs_fs_info *fs_info = trans->fs_info;
898 	struct btrfs_path *path;
899 	struct btrfs_block_group *block_group;
900 	struct btrfs_free_cluster *cluster;
901 	struct inode *inode;
902 	struct kobject *kobj = NULL;
903 	int ret;
904 	int index;
905 	int factor;
906 	struct btrfs_caching_control *caching_ctl = NULL;
907 	bool remove_em;
908 	bool remove_rsv = false;
909 
910 	block_group = btrfs_lookup_block_group(fs_info, group_start);
911 	BUG_ON(!block_group);
912 	BUG_ON(!block_group->ro);
913 
914 	trace_btrfs_remove_block_group(block_group);
915 	/*
916 	 * Free the reserved super bytes from this block group before
917 	 * removing it.
918 	 */
919 	btrfs_free_excluded_extents(block_group);
920 	btrfs_free_ref_tree_range(fs_info, block_group->start,
921 				  block_group->length);
922 
923 	index = btrfs_bg_flags_to_raid_index(block_group->flags);
924 	factor = btrfs_bg_type_to_factor(block_group->flags);
925 
926 	/* make sure this block group isn't part of an allocation cluster */
927 	cluster = &fs_info->data_alloc_cluster;
928 	spin_lock(&cluster->refill_lock);
929 	btrfs_return_cluster_to_free_space(block_group, cluster);
930 	spin_unlock(&cluster->refill_lock);
931 
932 	/*
933 	 * make sure this block group isn't part of a metadata
934 	 * allocation cluster
935 	 */
936 	cluster = &fs_info->meta_alloc_cluster;
937 	spin_lock(&cluster->refill_lock);
938 	btrfs_return_cluster_to_free_space(block_group, cluster);
939 	spin_unlock(&cluster->refill_lock);
940 
941 	btrfs_clear_treelog_bg(block_group);
942 	btrfs_clear_data_reloc_bg(block_group);
943 
944 	path = btrfs_alloc_path();
945 	if (!path) {
946 		ret = -ENOMEM;
947 		goto out;
948 	}
949 
950 	/*
951 	 * get the inode first so any iput calls done for the io_list
952 	 * aren't the final iput (no unlinks allowed now)
953 	 */
954 	inode = lookup_free_space_inode(block_group, path);
955 
956 	mutex_lock(&trans->transaction->cache_write_mutex);
957 	/*
958 	 * Make sure our free space cache IO is done before removing the
959 	 * free space inode
960 	 */
961 	spin_lock(&trans->transaction->dirty_bgs_lock);
962 	if (!list_empty(&block_group->io_list)) {
963 		list_del_init(&block_group->io_list);
964 
965 		WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
966 
967 		spin_unlock(&trans->transaction->dirty_bgs_lock);
968 		btrfs_wait_cache_io(trans, block_group, path);
969 		btrfs_put_block_group(block_group);
970 		spin_lock(&trans->transaction->dirty_bgs_lock);
971 	}
972 
973 	if (!list_empty(&block_group->dirty_list)) {
974 		list_del_init(&block_group->dirty_list);
975 		remove_rsv = true;
976 		btrfs_put_block_group(block_group);
977 	}
978 	spin_unlock(&trans->transaction->dirty_bgs_lock);
979 	mutex_unlock(&trans->transaction->cache_write_mutex);
980 
981 	ret = btrfs_remove_free_space_inode(trans, inode, block_group);
982 	if (ret)
983 		goto out;
984 
985 	write_lock(&fs_info->block_group_cache_lock);
986 	rb_erase_cached(&block_group->cache_node,
987 			&fs_info->block_group_cache_tree);
988 	RB_CLEAR_NODE(&block_group->cache_node);
989 
990 	/* Once for the block groups rbtree */
991 	btrfs_put_block_group(block_group);
992 
993 	write_unlock(&fs_info->block_group_cache_lock);
994 
995 	down_write(&block_group->space_info->groups_sem);
996 	/*
997 	 * we must use list_del_init so people can check to see if they
998 	 * are still on the list after taking the semaphore
999 	 */
1000 	list_del_init(&block_group->list);
1001 	if (list_empty(&block_group->space_info->block_groups[index])) {
1002 		kobj = block_group->space_info->block_group_kobjs[index];
1003 		block_group->space_info->block_group_kobjs[index] = NULL;
1004 		clear_avail_alloc_bits(fs_info, block_group->flags);
1005 	}
1006 	up_write(&block_group->space_info->groups_sem);
1007 	clear_incompat_bg_bits(fs_info, block_group->flags);
1008 	if (kobj) {
1009 		kobject_del(kobj);
1010 		kobject_put(kobj);
1011 	}
1012 
1013 	if (block_group->cached == BTRFS_CACHE_STARTED)
1014 		btrfs_wait_block_group_cache_done(block_group);
1015 
1016 	write_lock(&fs_info->block_group_cache_lock);
1017 	caching_ctl = btrfs_get_caching_control(block_group);
1018 	if (!caching_ctl) {
1019 		struct btrfs_caching_control *ctl;
1020 
1021 		list_for_each_entry(ctl, &fs_info->caching_block_groups, list) {
1022 			if (ctl->block_group == block_group) {
1023 				caching_ctl = ctl;
1024 				refcount_inc(&caching_ctl->count);
1025 				break;
1026 			}
1027 		}
1028 	}
1029 	if (caching_ctl)
1030 		list_del_init(&caching_ctl->list);
1031 	write_unlock(&fs_info->block_group_cache_lock);
1032 
1033 	if (caching_ctl) {
1034 		/* Once for the caching bgs list and once for us. */
1035 		btrfs_put_caching_control(caching_ctl);
1036 		btrfs_put_caching_control(caching_ctl);
1037 	}
1038 
1039 	spin_lock(&trans->transaction->dirty_bgs_lock);
1040 	WARN_ON(!list_empty(&block_group->dirty_list));
1041 	WARN_ON(!list_empty(&block_group->io_list));
1042 	spin_unlock(&trans->transaction->dirty_bgs_lock);
1043 
1044 	btrfs_remove_free_space_cache(block_group);
1045 
1046 	spin_lock(&block_group->space_info->lock);
1047 	list_del_init(&block_group->ro_list);
1048 
1049 	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
1050 		WARN_ON(block_group->space_info->total_bytes
1051 			< block_group->length);
1052 		WARN_ON(block_group->space_info->bytes_readonly
1053 			< block_group->length - block_group->zone_unusable);
1054 		WARN_ON(block_group->space_info->bytes_zone_unusable
1055 			< block_group->zone_unusable);
1056 		WARN_ON(block_group->space_info->disk_total
1057 			< block_group->length * factor);
1058 		WARN_ON(test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
1059 				 &block_group->runtime_flags) &&
1060 			block_group->space_info->active_total_bytes
1061 			< block_group->length);
1062 	}
1063 	block_group->space_info->total_bytes -= block_group->length;
1064 	if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags))
1065 		block_group->space_info->active_total_bytes -= block_group->length;
1066 	block_group->space_info->bytes_readonly -=
1067 		(block_group->length - block_group->zone_unusable);
1068 	block_group->space_info->bytes_zone_unusable -=
1069 		block_group->zone_unusable;
1070 	block_group->space_info->disk_total -= block_group->length * factor;
1071 
1072 	spin_unlock(&block_group->space_info->lock);
1073 
1074 	/*
1075 	 * Remove the free space for the block group from the free space tree
1076 	 * and the block group's item from the extent tree before marking the
1077 	 * block group as removed. This is to prevent races with tasks that
1078 	 * freeze and unfreeze a block group, this task and another task
1079 	 * allocating a new block group - the unfreeze task ends up removing
1080 	 * the block group's extent map before the task calling this function
1081 	 * deletes the block group item from the extent tree, allowing for
1082 	 * another task to attempt to create another block group with the same
1083 	 * item key (and failing with -EEXIST and a transaction abort).
1084 	 */
1085 	ret = remove_block_group_free_space(trans, block_group);
1086 	if (ret)
1087 		goto out;
1088 
1089 	ret = remove_block_group_item(trans, path, block_group);
1090 	if (ret < 0)
1091 		goto out;
1092 
1093 	spin_lock(&block_group->lock);
1094 	set_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags);
1095 
1096 	/*
1097 	 * At this point trimming or scrub can't start on this block group,
1098 	 * because we removed the block group from the rbtree
1099 	 * fs_info->block_group_cache_tree so no one can find it anymore and
1100 	 * even if someone already got this block group before we removed it
1101 	 * from the rbtree, they have already incremented block_group->frozen -
1102 	 * if they didn't, for the trimming case they won't find any free space
1103 	 * entries because we already removed them all when we called
1104 	 * btrfs_remove_free_space_cache().
1105 	 *
1106 	 * And we must not remove the extent map from the fs_info->mapping_tree
1107 	 * to prevent the same logical address range and physical device space
1108 	 * ranges from being reused for a new block group. This is needed to
1109 	 * avoid races with trimming and scrub.
1110 	 *
1111 	 * An fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
1112 	 * completely transactionless, so while it is trimming a range the
1113 	 * currently running transaction might finish and a new one start,
1114 	 * allowing for new block groups to be created that can reuse the same
1115 	 * physical device locations unless we take this special care.
1116 	 *
1117 	 * There may also be an implicit trim operation if the file system
1118 	 * is mounted with -odiscard. The same protections must remain
1119 	 * in place until the extents have been discarded completely when
1120 	 * the transaction commit has completed.
1121 	 */
1122 	remove_em = (atomic_read(&block_group->frozen) == 0);
1123 	spin_unlock(&block_group->lock);
1124 
1125 	if (remove_em) {
1126 		struct extent_map_tree *em_tree;
1127 
1128 		em_tree = &fs_info->mapping_tree;
1129 		write_lock(&em_tree->lock);
1130 		remove_extent_mapping(em_tree, em);
1131 		write_unlock(&em_tree->lock);
1132 		/* once for the tree */
1133 		free_extent_map(em);
1134 	}
1135 
1136 out:
1137 	/* Once for the lookup reference */
1138 	btrfs_put_block_group(block_group);
1139 	if (remove_rsv)
1140 		btrfs_delayed_refs_rsv_release(fs_info, 1);
1141 	btrfs_free_path(path);
1142 	return ret;
1143 }
1144 
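/*
 * Start a transaction with enough metadata units reserved to remove the
 * block group of the chunk at @chunk_offset (see the comment below for the
 * breakdown of the reservation).
 */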
1145 struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
1146 		struct btrfs_fs_info *fs_info, const u64 chunk_offset)
1147 {
1148 	struct btrfs_root *root = btrfs_block_group_root(fs_info);
1149 	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
1150 	struct extent_map *em;
1151 	struct map_lookup *map;
1152 	unsigned int num_items;
1153 
1154 	read_lock(&em_tree->lock);
1155 	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
1156 	read_unlock(&em_tree->lock);
1157 	ASSERT(em && em->start == chunk_offset);
1158 
1159 	/*
1160 	 * We need to reserve 3 + N units from the metadata space info in order
1161 	 * to remove a block group (done at btrfs_remove_chunk() and at
1162 	 * btrfs_remove_block_group()), which are used for:
1163 	 *
1164 	 * 1 unit for adding the free space inode's orphan (located in the tree
1165 	 * of tree roots).
1166 	 * 1 unit for deleting the block group item (located in the extent
1167 	 * tree).
1168 	 * 1 unit for deleting the free space item (located in tree of tree
1169 	 * roots).
1170 	 * N units for deleting N device extent items corresponding to each
1171 	 * stripe (located in the device tree).
1172 	 *
1173 	 * In order to remove a block group we also need to reserve units in the
1174 	 * system space info in order to update the chunk tree (update one or
1175 	 * more device items and remove one chunk item), but this is done at
1176 	 * btrfs_remove_chunk() through a call to check_system_chunk().
1177 	 */
1178 	map = em->map_lookup;
1179 	num_items = 3 + map->num_stripes;
1180 	free_extent_map(em);
1181 
1182 	return btrfs_start_transaction_fallback_global_rsv(root, num_items);
1183 }
1184 
1185 /*
1186  * Mark block group @cache read-only, so later write won't happen to block
1187  * group @cache.
1188  *
1189  * If @force is not set, this function will only mark the block group readonly
1190  * if we have enough free space (1M) in other metadata/system block groups.
1191  * If @force is set, this function will mark the block group readonly
1192  * without checking free space.
1193  *
1194  * NOTE: This function doesn't care if other block groups can contain all the
1195  * data in this block group. That check should be done by relocation routine,
1196  * not this function.
1197  */
1198 static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
1199 {
1200 	struct btrfs_space_info *sinfo = cache->space_info;
1201 	u64 num_bytes;
1202 	int ret = -ENOSPC;
1203 
1204 	spin_lock(&sinfo->lock);
1205 	spin_lock(&cache->lock);
1206 
1207 	if (cache->swap_extents) {
1208 		ret = -ETXTBSY;
1209 		goto out;
1210 	}
1211 
1212 	if (cache->ro) {
1213 		cache->ro++;
1214 		ret = 0;
1215 		goto out;
1216 	}
1217 
1218 	num_bytes = cache->length - cache->reserved - cache->pinned -
1219 		    cache->bytes_super - cache->zone_unusable - cache->used;
1220 
1221 	/*
1222 	 * Data never overcommits, even in mixed mode, so do just the straight
1223 	 * check of left over space in how much we have allocated.
1224 	 */
1225 	if (force) {
1226 		ret = 0;
1227 	} else if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA) {
1228 		u64 sinfo_used = btrfs_space_info_used(sinfo, true);
1229 
1230 		/*
1231 		 * Here we make sure if we mark this bg RO, we still have enough
1232 		 * free space as buffer.
1233 		 */
1234 		if (sinfo_used + num_bytes <= sinfo->total_bytes)
1235 			ret = 0;
1236 	} else {
1237 		/*
1238 		 * We overcommit metadata, so we need to do the
1239 		 * btrfs_can_overcommit check here, and we need to pass in
1240 		 * BTRFS_RESERVE_NO_FLUSH to give ourselves the most amount of
1241 		 * leeway to allow us to mark this block group as read only.
1242 		 */
1243 		if (btrfs_can_overcommit(cache->fs_info, sinfo, num_bytes,
1244 					 BTRFS_RESERVE_NO_FLUSH))
1245 			ret = 0;
1246 	}
1247 
1248 	if (!ret) {
1249 		sinfo->bytes_readonly += num_bytes;
1250 		if (btrfs_is_zoned(cache->fs_info)) {
1251 			/* Migrate zone_unusable bytes to readonly */
1252 			sinfo->bytes_readonly += cache->zone_unusable;
1253 			sinfo->bytes_zone_unusable -= cache->zone_unusable;
1254 			cache->zone_unusable = 0;
1255 		}
1256 		cache->ro++;
1257 		list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
1258 	}
1259 out:
1260 	spin_unlock(&cache->lock);
1261 	spin_unlock(&sinfo->lock);
1262 	if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
1263 		btrfs_info(cache->fs_info,
1264 			"unable to make block group %llu ro", cache->start);
1265 		btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
1266 	}
1267 	return ret;
1268 }
1269 
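/*
 * Clear any pinned extent ranges of the block group from the current and,
 * if it still exists, the previous transaction, so nothing stays pinned in
 * a block group that is about to be deleted.  Returns true on success.
 */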
1270 static bool clean_pinned_extents(struct btrfs_trans_handle *trans,
1271 				 struct btrfs_block_group *bg)
1272 {
1273 	struct btrfs_fs_info *fs_info = bg->fs_info;
1274 	struct btrfs_transaction *prev_trans = NULL;
1275 	const u64 start = bg->start;
1276 	const u64 end = start + bg->length - 1;
1277 	int ret;
1278 
1279 	spin_lock(&fs_info->trans_lock);
1280 	if (trans->transaction->list.prev != &fs_info->trans_list) {
1281 		prev_trans = list_last_entry(&trans->transaction->list,
1282 					     struct btrfs_transaction, list);
1283 		refcount_inc(&prev_trans->use_count);
1284 	}
1285 	spin_unlock(&fs_info->trans_lock);
1286 
1287 	/*
1288 	 * Hold the unused_bg_unpin_mutex lock to avoid racing with
1289 	 * btrfs_finish_extent_commit(). If we are at transaction N, another
1290 	 * task might be running finish_extent_commit() for the previous
1291 	 * transaction N - 1, and have seen a range belonging to the block
1292 	 * group in pinned_extents before we were able to clear the whole block
1293 	 * group range from pinned_extents. This means that task can lookup for
1294 	 * the block group after we unpinned it from pinned_extents and removed
1295 	 * it, leading to a BUG_ON() at unpin_extent_range().
1296 	 */
1297 	mutex_lock(&fs_info->unused_bg_unpin_mutex);
1298 	if (prev_trans) {
1299 		ret = clear_extent_bits(&prev_trans->pinned_extents, start, end,
1300 					EXTENT_DIRTY);
1301 		if (ret)
1302 			goto out;
1303 	}
1304 
1305 	ret = clear_extent_bits(&trans->transaction->pinned_extents, start, end,
1306 				EXTENT_DIRTY);
1307 out:
1308 	mutex_unlock(&fs_info->unused_bg_unpin_mutex);
1309 	if (prev_trans)
1310 		btrfs_put_transaction(prev_trans);
1311 
1312 	return ret == 0;
1313 }
1314 
1315 /*
1316  * Process the unused_bgs list and remove any that don't have any allocated
1317  * space inside of them.
1318  */
1319 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
1320 {
1321 	LIST_HEAD(retry_list);
1322 	struct btrfs_block_group *block_group;
1323 	struct btrfs_space_info *space_info;
1324 	struct btrfs_trans_handle *trans;
1325 	const bool async_trim_enabled = btrfs_test_opt(fs_info, DISCARD_ASYNC);
1326 	int ret = 0;
1327 
1328 	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1329 		return;
1330 
1331 	if (btrfs_fs_closing(fs_info))
1332 		return;
1333 
1334 	/*
1335 	 * Long running balances can keep us blocked here for eternity, so
1336 	 * simply skip deletion if we're unable to get the mutex.
1337 	 */
1338 	if (!mutex_trylock(&fs_info->reclaim_bgs_lock))
1339 		return;
1340 
1341 	spin_lock(&fs_info->unused_bgs_lock);
1342 	while (!list_empty(&fs_info->unused_bgs)) {
1343 		u64 used;
1344 		int trimming;
1345 
1346 		block_group = list_first_entry(&fs_info->unused_bgs,
1347 					       struct btrfs_block_group,
1348 					       bg_list);
1349 		list_del_init(&block_group->bg_list);
1350 
1351 		space_info = block_group->space_info;
1352 
1353 		if (ret || btrfs_mixed_space_info(space_info)) {
1354 			btrfs_put_block_group(block_group);
1355 			continue;
1356 		}
1357 		spin_unlock(&fs_info->unused_bgs_lock);
1358 
1359 		btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
1360 
1361 		/* Don't want to race with allocators so take the groups_sem */
1362 		down_write(&space_info->groups_sem);
1363 
1364 		/*
1365 		 * Async discard moves the final block group discard to be prior
1366 		 * to the unused_bgs code path.  Therefore, if it's not fully
1367 		 * trimmed, punt it back to the async discard lists.
1368 		 */
1369 		if (btrfs_test_opt(fs_info, DISCARD_ASYNC) &&
1370 		    !btrfs_is_free_space_trimmed(block_group)) {
1371 			trace_btrfs_skip_unused_block_group(block_group);
1372 			up_write(&space_info->groups_sem);
1373 			/* Requeue if we failed because of async discard */
1374 			btrfs_discard_queue_work(&fs_info->discard_ctl,
1375 						 block_group);
1376 			goto next;
1377 		}
1378 
1379 		spin_lock(&space_info->lock);
1380 		spin_lock(&block_group->lock);
1381 		if (btrfs_is_block_group_used(block_group) || block_group->ro ||
1382 		    list_is_singular(&block_group->list)) {
1383 			/*
1384 			 * We want to bail if we made new allocations or have
1385 			 * outstanding allocations in this block group.  We do
1386 			 * the ro check in case balance is currently acting on
1387 			 * this block group.
1388 			 */
1389 			trace_btrfs_skip_unused_block_group(block_group);
1390 			spin_unlock(&block_group->lock);
1391 			spin_unlock(&space_info->lock);
1392 			up_write(&space_info->groups_sem);
1393 			goto next;
1394 		}
1395 
1396 		/*
1397 		 * The block group may be unused but there may be space reserved
1398 		 * accounting with the existence of that block group, that is,
1399 		 * space_info->bytes_may_use was incremented by a task but no
1400 		 * space was yet allocated from the block group by the task.
1401 		 * That space may or may not be allocated, as we are generally
1402 		 * pessimistic about space reservation for metadata as well as
1403 		 * for data when using compression (as we reserve space based on
1404 		 * the worst case, when data can't be compressed, and before
1405 		 * actually attempting compression, before starting writeback).
1406 		 *
1407 		 * So check if the total space of the space_info minus the size
1408 		 * of this block group is less than the used space of the
1409 		 * space_info - if that's the case, then it means we have tasks
1410 		 * that might be relying on the block group in order to allocate
1411 		 * extents, and add back the block group to the unused list when
1412 		 * we finish, so that we retry later in case no tasks ended up
1413 		 * needing to allocate extents from the block group.
1414 		 */
1415 		used = btrfs_space_info_used(space_info, true);
1416 		if (space_info->total_bytes - block_group->length < used) {
1417 			/*
1418 			 * Add a reference for the list, compensate for the ref
1419 			 * drop under the "next" label for the
1420 			 * fs_info->unused_bgs list.
1421 			 */
1422 			btrfs_get_block_group(block_group);
1423 			list_add_tail(&block_group->bg_list, &retry_list);
1424 
1425 			trace_btrfs_skip_unused_block_group(block_group);
1426 			spin_unlock(&block_group->lock);
1427 			spin_unlock(&space_info->lock);
1428 			up_write(&space_info->groups_sem);
1429 			goto next;
1430 		}
1431 
1432 		spin_unlock(&block_group->lock);
1433 		spin_unlock(&space_info->lock);
1434 
1435 		/* We don't want to force the issue, only flip if it's ok. */
1436 		ret = inc_block_group_ro(block_group, 0);
1437 		up_write(&space_info->groups_sem);
1438 		if (ret < 0) {
1439 			ret = 0;
1440 			goto next;
1441 		}
1442 
1443 		ret = btrfs_zone_finish(block_group);
1444 		if (ret < 0) {
1445 			btrfs_dec_block_group_ro(block_group);
1446 			if (ret == -EAGAIN)
1447 				ret = 0;
1448 			goto next;
1449 		}
1450 
1451 		/*
1452 		 * Want to do this before we do anything else so we can recover
1453 		 * properly if we fail to join the transaction.
1454 		 */
1455 		trans = btrfs_start_trans_remove_block_group(fs_info,
1456 						     block_group->start);
1457 		if (IS_ERR(trans)) {
1458 			btrfs_dec_block_group_ro(block_group);
1459 			ret = PTR_ERR(trans);
1460 			goto next;
1461 		}
1462 
1463 		/*
1464 		 * We could have pending pinned extents for this block group,
1465 		 * just delete them, we don't care about them anymore.
1466 		 */
1467 		if (!clean_pinned_extents(trans, block_group)) {
1468 			btrfs_dec_block_group_ro(block_group);
1469 			goto end_trans;
1470 		}
1471 
1472 		/*
1473 		 * At this point, the block_group is read only and should fail
1474 		 * new allocations.  However, btrfs_finish_extent_commit() can
1475 		 * cause this block_group to be placed back on the discard
1476 		 * lists because now the block_group isn't fully discarded.
1477 		 * Bail here and try again later after discarding everything.
1478 		 */
1479 		spin_lock(&fs_info->discard_ctl.lock);
1480 		if (!list_empty(&block_group->discard_list)) {
1481 			spin_unlock(&fs_info->discard_ctl.lock);
1482 			btrfs_dec_block_group_ro(block_group);
1483 			btrfs_discard_queue_work(&fs_info->discard_ctl,
1484 						 block_group);
1485 			goto end_trans;
1486 		}
1487 		spin_unlock(&fs_info->discard_ctl.lock);
1488 
1489 		/* Reset pinned so btrfs_put_block_group doesn't complain */
1490 		spin_lock(&space_info->lock);
1491 		spin_lock(&block_group->lock);
1492 
1493 		btrfs_space_info_update_bytes_pinned(fs_info, space_info,
1494 						     -block_group->pinned);
1495 		space_info->bytes_readonly += block_group->pinned;
1496 		block_group->pinned = 0;
1497 
1498 		spin_unlock(&block_group->lock);
1499 		spin_unlock(&space_info->lock);
1500 
1501 		/*
1502 		 * The normal path here is an unused block group is passed here,
1503 		 * then trimming is handled in the transaction commit path.
1504 		 * Async discard interposes before this to do the trimming
1505 		 * before coming down the unused block group path as trimming
1506 		 * will no longer be done later in the transaction commit path.
1507 		 */
1508 		if (!async_trim_enabled && btrfs_test_opt(fs_info, DISCARD_ASYNC))
1509 			goto flip_async;
1510 
1511 		/*
1512 		 * DISCARD can flip during remount. On zoned filesystems, we
1513 		 * need to reset sequential-required zones.
1514 		 */
1515 		trimming = btrfs_test_opt(fs_info, DISCARD_SYNC) ||
1516 				btrfs_is_zoned(fs_info);
1517 
1518 		/* Implicit trim during transaction commit. */
1519 		if (trimming)
1520 			btrfs_freeze_block_group(block_group);
1521 
1522 		/*
1523 		 * btrfs_remove_chunk() will abort the transaction if things go
1524 		 * horribly wrong.
1525 		 */
1526 		ret = btrfs_remove_chunk(trans, block_group->start);
1527 
1528 		if (ret) {
1529 			if (trimming)
1530 				btrfs_unfreeze_block_group(block_group);
1531 			goto end_trans;
1532 		}
1533 
1534 		/*
1535 		 * If we're not mounted with -odiscard, we can just forget
1536 		 * about this block group. Otherwise we'll need to wait
1537 		 * until transaction commit to do the actual discard.
1538 		 */
1539 		if (trimming) {
1540 			spin_lock(&fs_info->unused_bgs_lock);
1541 			/*
1542 			 * A concurrent scrub might have added us to the list
1543 			 * fs_info->unused_bgs, so use a list_move operation
1544 			 * to add the block group to the deleted_bgs list.
1545 			 */
1546 			list_move(&block_group->bg_list,
1547 				  &trans->transaction->deleted_bgs);
1548 			spin_unlock(&fs_info->unused_bgs_lock);
1549 			btrfs_get_block_group(block_group);
1550 		}
1551 end_trans:
1552 		btrfs_end_transaction(trans);
1553 next:
1554 		btrfs_put_block_group(block_group);
1555 		spin_lock(&fs_info->unused_bgs_lock);
1556 	}
1557 	list_splice_tail(&retry_list, &fs_info->unused_bgs);
1558 	spin_unlock(&fs_info->unused_bgs_lock);
1559 	mutex_unlock(&fs_info->reclaim_bgs_lock);
1560 	return;
1561 
1562 flip_async:
1563 	btrfs_end_transaction(trans);
1564 	spin_lock(&fs_info->unused_bgs_lock);
1565 	list_splice_tail(&retry_list, &fs_info->unused_bgs);
1566 	spin_unlock(&fs_info->unused_bgs_lock);
1567 	mutex_unlock(&fs_info->reclaim_bgs_lock);
1568 	btrfs_put_block_group(block_group);
1569 	btrfs_discard_punt_unused_bgs_list(fs_info);
1570 }
1571 
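/*
 * Queue a block group on the list of unused block groups, so that the
 * cleaner thread can later consider deleting it in btrfs_delete_unused_bgs().
 */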
1572 void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
1573 {
1574 	struct btrfs_fs_info *fs_info = bg->fs_info;
1575 
1576 	spin_lock(&fs_info->unused_bgs_lock);
1577 	if (list_empty(&bg->bg_list)) {
1578 		btrfs_get_block_group(bg);
1579 		trace_btrfs_add_unused_block_group(bg);
1580 		list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
1581 	} else if (!test_bit(BLOCK_GROUP_FLAG_NEW, &bg->runtime_flags)) {
1582 		/* Pull out the block group from the reclaim_bgs list. */
1583 		trace_btrfs_add_unused_block_group(bg);
1584 		list_move_tail(&bg->bg_list, &fs_info->unused_bgs);
1585 	}
1586 	spin_unlock(&fs_info->unused_bgs_lock);
1587 }
1588 
1589 /*
1590  * We want block groups with a low number of used bytes to be in the beginning
1591  * of the list, so they will get reclaimed first.
1592  */
1593 static int reclaim_bgs_cmp(void *unused, const struct list_head *a,
1594 			   const struct list_head *b)
1595 {
1596 	const struct btrfs_block_group *bg1, *bg2;
1597 
1598 	bg1 = list_entry(a, struct btrfs_block_group, bg_list);
1599 	bg2 = list_entry(b, struct btrfs_block_group, bg_list);
1600 
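	/*
	 * list_sort() orders @a after @b when the cmp callback returns a
	 * positive value, so this sorts the list by ascending ->used.
	 */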
1601 	return bg1->used > bg2->used;
1602 }
1603 
1604 static inline bool btrfs_should_reclaim(struct btrfs_fs_info *fs_info)
1605 {
1606 	if (btrfs_is_zoned(fs_info))
1607 		return btrfs_zoned_should_reclaim(fs_info);
1608 	return true;
1609 }
1610 
1611 void btrfs_reclaim_bgs_work(struct work_struct *work)
1612 {
1613 	struct btrfs_fs_info *fs_info =
1614 		container_of(work, struct btrfs_fs_info, reclaim_bgs_work);
1615 	struct btrfs_block_group *bg;
1616 	struct btrfs_space_info *space_info;
1617 
1618 	if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
1619 		return;
1620 
1621 	if (btrfs_fs_closing(fs_info))
1622 		return;
1623 
1624 	if (!btrfs_should_reclaim(fs_info))
1625 		return;
1626 
1627 	sb_start_write(fs_info->sb);
1628 
1629 	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
1630 		sb_end_write(fs_info->sb);
1631 		return;
1632 	}
1633 
1634 	/*
1635 	 * Long running balances can keep us blocked here for eternity, so
1636 	 * simply skip reclaim if we're unable to get the mutex.
1637 	 */
1638 	if (!mutex_trylock(&fs_info->reclaim_bgs_lock)) {
1639 		btrfs_exclop_finish(fs_info);
1640 		sb_end_write(fs_info->sb);
1641 		return;
1642 	}
1643 
1644 	spin_lock(&fs_info->unused_bgs_lock);
1645 	/*
1646 	 * Sort happens under lock because we can't simply splice it and sort.
1647 	 * The block groups might still be in use and reachable via bg_list,
1648 	 * and their presence in the reclaim_bgs list must be preserved.
1649 	 */
1650 	list_sort(NULL, &fs_info->reclaim_bgs, reclaim_bgs_cmp);
1651 	while (!list_empty(&fs_info->reclaim_bgs)) {
1652 		u64 zone_unusable;
1653 		int ret = 0;
1654 
1655 		bg = list_first_entry(&fs_info->reclaim_bgs,
1656 				      struct btrfs_block_group,
1657 				      bg_list);
1658 		list_del_init(&bg->bg_list);
1659 
1660 		space_info = bg->space_info;
1661 		spin_unlock(&fs_info->unused_bgs_lock);
1662 
1663 		/* Don't race with allocators so take the groups_sem */
1664 		down_write(&space_info->groups_sem);
1665 
1666 		spin_lock(&bg->lock);
1667 		if (bg->reserved || bg->pinned || bg->ro) {
1668 			/*
1669 			 * We want to bail if we made new allocations or have
1670 			 * outstanding allocations in this block group.  We do
1671 			 * the ro check in case balance is currently acting on
1672 			 * this block group.
1673 			 */
1674 			spin_unlock(&bg->lock);
1675 			up_write(&space_info->groups_sem);
1676 			goto next;
1677 		}
1678 		spin_unlock(&bg->lock);
1679 
1680 		/*
1681 		 * Get out fast, in case we're read-only or unmounting the
1682 		 * filesystem. It is OK to drop block groups from the list even
1683 		 * for the read-only case. As we did sb_start_write(),
1684 		 * "mount -o remount,ro" won't happen and read-only filesystem
1685 		 * means it is forced read-only due to a fatal error. So, it
1686 		 * never gets back to read-write to let us reclaim again.
1687 		 */
1688 		if (btrfs_need_cleaner_sleep(fs_info)) {
1689 			up_write(&space_info->groups_sem);
1690 			goto next;
1691 		}
1692 
1693 		/*
1694 		 * Cache the zone_unusable value before turning the block group
1695 		 * read-only. As soon as the block group is read-only, its
1696 		 * zone_unusable value gets moved to the block group's read-only
1697 		 * bytes and isn't available for calculations anymore.
1698 		 */
1699 		zone_unusable = bg->zone_unusable;
1700 		ret = inc_block_group_ro(bg, 0);
1701 		up_write(&space_info->groups_sem);
1702 		if (ret < 0)
1703 			goto next;
1704 
1705 		btrfs_info(fs_info,
1706 			"reclaiming chunk %llu with %llu%% used %llu%% unusable",
1707 				bg->start,
1708 				div64_u64(bg->used * 100, bg->length),
1709 				div64_u64(zone_unusable * 100, bg->length));
1710 		trace_btrfs_reclaim_block_group(bg);
1711 		ret = btrfs_relocate_chunk(fs_info, bg->start);
1712 		if (ret) {
1713 			btrfs_dec_block_group_ro(bg);
1714 			btrfs_err(fs_info, "error relocating chunk %llu",
1715 				  bg->start);
1716 		}
1717 
1718 next:
1719 		if (ret)
1720 			btrfs_mark_bg_to_reclaim(bg);
1721 		btrfs_put_block_group(bg);
1722 
1723 		mutex_unlock(&fs_info->reclaim_bgs_lock);
1724 		/*
1725 		 * Reclaiming all the block groups in the list can take really
1726 		 * long.  Prioritize cleaning up unused block groups.
1727 		 */
1728 		btrfs_delete_unused_bgs(fs_info);
1729 		/*
1730 		 * If we are interrupted by a balance, we can just bail out. The
1731 		 * cleaner thread will restart it if necessary.
1732 		 */
1733 		if (!mutex_trylock(&fs_info->reclaim_bgs_lock))
1734 			goto end;
1735 		spin_lock(&fs_info->unused_bgs_lock);
1736 	}
1737 	spin_unlock(&fs_info->unused_bgs_lock);
1738 	mutex_unlock(&fs_info->reclaim_bgs_lock);
1739 end:
1740 	btrfs_exclop_finish(fs_info);
1741 	sb_end_write(fs_info->sb);
1742 }
1743 
1744 void btrfs_reclaim_bgs(struct btrfs_fs_info *fs_info)
1745 {
1746 	spin_lock(&fs_info->unused_bgs_lock);
1747 	if (!list_empty(&fs_info->reclaim_bgs))
1748 		queue_work(system_unbound_wq, &fs_info->reclaim_bgs_work);
1749 	spin_unlock(&fs_info->unused_bgs_lock);
1750 }
1751 
1752 void btrfs_mark_bg_to_reclaim(struct btrfs_block_group *bg)
1753 {
1754 	struct btrfs_fs_info *fs_info = bg->fs_info;
1755 
1756 	spin_lock(&fs_info->unused_bgs_lock);
1757 	if (list_empty(&bg->bg_list)) {
1758 		btrfs_get_block_group(bg);
1759 		trace_btrfs_add_reclaim_block_group(bg);
1760 		list_add_tail(&bg->bg_list, &fs_info->reclaim_bgs);
1761 	}
1762 	spin_unlock(&fs_info->unused_bgs_lock);
1763 }
1764 
1765 static int read_bg_from_eb(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
1766 			   struct btrfs_path *path)
1767 {
1768 	struct extent_map_tree *em_tree;
1769 	struct extent_map *em;
1770 	struct btrfs_block_group_item bg;
1771 	struct extent_buffer *leaf;
1772 	int slot;
1773 	u64 flags;
1774 	int ret = 0;
1775 
1776 	slot = path->slots[0];
1777 	leaf = path->nodes[0];
1778 
1779 	em_tree = &fs_info->mapping_tree;
1780 	read_lock(&em_tree->lock);
1781 	em = lookup_extent_mapping(em_tree, key->objectid, key->offset);
1782 	read_unlock(&em_tree->lock);
1783 	if (!em) {
1784 		btrfs_err(fs_info,
1785 			  "logical %llu len %llu found bg but no related chunk",
1786 			  key->objectid, key->offset);
1787 		return -ENOENT;
1788 	}
1789 
1790 	if (em->start != key->objectid || em->len != key->offset) {
1791 		btrfs_err(fs_info,
1792 			"block group %llu len %llu mismatch with chunk %llu len %llu",
1793 			key->objectid, key->offset, em->start, em->len);
1794 		ret = -EUCLEAN;
1795 		goto out_free_em;
1796 	}
1797 
1798 	read_extent_buffer(leaf, &bg, btrfs_item_ptr_offset(leaf, slot),
1799 			   sizeof(bg));
1800 	flags = btrfs_stack_block_group_flags(&bg) &
1801 		BTRFS_BLOCK_GROUP_TYPE_MASK;
1802 
1803 	if (flags != (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
1804 		btrfs_err(fs_info,
1805 "block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
1806 			  key->objectid, key->offset, flags,
1807 			  (BTRFS_BLOCK_GROUP_TYPE_MASK & em->map_lookup->type));
1808 		ret = -EUCLEAN;
1809 	}
1810 
1811 out_free_em:
1812 	free_extent_map(em);
1813 	return ret;
1814 }
1815 
1816 static int find_first_block_group(struct btrfs_fs_info *fs_info,
1817 				  struct btrfs_path *path,
1818 				  struct btrfs_key *key)
1819 {
1820 	struct btrfs_root *root = btrfs_block_group_root(fs_info);
1821 	int ret;
1822 	struct btrfs_key found_key;
1823 
1824 	btrfs_for_each_slot(root, key, &found_key, path, ret) {
1825 		if (found_key.objectid >= key->objectid &&
1826 		    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
1827 			return read_bg_from_eb(fs_info, &found_key, path);
1828 		}
1829 	}
1830 	return ret;
1831 }
1832 
1833 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
1834 {
1835 	u64 extra_flags = chunk_to_extended(flags) &
1836 				BTRFS_EXTENDED_PROFILE_MASK;
1837 
1838 	write_seqlock(&fs_info->profiles_lock);
1839 	if (flags & BTRFS_BLOCK_GROUP_DATA)
1840 		fs_info->avail_data_alloc_bits |= extra_flags;
1841 	if (flags & BTRFS_BLOCK_GROUP_METADATA)
1842 		fs_info->avail_metadata_alloc_bits |= extra_flags;
1843 	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
1844 		fs_info->avail_system_alloc_bits |= extra_flags;
1845 	write_sequnlock(&fs_info->profiles_lock);
1846 }
1847 
1848 /**
1849  * Map a physical disk address to a list of logical addresses
1850  *
1851  * @fs_info:       the filesystem
1852  * @chunk_start:   logical address of block group
1853  * @bdev:	   physical device to resolve, can be NULL to indicate any device
1854  * @physical:	   physical address to map to logical addresses
1855  * @logical:	   return array of logical addresses which map to @physical
1856  * @naddrs:	   length of @logical
1857  * @stripe_len:    size of IO stripe for the given block group
1858  *
1859  * Maps a particular @physical disk address to a list of @logical addresses.
1860  * Used primarily to exclude those portions of a block group that contain super
1861  * block copies.
1862  */
1863 int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
1864 		     struct block_device *bdev, u64 physical, u64 **logical,
1865 		     int *naddrs, int *stripe_len)
1866 {
1867 	struct extent_map *em;
1868 	struct map_lookup *map;
1869 	u64 *buf;
1870 	u64 bytenr;
1871 	u64 data_stripe_length;
1872 	u64 io_stripe_size;
1873 	int i, nr = 0;
1874 	int ret = 0;
1875 
1876 	em = btrfs_get_chunk_map(fs_info, chunk_start, 1);
1877 	if (IS_ERR(em))
1878 		return -EIO;
1879 
1880 	map = em->map_lookup;
1881 	data_stripe_length = em->orig_block_len;
1882 	io_stripe_size = map->stripe_len;
1883 	chunk_start = em->start;
1884 
1885 	/* For RAID5/6 adjust to a full IO stripe length */
1886 	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
1887 		io_stripe_size = map->stripe_len * nr_data_stripes(map);
1888 
1889 	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
1890 	if (!buf) {
1891 		ret = -ENOMEM;
1892 		goto out;
1893 	}
1894 
1895 	for (i = 0; i < map->num_stripes; i++) {
1896 		bool already_inserted = false;
1897 		u64 stripe_nr;
1898 		u64 offset;
1899 		int j;
1900 
1901 		if (!in_range(physical, map->stripes[i].physical,
1902 			      data_stripe_length))
1903 			continue;
1904 
1905 		if (bdev && map->stripes[i].dev->bdev != bdev)
1906 			continue;
1907 
1908 		stripe_nr = physical - map->stripes[i].physical;
1909 		stripe_nr = div64_u64_rem(stripe_nr, map->stripe_len, &offset);
1910 
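		/*
		 * For striped profiles the logical stripes rotate across all
		 * the devices, so convert this device's stripe index into the
		 * logical stripe number; RAID10 keeps sub_stripes mirrored
		 * copies of each logical stripe.
		 */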
1911 		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
1912 				 BTRFS_BLOCK_GROUP_RAID10)) {
1913 			stripe_nr = stripe_nr * map->num_stripes + i;
1914 			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
1915 		}
1916 		/*
1917 		 * The remaining case would be for RAID56, multiply by
1918 		 * nr_data_stripes().  Alternatively, just use rmap_len below
1919 		 * instead of map->stripe_len
1920 		 */
1921 
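		/*
		 * Rebuild the logical address: every full IO stripe before
		 * this one advances the logical address by io_stripe_size from
		 * the chunk start, plus the offset inside the current stripe.
		 */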
1922 		bytenr = chunk_start + stripe_nr * io_stripe_size + offset;
1923 
1924 		/* Ensure we don't add duplicate addresses */
1925 		for (j = 0; j < nr; j++) {
1926 			if (buf[j] == bytenr) {
1927 				already_inserted = true;
1928 				break;
1929 			}
1930 		}
1931 
1932 		if (!already_inserted)
1933 			buf[nr++] = bytenr;
1934 	}
1935 
1936 	*logical = buf;
1937 	*naddrs = nr;
1938 	*stripe_len = io_stripe_size;
1939 out:
1940 	free_extent_map(em);
1941 	return ret;
1942 }
1943 
1944 static int exclude_super_stripes(struct btrfs_block_group *cache)
1945 {
1946 	struct btrfs_fs_info *fs_info = cache->fs_info;
1947 	const bool zoned = btrfs_is_zoned(fs_info);
1948 	u64 bytenr;
1949 	u64 *logical;
1950 	int stripe_len;
1951 	int i, nr, ret;
1952 
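	/*
	 * The primary super block lives at BTRFS_SUPER_INFO_OFFSET (64KiB), so
	 * a block group that starts below that offset overlaps it and must
	 * exclude the range up to BTRFS_SUPER_INFO_OFFSET.
	 */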
1953 	if (cache->start < BTRFS_SUPER_INFO_OFFSET) {
1954 		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->start;
1955 		cache->bytes_super += stripe_len;
1956 		ret = btrfs_add_excluded_extent(fs_info, cache->start,
1957 						stripe_len);
1958 		if (ret)
1959 			return ret;
1960 	}
1961 
1962 	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
1963 		bytenr = btrfs_sb_offset(i);
1964 		ret = btrfs_rmap_block(fs_info, cache->start, NULL,
1965 				       bytenr, &logical, &nr, &stripe_len);
1966 		if (ret)
1967 			return ret;
1968 
1969 		/* Shouldn't have super stripes in sequential zones */
1970 		if (zoned && nr) {
1971 			kfree(logical);
1972 			btrfs_err(fs_info,
1973 			"zoned: block group %llu must not contain super block",
1974 				  cache->start);
1975 			return -EUCLEAN;
1976 		}
1977 
1978 		while (nr--) {
1979 			u64 len = min_t(u64, stripe_len,
1980 				cache->start + cache->length - logical[nr]);
1981 
1982 			cache->bytes_super += len;
1983 			ret = btrfs_add_excluded_extent(fs_info, logical[nr],
1984 							len);
1985 			if (ret) {
1986 				kfree(logical);
1987 				return ret;
1988 			}
1989 		}
1990 
1991 		kfree(logical);
1992 	}
1993 	return 0;
1994 }
1995 
1996 static struct btrfs_block_group *btrfs_create_block_group_cache(
1997 		struct btrfs_fs_info *fs_info, u64 start)
1998 {
1999 	struct btrfs_block_group *cache;
2000 
2001 	cache = kzalloc(sizeof(*cache), GFP_NOFS);
2002 	if (!cache)
2003 		return NULL;
2004 
2005 	cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
2006 					GFP_NOFS);
2007 	if (!cache->free_space_ctl) {
2008 		kfree(cache);
2009 		return NULL;
2010 	}
2011 
2012 	cache->start = start;
2013 
2014 	cache->fs_info = fs_info;
2015 	cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
2016 
2017 	cache->discard_index = BTRFS_DISCARD_INDEX_UNUSED;
2018 
2019 	refcount_set(&cache->refs, 1);
2020 	spin_lock_init(&cache->lock);
2021 	init_rwsem(&cache->data_rwsem);
2022 	INIT_LIST_HEAD(&cache->list);
2023 	INIT_LIST_HEAD(&cache->cluster_list);
2024 	INIT_LIST_HEAD(&cache->bg_list);
2025 	INIT_LIST_HEAD(&cache->ro_list);
2026 	INIT_LIST_HEAD(&cache->discard_list);
2027 	INIT_LIST_HEAD(&cache->dirty_list);
2028 	INIT_LIST_HEAD(&cache->io_list);
2029 	INIT_LIST_HEAD(&cache->active_bg_list);
2030 	btrfs_init_free_space_ctl(cache, cache->free_space_ctl);
2031 	atomic_set(&cache->frozen, 0);
2032 	mutex_init(&cache->free_space_lock);
2033 	cache->full_stripe_locks_root.root = RB_ROOT;
2034 	mutex_init(&cache->full_stripe_locks_root.lock);
2035 
2036 	return cache;
2037 }
2038 
2039 /*
2040  * Iterate all chunks and verify that each of them has the corresponding block
2041  * group
2042  */
2043 static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
2044 {
2045 	struct extent_map_tree *map_tree = &fs_info->mapping_tree;
2046 	struct extent_map *em;
2047 	struct btrfs_block_group *bg;
2048 	u64 start = 0;
2049 	int ret = 0;
2050 
2051 	while (1) {
2052 		read_lock(&map_tree->lock);
2053 		/*
2054 		 * lookup_extent_mapping will return the first extent map
2055 		 * intersecting the range, so setting @len to 1 is enough to
2056 		 * get the first chunk.
2057 		 */
2058 		em = lookup_extent_mapping(map_tree, start, 1);
2059 		read_unlock(&map_tree->lock);
2060 		if (!em)
2061 			break;
2062 
2063 		bg = btrfs_lookup_block_group(fs_info, em->start);
2064 		if (!bg) {
2065 			btrfs_err(fs_info,
2066 	"chunk start=%llu len=%llu doesn't have corresponding block group",
2067 				     em->start, em->len);
2068 			ret = -EUCLEAN;
2069 			free_extent_map(em);
2070 			break;
2071 		}
2072 		if (bg->start != em->start || bg->length != em->len ||
2073 		    (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
2074 		    (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
2075 			btrfs_err(fs_info,
2076 "chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
2077 				em->start, em->len,
2078 				em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
2079 				bg->start, bg->length,
2080 				bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
2081 			ret = -EUCLEAN;
2082 			free_extent_map(em);
2083 			btrfs_put_block_group(bg);
2084 			break;
2085 		}
2086 		start = em->start + em->len;
2087 		free_extent_map(em);
2088 		btrfs_put_block_group(bg);
2089 	}
2090 	return ret;
2091 }
2092 
2093 static int read_one_block_group(struct btrfs_fs_info *info,
2094 				struct btrfs_block_group_item *bgi,
2095 				const struct btrfs_key *key,
2096 				int need_clear)
2097 {
2098 	struct btrfs_block_group *cache;
2099 	const bool mixed = btrfs_fs_incompat(info, MIXED_GROUPS);
2100 	int ret;
2101 
2102 	ASSERT(key->type == BTRFS_BLOCK_GROUP_ITEM_KEY);
2103 
2104 	cache = btrfs_create_block_group_cache(info, key->objectid);
2105 	if (!cache)
2106 		return -ENOMEM;
2107 
2108 	cache->length = key->offset;
2109 	cache->used = btrfs_stack_block_group_used(bgi);
2110 	cache->flags = btrfs_stack_block_group_flags(bgi);
2111 	cache->global_root_id = btrfs_stack_block_group_chunk_objectid(bgi);
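	/*
	 * The on-disk chunk_objectid field doubles as the global root id on
	 * extent-tree-v2 filesystems (see calculate_global_root_id()).
	 */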
2112 
2113 	set_free_space_tree_thresholds(cache);
2114 
2115 	if (need_clear) {
2116 		/*
2117 		 * When we mount with old space cache, we need to
2118 		 * set BTRFS_DC_CLEAR and set the dirty flag.
2119 		 *
2120 		 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
2121 		 *    truncate the old free space cache inode and
2122 		 *    set up a new one.
2123 		 * b) Setting the 'dirty flag' makes sure that we flush
2124 		 *    the new space cache info onto disk.
2125 		 */
2126 		if (btrfs_test_opt(info, SPACE_CACHE))
2127 			cache->disk_cache_state = BTRFS_DC_CLEAR;
2128 	}
2129 	if (!mixed && ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
2130 	    (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
2131 			btrfs_err(info,
2132 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
2133 				  cache->start);
2134 			ret = -EINVAL;
2135 			goto error;
2136 	}
2137 
2138 	ret = btrfs_load_block_group_zone_info(cache, false);
2139 	if (ret) {
2140 		btrfs_err(info, "zoned: failed to load zone info of bg %llu",
2141 			  cache->start);
2142 		goto error;
2143 	}
2144 
2145 	/*
2146 	 * We need to exclude the super stripes now so that the space info has
2147 	 * super bytes accounted for, otherwise we'll think we have more space
2148 	 * than we actually do.
2149 	 */
2150 	ret = exclude_super_stripes(cache);
2151 	if (ret) {
2152 		/* We may have excluded something, so call this just in case. */
2153 		btrfs_free_excluded_extents(cache);
2154 		goto error;
2155 	}
2156 
2157 	/*
2158 	 * For a zoned filesystem, space after the allocation offset is the only
2159 	 * free space for a block group. So, we don't need any caching work.
2160 	 * btrfs_calc_zone_unusable() will set the amount of free space and
2161 	 * zone_unusable space.
2162 	 *
2163 	 * For a regular filesystem, check for two cases: either we are full, and
2164 	 * therefore don't need to bother with the caching work since we won't
2165 	 * find any space, or we are empty, and we can just add all the space
2166 	 * in and be done with it.  This saves us _a_lot_ of time, particularly
2167 	 * in the full case.
2168 	 */
2169 	if (btrfs_is_zoned(info)) {
2170 		btrfs_calc_zone_unusable(cache);
2171 		/* Should not have any excluded extents. Just in case, though. */
2172 		btrfs_free_excluded_extents(cache);
2173 	} else if (cache->length == cache->used) {
2174 		cache->cached = BTRFS_CACHE_FINISHED;
2175 		btrfs_free_excluded_extents(cache);
2176 	} else if (cache->used == 0) {
2177 		cache->cached = BTRFS_CACHE_FINISHED;
2178 		ret = add_new_free_space(cache, cache->start,
2179 					 cache->start + cache->length, NULL);
2180 		btrfs_free_excluded_extents(cache);
2181 		if (ret)
2182 			goto error;
2183 	}
2184 
2185 	ret = btrfs_add_block_group_cache(info, cache);
2186 	if (ret) {
2187 		btrfs_remove_free_space_cache(cache);
2188 		goto error;
2189 	}
2190 	trace_btrfs_add_block_group(info, cache, 0);
2191 	btrfs_add_bg_to_space_info(info, cache);
2192 
2193 	set_avail_alloc_bits(info, cache->flags);
2194 	if (btrfs_chunk_writeable(info, cache->start)) {
2195 		if (cache->used == 0) {
2196 			ASSERT(list_empty(&cache->bg_list));
2197 			if (btrfs_test_opt(info, DISCARD_ASYNC))
2198 				btrfs_discard_queue_work(&info->discard_ctl, cache);
2199 			else
2200 				btrfs_mark_bg_unused(cache);
2201 		}
2202 	} else {
2203 		inc_block_group_ro(cache, 1);
2204 	}
2205 
2206 	return 0;
2207 error:
2208 	btrfs_put_block_group(cache);
2209 	return ret;
2210 }
2211 
2212 static int fill_dummy_bgs(struct btrfs_fs_info *fs_info)
2213 {
2214 	struct extent_map_tree *em_tree = &fs_info->mapping_tree;
2215 	struct rb_node *node;
2216 	int ret = 0;
2217 
2218 	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
2219 		struct extent_map *em;
2220 		struct map_lookup *map;
2221 		struct btrfs_block_group *bg;
2222 
2223 		em = rb_entry(node, struct extent_map, rb_node);
2224 		map = em->map_lookup;
2225 		bg = btrfs_create_block_group_cache(fs_info, em->start);
2226 		if (!bg) {
2227 			ret = -ENOMEM;
2228 			break;
2229 		}
2230 
2231 		/* Fill dummy cache as FULL */
2232 		bg->length = em->len;
2233 		bg->flags = map->type;
2234 		bg->cached = BTRFS_CACHE_FINISHED;
2235 		bg->used = em->len;
2236 		bg->flags = map->type;
2237 		ret = btrfs_add_block_group_cache(fs_info, bg);
2238 		/*
2239 		 * We may have some valid block group cache added already, in
2240 		 * that case we skip to the next one.
2241 		 */
2242 		if (ret == -EEXIST) {
2243 			ret = 0;
2244 			btrfs_put_block_group(bg);
2245 			continue;
2246 		}
2247 
2248 		if (ret) {
2249 			btrfs_remove_free_space_cache(bg);
2250 			btrfs_put_block_group(bg);
2251 			break;
2252 		}
2253 
2254 		btrfs_add_bg_to_space_info(fs_info, bg);
2255 
2256 		set_avail_alloc_bits(fs_info, bg->flags);
2257 	}
2258 	if (!ret)
2259 		btrfs_init_global_block_rsv(fs_info);
2260 	return ret;
2261 }
2262 
2263 int btrfs_read_block_groups(struct btrfs_fs_info *info)
2264 {
2265 	struct btrfs_root *root = btrfs_block_group_root(info);
2266 	struct btrfs_path *path;
2267 	int ret;
2268 	struct btrfs_block_group *cache;
2269 	struct btrfs_space_info *space_info;
2270 	struct btrfs_key key;
2271 	int need_clear = 0;
2272 	u64 cache_gen;
2273 
2274 	/*
2275 	 * Either no extent root (with ibadroots rescue option) or we have
2276 	 * unsupported RO options. The fs can never be mounted read-write, so no
2277 	 * need to waste time searching block group items.
2278 	 *
2279 	 * This also allows new extent tree related changes to be RO compat,
2280 	 * no need for a full incompat flag.
2281 	 */
2282 	if (!root || (btrfs_super_compat_ro_flags(info->super_copy) &
2283 		      ~BTRFS_FEATURE_COMPAT_RO_SUPP))
2284 		return fill_dummy_bgs(info);
2285 
2286 	key.objectid = 0;
2287 	key.offset = 0;
2288 	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
2289 	path = btrfs_alloc_path();
2290 	if (!path)
2291 		return -ENOMEM;
2292 
2293 	cache_gen = btrfs_super_cache_generation(info->super_copy);
2294 	if (btrfs_test_opt(info, SPACE_CACHE) &&
2295 	    btrfs_super_generation(info->super_copy) != cache_gen)
2296 		need_clear = 1;
2297 	if (btrfs_test_opt(info, CLEAR_CACHE))
2298 		need_clear = 1;
2299 
2300 	while (1) {
2301 		struct btrfs_block_group_item bgi;
2302 		struct extent_buffer *leaf;
2303 		int slot;
2304 
2305 		ret = find_first_block_group(info, path, &key);
2306 		if (ret > 0)
2307 			break;
2308 		if (ret != 0)
2309 			goto error;
2310 
2311 		leaf = path->nodes[0];
2312 		slot = path->slots[0];
2313 
2314 		read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
2315 				   sizeof(bgi));
2316 
2317 		btrfs_item_key_to_cpu(leaf, &key, slot);
2318 		btrfs_release_path(path);
2319 		ret = read_one_block_group(info, &bgi, &key, need_clear);
2320 		if (ret < 0)
2321 			goto error;
2322 		key.objectid += key.offset;
2323 		key.offset = 0;
2324 	}
2325 	btrfs_release_path(path);
2326 
2327 	list_for_each_entry(space_info, &info->space_info, list) {
2328 		int i;
2329 
2330 		for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
2331 			if (list_empty(&space_info->block_groups[i]))
2332 				continue;
2333 			cache = list_first_entry(&space_info->block_groups[i],
2334 						 struct btrfs_block_group,
2335 						 list);
2336 			btrfs_sysfs_add_block_group_type(cache);
2337 		}
2338 
2339 		if (!(btrfs_get_alloc_profile(info, space_info->flags) &
2340 		      (BTRFS_BLOCK_GROUP_RAID10 |
2341 		       BTRFS_BLOCK_GROUP_RAID1_MASK |
2342 		       BTRFS_BLOCK_GROUP_RAID56_MASK |
2343 		       BTRFS_BLOCK_GROUP_DUP)))
2344 			continue;
2345 		/*
2346 		 * Avoid allocating from un-mirrored block group if there are
2347 		 * mirrored block groups.
2348 		 */
2349 		list_for_each_entry(cache,
2350 				&space_info->block_groups[BTRFS_RAID_RAID0],
2351 				list)
2352 			inc_block_group_ro(cache, 1);
2353 		list_for_each_entry(cache,
2354 				&space_info->block_groups[BTRFS_RAID_SINGLE],
2355 				list)
2356 			inc_block_group_ro(cache, 1);
2357 	}
2358 
2359 	btrfs_init_global_block_rsv(info);
2360 	ret = check_chunk_block_group_mappings(info);
2361 error:
2362 	btrfs_free_path(path);
2363 	/*
2364 	 * We've hit some error while reading the extent tree, and have
2365 	 * rescue=ibadroots mount option.
2366 	 * Try to fill the tree using dummy block groups so that the user can
2367 	 * continue to mount and grab their data.
2368 	 */
2369 	if (ret && btrfs_test_opt(info, IGNOREBADROOTS))
2370 		ret = fill_dummy_bgs(info);
2371 	return ret;
2372 }
2373 
2374 /*
2375  * This function, insert_block_group_item(), belongs to phase 2 of chunk
2376  * allocation.
2377  *
2378  * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
2379  * phases.
2380  */
2381 static int insert_block_group_item(struct btrfs_trans_handle *trans,
2382 				   struct btrfs_block_group *block_group)
2383 {
2384 	struct btrfs_fs_info *fs_info = trans->fs_info;
2385 	struct btrfs_block_group_item bgi;
2386 	struct btrfs_root *root = btrfs_block_group_root(fs_info);
2387 	struct btrfs_key key;
2388 
2389 	spin_lock(&block_group->lock);
2390 	btrfs_set_stack_block_group_used(&bgi, block_group->used);
2391 	btrfs_set_stack_block_group_chunk_objectid(&bgi,
2392 						   block_group->global_root_id);
2393 	btrfs_set_stack_block_group_flags(&bgi, block_group->flags);
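	/* Block group items are keyed as (start, BLOCK_GROUP_ITEM, length). */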
2394 	key.objectid = block_group->start;
2395 	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
2396 	key.offset = block_group->length;
2397 	spin_unlock(&block_group->lock);
2398 
2399 	return btrfs_insert_item(trans, root, &key, &bgi, sizeof(bgi));
2400 }
2401 
2402 static int insert_dev_extent(struct btrfs_trans_handle *trans,
2403 			    struct btrfs_device *device, u64 chunk_offset,
2404 			    u64 start, u64 num_bytes)
2405 {
2406 	struct btrfs_fs_info *fs_info = device->fs_info;
2407 	struct btrfs_root *root = fs_info->dev_root;
2408 	struct btrfs_path *path;
2409 	struct btrfs_dev_extent *extent;
2410 	struct extent_buffer *leaf;
2411 	struct btrfs_key key;
2412 	int ret;
2413 
2414 	WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
2415 	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
2416 	path = btrfs_alloc_path();
2417 	if (!path)
2418 		return -ENOMEM;
2419 
2420 	key.objectid = device->devid;
2421 	key.type = BTRFS_DEV_EXTENT_KEY;
2422 	key.offset = start;
2423 	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*extent));
2424 	if (ret)
2425 		goto out;
2426 
2427 	leaf = path->nodes[0];
2428 	extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
2429 	btrfs_set_dev_extent_chunk_tree(leaf, extent, BTRFS_CHUNK_TREE_OBJECTID);
2430 	btrfs_set_dev_extent_chunk_objectid(leaf, extent,
2431 					    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
2432 	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
2433 
2434 	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
2435 	btrfs_mark_buffer_dirty(leaf);
2436 out:
2437 	btrfs_free_path(path);
2438 	return ret;
2439 }
2440 
2441 /*
2442  * This function belongs to phase 2.
2443  *
2444  * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
2445  * phases.
2446  */
2447 static int insert_dev_extents(struct btrfs_trans_handle *trans,
2448 				   u64 chunk_offset, u64 chunk_size)
2449 {
2450 	struct btrfs_fs_info *fs_info = trans->fs_info;
2451 	struct btrfs_device *device;
2452 	struct extent_map *em;
2453 	struct map_lookup *map;
2454 	u64 dev_offset;
2455 	u64 stripe_size;
2456 	int i;
2457 	int ret = 0;
2458 
2459 	em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
2460 	if (IS_ERR(em))
2461 		return PTR_ERR(em);
2462 
2463 	map = em->map_lookup;
2464 	stripe_size = em->orig_block_len;
2465 
2466 	/*
2467 	 * Take the device list mutex to prevent races with the final phase of
2468 	 * a device replace operation that replaces the device object associated
2469 	 * with the map's stripes, because the device object's id can change
2470 	 * at any time during that final phase of the device replace operation
2471 	 * (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
2472 	 * replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
2473 	 * resulting in persisting a device extent item with such ID.
2474 	 */
2475 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
2476 	for (i = 0; i < map->num_stripes; i++) {
2477 		device = map->stripes[i].dev;
2478 		dev_offset = map->stripes[i].physical;
2479 
2480 		ret = insert_dev_extent(trans, device, chunk_offset, dev_offset,
2481 				       stripe_size);
2482 		if (ret)
2483 			break;
2484 	}
2485 	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2486 
2487 	free_extent_map(em);
2488 	return ret;
2489 }
2490 
2491 /*
2492  * This function, btrfs_create_pending_block_groups(), belongs to phase 2 of
2493  * chunk allocation.
2494  *
2495  * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
2496  * phases.
2497  */
2498 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
2499 {
2500 	struct btrfs_fs_info *fs_info = trans->fs_info;
2501 	struct btrfs_block_group *block_group;
2502 	int ret = 0;
2503 
2504 	while (!list_empty(&trans->new_bgs)) {
2505 		int index;
2506 
2507 		block_group = list_first_entry(&trans->new_bgs,
2508 					       struct btrfs_block_group,
2509 					       bg_list);
2510 		if (ret)
2511 			goto next;
2512 
2513 		index = btrfs_bg_flags_to_raid_index(block_group->flags);
2514 
2515 		ret = insert_block_group_item(trans, block_group);
2516 		if (ret)
2517 			btrfs_abort_transaction(trans, ret);
2518 		if (!test_bit(BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED,
2519 			      &block_group->runtime_flags)) {
2520 			mutex_lock(&fs_info->chunk_mutex);
2521 			ret = btrfs_chunk_alloc_add_chunk_item(trans, block_group);
2522 			mutex_unlock(&fs_info->chunk_mutex);
2523 			if (ret)
2524 				btrfs_abort_transaction(trans, ret);
2525 		}
2526 		ret = insert_dev_extents(trans, block_group->start,
2527 					 block_group->length);
2528 		if (ret)
2529 			btrfs_abort_transaction(trans, ret);
2530 		add_block_group_free_space(trans, block_group);
2531 
2532 		/*
2533 		 * If we restriped during balance, we may have added a new raid
2534 		 * type, so now add the sysfs entries when it is safe to do so.
2535 		 * We don't have to worry about locking here as it's handled in
2536 		 * btrfs_sysfs_add_block_group_type.
2537 		 */
2538 		if (block_group->space_info->block_group_kobjs[index] == NULL)
2539 			btrfs_sysfs_add_block_group_type(block_group);
2540 
2541 		/* Already aborted the transaction if it failed. */
2542 next:
2543 		btrfs_delayed_refs_rsv_release(fs_info, 1);
2544 		list_del_init(&block_group->bg_list);
2545 		clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags);
2546 	}
2547 	btrfs_trans_release_chunk_metadata(trans);
2548 }
2549 
2550 /*
2551  * For extent tree v2 we use the block_group_item->chunk_objectid to point at our
2552  * global root id.  For v1 it's always set to BTRFS_FIRST_CHUNK_TREE_OBJECTID.
2553  */
2554 static u64 calculate_global_root_id(struct btrfs_fs_info *fs_info, u64 offset)
2555 {
2556 	u64 div = SZ_1G;
2557 	u64 index;
2558 
2559 	if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))
2560 		return BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2561 
2562 	/* For a smaller filesystem (<= 10GiB), index based on 128MiB instead of 1GiB. */
2563 	if (btrfs_super_total_bytes(fs_info->super_copy) <= (SZ_1G * 10ULL))
2564 		div = SZ_128M;
2565 
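	/*
	 * e.g. with 4 global roots and div = 1GiB, a chunk at offset 5GiB maps
	 * to global root index 5 % 4 = 1.
	 */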
2566 	offset = div64_u64(offset, div);
2567 	div64_u64_rem(offset, fs_info->nr_global_roots, &index);
2568 	return index;
2569 }
2570 
2571 struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *trans,
2572 						 u64 bytes_used, u64 type,
2573 						 u64 chunk_offset, u64 size)
2574 {
2575 	struct btrfs_fs_info *fs_info = trans->fs_info;
2576 	struct btrfs_block_group *cache;
2577 	int ret;
2578 
2579 	btrfs_set_log_full_commit(trans);
2580 
2581 	cache = btrfs_create_block_group_cache(fs_info, chunk_offset);
2582 	if (!cache)
2583 		return ERR_PTR(-ENOMEM);
2584 
2585 	/*
2586 	 * Mark it as new before adding it to the rbtree of block groups or any
2587 	 * list, so that no other task finds it and calls btrfs_mark_bg_unused()
2588 	 * before the new flag is set.
2589 	 */
2590 	set_bit(BLOCK_GROUP_FLAG_NEW, &cache->runtime_flags);
2591 
2592 	cache->length = size;
2593 	set_free_space_tree_thresholds(cache);
2594 	cache->used = bytes_used;
2595 	cache->flags = type;
2596 	cache->cached = BTRFS_CACHE_FINISHED;
2597 	cache->global_root_id = calculate_global_root_id(fs_info, cache->start);
2598 
2599 	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
2600 		set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags);
2601 
2602 	ret = btrfs_load_block_group_zone_info(cache, true);
2603 	if (ret) {
2604 		btrfs_put_block_group(cache);
2605 		return ERR_PTR(ret);
2606 	}
2607 
2608 	ret = exclude_super_stripes(cache);
2609 	if (ret) {
2610 		/* We may have excluded something, so call this just in case */
2611 		btrfs_free_excluded_extents(cache);
2612 		btrfs_put_block_group(cache);
2613 		return ERR_PTR(ret);
2614 	}
2615 
2616 	ret = add_new_free_space(cache, chunk_offset, chunk_offset + size, NULL);
2617 	btrfs_free_excluded_extents(cache);
2618 	if (ret) {
2619 		btrfs_put_block_group(cache);
2620 		return ERR_PTR(ret);
2621 	}
2622 
2623 	/*
2624 	 * Ensure the corresponding space_info object is created and
2625 	 * assigned to our block group. We want our bg to be added to the rbtree
2626 	 * with its ->space_info set.
2627 	 */
2628 	cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
2629 	ASSERT(cache->space_info);
2630 
2631 	ret = btrfs_add_block_group_cache(fs_info, cache);
2632 	if (ret) {
2633 		btrfs_remove_free_space_cache(cache);
2634 		btrfs_put_block_group(cache);
2635 		return ERR_PTR(ret);
2636 	}
2637 
2638 	/*
2639 	 * Now that our block group has its ->space_info set and is inserted in
2640 	 * the rbtree, update the space info's counters.
2641 	 */
2642 	trace_btrfs_add_block_group(fs_info, cache, 1);
2643 	btrfs_add_bg_to_space_info(fs_info, cache);
2644 	btrfs_update_global_block_rsv(fs_info);
2645 
2646 #ifdef CONFIG_BTRFS_DEBUG
2647 	if (btrfs_should_fragment_free_space(cache)) {
2648 		u64 new_bytes_used = size - bytes_used;
2649 
2650 		cache->space_info->bytes_used += new_bytes_used >> 1;
2651 		fragment_free_space(cache);
2652 	}
2653 #endif
2654 
2655 	list_add_tail(&cache->bg_list, &trans->new_bgs);
2656 	trans->delayed_ref_updates++;
2657 	btrfs_update_delayed_refs_rsv(trans);
2658 
2659 	set_avail_alloc_bits(fs_info, type);
2660 	return cache;
2661 }
2662 
2663 /*
2664  * Mark one block group RO, can be called several times for the same block
2665  * group.
2666  *
2667  * @cache:		the destination block group
2668  * @do_chunk_alloc:	whether we need to do chunk pre-allocation; this is to
2669  * 			ensure we still have some free space after marking this
2670  * 			block group RO.
2671  */
2672 int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
2673 			     bool do_chunk_alloc)
2674 {
2675 	struct btrfs_fs_info *fs_info = cache->fs_info;
2676 	struct btrfs_trans_handle *trans;
2677 	struct btrfs_root *root = btrfs_block_group_root(fs_info);
2678 	u64 alloc_flags;
2679 	int ret;
2680 	bool dirty_bg_running;
2681 
2682 	/*
2683 	 * This can only happen when we are doing read-only scrub on read-only
2684 	 * mount.
2685 	 * In that case we should not start a new transaction on read-only fs.
2686 	 * Thus here we skip all chunk allocations.
2687 	 */
2688 	if (sb_rdonly(fs_info->sb)) {
2689 		mutex_lock(&fs_info->ro_block_group_mutex);
2690 		ret = inc_block_group_ro(cache, 0);
2691 		mutex_unlock(&fs_info->ro_block_group_mutex);
2692 		return ret;
2693 	}
2694 
2695 	do {
2696 		trans = btrfs_join_transaction(root);
2697 		if (IS_ERR(trans))
2698 			return PTR_ERR(trans);
2699 
2700 		dirty_bg_running = false;
2701 
2702 		/*
2703 		 * We're not allowed to set block groups readonly after the dirty
2704 		 * block group cache has started writing.  If it already started,
2705 		 * back off and let this transaction commit.
2706 		 */
2707 		mutex_lock(&fs_info->ro_block_group_mutex);
2708 		if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
2709 			u64 transid = trans->transid;
2710 
2711 			mutex_unlock(&fs_info->ro_block_group_mutex);
2712 			btrfs_end_transaction(trans);
2713 
2714 			ret = btrfs_wait_for_commit(fs_info, transid);
2715 			if (ret)
2716 				return ret;
2717 			dirty_bg_running = true;
2718 		}
2719 	} while (dirty_bg_running);
2720 
2721 	if (do_chunk_alloc) {
2722 		/*
2723 		 * If we are changing raid levels, try to allocate a
2724 		 * corresponding block group with the new raid level.
2725 		 */
2726 		alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
2727 		if (alloc_flags != cache->flags) {
2728 			ret = btrfs_chunk_alloc(trans, alloc_flags,
2729 						CHUNK_ALLOC_FORCE);
2730 			/*
2731 			 * ENOSPC is allowed here, we may have enough space
2732 			 * already allocated at the new raid level to carry on
2733 			 */
2734 			if (ret == -ENOSPC)
2735 				ret = 0;
2736 			if (ret < 0)
2737 				goto out;
2738 		}
2739 	}
2740 
2741 	ret = inc_block_group_ro(cache, 0);
2742 	if (!ret)
2743 		goto out;
2744 	if (ret == -ETXTBSY)
2745 		goto unlock_out;
2746 
2747 	/*
2748 	 * Skip chunk allocation if the bg is SYSTEM, this is to avoid a system
2749 	 * chunk allocation storm exhausting the system chunk array.  Otherwise
2750 	 * we still want to try our best to mark the block group read-only.
2751 	 */
2752 	if (!do_chunk_alloc && ret == -ENOSPC &&
2753 	    (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM))
2754 		goto unlock_out;
2755 
2756 	alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
2757 	ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
2758 	if (ret < 0)
2759 		goto out;
2760 	/*
2761 	 * We have allocated a new chunk. We also need to activate that chunk to
2762 	 * grant metadata tickets on a zoned filesystem.
2763 	 */
2764 	ret = btrfs_zoned_activate_one_bg(fs_info, cache->space_info, true);
2765 	if (ret < 0)
2766 		goto out;
2767 
2768 	ret = inc_block_group_ro(cache, 0);
2769 	if (ret == -ETXTBSY)
2770 		goto unlock_out;
2771 out:
2772 	if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
2773 		alloc_flags = btrfs_get_alloc_profile(fs_info, cache->flags);
2774 		mutex_lock(&fs_info->chunk_mutex);
2775 		check_system_chunk(trans, alloc_flags);
2776 		mutex_unlock(&fs_info->chunk_mutex);
2777 	}
2778 unlock_out:
2779 	mutex_unlock(&fs_info->ro_block_group_mutex);
2780 
2781 	btrfs_end_transaction(trans);
2782 	return ret;
2783 }
2784 
2785 void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
2786 {
2787 	struct btrfs_space_info *sinfo = cache->space_info;
2788 	u64 num_bytes;
2789 
2790 	BUG_ON(!cache->ro);
2791 
2792 	spin_lock(&sinfo->lock);
2793 	spin_lock(&cache->lock);
2794 	if (!--cache->ro) {
2795 		if (btrfs_is_zoned(cache->fs_info)) {
2796 			/* Migrate zone_unusable bytes back */
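			/*
			 * Freed space below the zone write pointer
			 * (alloc_offset - used) and the slack past the zone
			 * capacity (length - zone_capacity) can't be reused
			 * until the zone is reset, so account it as
			 * zone_unusable again.
			 */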
2797 			cache->zone_unusable =
2798 				(cache->alloc_offset - cache->used) +
2799 				(cache->length - cache->zone_capacity);
2800 			sinfo->bytes_zone_unusable += cache->zone_unusable;
2801 			sinfo->bytes_readonly -= cache->zone_unusable;
2802 		}
2803 		num_bytes = cache->length - cache->reserved -
2804 			    cache->pinned - cache->bytes_super -
2805 			    cache->zone_unusable - cache->used;
2806 		sinfo->bytes_readonly -= num_bytes;
2807 		list_del_init(&cache->ro_list);
2808 	}
2809 	spin_unlock(&cache->lock);
2810 	spin_unlock(&sinfo->lock);
2811 }
2812 
2813 static int update_block_group_item(struct btrfs_trans_handle *trans,
2814 				   struct btrfs_path *path,
2815 				   struct btrfs_block_group *cache)
2816 {
2817 	struct btrfs_fs_info *fs_info = trans->fs_info;
2818 	int ret;
2819 	struct btrfs_root *root = btrfs_block_group_root(fs_info);
2820 	unsigned long bi;
2821 	struct extent_buffer *leaf;
2822 	struct btrfs_block_group_item bgi;
2823 	struct btrfs_key key;
2824 
2825 	key.objectid = cache->start;
2826 	key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
2827 	key.offset = cache->length;
2828 
2829 	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2830 	if (ret) {
2831 		if (ret > 0)
2832 			ret = -ENOENT;
2833 		goto fail;
2834 	}
2835 
2836 	leaf = path->nodes[0];
2837 	bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2838 	btrfs_set_stack_block_group_used(&bgi, cache->used);
2839 	btrfs_set_stack_block_group_chunk_objectid(&bgi,
2840 						   cache->global_root_id);
2841 	btrfs_set_stack_block_group_flags(&bgi, cache->flags);
2842 	write_extent_buffer(leaf, &bgi, bi, sizeof(bgi));
2843 	btrfs_mark_buffer_dirty(leaf);
2844 fail:
2845 	btrfs_release_path(path);
2846 	return ret;
2847 
2848 }
2849 
2850 static int cache_save_setup(struct btrfs_block_group *block_group,
2851 			    struct btrfs_trans_handle *trans,
2852 			    struct btrfs_path *path)
2853 {
2854 	struct btrfs_fs_info *fs_info = block_group->fs_info;
2855 	struct btrfs_root *root = fs_info->tree_root;
2856 	struct inode *inode = NULL;
2857 	struct extent_changeset *data_reserved = NULL;
2858 	u64 alloc_hint = 0;
2859 	int dcs = BTRFS_DC_ERROR;
2860 	u64 cache_size = 0;
2861 	int retries = 0;
2862 	int ret = 0;
2863 
2864 	if (!btrfs_test_opt(fs_info, SPACE_CACHE))
2865 		return 0;
2866 
2867 	/*
2868 	 * If this block group is smaller than 100MiB, don't bother caching the
2869 	 * block group.
2870 	 */
2871 	if (block_group->length < (100 * SZ_1M)) {
2872 		spin_lock(&block_group->lock);
2873 		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2874 		spin_unlock(&block_group->lock);
2875 		return 0;
2876 	}
2877 
2878 	if (TRANS_ABORTED(trans))
2879 		return 0;
2880 again:
2881 	inode = lookup_free_space_inode(block_group, path);
2882 	if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2883 		ret = PTR_ERR(inode);
2884 		btrfs_release_path(path);
2885 		goto out;
2886 	}
2887 
2888 	if (IS_ERR(inode)) {
2889 		BUG_ON(retries);
2890 		retries++;
2891 
2892 		if (block_group->ro)
2893 			goto out_free;
2894 
2895 		ret = create_free_space_inode(trans, block_group, path);
2896 		if (ret)
2897 			goto out_free;
2898 		goto again;
2899 	}
2900 
2901 	/*
2902 	 * We want to set the generation to 0 so that if anything goes wrong
2903 	 * from here on out we know not to trust this cache when we load up next
2904 	 * time.
2905 	 */
2906 	BTRFS_I(inode)->generation = 0;
2907 	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
2908 	if (ret) {
2909 		/*
2910 		 * So theoretically we could recover from this, simply set the
2911 		 * super cache generation to 0 so we know to invalidate the
2912 		 * cache, but then we'd have to keep track of the block groups
2913 		 * that fail this way so we know we _have_ to reset this cache
2914 		 * before the next commit or risk reading stale cache.  So to
2915 		 * limit our exposure to horrible edge cases, let's just abort the
2916 		 * transaction, this only happens in really bad situations
2917 		 * anyway.
2918 		 */
2919 		btrfs_abort_transaction(trans, ret);
2920 		goto out_put;
2921 	}
2922 	WARN_ON(ret);
2923 
2924 	/* We've already setup this transaction, go ahead and exit */
2925 	if (block_group->cache_generation == trans->transid &&
2926 	    i_size_read(inode)) {
2927 		dcs = BTRFS_DC_SETUP;
2928 		goto out_put;
2929 	}
2930 
2931 	if (i_size_read(inode) > 0) {
2932 		ret = btrfs_check_trunc_cache_free_space(fs_info,
2933 					&fs_info->global_block_rsv);
2934 		if (ret)
2935 			goto out_put;
2936 
2937 		ret = btrfs_truncate_free_space_cache(trans, NULL, inode);
2938 		if (ret)
2939 			goto out_put;
2940 	}
2941 
2942 	spin_lock(&block_group->lock);
2943 	if (block_group->cached != BTRFS_CACHE_FINISHED ||
2944 	    !btrfs_test_opt(fs_info, SPACE_CACHE)) {
2945 		/*
2946 		 * don't bother trying to write stuff out _if_
2947 		 * a) we're not cached,
2948 		 * b) we're using the nospace_cache mount option,
2949 		 * c) we're using the v2 space_cache (FREE_SPACE_TREE).
2950 		 */
2951 		dcs = BTRFS_DC_WRITTEN;
2952 		spin_unlock(&block_group->lock);
2953 		goto out_put;
2954 	}
2955 	spin_unlock(&block_group->lock);
2956 
2957 	/*
2958 	 * We hit an ENOSPC when setting up the cache in this transaction, just
2959 	 * skip doing the setup, we've already cleared the cache so we're safe.
2960 	 */
2961 	if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
2962 		ret = -ENOSPC;
2963 		goto out_put;
2964 	}
2965 
2966 	/*
2967 	 * Try to preallocate enough space based on how big the block group is.
2968 	 * Keep in mind this has to include any pinned space which could end up
2969 	 * taking up quite a bit since it's not folded into the other space
2970 	 * cache.
2971 	 */
2972 	cache_size = div_u64(block_group->length, SZ_256M);
2973 	if (!cache_size)
2974 		cache_size = 1;
2975 
2976 	cache_size *= 16;
2977 	cache_size *= fs_info->sectorsize;
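	/*
	 * e.g. a 1GiB block group with a 4KiB sectorsize preallocates
	 * 4 * 16 * 4096 bytes = 256KiB for the free space cache.
	 */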
2978 
2979 	ret = btrfs_check_data_free_space(BTRFS_I(inode), &data_reserved, 0,
2980 					  cache_size, false);
2981 	if (ret)
2982 		goto out_put;
2983 
2984 	ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, cache_size,
2985 					      cache_size, cache_size,
2986 					      &alloc_hint);
2987 	/*
2988 	 * Our cache requires contiguous chunks so that we don't modify a bunch
2989 	 * of metadata or split extents when writing the cache out, which means
2990 	 * we can enospc if we are heavily fragmented in addition to just normal
2991 	 * out of space conditions.  So if we hit this just skip setting up any
2992 	 * other block groups for this transaction, maybe we'll unpin enough
2993 	 * space the next time around.
2994 	 */
2995 	if (!ret)
2996 		dcs = BTRFS_DC_SETUP;
2997 	else if (ret == -ENOSPC)
2998 		set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
2999 
3000 out_put:
3001 	iput(inode);
3002 out_free:
3003 	btrfs_release_path(path);
3004 out:
3005 	spin_lock(&block_group->lock);
3006 	if (!ret && dcs == BTRFS_DC_SETUP)
3007 		block_group->cache_generation = trans->transid;
3008 	block_group->disk_cache_state = dcs;
3009 	spin_unlock(&block_group->lock);
3010 
3011 	extent_changeset_free(data_reserved);
3012 	return ret;
3013 }
3014 
3015 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans)
3016 {
3017 	struct btrfs_fs_info *fs_info = trans->fs_info;
3018 	struct btrfs_block_group *cache, *tmp;
3019 	struct btrfs_transaction *cur_trans = trans->transaction;
3020 	struct btrfs_path *path;
3021 
3022 	if (list_empty(&cur_trans->dirty_bgs) ||
3023 	    !btrfs_test_opt(fs_info, SPACE_CACHE))
3024 		return 0;
3025 
3026 	path = btrfs_alloc_path();
3027 	if (!path)
3028 		return -ENOMEM;
3029 
3030 	/* Could add new block groups, use _safe just in case */
3031 	list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3032 				 dirty_list) {
3033 		if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3034 			cache_save_setup(cache, trans, path);
3035 	}
3036 
3037 	btrfs_free_path(path);
3038 	return 0;
3039 }
3040 
3041 /*
3042  * Transaction commit does final block group cache writeback during a critical
3043  * section where nothing is allowed to change the FS.  This is required in
3044  * order for the cache to actually match the block group, but can introduce a
3045  * lot of latency into the commit.
3046  *
3047  * So, btrfs_start_dirty_block_groups is here to kick off block group cache IO.
3048  * There's a chance we'll have to redo some of it if the block group changes
3049  * again during the commit, but it greatly reduces the commit latency by
3050  * getting rid of the easy block groups while we're still allowing others to
3051  * join the commit.
3052  */
3053 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
3054 {
3055 	struct btrfs_fs_info *fs_info = trans->fs_info;
3056 	struct btrfs_block_group *cache;
3057 	struct btrfs_transaction *cur_trans = trans->transaction;
3058 	int ret = 0;
3059 	int should_put;
3060 	struct btrfs_path *path = NULL;
3061 	LIST_HEAD(dirty);
3062 	struct list_head *io = &cur_trans->io_bgs;
3063 	int loops = 0;
3064 
3065 	spin_lock(&cur_trans->dirty_bgs_lock);
3066 	if (list_empty(&cur_trans->dirty_bgs)) {
3067 		spin_unlock(&cur_trans->dirty_bgs_lock);
3068 		return 0;
3069 	}
3070 	list_splice_init(&cur_trans->dirty_bgs, &dirty);
3071 	spin_unlock(&cur_trans->dirty_bgs_lock);
3072 
3073 again:
3074 	/* Make sure all the block groups on our dirty list actually exist */
3075 	btrfs_create_pending_block_groups(trans);
3076 
3077 	if (!path) {
3078 		path = btrfs_alloc_path();
3079 		if (!path) {
3080 			ret = -ENOMEM;
3081 			goto out;
3082 		}
3083 	}
3084 
3085 	/*
3086 	 * cache_write_mutex is here only to save us from balance or automatic
3087 	 * removal of empty block groups deleting this block group while we are
3088 	 * writing out the cache
3089 	 */
3090 	mutex_lock(&trans->transaction->cache_write_mutex);
3091 	while (!list_empty(&dirty)) {
3092 		bool drop_reserve = true;
3093 
3094 		cache = list_first_entry(&dirty, struct btrfs_block_group,
3095 					 dirty_list);
3096 		/*
3097 		 * This can happen if something re-dirties a block group that
3098 		 * is already under IO.  Just wait for it to finish and then do
3099 		 * it all again
3100 		 */
3101 		if (!list_empty(&cache->io_list)) {
3102 			list_del_init(&cache->io_list);
3103 			btrfs_wait_cache_io(trans, cache, path);
3104 			btrfs_put_block_group(cache);
3105 		}
3106 
3107 
3108 		/*
3109 		 * btrfs_wait_cache_io uses the cache->dirty_list to decide if
3110 		 * it should update the cache_state.  Don't delete until after
3111 		 * we wait.
3112 		 *
3113 		 * Since we're not running in the commit critical section
3114 		 * we need the dirty_bgs_lock to protect from update_block_group
3115 		 */
3116 		spin_lock(&cur_trans->dirty_bgs_lock);
3117 		list_del_init(&cache->dirty_list);
3118 		spin_unlock(&cur_trans->dirty_bgs_lock);
3119 
3120 		should_put = 1;
3121 
3122 		cache_save_setup(cache, trans, path);
3123 
3124 		if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3125 			cache->io_ctl.inode = NULL;
3126 			ret = btrfs_write_out_cache(trans, cache, path);
3127 			if (ret == 0 && cache->io_ctl.inode) {
3128 				should_put = 0;
3129 
3130 				/*
3131 				 * The cache_write_mutex is protecting the
3132 				 * io_list, also refer to the definition of
3133 				 * btrfs_transaction::io_bgs for more details
3134 				 */
3135 				list_add_tail(&cache->io_list, io);
3136 			} else {
3137 				/*
3138 				 * If we failed to write the cache, the
3139 				 * generation will be bad and life goes on
3140 				 */
3141 				ret = 0;
3142 			}
3143 		}
3144 		if (!ret) {
3145 			ret = update_block_group_item(trans, path, cache);
3146 			/*
3147 			 * Our block group might still be attached to the list
3148 			 * of new block groups in the transaction handle of some
3149 			 * other task (struct btrfs_trans_handle->new_bgs). This
3150 			 * means its block group item isn't yet in the extent
3151 			 * tree. If this happens ignore the error, as we will
3152 			 * try again later in the critical section of the
3153 			 * transaction commit.
3154 			 */
3155 			if (ret == -ENOENT) {
3156 				ret = 0;
3157 				spin_lock(&cur_trans->dirty_bgs_lock);
3158 				if (list_empty(&cache->dirty_list)) {
3159 					list_add_tail(&cache->dirty_list,
3160 						      &cur_trans->dirty_bgs);
3161 					btrfs_get_block_group(cache);
3162 					drop_reserve = false;
3163 				}
3164 				spin_unlock(&cur_trans->dirty_bgs_lock);
3165 			} else if (ret) {
3166 				btrfs_abort_transaction(trans, ret);
3167 			}
3168 		}
3169 
3170 		/* If it's not on the io list, we need to put the block group */
3171 		if (should_put)
3172 			btrfs_put_block_group(cache);
3173 		if (drop_reserve)
3174 			btrfs_delayed_refs_rsv_release(fs_info, 1);
3175 		/*
3176 		 * Avoid blocking other tasks for too long. It might even save
3177 		 * us from writing caches for block groups that are going to be
3178 		 * removed.
3179 		 */
3180 		mutex_unlock(&trans->transaction->cache_write_mutex);
3181 		if (ret)
3182 			goto out;
3183 		mutex_lock(&trans->transaction->cache_write_mutex);
3184 	}
3185 	mutex_unlock(&trans->transaction->cache_write_mutex);
3186 
3187 	/*
3188 	 * Go through delayed refs for all the stuff we've just kicked off
3189 	 * and then loop back (just once)
3190 	 */
3191 	if (!ret)
3192 		ret = btrfs_run_delayed_refs(trans, 0);
3193 	if (!ret && loops == 0) {
3194 		loops++;
3195 		spin_lock(&cur_trans->dirty_bgs_lock);
3196 		list_splice_init(&cur_trans->dirty_bgs, &dirty);
3197 		/*
3198 		 * dirty_bgs_lock protects us from concurrent block group
3199 		 * deletes too (not just cache_write_mutex).
3200 		 */
3201 		if (!list_empty(&dirty)) {
3202 			spin_unlock(&cur_trans->dirty_bgs_lock);
3203 			goto again;
3204 		}
3205 		spin_unlock(&cur_trans->dirty_bgs_lock);
3206 	}
3207 out:
3208 	if (ret < 0) {
3209 		spin_lock(&cur_trans->dirty_bgs_lock);
3210 		list_splice_init(&dirty, &cur_trans->dirty_bgs);
3211 		spin_unlock(&cur_trans->dirty_bgs_lock);
3212 		btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
3213 	}
3214 
3215 	btrfs_free_path(path);
3216 	return ret;
3217 }
3218 
3219 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans)
3220 {
3221 	struct btrfs_fs_info *fs_info = trans->fs_info;
3222 	struct btrfs_block_group *cache;
3223 	struct btrfs_transaction *cur_trans = trans->transaction;
3224 	int ret = 0;
3225 	int should_put;
3226 	struct btrfs_path *path;
3227 	struct list_head *io = &cur_trans->io_bgs;
3228 
3229 	path = btrfs_alloc_path();
3230 	if (!path)
3231 		return -ENOMEM;
3232 
3233 	/*
3234 	 * Even though we are in the critical section of the transaction commit,
3235 	 * we can still have concurrent tasks adding elements to this
3236 	 * transaction's list of dirty block groups. These tasks correspond to
3237 	 * endio free space workers started when writeback finishes for a
3238 	 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
3239 	 * allocate new block groups as a result of COWing nodes of the root
3240 	 * tree when updating the free space inode. The writeback for the space
3241 	 * caches is triggered by an earlier call to
3242 	 * btrfs_start_dirty_block_groups() and iterations of the following
3243 	 * loop.
3244 	 * Also we want to do the cache_save_setup first and then run the
3245 	 * delayed refs to make sure we have the best chance at doing this all
3246 	 * in one shot.
3247 	 */
3248 	spin_lock(&cur_trans->dirty_bgs_lock);
3249 	while (!list_empty(&cur_trans->dirty_bgs)) {
3250 		cache = list_first_entry(&cur_trans->dirty_bgs,
3251 					 struct btrfs_block_group,
3252 					 dirty_list);
3253 
3254 		/*
3255 		 * This can happen if cache_save_setup re-dirties a block group
3256 		 * that is already under IO.  Just wait for it to finish and
3257 		 * then do it all again
3258 		 */
3259 		if (!list_empty(&cache->io_list)) {
3260 			spin_unlock(&cur_trans->dirty_bgs_lock);
3261 			list_del_init(&cache->io_list);
3262 			btrfs_wait_cache_io(trans, cache, path);
3263 			btrfs_put_block_group(cache);
3264 			spin_lock(&cur_trans->dirty_bgs_lock);
3265 		}
3266 
3267 		/*
3268 		 * Don't remove from the dirty list until after we've waited on
3269 		 * any pending IO
3270 		 */
3271 		list_del_init(&cache->dirty_list);
3272 		spin_unlock(&cur_trans->dirty_bgs_lock);
3273 		should_put = 1;
3274 
3275 		cache_save_setup(cache, trans, path);
3276 
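		/*
		 * A count of (unsigned long)-1 tells btrfs_run_delayed_refs()
		 * to keep processing until no delayed refs remain, so the block
		 * group items updated below account for all pending changes.
		 */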
3277 		if (!ret)
3278 			ret = btrfs_run_delayed_refs(trans,
3279 						     (unsigned long) -1);
3280 
3281 		if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3282 			cache->io_ctl.inode = NULL;
3283 			ret = btrfs_write_out_cache(trans, cache, path);
3284 			if (ret == 0 && cache->io_ctl.inode) {
3285 				should_put = 0;
3286 				list_add_tail(&cache->io_list, io);
3287 			} else {
3288 				/*
3289 				 * If we failed to write the cache, the
3290 				 * generation will be bad and life goes on
3291 				 */
3292 				ret = 0;
3293 			}
3294 		}
3295 		if (!ret) {
3296 			ret = update_block_group_item(trans, path, cache);
3297 			/*
3298 			 * One of the free space endio workers might have
3299 			 * created a new block group while updating a free space
3300 			 * cache's inode (at inode.c:btrfs_finish_ordered_io())
3301 			 * and hasn't released its transaction handle yet, in
3302 			 * which case the new block group is still attached to
3303 			 * its transaction handle and its creation has not
3304 			 * finished yet (no block group item in the extent tree
3305 			 * yet, etc). If this is the case, wait for all free
3306 			 * space endio workers to finish and retry. This is a
3307 			 * very rare case so no need for a more efficient and
3308 			 * complex approach.
3309 			 */
3310 			if (ret == -ENOENT) {
3311 				wait_event(cur_trans->writer_wait,
3312 				   atomic_read(&cur_trans->num_writers) == 1);
3313 				ret = update_block_group_item(trans, path, cache);
3314 			}
3315 			if (ret)
3316 				btrfs_abort_transaction(trans, ret);
3317 		}
3318 
3319 		/* If it's not on the io list, we need to put the block group */
3320 		if (should_put)
3321 			btrfs_put_block_group(cache);
3322 		btrfs_delayed_refs_rsv_release(fs_info, 1);
3323 		spin_lock(&cur_trans->dirty_bgs_lock);
3324 	}
3325 	spin_unlock(&cur_trans->dirty_bgs_lock);
3326 
3327 	/*
3328 	 * Refer to the definition of the io_bgs member for details on why it's
3329 	 * safe to use it without any locking.
3330 	 */
3331 	while (!list_empty(io)) {
3332 		cache = list_first_entry(io, struct btrfs_block_group,
3333 					 io_list);
3334 		list_del_init(&cache->io_list);
3335 		btrfs_wait_cache_io(trans, cache, path);
3336 		btrfs_put_block_group(cache);
3337 	}
3338 
3339 	btrfs_free_path(path);
3340 	return ret;
3341 }
3342 
3343 static inline bool should_reclaim_block_group(struct btrfs_block_group *bg,
3344 					      u64 bytes_freed)
3345 {
3346 	const struct btrfs_space_info *space_info = bg->space_info;
3347 	const int reclaim_thresh = READ_ONCE(space_info->bg_reclaim_threshold);
3348 	const u64 new_val = bg->used;
3349 	const u64 old_val = new_val + bytes_freed;
3350 	u64 thresh;
3351 
3352 	if (reclaim_thresh == 0)
3353 		return false;
3354 
3355 	thresh = div_factor_fine(bg->length, reclaim_thresh);
3356 
3357 	/*
3358 	 * If we were below the threshold before, don't reclaim: we are likely a
3359 	 * brand new block group and we don't want to relocate new block groups.
3360 	 */
3361 	if (old_val < thresh)
3362 		return false;
3363 	if (new_val >= thresh)
3364 		return false;
3365 	return true;
3366 }
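/*
 * Worked example for should_reclaim_block_group(): with a 1GiB block group and
 * bg_reclaim_threshold set to 75, thresh is 75% of 1GiB (768MiB). A free that
 * takes the used bytes from 800MiB (old_val, above the threshold) down to
 * 700MiB (new_val, below it) returns true; a group that was already below
 * 768MiB before the free returns false, as it is likely still being filled.
 */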
3367 
3368 int btrfs_update_block_group(struct btrfs_trans_handle *trans,
3369 			     u64 bytenr, u64 num_bytes, bool alloc)
3370 {
3371 	struct btrfs_fs_info *info = trans->fs_info;
3372 	struct btrfs_block_group *cache = NULL;
3373 	u64 total = num_bytes;
3374 	u64 old_val;
3375 	u64 byte_in_group;
3376 	int factor;
3377 	int ret = 0;
3378 
3379 	/* Block accounting for super block */
3380 	spin_lock(&info->delalloc_root_lock);
3381 	old_val = btrfs_super_bytes_used(info->super_copy);
3382 	if (alloc)
3383 		old_val += num_bytes;
3384 	else
3385 		old_val -= num_bytes;
3386 	btrfs_set_super_bytes_used(info->super_copy, old_val);
3387 	spin_unlock(&info->delalloc_root_lock);
3388 
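	/*
	 * Walk every block group overlapping [bytenr, bytenr + num_bytes):
	 * num_bytes is clamped to the end of each group below, so the loop
	 * also copes with a range that crosses a block group boundary.
	 */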
3389 	while (total) {
3390 		struct btrfs_space_info *space_info;
3391 		bool reclaim = false;
3392 
3393 		cache = btrfs_lookup_block_group(info, bytenr);
3394 		if (!cache) {
3395 			ret = -ENOENT;
3396 			break;
3397 		}
3398 		space_info = cache->space_info;
3399 		factor = btrfs_bg_type_to_factor(cache->flags);
3400 
3401 		/*
3402 		 * If this block group has free space cache written out, we
3403 		 * need to make sure to load it if we are removing space.  This
3404 		 * is because we need the unpinning stage to actually add the
3405 		 * space back to the block group, otherwise we will leak space.
3406 		 */
3407 		if (!alloc && !btrfs_block_group_done(cache))
3408 			btrfs_cache_block_group(cache, true);
3409 
3410 		byte_in_group = bytenr - cache->start;
3411 		WARN_ON(byte_in_group > cache->length);
3412 
3413 		spin_lock(&space_info->lock);
3414 		spin_lock(&cache->lock);
3415 
3416 		if (btrfs_test_opt(info, SPACE_CACHE) &&
3417 		    cache->disk_cache_state < BTRFS_DC_CLEAR)
3418 			cache->disk_cache_state = BTRFS_DC_CLEAR;
3419 
3420 		old_val = cache->used;
3421 		num_bytes = min(total, cache->length - byte_in_group);
3422 		if (alloc) {
3423 			old_val += num_bytes;
3424 			cache->used = old_val;
3425 			cache->reserved -= num_bytes;
3426 			space_info->bytes_reserved -= num_bytes;
3427 			space_info->bytes_used += num_bytes;
3428 			space_info->disk_used += num_bytes * factor;
3429 			spin_unlock(&cache->lock);
3430 			spin_unlock(&space_info->lock);
3431 		} else {
3432 			old_val -= num_bytes;
3433 			cache->used = old_val;
3434 			cache->pinned += num_bytes;
3435 			btrfs_space_info_update_bytes_pinned(info, space_info,
3436 							     num_bytes);
3437 			space_info->bytes_used -= num_bytes;
3438 			space_info->disk_used -= num_bytes * factor;
3439 
3440 			reclaim = should_reclaim_block_group(cache, num_bytes);
3441 			spin_unlock(&cache->lock);
3442 			spin_unlock(&space_info->lock);
3443 
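			/*
			 * Record the freed range as pinned; the space is only
			 * returned to the free space cache/tree at transaction
			 * commit time, when pinned extents are unpinned.
			 */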
3444 			set_extent_dirty(&trans->transaction->pinned_extents,
3445 					 bytenr, bytenr + num_bytes - 1,
3446 					 GFP_NOFS | __GFP_NOFAIL);
3447 		}
3448 
3449 		spin_lock(&trans->transaction->dirty_bgs_lock);
3450 		if (list_empty(&cache->dirty_list)) {
3451 			list_add_tail(&cache->dirty_list,
3452 				      &trans->transaction->dirty_bgs);
3453 			trans->delayed_ref_updates++;
3454 			btrfs_get_block_group(cache);
3455 		}
3456 		spin_unlock(&trans->transaction->dirty_bgs_lock);
3457 
3458 		/*
3459 		 * No longer have used bytes in this block group, queue it for
3460 		 * deletion. We do this after adding the block group to the
3461 		 * dirty list to avoid races between cleaner kthread and space
3462 		 * cache writeout.
3463 		 */
3464 		if (!alloc && old_val == 0) {
3465 			if (!btrfs_test_opt(info, DISCARD_ASYNC))
3466 				btrfs_mark_bg_unused(cache);
3467 		} else if (!alloc && reclaim) {
3468 			btrfs_mark_bg_to_reclaim(cache);
3469 		}
3470 
3471 		btrfs_put_block_group(cache);
3472 		total -= num_bytes;
3473 		bytenr += num_bytes;
3474 	}
3475 
3476 	/* Modified block groups are accounted for in the delayed_refs_rsv. */
3477 	btrfs_update_delayed_refs_rsv(trans);
3478 	return ret;
3479 }
3480 
3481 /**
3482  * btrfs_add_reserved_bytes - update the block_group and space info counters
3483  * @cache:	The cache we are manipulating
3484  * @ram_bytes:  The number of bytes of file content; it is the same as
3485  *              @num_bytes except on the compression path.
3486  * @num_bytes:	The number of bytes in question
3487  * @delalloc:   The blocks are allocated for the delalloc write
3488  *
3489  * This is called by the allocator when it reserves space. If this is a
3490  * reservation and the block group has become read only we cannot make the
3491  * reservation and return -EAGAIN, otherwise this function always succeeds.
3492  */
3493 int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
3494 			     u64 ram_bytes, u64 num_bytes, int delalloc)
3495 {
3496 	struct btrfs_space_info *space_info = cache->space_info;
3497 	int ret = 0;
3498 
3499 	spin_lock(&space_info->lock);
3500 	spin_lock(&cache->lock);
3501 	if (cache->ro) {
3502 		ret = -EAGAIN;
3503 	} else {
3504 		cache->reserved += num_bytes;
3505 		space_info->bytes_reserved += num_bytes;
3506 		trace_btrfs_space_reservation(cache->fs_info, "space_info",
3507 					      space_info->flags, num_bytes, 1);
3508 		btrfs_space_info_update_bytes_may_use(cache->fs_info,
3509 						      space_info, -ram_bytes);
3510 		if (delalloc)
3511 			cache->delalloc_bytes += num_bytes;
3512 
3513 		/*
3514 		 * Compression can use less space than we reserved, so wake
3515 		 * tickets if that happens
3516 		 */
3517 		if (num_bytes < ram_bytes)
3518 			btrfs_try_granting_tickets(cache->fs_info, space_info);
3519 	}
3520 	spin_unlock(&cache->lock);
3521 	spin_unlock(&space_info->lock);
3522 	return ret;
3523 }
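/*
 * Example for btrfs_add_reserved_bytes(): if 128K of file data was reserved at
 * delalloc time but compresses down to a 32K extent, the caller passes
 * ram_bytes=128K and num_bytes=32K. The 128K leaves bytes_may_use, only 32K is
 * added to bytes_reserved, and since num_bytes < ram_bytes the difference is
 * effectively freed, which is why waiting tickets are granted above.
 */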
3524 
3525 /**
3526  * btrfs_free_reserved_bytes - update the block_group and space info counters
3527  * @cache:      The cache we are manipulating
3528  * @num_bytes:  The number of bytes in question
3529  * @delalloc:   The blocks are allocated for the delalloc write
3530  *
3531  * This is called by somebody who is freeing space that was never actually used
3532  * on disk.  For example if you reserve some space for a new leaf in transaction
3533  * A and before transaction A commits you free that leaf, you call this to
3534  * clear the reservation.
3535  */
3536 void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
3537 			       u64 num_bytes, int delalloc)
3538 {
3539 	struct btrfs_space_info *space_info = cache->space_info;
3540 
3541 	spin_lock(&space_info->lock);
3542 	spin_lock(&cache->lock);
3543 	if (cache->ro)
3544 		space_info->bytes_readonly += num_bytes;
3545 	cache->reserved -= num_bytes;
3546 	space_info->bytes_reserved -= num_bytes;
3547 	space_info->max_extent_size = 0;
3548 
3549 	if (delalloc)
3550 		cache->delalloc_bytes -= num_bytes;
3551 	spin_unlock(&cache->lock);
3552 
3553 	btrfs_try_granting_tickets(cache->fs_info, space_info);
3554 	spin_unlock(&space_info->lock);
3555 }
3556 
3557 static void force_metadata_allocation(struct btrfs_fs_info *info)
3558 {
3559 	struct list_head *head = &info->space_info;
3560 	struct btrfs_space_info *found;
3561 
3562 	list_for_each_entry(found, head, list) {
3563 		if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3564 			found->force_alloc = CHUNK_ALLOC_FORCE;
3565 	}
3566 }
3567 
3568 static int should_alloc_chunk(struct btrfs_fs_info *fs_info,
3569 			      struct btrfs_space_info *sinfo, int force)
3570 {
3571 	u64 bytes_used = btrfs_space_info_used(sinfo, false);
3572 	u64 thresh;
3573 
3574 	if (force == CHUNK_ALLOC_FORCE)
3575 		return 1;
3576 
3577 	/*
3578 	 * in limited mode, we want to have some free space up to
3579 	 * about 1% of the FS size.
3580 	 */
3581 	if (force == CHUNK_ALLOC_LIMITED) {
3582 		thresh = btrfs_super_total_bytes(fs_info->super_copy);
3583 		thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
3584 
3585 		if (sinfo->total_bytes - bytes_used < thresh)
3586 			return 1;
3587 	}
3588 
3589 	if (bytes_used + SZ_2M < div_factor(sinfo->total_bytes, 8))
3590 		return 0;
3591 	return 1;
3592 }
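/*
 * Worked example for should_alloc_chunk(): div_factor_fine(x, 1) is 1% of x and
 * div_factor(x, 8) is 80% of x. So on a 1TiB filesystem, CHUNK_ALLOC_LIMITED
 * allocates while the chunks already owned by this space_info have less than
 * max(64M, ~10GiB) of free room, and CHUNK_ALLOC_NO_FORCE allocates once the
 * used bytes (plus 2M of slack) reach 80% of the space_info's total_bytes.
 */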
3593 
3594 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, u64 type)
3595 {
3596 	u64 alloc_flags = btrfs_get_alloc_profile(trans->fs_info, type);
3597 
3598 	return btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
3599 }
3600 
3601 static struct btrfs_block_group *do_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags)
3602 {
3603 	struct btrfs_block_group *bg;
3604 	int ret;
3605 
3606 	/*
3607 	 * Check if we have enough space in the system space info because we
3608 	 * will need to update device items in the chunk btree and insert a new
3609 	 * chunk item in the chunk btree as well. This will allocate a new
3610 	 * system block group if needed.
3611 	 */
3612 	check_system_chunk(trans, flags);
3613 
3614 	bg = btrfs_create_chunk(trans, flags);
3615 	if (IS_ERR(bg)) {
3616 		ret = PTR_ERR(bg);
3617 		goto out;
3618 	}
3619 
3620 	ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
3621 	/*
3622 	 * Normally we are not expected to fail with -ENOSPC here, since we have
3623 	 * previously reserved space in the system space_info and allocated one
3624 	 * new system chunk if necessary. However there are three exceptions:
3625 	 *
3626 	 * 1) We may have enough free space in the system space_info but all the
3627 	 *    existing system block groups have a profile which can not be used
3628 	 *    for extent allocation.
3629 	 *
3630 	 *    This happens when mounting in degraded mode. For example we have a
3631 	 *    RAID1 filesystem with 2 devices, lose one device and mount the fs
3632 	 *    using the other device in degraded mode. If we then allocate a chunk,
3633 	 *    we may have enough free space in the existing system space_info, but
3634 	 *    none of the block groups can be used for extent allocation since they
3635 	 *    have a RAID1 profile, and because we are in degraded mode with a
3636 	 *    single device, we are forced to allocate a new system chunk with a
3637 	 *    SINGLE profile. Making check_system_chunk() iterate over all system
3638 	 *    block groups and check if they have a usable profile and enough space
3639 	 *    can be slow on very large filesystems, so we tolerate the -ENOSPC and
3640 	 *    try again after forcing allocation of a new system chunk. Like this
3641 	 *    we avoid paying the cost of that search in normal circumstances, when
3642 	 *    we were not mounted in degraded mode;
3643 	 *
3644 	 * 2) We had enough free space in the system space_info, and one suitable
3645 	 *    block group to allocate from when we called check_system_chunk()
3646 	 *    above. However right after we called it, the only system block group
3647 	 *    with enough free space got turned into RO mode by a running scrub,
3648 	 *    and in this case we have to allocate a new one and retry. We only
3649 	 *    need to do this allocation and retry once, since we have a transaction
3650 	 *    handle and scrub uses the commit root to search for block groups;
3651 	 *
3652 	 * 3) We had one system block group with enough free space when we called
3653 	 *    check_system_chunk(), but after that, right before we tried to
3654 	 *    allocate the last extent buffer we needed, a discard operation came
3655 	 *    in and it temporarily removed the last free space entry from the
3656 	 *    block group (discard removes a free space entry, discards it, and
3657 	 *    then adds back the entry to the block group cache).
3658 	 */
3659 	if (ret == -ENOSPC) {
3660 		const u64 sys_flags = btrfs_system_alloc_profile(trans->fs_info);
3661 		struct btrfs_block_group *sys_bg;
3662 
3663 		sys_bg = btrfs_create_chunk(trans, sys_flags);
3664 		if (IS_ERR(sys_bg)) {
3665 			ret = PTR_ERR(sys_bg);
3666 			btrfs_abort_transaction(trans, ret);
3667 			goto out;
3668 		}
3669 
3670 		ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
3671 		if (ret) {
3672 			btrfs_abort_transaction(trans, ret);
3673 			goto out;
3674 		}
3675 
3676 		ret = btrfs_chunk_alloc_add_chunk_item(trans, bg);
3677 		if (ret) {
3678 			btrfs_abort_transaction(trans, ret);
3679 			goto out;
3680 		}
3681 	} else if (ret) {
3682 		btrfs_abort_transaction(trans, ret);
3683 		goto out;
3684 	}
3685 out:
3686 	btrfs_trans_release_chunk_metadata(trans);
3687 
3688 	if (ret)
3689 		return ERR_PTR(ret);
3690 
3691 	btrfs_get_block_group(bg);
3692 	return bg;
3693 }
3694 
3695 /*
3696  * Chunk allocation is done in 2 phases:
3697  *
3698  * 1) Phase 1 - through btrfs_chunk_alloc() we allocate device extents for
3699  *    the chunk, the chunk mapping, create its block group and add the items
3700  *    that belong in the chunk btree to it - more specifically, we need to
3701  *    update device items in the chunk btree and add a new chunk item to it.
3702  *
3703  * 2) Phase 2 - through btrfs_create_pending_block_groups(), we add the block
3704  *    group item to the extent btree and the device extent items to the devices
3705  *    btree.
3706  *
3707  * This is done to prevent deadlocks. For example when COWing a node from the
3708  * extent btree we are holding a write lock on the node's parent and if we
3709  * trigger chunk allocation and attempt to insert the new block group item
3710  * in the extent btree right away, we could deadlock because the path for the
3711  * insertion can include that parent node. At first glance it seems impossible
3712  * to trigger chunk allocation after starting a transaction since tasks should
3713  * reserve enough transaction units (metadata space), however while that is true
3714  * most of the time, chunk allocation may still be triggered for several reasons:
3715  *
3716  * 1) When reserving metadata, we check if there is enough free space in the
3717  *    metadata space_info and therefore don't trigger allocation of a new chunk.
3718  *    However later when the task actually tries to COW an extent buffer from
3719  *    the extent btree or from the device btree for example, it is forced to
3720  *    allocate a new block group (chunk) because the only one that had enough
3721  *    free space was just turned to RO mode by a running scrub for example (or
3722  *    device replace, block group reclaim thread, etc), so we can not use it
3723  *    for allocating an extent and end up being forced to allocate a new one;
3724  *
3725  * 2) Because we only check that the metadata space_info has enough free bytes,
3726  *    we end up not allocating a new metadata chunk in that case. However if
3727  *    the filesystem was mounted in degraded mode, none of the existing block
3728  *    groups might be suitable for extent allocation due to their incompatible
3729  *    profile (e.g. mounting a 2-device filesystem, where all block groups
3730  *    use a RAID1 profile, in degraded mode using a single device). In this case
3731  *    when the task attempts to COW some extent buffer of the extent btree for
3732  *    example, it will trigger allocation of a new metadata block group with a
3733  *    suitable profile (SINGLE profile in the example of the degraded mount of
3734  *    the RAID1 filesystem);
3735  *
3736  * 3) The task has reserved enough transaction units / metadata space, but when
3737  *    it attempts to COW an extent buffer from the extent or device btree for
3738  *    example, it does not find any free extent in any metadata block group,
3739  *    and is therefore forced to try to allocate a new metadata block group.
3740  *    This is because some other task allocated all available extents in the
3741  *    meanwhile - this typically happens with tasks that don't reserve space
3742  *    properly, either intentionally or as a bug. One example where this is
3743  *    done intentionally is fsync, as it does not reserve any transaction units
3744  *    and ends up allocating a variable number of metadata extents for log
3745  *    tree extent buffers;
3746  *
3747  * 4) The task has reserved enough transaction units / metadata space, but right
3748  *    before it tries to allocate the last extent buffer it needs, a discard
3749  *    operation comes in and, temporarily, removes the last free space entry from
3750  *    the only metadata block group that had free space (discard starts by
3751  *    removing a free space entry from a block group, then does the discard
3752  *    operation and, once it's done, it adds back the free space entry to the
3753  *    block group).
3754  *
3755  * We also need this 2 phases setup when adding a device to a filesystem with
3756  * a seed device - we must create new metadata and system chunks without adding
3757  * any of the block group items to the chunk, extent and device btrees. If we
3758  * did not do it this way, we would get ENOSPC when attempting to update those
3759  * btrees, since all the chunks from the seed device are read-only.
3760  *
3761  * Phase 1 does the updates and insertions to the chunk btree because if we had
3762  * it done in phase 2 and have a thundering herd of tasks allocating chunks in
3763  * parallel, we risk having too many system chunks allocated by many tasks if
3764  * many tasks reach phase 1 without the previous ones completing phase 2. In the
3765  * extreme case this leads to exhaustion of the system chunk array in the
3766  * superblock. This is easier to trigger if using a btree node/leaf size of 64K
3767  * and with RAID filesystems (so we have more device items in the chunk btree).
3768  * This has happened before and commit eafa4fd0ad0607 ("btrfs: fix exhaustion of
3769  * the system chunk array due to concurrent allocations") provides more details.
3770  *
3771  * Allocation of system chunks does not happen through this function. A task that
3772  * needs to update the chunk btree (the only btree that uses system chunks), must
3773  * preallocate chunk space by calling either check_system_chunk() or
3774  * btrfs_reserve_chunk_metadata() - the former is used when allocating a data or
3775  * metadata chunk or when removing a chunk, while the latter is used before doing
3776  * a modification to the chunk btree - use cases for the latter are adding,
3777  * removing and resizing a device as well as relocation of a system chunk.
3778  * See the comment below for more details.
3779  *
3780  * The reservation of system space, done through check_system_chunk(), as well
3781  * as all the updates and insertions into the chunk btree must be done while
3782  * holding fs_info->chunk_mutex. This is important to guarantee that while COWing
3783  * an extent buffer from the chunks btree we never trigger allocation of a new
3784  * system chunk, which would result in a deadlock (trying to lock twice an
3785  * extent buffer of the chunk btree, first time before triggering the chunk
3786  * allocation and the second time during chunk allocation while attempting to
3787  * update the chunks btree). The system chunk array is also updated while holding
3788  * that mutex. The same logic applies to removing chunks - we must reserve system
3789  * space, update the chunk btree and the system chunk array in the superblock
3790  * while holding fs_info->chunk_mutex.
3791  *
3792  * This function, btrfs_chunk_alloc(), belongs to phase 1.
3793  *
3794  * If @force is CHUNK_ALLOC_FORCE:
3795  *    - return 1 if it successfully allocates a chunk,
3796  *    - return errors including -ENOSPC otherwise.
3797  * If @force is NOT CHUNK_ALLOC_FORCE:
3798  *    - return 0 if it doesn't need to allocate a new chunk,
3799  *    - return 1 if it successfully allocates a chunk,
3800  *    - return errors including -ENOSPC otherwise.
3801  */
3802 int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
3803 		      enum btrfs_chunk_alloc_enum force)
3804 {
3805 	struct btrfs_fs_info *fs_info = trans->fs_info;
3806 	struct btrfs_space_info *space_info;
3807 	struct btrfs_block_group *ret_bg;
3808 	bool wait_for_alloc = false;
3809 	bool should_alloc = false;
3810 	bool from_extent_allocation = false;
3811 	int ret = 0;
3812 
3813 	if (force == CHUNK_ALLOC_FORCE_FOR_EXTENT) {
3814 		from_extent_allocation = true;
3815 		force = CHUNK_ALLOC_FORCE;
3816 	}
3817 
3818 	/* Don't re-enter if we're already allocating a chunk */
3819 	if (trans->allocating_chunk)
3820 		return -ENOSPC;
3821 	/*
3822 	 * Allocation of system chunks can not happen through this path, as we
3823 	 * could end up in a deadlock if we are allocating a data or metadata
3824 	 * chunk and there is another task modifying the chunk btree.
3825 	 *
3826 	 * This is because while we are holding the chunk mutex, we will attempt
3827 	 * to add the new chunk item to the chunk btree or update an existing
3828 	 * device item in the chunk btree, while the other task that is modifying
3829 	 * the chunk btree is attempting to COW an extent buffer while holding a
3830 	 * lock on it and on its parent - if the COW operation triggers a system
3831 	 * chunk allocation, then we can deadlock because we are holding the
3832 	 * chunk mutex and we may need to access that extent buffer or its parent
3833 	 * in order to add the chunk item or update a device item.
3834 	 *
3835 	 * Tasks that want to modify the chunk tree should reserve system space
3836 	 * before updating the chunk btree, by calling either
3837 	 * btrfs_reserve_chunk_metadata() or check_system_chunk().
3838 	 * It's possible that after a task reserves the space, it still ends up
3839 	 * here - this happens in the cases described above at do_chunk_alloc().
3840 	 * The task will have to either retry or fail.
3841 	 */
3842 	if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3843 		return -ENOSPC;
3844 
3845 	space_info = btrfs_find_space_info(fs_info, flags);
3846 	ASSERT(space_info);
3847 
3848 	do {
3849 		spin_lock(&space_info->lock);
3850 		if (force < space_info->force_alloc)
3851 			force = space_info->force_alloc;
3852 		should_alloc = should_alloc_chunk(fs_info, space_info, force);
3853 		if (space_info->full) {
3854 			/* No more free physical space */
3855 			if (should_alloc)
3856 				ret = -ENOSPC;
3857 			else
3858 				ret = 0;
3859 			spin_unlock(&space_info->lock);
3860 			return ret;
3861 		} else if (!should_alloc) {
3862 			spin_unlock(&space_info->lock);
3863 			return 0;
3864 		} else if (space_info->chunk_alloc) {
3865 			/*
3866 			 * Someone is already allocating, so we need to block
3867 			 * until this someone is finished and then loop to
3868 			 * recheck if we should continue with our allocation
3869 			 * attempt.
3870 			 */
3871 			wait_for_alloc = true;
3872 			force = CHUNK_ALLOC_NO_FORCE;
3873 			spin_unlock(&space_info->lock);
3874 			mutex_lock(&fs_info->chunk_mutex);
3875 			mutex_unlock(&fs_info->chunk_mutex);
3876 		} else {
3877 			/* Proceed with allocation */
3878 			space_info->chunk_alloc = 1;
3879 			wait_for_alloc = false;
3880 			spin_unlock(&space_info->lock);
3881 		}
3882 
3883 		cond_resched();
3884 	} while (wait_for_alloc);
3885 
3886 	mutex_lock(&fs_info->chunk_mutex);
3887 	trans->allocating_chunk = true;
3888 
3889 	/*
3890 	 * If we have mixed data/metadata chunks we want to make sure we keep
3891 	 * allocating mixed chunks instead of individual chunks.
3892 	 */
3893 	if (btrfs_mixed_space_info(space_info))
3894 		flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3895 
3896 	/*
3897 	 * if we're doing a data chunk, go ahead and make sure that
3898 	 * we keep a reasonable number of metadata chunks allocated in the
3899 	 * FS as well.
3900 	 */
3901 	if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3902 		fs_info->data_chunk_allocations++;
3903 		if (!(fs_info->data_chunk_allocations %
3904 		      fs_info->metadata_ratio))
3905 			force_metadata_allocation(fs_info);
3906 	}
3907 
3908 	ret_bg = do_chunk_alloc(trans, flags);
3909 	trans->allocating_chunk = false;
3910 
3911 	if (IS_ERR(ret_bg)) {
3912 		ret = PTR_ERR(ret_bg);
3913 	} else if (from_extent_allocation) {
3914 		/*
3915 		 * New block group is likely to be used soon. Try to activate
3916 		 * it now. Failure is OK for now.
3917 		 */
3918 		btrfs_zone_activate(ret_bg);
3919 	}
3920 
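	/*
	 * do_chunk_alloc() returned the new block group with an extra
	 * reference held for us; drop it now that the optional zone
	 * activation above is done.
	 */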
3921 	if (!ret)
3922 		btrfs_put_block_group(ret_bg);
3923 
3924 	spin_lock(&space_info->lock);
3925 	if (ret < 0) {
3926 		if (ret == -ENOSPC)
3927 			space_info->full = 1;
3928 		else
3929 			goto out;
3930 	} else {
3931 		ret = 1;
3932 		space_info->max_extent_size = 0;
3933 	}
3934 
3935 	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
3936 out:
3937 	space_info->chunk_alloc = 0;
3938 	spin_unlock(&space_info->lock);
3939 	mutex_unlock(&fs_info->chunk_mutex);
3940 
3941 	return ret;
3942 }
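/*
 * Illustrative caller pattern for btrfs_chunk_alloc(), a sketch based only on
 * the return value rules documented above (not copied from an existing caller);
 * many callers treat -ENOSPC as a non-fatal outcome:
 *
 *	ret = btrfs_chunk_alloc(trans, btrfs_get_alloc_profile(fs_info, flags),
 *				CHUNK_ALLOC_NO_FORCE);
 *	if (ret > 0 || ret == -ENOSPC)
 *		ret = 0;
 *	else if (ret < 0)
 *		btrfs_abort_transaction(trans, ret);
 */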
3943 
3944 static u64 get_profile_num_devs(struct btrfs_fs_info *fs_info, u64 type)
3945 {
3946 	u64 num_dev;
3947 
3948 	num_dev = btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].devs_max;
3949 	if (!num_dev)
3950 		num_dev = fs_info->fs_devices->rw_devices;
3951 
3952 	return num_dev;
3953 }
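/*
 * Example for get_profile_num_devs(): a profile with a fixed device count, such
 * as RAID1 with a devs_max of 2, returns that fixed count, while profiles that
 * can span any number of devices (devs_max of 0, e.g. RAID0) fall back to the
 * number of currently writeable devices.
 */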
3954 
3955 static void reserve_chunk_space(struct btrfs_trans_handle *trans,
3956 				u64 bytes,
3957 				u64 type)
3958 {
3959 	struct btrfs_fs_info *fs_info = trans->fs_info;
3960 	struct btrfs_space_info *info;
3961 	u64 left;
3962 	int ret = 0;
3963 
3964 	/*
3965 	 * Needed because we can end up allocating a system chunk and need an
3966 	 * atomic and race-free space reservation in the chunk block reserve.
3967 	 */
3968 	lockdep_assert_held(&fs_info->chunk_mutex);
3969 
3970 	info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3971 	spin_lock(&info->lock);
3972 	left = info->total_bytes - btrfs_space_info_used(info, true);
3973 	spin_unlock(&info->lock);
3974 
3975 	if (left < bytes && btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
3976 		btrfs_info(fs_info, "left=%llu, need=%llu, flags=%llu",
3977 			   left, bytes, type);
3978 		btrfs_dump_space_info(fs_info, info, 0, 0);
3979 	}
3980 
3981 	if (left < bytes) {
3982 		u64 flags = btrfs_system_alloc_profile(fs_info);
3983 		struct btrfs_block_group *bg;
3984 
3985 		/*
3986 		 * Ignore failure to create system chunk. We might end up not
3987 		 * needing it, as we might not need to COW all nodes/leaves from
3988 		 * the paths we visit in the chunk tree (they were already COWed
3989 		 * or created in the current transaction for example).
3990 		 */
3991 		bg = btrfs_create_chunk(trans, flags);
3992 		if (IS_ERR(bg)) {
3993 			ret = PTR_ERR(bg);
3994 		} else {
3995 			/*
3996 			 * We have a new chunk. We also need to activate it for
3997 			 * zoned filesystem.
3998 			 * zoned filesystems.
3999 			ret = btrfs_zoned_activate_one_bg(fs_info, info, true);
4000 			if (ret < 0)
4001 				return;
4002 
4003 			/*
4004 			 * If we fail to add the chunk item here, we end up
4005 			 * trying again at phase 2 of chunk allocation, at
4006 			 * btrfs_create_pending_block_groups(). So ignore
4007 			 * any error here. An ENOSPC here could happen, due to
4008 			 * the cases described at do_chunk_alloc() - the system
4009 			 * block group we just created was just turned into RO
4010 			 * mode by a scrub for example, or a running discard
4011 			 * temporarily removed its free space entries, etc.
4012 			 */
4013 			btrfs_chunk_alloc_add_chunk_item(trans, bg);
4014 		}
4015 	}
4016 
4017 	if (!ret) {
4018 		ret = btrfs_block_rsv_add(fs_info,
4019 					  &fs_info->chunk_block_rsv,
4020 					  bytes, BTRFS_RESERVE_NO_FLUSH);
4021 		if (!ret)
4022 			trans->chunk_bytes_reserved += bytes;
4023 	}
4024 }
4025 
4026 /*
4027  * Reserve space in the system space for allocating or removing a chunk.
4028  * The caller must be holding fs_info->chunk_mutex.
4029  */
4030 void check_system_chunk(struct btrfs_trans_handle *trans, u64 type)
4031 {
4032 	struct btrfs_fs_info *fs_info = trans->fs_info;
4033 	const u64 num_devs = get_profile_num_devs(fs_info, type);
4034 	u64 bytes;
4035 
4036 	/* num_devs device items to update and 1 chunk item to add or remove. */
4037 	bytes = btrfs_calc_metadata_size(fs_info, num_devs) +
4038 		btrfs_calc_insert_metadata_size(fs_info, 1);
4039 
4040 	reserve_chunk_space(trans, bytes, type);
4041 }
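/*
 * Rough illustration for check_system_chunk(), assuming the usual helper
 * definitions (btrfs_calc_metadata_size() reserves nodesize * BTRFS_MAX_LEVEL
 * per item and btrfs_calc_insert_metadata_size() twice that per item): with a
 * 16K nodesize and a profile spanning 2 devices, this reserves
 * 2 * (16K * 8) + 1 * (2 * 16K * 8) = 512K of system metadata space.
 */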
4042 
4043 /*
4044  * Reserve space in the system space, if needed, for doing a modification to the
4045  * chunk btree.
4046  *
4047  * @trans:		A transaction handle.
4048  * @is_item_insertion:	Indicate if the modification is for inserting a new item
4049  *			in the chunk btree or if it's for the deletion or update
4050  *			of an existing item.
4051  *
4052  * This is used in a context where we need to update the chunk btree outside
4053  * block group allocation and removal, to avoid a deadlock with a concurrent
4054  * task that is allocating a metadata or data block group and therefore needs to
4055  * update the chunk btree while holding the chunk mutex. After the update to the
4056  * chunk btree is done, btrfs_trans_release_chunk_metadata() should be called.
4057  *
4058  */
4059 void btrfs_reserve_chunk_metadata(struct btrfs_trans_handle *trans,
4060 				  bool is_item_insertion)
4061 {
4062 	struct btrfs_fs_info *fs_info = trans->fs_info;
4063 	u64 bytes;
4064 
4065 	if (is_item_insertion)
4066 		bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
4067 	else
4068 		bytes = btrfs_calc_metadata_size(fs_info, 1);
4069 
4070 	mutex_lock(&fs_info->chunk_mutex);
4071 	reserve_chunk_space(trans, bytes, BTRFS_BLOCK_GROUP_SYSTEM);
4072 	mutex_unlock(&fs_info->chunk_mutex);
4073 }
4074 
4075 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
4076 {
4077 	struct btrfs_block_group *block_group;
4078 
4079 	block_group = btrfs_lookup_first_block_group(info, 0);
4080 	while (block_group) {
4081 		btrfs_wait_block_group_cache_done(block_group);
4082 		spin_lock(&block_group->lock);
4083 		if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF,
4084 				       &block_group->runtime_flags)) {
4085 			struct inode *inode = block_group->inode;
4086 
4087 			block_group->inode = NULL;
4088 			spin_unlock(&block_group->lock);
4089 
4090 			ASSERT(block_group->io_ctl.inode == NULL);
4091 			iput(inode);
4092 		} else {
4093 			spin_unlock(&block_group->lock);
4094 		}
4095 		block_group = btrfs_next_block_group(block_group);
4096 	}
4097 }
4098 
4099 /*
4100  * Must be called only after stopping all workers, since we could have block
4101  * group caching kthreads running, and therefore they could race with us if we
4102  * freed the block groups before stopping them.
4103  */
4104 int btrfs_free_block_groups(struct btrfs_fs_info *info)
4105 {
4106 	struct btrfs_block_group *block_group;
4107 	struct btrfs_space_info *space_info;
4108 	struct btrfs_caching_control *caching_ctl;
4109 	struct rb_node *n;
4110 
4111 	write_lock(&info->block_group_cache_lock);
4112 	while (!list_empty(&info->caching_block_groups)) {
4113 		caching_ctl = list_entry(info->caching_block_groups.next,
4114 					 struct btrfs_caching_control, list);
4115 		list_del(&caching_ctl->list);
4116 		btrfs_put_caching_control(caching_ctl);
4117 	}
4118 	write_unlock(&info->block_group_cache_lock);
4119 
4120 	spin_lock(&info->unused_bgs_lock);
4121 	while (!list_empty(&info->unused_bgs)) {
4122 		block_group = list_first_entry(&info->unused_bgs,
4123 					       struct btrfs_block_group,
4124 					       bg_list);
4125 		list_del_init(&block_group->bg_list);
4126 		btrfs_put_block_group(block_group);
4127 	}
4128 
4129 	while (!list_empty(&info->reclaim_bgs)) {
4130 		block_group = list_first_entry(&info->reclaim_bgs,
4131 					       struct btrfs_block_group,
4132 					       bg_list);
4133 		list_del_init(&block_group->bg_list);
4134 		btrfs_put_block_group(block_group);
4135 	}
4136 	spin_unlock(&info->unused_bgs_lock);
4137 
4138 	spin_lock(&info->zone_active_bgs_lock);
4139 	while (!list_empty(&info->zone_active_bgs)) {
4140 		block_group = list_first_entry(&info->zone_active_bgs,
4141 					       struct btrfs_block_group,
4142 					       active_bg_list);
4143 		list_del_init(&block_group->active_bg_list);
4144 		btrfs_put_block_group(block_group);
4145 	}
4146 	spin_unlock(&info->zone_active_bgs_lock);
4147 
4148 	write_lock(&info->block_group_cache_lock);
4149 	while ((n = rb_last(&info->block_group_cache_tree.rb_root)) != NULL) {
4150 		block_group = rb_entry(n, struct btrfs_block_group,
4151 				       cache_node);
4152 		rb_erase_cached(&block_group->cache_node,
4153 				&info->block_group_cache_tree);
4154 		RB_CLEAR_NODE(&block_group->cache_node);
4155 		write_unlock(&info->block_group_cache_lock);
4156 
4157 		down_write(&block_group->space_info->groups_sem);
4158 		list_del(&block_group->list);
4159 		up_write(&block_group->space_info->groups_sem);
4160 
4161 		/*
4162 		 * We haven't cached this block group, which means we could
4163 		 * possibly have excluded extents on this block group.
4164 		 */
4165 		if (block_group->cached == BTRFS_CACHE_NO ||
4166 		    block_group->cached == BTRFS_CACHE_ERROR)
4167 			btrfs_free_excluded_extents(block_group);
4168 
4169 		btrfs_remove_free_space_cache(block_group);
4170 		ASSERT(block_group->cached != BTRFS_CACHE_STARTED);
4171 		ASSERT(list_empty(&block_group->dirty_list));
4172 		ASSERT(list_empty(&block_group->io_list));
4173 		ASSERT(list_empty(&block_group->bg_list));
4174 		ASSERT(refcount_read(&block_group->refs) == 1);
4175 		ASSERT(block_group->swap_extents == 0);
4176 		btrfs_put_block_group(block_group);
4177 
4178 		write_lock(&info->block_group_cache_lock);
4179 	}
4180 	write_unlock(&info->block_group_cache_lock);
4181 
4182 	btrfs_release_global_block_rsv(info);
4183 
4184 	while (!list_empty(&info->space_info)) {
4185 		space_info = list_entry(info->space_info.next,
4186 					struct btrfs_space_info,
4187 					list);
4188 
4189 		/*
4190 		 * Do not hide this behind enospc_debug; this is actually
4191 		 * important and indicates a real bug if it happens.
4192 		 */
4193 		if (WARN_ON(space_info->bytes_pinned > 0 ||
4194 			    space_info->bytes_may_use > 0))
4195 			btrfs_dump_space_info(info, space_info, 0, 0);
4196 
4197 		/*
4198 		 * If there was a failure to cleanup a log tree, very likely due
4199 		 * to an IO failure on a writeback attempt of one or more of its
4200 		 * extent buffers, we could not do proper (and cheap) unaccounting
4201 		 * of their reserved space, so don't warn on bytes_reserved > 0 in
4202 		 * that case.
4203 		 */
4204 		if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) ||
4205 		    !BTRFS_FS_LOG_CLEANUP_ERROR(info)) {
4206 			if (WARN_ON(space_info->bytes_reserved > 0))
4207 				btrfs_dump_space_info(info, space_info, 0, 0);
4208 		}
4209 
4210 		WARN_ON(space_info->reclaim_size > 0);
4211 		list_del(&space_info->list);
4212 		btrfs_sysfs_remove_space_info(space_info);
4213 	}
4214 	return 0;
4215 }
4216 
4217 void btrfs_freeze_block_group(struct btrfs_block_group *cache)
4218 {
4219 	atomic_inc(&cache->frozen);
4220 }
4221 
4222 void btrfs_unfreeze_block_group(struct btrfs_block_group *block_group)
4223 {
4224 	struct btrfs_fs_info *fs_info = block_group->fs_info;
4225 	struct extent_map_tree *em_tree;
4226 	struct extent_map *em;
4227 	bool cleanup;
4228 
4229 	spin_lock(&block_group->lock);
4230 	cleanup = (atomic_dec_and_test(&block_group->frozen) &&
4231 		   test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags));
4232 	spin_unlock(&block_group->lock);
4233 
4234 	if (cleanup) {
4235 		em_tree = &fs_info->mapping_tree;
4236 		write_lock(&em_tree->lock);
4237 		em = lookup_extent_mapping(em_tree, block_group->start,
4238 					   1);
4239 		BUG_ON(!em); /* logic error, can't happen */
4240 		remove_extent_mapping(em_tree, em);
4241 		write_unlock(&em_tree->lock);
4242 
4243 		/* once for us and once for the tree */
4244 		free_extent_map(em);
4245 		free_extent_map(em);
4246 
4247 		/*
4248 		 * We may have left one free space entry, and other tasks
4249 		 * trimming this block group may each have left one entry too.
4250 		 * Free them if any.
4251 		 */
4252 		btrfs_remove_free_space_cache(block_group);
4253 	}
4254 }
4255 
4256 bool btrfs_inc_block_group_swap_extents(struct btrfs_block_group *bg)
4257 {
4258 	bool ret = true;
4259 
4260 	spin_lock(&bg->lock);
4261 	if (bg->ro)
4262 		ret = false;
4263 	else
4264 		bg->swap_extents++;
4265 	spin_unlock(&bg->lock);
4266 
4267 	return ret;
4268 }
4269 
4270 void btrfs_dec_block_group_swap_extents(struct btrfs_block_group *bg, int amount)
4271 {
4272 	spin_lock(&bg->lock);
4273 	ASSERT(!bg->ro);
4274 	ASSERT(bg->swap_extents >= amount);
4275 	bg->swap_extents -= amount;
4276 	spin_unlock(&bg->lock);
4277 }
4278