/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"
#include "volumes.h"

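/*
 * Each bitmap entry covers one page worth of bits.  MAX_CACHE_BYTES_PER_GIG
 * caps how much memory the in-core free space cache is allowed to use per
 * 1GB of block group space; see recalculate_thresholds() below.
 */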
#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)

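/*
 * A range that is currently being trimmed.  These are tracked on
 * ctl->trimming_ranges so that a cache writeout running concurrently with a
 * trim does not lose the space that is temporarily off the rbtree (see
 * write_cache_extent_entries()).
 */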
struct btrfs_trim_range {
	u64 start;
	u64 bytes;
	struct list_head list;
};

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info);

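/*
 * Look up the free space inode for the given offset (the start of the block
 * group for block group caches) by reading the free space header item and
 * loading the inode it points to.
 */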
static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
	if (!inode)
		return ERR_PTR(-ENOENT);
	if (IS_ERR(inode))
		return inode;
	if (is_bad_inode(inode)) {
		iput(inode);
		return ERR_PTR(-ENOENT);
	}

	mapping_set_gfp_mask(inode->i_mapping,
			mapping_gfp_constraint(inode->i_mapping,
			~(__GFP_FS | __GFP_HIGHMEM)));

	return inode;
}

struct inode *lookup_free_space_inode(struct btrfs_root *root,
				      struct btrfs_block_group_cache
				      *block_group, struct btrfs_path *path)
{
	struct inode *inode = NULL;
	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path,
					  block_group->key.objectid);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if ((BTRFS_I(inode)->flags & flags) != flags) {
		btrfs_info(root->fs_info,
			"Old style space inode found, converting.");
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
			BTRFS_INODE_NODATACOW;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!block_group->iref) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}

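/*
 * Create the backing inode for a free space cache plus the free space header
 * item that records its location, entry counts and generation.
 */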
static int __create_free_space_inode(struct btrfs_root *root,
				     struct btrfs_trans_handle *trans,
				     struct btrfs_path *path,
				     u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	/* We inline crc's for the free disk space cache */
	if (ino != BTRFS_FREE_INO_OBJECTID)
		flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
			     sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, flags);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;
}

int create_free_space_inode(struct btrfs_root *root,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_find_free_objectid(root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(root, trans, path, ino,
					 block_group->key.objectid);
}

int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
				       struct btrfs_block_rsv *rsv)
{
	u64 needed_bytes;
	int ret;

	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
		btrfs_calc_trans_metadata_size(root, 1);

	spin_lock(&rsv->lock);
	if (rsv->reserved < needed_bytes)
		ret = -ENOSPC;
	else
		ret = 0;
	spin_unlock(&rsv->lock);
	return ret;
}

int btrfs_truncate_free_space_cache(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_block_group_cache *block_group,
				    struct inode *inode)
{
	int ret = 0;
	struct btrfs_path *path = btrfs_alloc_path();
	bool locked = false;

	if (!path) {
		ret = -ENOMEM;
		goto fail;
	}

	if (block_group) {
		locked = true;
		mutex_lock(&trans->transaction->cache_write_mutex);
		if (!list_empty(&block_group->io_list)) {
			list_del_init(&block_group->io_list);

			btrfs_wait_cache_io(root, trans, block_group,
					    &block_group->io_ctl, path,
					    block_group->key.objectid);
			btrfs_put_block_group(block_group);
		}

		/*
		 * Now that we've truncated the cache away, it's no longer
		 * set up or written.
		 */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
	}
	btrfs_free_path(path);

	btrfs_i_size_write(inode, 0);
	truncate_pagecache(inode, 0);

	/*
	 * We don't need an orphan item because truncating the free space cache
	 * will never be split across transactions.
	 * We don't need to check for -EAGAIN because we're a free space
	 * cache inode.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);
	if (ret)
		goto fail;

	ret = btrfs_update_inode(trans, root, inode);

fail:
	if (locked)
		mutex_unlock(&trans->transaction->cache_write_mutex);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);

	return ret;
}

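/*
 * Kick off readahead for the whole cache file so the page-by-page copy in
 * __load_free_space_cache() mostly hits the page cache.
 */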
static int readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);

	return 0;
}

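/*
 * The io_ctl is a cursor over the cache file's pages.  Entries and bitmaps
 * are packed into the pages one after another; when check_crcs is set, the
 * crc of every page lives in an array at the front of the first page,
 * followed by the generation stamp.
 */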
static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
		       struct btrfs_root *root, int write)
{
	int num_pages;
	int check_crcs = 0;

	num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);

	if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
		check_crcs = 1;

	/* Make sure we can fit our crcs into the first page */
	if (write && check_crcs &&
	    (num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
		return -ENOSPC;

	memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));

	io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS);
	if (!io_ctl->pages)
		return -ENOMEM;

	io_ctl->num_pages = num_pages;
	io_ctl->root = root;
	io_ctl->check_crcs = check_crcs;
	io_ctl->inode = inode;

	return 0;
}

static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
	io_ctl->pages = NULL;
}

static void io_ctl_unmap_page(struct btrfs_io_ctl *io_ctl)
{
	if (io_ctl->cur) {
		io_ctl->cur = NULL;
		io_ctl->orig = NULL;
	}
}

static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
{
	ASSERT(io_ctl->index < io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = page_address(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_CACHE_SIZE;
	if (clear)
		memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
}

static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
{
	int i;

	io_ctl_unmap_page(io_ctl);

	for (i = 0; i < io_ctl->num_pages; i++) {
		if (io_ctl->pages[i]) {
			ClearPageChecked(io_ctl->pages[i]);
			unlock_page(io_ctl->pages[i]);
			page_cache_release(io_ctl->pages[i]);
		}
	}
}

static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, struct inode *inode,
				int uptodate)
{
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int i;

	for (i = 0; i < io_ctl->num_pages; i++) {
		page = find_or_create_page(inode->i_mapping, i, mask);
		if (!page) {
			io_ctl_drop_pages(io_ctl);
			return -ENOMEM;
		}
		io_ctl->pages[i] = page;
		if (uptodate && !PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (page->mapping != inode->i_mapping) {
				btrfs_err(BTRFS_I(inode)->root->fs_info,
					  "free space cache page truncated");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
			if (!PageUptodate(page)) {
				btrfs_err(BTRFS_I(inode)->root->fs_info,
					  "error reading free space cache");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
		}
	}

	for (i = 0; i < io_ctl->num_pages; i++) {
		clear_page_dirty_for_io(io_ctl->pages[i]);
		set_page_extent_mapped(io_ctl->pages[i]);
	}

	return 0;
}

static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	__le64 *val;

	io_ctl_map_page(io_ctl, 1);

	/*
	 * Skip the csum areas.  If we don't check crcs then we just have a
	 * 64bit chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
		io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	val = io_ctl->cur;
	*val = cpu_to_le64(generation);
	io_ctl->cur += sizeof(u64);
}

static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	__le64 *gen;

	/*
	 * Skip the crc area.  If we don't check crcs then we just have a 64bit
	 * chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
		io_ctl->size -= sizeof(u64) +
			(sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	gen = io_ctl->cur;
	if (le64_to_cpu(*gen) != generation) {
		btrfs_err_rl(io_ctl->root->fs_info,
			"space cache generation (%llu) does not match inode (%llu)",
				le64_to_cpu(*gen), generation);
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}
	io_ctl->cur += sizeof(u64);
	return 0;
}

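/*
 * Checksum one page and store the result in the crc array at the front of
 * the first page.  For page 0 the checksummed data starts after the crc
 * array itself.
 */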
static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_unmap_page(io_ctl);
		return;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
			      PAGE_CACHE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	io_ctl_unmap_page(io_ctl);
	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	*tmp = crc;
}

static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp, val;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_map_page(io_ctl, 0);
		return 0;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	val = *tmp;

	io_ctl_map_page(io_ctl, 0);
	crc = btrfs_csum_data(io_ctl->orig + offset, crc,
			      PAGE_CACHE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	if (val != crc) {
		btrfs_err_rl(io_ctl->root->fs_info,
			"csum mismatch on free space cache");
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}

	return 0;
}

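/*
 * Append one entry at the cursor, advancing to the next page (and crc'ing
 * the finished one) once the current page can't fit another entry.
 */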
static int io_ctl_add_entry(struct btrfs_io_ctl *io_ctl, u64 offset, u64 bytes,
			    void *bitmap)
{
	struct btrfs_free_space_entry *entry;

	if (!io_ctl->cur)
		return -ENOSPC;

	entry = io_ctl->cur;
	entry->offset = cpu_to_le64(offset);
	entry->bytes = cpu_to_le64(bytes);
	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
		BTRFS_FREE_SPACE_EXTENT;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_set_crc(io_ctl, io_ctl->index - 1);

	/* No more pages to map */
	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	/* map the next page */
	io_ctl_map_page(io_ctl, 1);
	return 0;
}

static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
{
	if (!io_ctl->cur)
		return -ENOSPC;

	/*
	 * If we aren't at the start of the current page, unmap this one and
	 * map the next one if there is any left.
	 */
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
		if (io_ctl->index >= io_ctl->num_pages)
			return -ENOSPC;
		io_ctl_map_page(io_ctl, 0);
	}

	memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE);
	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
	return 0;
}

static void io_ctl_zero_remaining_pages(struct btrfs_io_ctl *io_ctl)
{
	/*
	 * If we're not on the boundary we know we've modified the page and we
	 * need to crc the page.
	 */
	if (io_ctl->cur != io_ctl->orig)
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	else
		io_ctl_unmap_page(io_ctl);

	while (io_ctl->index < io_ctl->num_pages) {
		io_ctl_map_page(io_ctl, 1);
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	}
}

static int io_ctl_read_entry(struct btrfs_io_ctl *io_ctl,
			     struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;
	int ret;

	if (!io_ctl->cur) {
		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
		if (ret)
			return ret;
	}

	e = io_ctl->cur;
	entry->offset = le64_to_cpu(e->offset);
	entry->bytes = le64_to_cpu(e->bytes);
	*type = e->type;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_unmap_page(io_ctl);

	return 0;
}

static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space *entry)
{
	int ret;

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
	if (ret)
		return ret;

	memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE);
	io_ctl_unmap_page(io_ctl);

	return 0;
}

/*
 * Since we attach pinned extents after the fact, we can end up with
 * contiguous sections of free space that are split up into several entries.
 * This poses a problem for tree logging, which could have allocated across
 * what appears to be two entries, because the entries would have been merged
 * when the pinned extents were added back to the free space cache.  So run
 * through the space cache we just loaded and merge contiguous entries.  This
 * keeps log replay from blowing up and makes for nicer allocator behavior.
 */
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *e, *prev = NULL;
	struct rb_node *n;

again:
	spin_lock(&ctl->tree_lock);
	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		e = rb_entry(n, struct btrfs_free_space, offset_index);
		if (!prev)
			goto next;
		if (e->bitmap || prev->bitmap)
			goto next;
		if (prev->offset + prev->bytes == e->offset) {
			unlink_free_space(ctl, prev);
			unlink_free_space(ctl, e);
			prev->bytes += e->bytes;
			kmem_cache_free(btrfs_free_space_cachep, e);
			link_free_space(ctl, prev);
			prev = NULL;
			spin_unlock(&ctl->tree_lock);
			goto again;
		}
next:
		prev = e;
	}
	spin_unlock(&ctl->tree_lock);
}

static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct btrfs_io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_free_space *e, *n;
	LIST_HEAD(bitmaps);
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u8 type;
	int ret = 0;

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		return 0;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return 0;
	else if (ret > 0) {
		btrfs_release_path(path);
		return 0;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	if (!BTRFS_I(inode)->generation) {
		btrfs_info(root->fs_info,
			   "free space cache file (%llu) is invalid, skipping it",
			   offset);
		return 0;
	}

	if (BTRFS_I(inode)->generation != generation) {
		btrfs_err(root->fs_info,
			"free space inode generation (%llu) did not match free space cache generation (%llu)",
			BTRFS_I(inode)->generation, generation);
		return 0;
	}

	if (!num_entries)
		return 0;

	ret = io_ctl_init(&io_ctl, inode, root, 0);
	if (ret)
		return ret;

	ret = readahead_cache(inode);
	if (ret)
		goto out;

	ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
	if (ret)
		goto out;

	ret = io_ctl_check_crc(&io_ctl, 0);
	if (ret)
		goto free_cache;

	ret = io_ctl_check_generation(&io_ctl, generation);
	if (ret)
		goto free_cache;

	while (num_entries) {
		e = kmem_cache_zalloc(btrfs_free_space_cachep,
				      GFP_NOFS);
		if (!e) {
			ret = -ENOMEM;
			goto free_cache;
		}

		ret = io_ctl_read_entry(&io_ctl, e, &type);
		if (ret) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (!e->bytes) {
			ret = -1;
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(root->fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
		} else {
			ASSERT(num_bitmaps);
			num_bitmaps--;
			e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
			if (!e->bitmap) {
				ret = -ENOMEM;
				kmem_cache_free(
					btrfs_free_space_cachep, e);
				goto free_cache;
			}
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			ctl->op->recalc_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(root->fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
			list_add_tail(&e->list, &bitmaps);
		}

		num_entries--;
	}

	io_ctl_unmap_page(&io_ctl);

	/*
	 * Bitmap pages are stored after all the entries, in the same order
	 * their bitmap entries were added to the cache, so read them back in
	 * list order.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		ret = io_ctl_read_bitmap(&io_ctl, e);
		if (ret)
			goto free_cache;
	}

	io_ctl_drop_pages(&io_ctl);
	merge_space_tree(ctl);
	ret = 1;
out:
	io_ctl_free(&io_ctl);
	return ret;
free_cache:
	io_ctl_drop_pages(&io_ctl);
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}

int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode;
	struct btrfs_path *path;
	int ret = 0;
	bool matched;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* We may have converted the inode and made the cache invalid. */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
		goto out;
	}
	spin_unlock(&block_group->lock);

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		btrfs_warn(fs_info, "block group %llu has wrong amount of free space",
			block_group->key.objectid);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuilding it now",
			block_group->key.objectid);
	}

	iput(inode);
	return ret;
}

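/*
 * Write out every extent entry, including the entries that currently sit in
 * a cluster and the ranges that are temporarily off the rbtree because they
 * are being trimmed.  Bitmap entries are collected on @bitmap_list; their
 * payload is written later by write_bitmap_entries().
 */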
static noinline_for_stack
int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space_ctl *ctl,
			      struct btrfs_block_group_cache *block_group,
			      int *entries, int *bitmaps,
			      struct list_head *bitmap_list)
{
	int ret;
	struct btrfs_free_cluster *cluster = NULL;
	struct btrfs_free_cluster *cluster_locked = NULL;
	struct rb_node *node = rb_first(&ctl->free_space_offset);
	struct btrfs_trim_range *trim_entry;

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
	}

	if (!node && cluster) {
		cluster_locked = cluster;
		spin_lock(&cluster_locked->lock);
		node = rb_first(&cluster->root);
		cluster = NULL;
	}

	/* Write out the extent entries */
	while (node) {
		struct btrfs_free_space *e;

		e = rb_entry(node, struct btrfs_free_space, offset_index);
		*entries += 1;

		ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes,
				       e->bitmap);
		if (ret)
			goto fail;

		if (e->bitmap) {
			list_add_tail(&e->list, bitmap_list);
			*bitmaps += 1;
		}
		node = rb_next(node);
		if (!node && cluster) {
			node = rb_first(&cluster->root);
			cluster_locked = cluster;
			spin_lock(&cluster_locked->lock);
			cluster = NULL;
		}
	}
	if (cluster_locked) {
		spin_unlock(&cluster_locked->lock);
		cluster_locked = NULL;
	}

	/*
	 * Make sure we don't miss any range that was removed from our rbtree
	 * because trimming is running. Otherwise after a umount+mount (or crash
	 * after committing the transaction) we would leak free space and get
	 * an inconsistent free space cache report from fsck.
	 */
	list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) {
		ret = io_ctl_add_entry(io_ctl, trim_entry->start,
				       trim_entry->bytes, NULL);
		if (ret)
			goto fail;
		*entries += 1;
	}

	return 0;
fail:
	if (cluster_locked)
		spin_unlock(&cluster_locked->lock);
	return -ENOSPC;
}

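/*
 * Update the free space header item with the final entry/bitmap counts and
 * the current generation, which is what marks the cache file as valid on
 * the next mount.
 */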
static noinline_for_stack int
update_cache_item(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct inode *inode,
		  struct btrfs_path *path, u64 offset,
		  int entries, int bitmaps)
{
	struct btrfs_key key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
				 GFP_NOFS);
		goto fail;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		ASSERT(path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
					 inode->i_size - 1,
					 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
					 NULL, GFP_NOFS);
			btrfs_release_path(path);
			goto fail;
		}
	}

	BTRFS_I(inode)->generation = trans->transid;
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;

fail:
	return -1;
}

static noinline_for_stack int
write_pinned_extent_entries(struct btrfs_root *root,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_io_ctl *io_ctl,
			    int *entries)
{
	u64 start, extent_start, extent_end, len;
	struct extent_io_tree *unpin = NULL;
	int ret;

	if (!block_group)
		return 0;

	/*
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space.
	 *
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one.
	 */
	unpin = root->fs_info->pinned_extents;

	start = block_group->key.objectid;

	while (start < block_group->key.objectid + block_group->key.offset) {
		ret = find_first_extent_bit(unpin, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			return 0;

		/* This pinned extent is out of our range */
		if (extent_start >= block_group->key.objectid +
		    block_group->key.offset)
			return 0;

		extent_start = max(extent_start, start);
		extent_end = min(block_group->key.objectid +
				 block_group->key.offset, extent_end + 1);
		len = extent_end - extent_start;

		*entries += 1;
		ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL);
		if (ret)
			return -ENOSPC;

		start = extent_end;
	}

	return 0;
}

static noinline_for_stack int
write_bitmap_entries(struct btrfs_io_ctl *io_ctl, struct list_head *bitmap_list)
{
	struct list_head *pos, *n;
	int ret;

	/* Write out the bitmaps */
	list_for_each_safe(pos, n, bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);

		ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
		if (ret)
			return -ENOSPC;
		list_del_init(&entry->list);
	}

	return 0;
}

static int flush_dirty_cache(struct inode *inode)
{
	int ret;

	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret)
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
				 GFP_NOFS);

	return ret;
}

static void noinline_for_stack
cleanup_bitmap_list(struct list_head *bitmap_list)
{
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);
		list_del_init(&entry->list);
	}
}

static void noinline_for_stack
cleanup_write_cache_enospc(struct inode *inode,
			   struct btrfs_io_ctl *io_ctl,
			   struct extent_state **cached_state,
			   struct list_head *bitmap_list)
{
	io_ctl_drop_pages(io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, cached_state,
			     GFP_NOFS);
}

int btrfs_wait_cache_io(struct btrfs_root *root,
			struct btrfs_trans_handle *trans,
			struct btrfs_block_group_cache *block_group,
			struct btrfs_io_ctl *io_ctl,
			struct btrfs_path *path, u64 offset)
{
	int ret;
	struct inode *inode = io_ctl->inode;

	if (!inode)
		return 0;

	if (block_group)
		root = root->fs_info->tree_root;

	/* Flush the dirty pages in the cache file. */
	ret = flush_dirty_cache(inode);
	if (ret)
		goto out;

	/* Update the cache item to tell everyone this cache file is valid. */
	ret = update_cache_item(trans, root, inode, path, offset,
				io_ctl->entries, io_ctl->bitmaps);
out:
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
		if (block_group) {
#ifdef DEBUG
			btrfs_err(root->fs_info,
				"failed to write free space cache for block group %llu",
				block_group->key.objectid);
#endif
		}
	}
	btrfs_update_inode(trans, root, inode);

	if (block_group) {
		/* the dirty list is protected by the dirty_bgs_lock */
		spin_lock(&trans->transaction->dirty_bgs_lock);

		/* the disk_cache_state is protected by the block group lock */
		spin_lock(&block_group->lock);

		/*
		 * Only mark this as written if we didn't get put back on
		 * the dirty list while waiting for IO.  Otherwise our
		 * cache state won't be right, and we won't get written again.
		 */
		if (!ret && list_empty(&block_group->dirty_list))
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		else if (ret)
			block_group->disk_cache_state = BTRFS_DC_ERROR;

		spin_unlock(&block_group->lock);
		spin_unlock(&trans->transaction->dirty_bgs_lock);
		io_ctl->inode = NULL;
		iput(inode);
	}

	return ret;
}

/**
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root: the root the inode belongs to
 * @inode: the inode the cache is written into
 * @ctl: the free space cache we are going to write out
 * @block_group: the block_group for this cache, if it belongs to a block_group
 * @io_ctl: the io_ctl used to track the cache pages
 * @trans: the trans handle
 * @path: the path to use
 * @offset: the offset for the key we'll insert
 *
 * This function writes out a free space cache struct to disk for quick
 * recovery on mount.  This will return 0 if it was successful in writing the
 * cache out, or an errno if it was not.
 */
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_block_group_cache *block_group,
				   struct btrfs_io_ctl *io_ctl,
				   struct btrfs_trans_handle *trans,
				   struct btrfs_path *path, u64 offset)
{
	struct extent_state *cached_state = NULL;
	LIST_HEAD(bitmap_list);
	int entries = 0;
	int bitmaps = 0;
	int ret;
	int must_iput = 0;

	if (!i_size_read(inode))
		return -EIO;

	WARN_ON(io_ctl->pages);
	ret = io_ctl_init(io_ctl, inode, root, 1);
	if (ret)
		return ret;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
		down_write(&block_group->data_rwsem);
		spin_lock(&block_group->lock);
		if (block_group->delalloc_bytes) {
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
			spin_unlock(&block_group->lock);
			up_write(&block_group->data_rwsem);
			BTRFS_I(inode)->generation = 0;
			ret = 0;
			must_iput = 1;
			goto out;
		}
		spin_unlock(&block_group->lock);
	}

	/* Lock all pages first so we can lock the extent safely. */
	ret = io_ctl_prepare_pages(io_ctl, inode, 0);
	if (ret)
		goto out_unlock;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 0, &cached_state);

	io_ctl_set_generation(io_ctl, trans->transid);

	mutex_lock(&ctl->cache_writeout_mutex);
	/* Write out the extent entries in the free space cache */
	spin_lock(&ctl->tree_lock);
	ret = write_cache_extent_entries(io_ctl, ctl,
					 block_group, &entries, &bitmaps,
					 &bitmap_list);
	if (ret)
		goto out_nospc_locked;

	/*
	 * Some spaces that are freed in the current transaction are pinned,
	 * they will be added into free space cache after the transaction is
	 * committed, we shouldn't lose them.
	 *
	 * If this changes while we are working we'll get added back to
	 * the dirty list and redo it.  No locking needed
	 */
	ret = write_pinned_extent_entries(root, block_group, io_ctl, &entries);
	if (ret)
		goto out_nospc_locked;

	/*
	 * At last, we write out all the bitmaps and keep cache_writeout_mutex
	 * locked while doing it because a concurrent trim can be manipulating
	 * or freeing the bitmap.
	 */
	ret = write_bitmap_entries(io_ctl, &bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);
	if (ret)
		goto out_nospc;

	/* Zero out the rest of the pages just to make sure */
	io_ctl_zero_remaining_pages(io_ctl);

	/* Everything is written out, now we dirty the pages in the file. */
	ret = btrfs_dirty_pages(root, inode, io_ctl->pages, io_ctl->num_pages,
				0, i_size_read(inode), &cached_state);
	if (ret)
		goto out_nospc;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);
	/*
	 * Release the pages and unlock the extent, we will flush
	 * them out later.
	 */
	io_ctl_drop_pages(io_ctl);

	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);

	/*
	 * At this point the pages are under IO and we're happy.  The caller
	 * is responsible for waiting on them and updating the cache and the
	 * inode.
	 */
	io_ctl->entries = entries;
	io_ctl->bitmaps = bitmaps;

	ret = btrfs_fdatawrite_range(inode, 0, (u64)-1);
	if (ret)
		goto out;

	return 0;

out:
	io_ctl->inode = NULL;
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, inode);
	if (must_iput)
		iput(inode);
	return ret;

out_nospc_locked:
	cleanup_bitmap_list(&bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);

out_nospc:
	cleanup_write_cache_enospc(inode, io_ctl, &cached_state, &bitmap_list);

out_unlock:
	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);

	goto out;
}

int btrfs_write_out_cache(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	root = root->fs_info->tree_root;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, block_group,
				      &block_group->io_ctl, trans,
				      path, block_group->key.objectid);
	if (ret) {
#ifdef DEBUG
		btrfs_err(root->fs_info,
			"failed to write free space cache for block group %llu",
			block_group->key.objectid);
#endif
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);

		block_group->io_ctl.inode = NULL;
		iput(inode);
	}

	/*
	 * If ret == 0 the caller is expected to call btrfs_wait_cache_io
	 * to wait for IO and put the inode.
	 */

	return ret;
}

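/*
 * Helpers for translating between byte offsets and bit indices within a
 * bitmap entry.  @unit is the number of bytes each bit represents
 * (ctl->unit, typically the sectorsize for block group caches).
 */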
static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	ASSERT(offset >= bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}

static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u32 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}

static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset.  If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap.  So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically.  If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				if (info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}

/*
 * Searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we
 * just want a section that has at least 'bytes' size and comes at or after
 * the given offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = rb_prev(&entry->offset_index);
			if (n) {
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap &&
				    prev->offset + prev->bytes > offset)
					entry = prev;
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					offset_index);
			ASSERT(entry->offset <= offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap &&
			    prev->offset + prev->bytes > offset)
				return prev;
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}

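/*
 * Link/unlink entries in the offset-sorted rbtree.  __unlink_free_space()
 * skips the ctl->free_space accounting and leaves it to the caller.
 */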
static inline void
__unlink_free_space(struct btrfs_free_space_ctl *ctl,
		    struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	ASSERT(info->bytes || info->bitmap);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}

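/*
 * Recompute ctl->extents_thresh, the number of extent entries we allow
 * before new free space starts going into bitmaps, so that the cache stays
 * within the MAX_CACHE_BYTES_PER_GIG memory budget.
 */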
static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->key.offset;
	u32 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
	u32 max_bitmaps = div_u64(size + bytes_per_bg - 1, bytes_per_bg);

	max_bitmaps = max_t(u32, max_bitmaps, 1);

	ASSERT(ctl->total_bitmaps <= max_bitmaps);

	/*
	 * The goal is to keep the total amount of memory used per 1GB of
	 * space at or below 32k, so we need to adjust how much memory we
	 * allow to be used by extent based free space tracking
	 */
	if (size < 1024 * 1024 * 1024)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG *
			div_u64(size, 1024 * 1024 * 1024);

	/*
	 * we want to account for 1 more bitmap than what we have so we can
	 * make sure we don't go over our overall goal of
	 * MAX_CACHE_BYTES_PER_GIG as we add more bitmaps.
	 */
	bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;

	if (bitmap_bytes >= max_bytes) {
		ctl->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1);

	ctl->extents_thresh =
		div_u64(extent_bytes, sizeof(struct btrfs_free_space));
}

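/*
 * Clear 'bytes' worth of bits in a bitmap entry.  The double-underscore
 * variant does not adjust ctl->free_space; bitmap_clear_bits() does.
 */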
static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       u64 offset, u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
	if (info->max_extent_size > ctl->unit)
		info->max_extent_size = 0;
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	__bitmap_clear_bits(ctl, info, offset, bytes);
	ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	ASSERT(start + count <= BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;
}

/*
 * If we cannot find a suitable extent, we use *bytes to record the size of
 * the largest extent we did find.
 */
search_bitmap(struct btrfs_free_space_ctl * ctl,struct btrfs_free_space * bitmap_info,u64 * offset,u64 * bytes,bool for_alloc)1744 static int search_bitmap(struct btrfs_free_space_ctl *ctl,
1745 			 struct btrfs_free_space *bitmap_info, u64 *offset,
1746 			 u64 *bytes, bool for_alloc)
1747 {
1748 	unsigned long found_bits = 0;
1749 	unsigned long max_bits = 0;
1750 	unsigned long bits, i;
1751 	unsigned long next_zero;
1752 	unsigned long extent_bits;
1753 
1754 	/*
1755 	 * Skip searching the bitmap if we don't have a contiguous section that
1756 	 * is large enough for this allocation.
1757 	 */
1758 	if (for_alloc &&
1759 	    bitmap_info->max_extent_size &&
1760 	    bitmap_info->max_extent_size < *bytes) {
1761 		*bytes = bitmap_info->max_extent_size;
1762 		return -1;
1763 	}
1764 
1765 	i = offset_to_bit(bitmap_info->offset, ctl->unit,
1766 			  max_t(u64, *offset, bitmap_info->offset));
1767 	bits = bytes_to_bits(*bytes, ctl->unit);
1768 
1769 	for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
1770 		if (for_alloc && bits == 1) {
1771 			found_bits = 1;
1772 			break;
1773 		}
1774 		next_zero = find_next_zero_bit(bitmap_info->bitmap,
1775 					       BITS_PER_BITMAP, i);
1776 		extent_bits = next_zero - i;
1777 		if (extent_bits >= bits) {
1778 			found_bits = extent_bits;
1779 			break;
1780 		} else if (extent_bits > max_bits) {
1781 			max_bits = extent_bits;
1782 		}
1783 		i = next_zero;
1784 	}
1785 
1786 	if (found_bits) {
1787 		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
1788 		*bytes = (u64)(found_bits) * ctl->unit;
1789 		return 0;
1790 	}
1791 
1792 	*bytes = (u64)(max_bits) * ctl->unit;
1793 	bitmap_info->max_extent_size = *bytes;
1794 	return -1;
1795 }
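
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * asking for an allocation-sized run does roughly
 *
 *	u64 offset = entry->offset, bytes = needed;
 *	int ret = search_bitmap(ctl, entry, &offset, &bytes, true);
 *
 * On ret == 0, offset/bytes describe a free run of at least 'needed'
 * bytes; on failure, bytes has been rewritten with the largest run seen,
 * which callers feed back into their max_extent_size hints.
 */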
1796 
1797 static inline u64 get_max_extent_size(struct btrfs_free_space *entry)
1798 {
1799 	if (entry->bitmap)
1800 		return entry->max_extent_size;
1801 	return entry->bytes;
1802 }
1803 
1804 /* Cache the size of the max extent in bytes */
1805 static struct btrfs_free_space *
1806 find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
1807 		unsigned long align, u64 *max_extent_size)
1808 {
1809 	struct btrfs_free_space *entry;
1810 	struct rb_node *node;
1811 	u64 tmp;
1812 	u64 align_off;
1813 	int ret;
1814 
1815 	if (!ctl->free_space_offset.rb_node)
1816 		goto out;
1817 
1818 	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
1819 	if (!entry)
1820 		goto out;
1821 
1822 	for (node = &entry->offset_index; node; node = rb_next(node)) {
1823 		entry = rb_entry(node, struct btrfs_free_space, offset_index);
1824 		if (entry->bytes < *bytes) {
1825 			*max_extent_size = max(get_max_extent_size(entry),
1826 					       *max_extent_size);
1827 			continue;
1828 		}
1829 
1830 		/* make sure the space returned is big enough
1831 		 * to match our requested alignment
1832 		 */
1833 		if (*bytes >= align) {
1834 			tmp = entry->offset - ctl->start + align - 1;
1835 			tmp = div64_u64(tmp, align);
1836 			tmp = tmp * align + ctl->start;
1837 			align_off = tmp - entry->offset;
1838 		} else {
1839 			align_off = 0;
1840 			tmp = entry->offset;
1841 		}
1842 
1843 		if (entry->bytes < *bytes + align_off) {
1844 			*max_extent_size = max(get_max_extent_size(entry),
1845 					       *max_extent_size);
1846 			continue;
1847 		}
1848 
1849 		if (entry->bitmap) {
1850 			u64 size = *bytes;
1851 
1852 			ret = search_bitmap(ctl, entry, &tmp, &size, true);
1853 			if (!ret) {
1854 				*offset = tmp;
1855 				*bytes = size;
1856 				return entry;
1857 			} else {
1858 				*max_extent_size =
1859 					max(get_max_extent_size(entry),
1860 					    *max_extent_size);
1861 			}
1862 			continue;
1863 		}
1864 
1865 		*offset = tmp;
1866 		*bytes = entry->bytes - align_off;
1867 		return entry;
1868 	}
1869 out:
1870 	return NULL;
1871 }
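
/*
 * Worked example of the round-up above (illustrative numbers): with
 * ctl->start = 0, align = 64K and entry->offset = 96K, tmp becomes
 * div64_u64(96K + 64K - 1, 64K) * 64K = 128K, so align_off = 32K and
 * the entry only qualifies if it holds at least *bytes + 32K.
 */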
1872 
1873 static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
1874 			   struct btrfs_free_space *info, u64 offset)
1875 {
1876 	info->offset = offset_to_bitmap(ctl, offset);
1877 	info->bytes = 0;
1878 	INIT_LIST_HEAD(&info->list);
1879 	link_free_space(ctl, info);
1880 	ctl->total_bitmaps++;
1881 
1882 	ctl->op->recalc_thresholds(ctl);
1883 }
1884 
1885 static void free_bitmap(struct btrfs_free_space_ctl *ctl,
1886 			struct btrfs_free_space *bitmap_info)
1887 {
1888 	unlink_free_space(ctl, bitmap_info);
1889 	kfree(bitmap_info->bitmap);
1890 	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
1891 	ctl->total_bitmaps--;
1892 	ctl->op->recalc_thresholds(ctl);
1893 }
1894 
1895 static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
1896 			      struct btrfs_free_space *bitmap_info,
1897 			      u64 *offset, u64 *bytes)
1898 {
1899 	u64 end;
1900 	u64 search_start, search_bytes;
1901 	int ret;
1902 
1903 again:
1904 	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
1905 
1906 	/*
1907 	 * We need to search for bits in this bitmap.  This bitmap may only
1908 	 * cover part of the extent thanks to how we add space, so we need
1909 	 * to search for as much of it as we can and clear that amount, and then
1910 	 * go searching for the next bit.
1911 	 */
1912 	search_start = *offset;
1913 	search_bytes = ctl->unit;
1914 	search_bytes = min(search_bytes, end - search_start + 1);
1915 	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes,
1916 			    false);
1917 	if (ret < 0 || search_start != *offset)
1918 		return -EINVAL;
1919 
1920 	/* We may have found more bits than what we need */
1921 	search_bytes = min(search_bytes, *bytes);
1922 
1923 	/* Cannot clear past the end of the bitmap */
1924 	search_bytes = min(search_bytes, end - search_start + 1);
1925 
1926 	bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes);
1927 	*offset += search_bytes;
1928 	*bytes -= search_bytes;
1929 
1930 	if (*bytes) {
1931 		struct rb_node *next = rb_next(&bitmap_info->offset_index);
1932 		if (!bitmap_info->bytes)
1933 			free_bitmap(ctl, bitmap_info);
1934 
1935 		/*
1936 		 * no entry after this bitmap, but we still have bytes to
1937 		 * remove, so something has gone wrong.
1938 		 */
1939 		if (!next)
1940 			return -EINVAL;
1941 
1942 		bitmap_info = rb_entry(next, struct btrfs_free_space,
1943 				       offset_index);
1944 
1945 		/*
1946 		 * if the next entry isn't a bitmap we need to return to let the
1947 		 * extent stuff do its work.
1948 		 */
1949 		if (!bitmap_info->bitmap)
1950 			return -EAGAIN;
1951 
1952 		/*
1953 		 * Ok the next item is a bitmap, but it may not actually hold
1954 		 * the information for the rest of this free space stuff, so
1955 		 * look for it, and if we don't find it return so we can try
1956 		 * everything over again.
1957 		 */
1958 		search_start = *offset;
1959 		search_bytes = ctl->unit;
1960 		ret = search_bitmap(ctl, bitmap_info, &search_start,
1961 				    &search_bytes, false);
1962 		if (ret < 0 || search_start != *offset)
1963 			return -EAGAIN;
1964 
1965 		goto again;
1966 	} else if (!bitmap_info->bytes)
1967 		free_bitmap(ctl, bitmap_info);
1968 
1969 	return 0;
1970 }
1971 
1972 static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
1973 			       struct btrfs_free_space *info, u64 offset,
1974 			       u64 bytes)
1975 {
1976 	u64 bytes_to_set = 0;
1977 	u64 end;
1978 
1979 	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
1980 
1981 	bytes_to_set = min(end - offset, bytes);
1982 
1983 	bitmap_set_bits(ctl, info, offset, bytes_to_set);
1984 
1985 	/*
1986 	 * We set some bytes, we have no idea what the max extent size is
1987 	 * anymore.
1988 	 */
1989 	info->max_extent_size = 0;
1990 
1991 	return bytes_to_set;
1992 
1993 }
1994 
1995 static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
1996 		      struct btrfs_free_space *info)
1997 {
1998 	struct btrfs_block_group_cache *block_group = ctl->private;
1999 	bool forced = false;
2000 
2001 #ifdef CONFIG_BTRFS_DEBUG
2002 	if (btrfs_should_fragment_free_space(block_group->fs_info->extent_root,
2003 					     block_group))
2004 		forced = true;
2005 #endif
2006 
2007 	/*
2008 	 * If we are below the extents threshold then we can add this as an
2009 	 * extent, and don't have to deal with the bitmap
2010 	 */
2011 	if (!forced && ctl->free_extents < ctl->extents_thresh) {
2012 		/*
2013 		 * If this block group has some small extents we don't want to
2014 		 * use up all of our free slots in the cache with them, we want
2015 		 * to reserve them for larger extents, however if we have plenty
2016 		 * of cache left then go ahead and add them, no sense in adding
2017 		 * the overhead of a bitmap if we don't have to.
2018 		 */
2019 		if (info->bytes <= block_group->sectorsize * 4) {
2020 			if (ctl->free_extents * 2 <= ctl->extents_thresh)
2021 				return false;
2022 		} else {
2023 			return false;
2024 		}
2025 	}
2026 
2027 	/*
2028 	 * The original block groups from mkfs can be really small, like 8
2029 	 * megabytes, so don't bother with a bitmap for those entries.  However
2030 	 * some block groups can be smaller than what a bitmap would cover but
2031 	 * are still large enough that they could overflow the 32k memory limit,
2032 	 * so allow those block groups to still have a bitmap
2033 	 * entry.
2034 	 */
2035 	if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
2036 		return false;
2037 
2038 	return true;
2039 }
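
/*
 * Worked example of the heuristic above (illustrative, 4K sectors): an
 * extent of at most 16K (sectorsize * 4) is only pushed into a bitmap
 * once more than half of the extent slots are occupied; larger extents
 * stay extent entries until extents_thresh itself is exhausted; and a
 * block group smaller than half of what one bitmap covers never
 * converts to bitmaps at all.
 */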
2040 
2041 static struct btrfs_free_space_op free_space_op = {
2042 	.recalc_thresholds	= recalculate_thresholds,
2043 	.use_bitmap		= use_bitmap,
2044 };
2045 
2046 static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
2047 			      struct btrfs_free_space *info)
2048 {
2049 	struct btrfs_free_space *bitmap_info;
2050 	struct btrfs_block_group_cache *block_group = NULL;
2051 	int added = 0;
2052 	u64 bytes, offset, bytes_added;
2053 	int ret;
2054 
2055 	bytes = info->bytes;
2056 	offset = info->offset;
2057 
2058 	if (!ctl->op->use_bitmap(ctl, info))
2059 		return 0;
2060 
2061 	if (ctl->op == &free_space_op)
2062 		block_group = ctl->private;
2063 again:
2064 	/*
2065 	 * Since we link bitmaps right into the cluster we need to see if we
2066 	 * have a cluster here, and if so and it has our bitmap we need to add
2067 	 * the free space to that bitmap.
2068 	 */
2069 	if (block_group && !list_empty(&block_group->cluster_list)) {
2070 		struct btrfs_free_cluster *cluster;
2071 		struct rb_node *node;
2072 		struct btrfs_free_space *entry;
2073 
2074 		cluster = list_entry(block_group->cluster_list.next,
2075 				     struct btrfs_free_cluster,
2076 				     block_group_list);
2077 		spin_lock(&cluster->lock);
2078 		node = rb_first(&cluster->root);
2079 		if (!node) {
2080 			spin_unlock(&cluster->lock);
2081 			goto no_cluster_bitmap;
2082 		}
2083 
2084 		entry = rb_entry(node, struct btrfs_free_space, offset_index);
2085 		if (!entry->bitmap) {
2086 			spin_unlock(&cluster->lock);
2087 			goto no_cluster_bitmap;
2088 		}
2089 
2090 		if (entry->offset == offset_to_bitmap(ctl, offset)) {
2091 			bytes_added = add_bytes_to_bitmap(ctl, entry,
2092 							  offset, bytes);
2093 			bytes -= bytes_added;
2094 			offset += bytes_added;
2095 		}
2096 		spin_unlock(&cluster->lock);
2097 		if (!bytes) {
2098 			ret = 1;
2099 			goto out;
2100 		}
2101 	}
2102 
2103 no_cluster_bitmap:
2104 	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
2105 					 1, 0);
2106 	if (!bitmap_info) {
2107 		ASSERT(added == 0);
2108 		goto new_bitmap;
2109 	}
2110 
2111 	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
2112 	bytes -= bytes_added;
2113 	offset += bytes_added;
2114 	added = 0;
2115 
2116 	if (!bytes) {
2117 		ret = 1;
2118 		goto out;
2119 	} else
2120 		goto again;
2121 
2122 new_bitmap:
2123 	if (info && info->bitmap) {
2124 		add_new_bitmap(ctl, info, offset);
2125 		added = 1;
2126 		info = NULL;
2127 		goto again;
2128 	} else {
2129 		spin_unlock(&ctl->tree_lock);
2130 
2131 		/* no pre-allocated info, allocate a new one */
2132 		if (!info) {
2133 			info = kmem_cache_zalloc(btrfs_free_space_cachep,
2134 						 GFP_NOFS);
2135 			if (!info) {
2136 				spin_lock(&ctl->tree_lock);
2137 				ret = -ENOMEM;
2138 				goto out;
2139 			}
2140 		}
2141 
2142 		/* allocate the bitmap */
2143 		info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
2144 		spin_lock(&ctl->tree_lock);
2145 		if (!info->bitmap) {
2146 			ret = -ENOMEM;
2147 			goto out;
2148 		}
2149 		goto again;
2150 	}
2151 
2152 out:
2153 	if (info) {
2154 		if (info->bitmap)
2155 			kfree(info->bitmap);
2156 		kmem_cache_free(btrfs_free_space_cachep, info);
2157 	}
2158 
2159 	return ret;
2160 }
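
/*
 * Illustration (not part of the original file): a free range that
 * straddles a bitmap boundary is consumed in several trips around the
 * 'again' loop above: the first add_bytes_to_bitmap() call fills the
 * tail of the covering bitmap, offset/bytes advance past it, and the
 * remainder lands in the next bitmap, allocated on demand if need be.
 */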
2161 
2162 static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
2163 			  struct btrfs_free_space *info, bool update_stat)
2164 {
2165 	struct btrfs_free_space *left_info = NULL;
2166 	struct btrfs_free_space *right_info;
2167 	bool merged = false;
2168 	u64 offset = info->offset;
2169 	u64 bytes = info->bytes;
2170 
2171 	/*
2172 	 * first we want to see if there is free space adjacent to the range we
2173 	 * are adding, if there is remove that struct and add a new one to
2174 	 * cover the entire range
2175 	 */
2176 	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
2177 	if (right_info && rb_prev(&right_info->offset_index))
2178 		left_info = rb_entry(rb_prev(&right_info->offset_index),
2179 				     struct btrfs_free_space, offset_index);
2180 	else if (!right_info)
2181 		left_info = tree_search_offset(ctl, offset - 1, 0, 0);
2182 
2183 	if (right_info && !right_info->bitmap) {
2184 		if (update_stat)
2185 			unlink_free_space(ctl, right_info);
2186 		else
2187 			__unlink_free_space(ctl, right_info);
2188 		info->bytes += right_info->bytes;
2189 		kmem_cache_free(btrfs_free_space_cachep, right_info);
2190 		merged = true;
2191 	}
2192 
2193 	if (left_info && !left_info->bitmap &&
2194 	    left_info->offset + left_info->bytes == offset) {
2195 		if (update_stat)
2196 			unlink_free_space(ctl, left_info);
2197 		else
2198 			__unlink_free_space(ctl, left_info);
2199 		info->offset = left_info->offset;
2200 		info->bytes += left_info->bytes;
2201 		kmem_cache_free(btrfs_free_space_cachep, left_info);
2202 		merged = true;
2203 	}
2204 
2205 	return merged;
2206 }
2207 
2208 static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl,
2209 				     struct btrfs_free_space *info,
2210 				     bool update_stat)
2211 {
2212 	struct btrfs_free_space *bitmap;
2213 	unsigned long i;
2214 	unsigned long j;
2215 	const u64 end = info->offset + info->bytes;
2216 	const u64 bitmap_offset = offset_to_bitmap(ctl, end);
2217 	u64 bytes;
2218 
2219 	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
2220 	if (!bitmap)
2221 		return false;
2222 
2223 	i = offset_to_bit(bitmap->offset, ctl->unit, end);
2224 	j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i);
2225 	if (j == i)
2226 		return false;
2227 	bytes = (j - i) * ctl->unit;
2228 	info->bytes += bytes;
2229 
2230 	if (update_stat)
2231 		bitmap_clear_bits(ctl, bitmap, end, bytes);
2232 	else
2233 		__bitmap_clear_bits(ctl, bitmap, end, bytes);
2234 
2235 	if (!bitmap->bytes)
2236 		free_bitmap(ctl, bitmap);
2237 
2238 	return true;
2239 }
2240 
2241 static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl,
2242 				       struct btrfs_free_space *info,
2243 				       bool update_stat)
2244 {
2245 	struct btrfs_free_space *bitmap;
2246 	u64 bitmap_offset;
2247 	unsigned long i;
2248 	unsigned long j;
2249 	unsigned long prev_j;
2250 	u64 bytes;
2251 
2252 	bitmap_offset = offset_to_bitmap(ctl, info->offset);
2253 	/* If we're on a boundary, try the previous logical bitmap. */
2254 	if (bitmap_offset == info->offset) {
2255 		if (info->offset == 0)
2256 			return false;
2257 		bitmap_offset = offset_to_bitmap(ctl, info->offset - 1);
2258 	}
2259 
2260 	bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
2261 	if (!bitmap)
2262 		return false;
2263 
2264 	i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1;
2265 	j = 0;
2266 	prev_j = (unsigned long)-1;
2267 	for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) {
2268 		if (j > i)
2269 			break;
2270 		prev_j = j;
2271 	}
2272 	if (prev_j == i)
2273 		return false;
2274 
2275 	if (prev_j == (unsigned long)-1)
2276 		bytes = (i + 1) * ctl->unit;
2277 	else
2278 		bytes = (i - prev_j) * ctl->unit;
2279 
2280 	info->offset -= bytes;
2281 	info->bytes += bytes;
2282 
2283 	if (update_stat)
2284 		bitmap_clear_bits(ctl, bitmap, info->offset, bytes);
2285 	else
2286 		__bitmap_clear_bits(ctl, bitmap, info->offset, bytes);
2287 
2288 	if (!bitmap->bytes)
2289 		free_bitmap(ctl, bitmap);
2290 
2291 	return true;
2292 }
2293 
2294 /*
2295  * We prefer always to allocate from extent entries, both for clustered and
2296  * non-clustered allocation requests. So when attempting to add a new extent
2297  * entry, try to see if there's adjacent free space in bitmap entries, and if
2298  * there is, migrate that space from the bitmaps to the extent.
2299  * This way we get a better chance of satisfying space allocation requests
2300  * because we attempt to satisfy them based on a single cache entry, and never
2301  * on 2 or more entries - even if the entries represent a contiguous free space
2302  * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry
2303  * ends).
2304  */
2305 static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl,
2306 			      struct btrfs_free_space *info,
2307 			      bool update_stat)
2308 {
2309 	/*
2310 	 * Only work with disconnected entries, as we can change their offset,
2311 	 * and must be extent entries.
2312 	 */
2313 	ASSERT(!info->bitmap);
2314 	ASSERT(RB_EMPTY_NODE(&info->offset_index));
2315 
2316 	if (ctl->total_bitmaps > 0) {
2317 		bool stole_end;
2318 		bool stole_front = false;
2319 
2320 		stole_end = steal_from_bitmap_to_end(ctl, info, update_stat);
2321 		if (ctl->total_bitmaps > 0)
2322 			stole_front = steal_from_bitmap_to_front(ctl, info,
2323 								 update_stat);
2324 
2325 		if (stole_end || stole_front)
2326 			try_merge_free_space(ctl, info, update_stat);
2327 	}
2328 }
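
/*
 * Illustration (not part of the original file): suppose we add the
 * extent [1M, 1M + 128K) while a neighbouring bitmap records free space
 * at [1M + 128K, 1M + 192K).  steal_from_bitmap_to_end() clears those
 * bits and grows the extent to [1M, 1M + 192K), so a later 192K
 * allocation is served from one entry instead of spanning two.
 */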
2329 
2330 int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
2331 			   u64 offset, u64 bytes)
2332 {
2333 	struct btrfs_free_space *info;
2334 	int ret = 0;
2335 
2336 	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
2337 	if (!info)
2338 		return -ENOMEM;
2339 
2340 	info->offset = offset;
2341 	info->bytes = bytes;
2342 	RB_CLEAR_NODE(&info->offset_index);
2343 
2344 	spin_lock(&ctl->tree_lock);
2345 
2346 	if (try_merge_free_space(ctl, info, true))
2347 		goto link;
2348 
2349 	/*
2350 	 * If there was no extent directly to the left or right of this new
2351 	 * extent then we know we're going to have to allocate a new extent, so
2352 	 * before we do that see if we need to drop this into a bitmap
2353 	 */
2354 	ret = insert_into_bitmap(ctl, info);
2355 	if (ret < 0) {
2356 		goto out;
2357 	} else if (ret) {
2358 		ret = 0;
2359 		goto out;
2360 	}
2361 link:
2362 	/*
2363 	 * Only steal free space from adjacent bitmaps if we're sure we're not
2364 	 * going to add the new free space to existing bitmap entries - because
2365 	 * that would mean unnecessary work that would be reverted. Therefore
2366 	 * attempt to steal space from bitmaps if we're adding an extent entry.
2367 	 */
2368 	steal_from_bitmap(ctl, info, true);
2369 
2370 	ret = link_free_space(ctl, info);
2371 	if (ret)
2372 		kmem_cache_free(btrfs_free_space_cachep, info);
2373 out:
2374 	spin_unlock(&ctl->tree_lock);
2375 
2376 	if (ret) {
2377 		printk(KERN_CRIT "BTRFS: unable to add free space :%d\n", ret);
2378 		ASSERT(ret != -EEXIST);
2379 	}
2380 
2381 	return ret;
2382 }
2383 
2384 int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
2385 			    u64 offset, u64 bytes)
2386 {
2387 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2388 	struct btrfs_free_space *info;
2389 	int ret;
2390 	bool re_search = false;
2391 
2392 	spin_lock(&ctl->tree_lock);
2393 
2394 again:
2395 	ret = 0;
2396 	if (!bytes)
2397 		goto out_lock;
2398 
2399 	info = tree_search_offset(ctl, offset, 0, 0);
2400 	if (!info) {
2401 		/*
2402 		 * oops, we didn't find an extent that matched the space we
2403 		 * wanted to remove, so look for a bitmap instead
2404 		 */
2405 		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
2406 					  1, 0);
2407 		if (!info) {
2408 			/*
2409 			 * If we found a partial bit of our free space in a
2410 			 * bitmap but then couldn't find the other part this may
2411 			 * be a problem, so WARN about it.
2412 			 */
2413 			WARN_ON(re_search);
2414 			goto out_lock;
2415 		}
2416 	}
2417 
2418 	re_search = false;
2419 	if (!info->bitmap) {
2420 		unlink_free_space(ctl, info);
2421 		if (offset == info->offset) {
2422 			u64 to_free = min(bytes, info->bytes);
2423 
2424 			info->bytes -= to_free;
2425 			info->offset += to_free;
2426 			if (info->bytes) {
2427 				ret = link_free_space(ctl, info);
2428 				WARN_ON(ret);
2429 			} else {
2430 				kmem_cache_free(btrfs_free_space_cachep, info);
2431 			}
2432 
2433 			offset += to_free;
2434 			bytes -= to_free;
2435 			goto again;
2436 		} else {
2437 			u64 old_end = info->bytes + info->offset;
2438 
2439 			info->bytes = offset - info->offset;
2440 			ret = link_free_space(ctl, info);
2441 			WARN_ON(ret);
2442 			if (ret)
2443 				goto out_lock;
2444 
2445 			/* Not enough bytes in this entry to satisfy us */
2446 			if (old_end < offset + bytes) {
2447 				bytes -= old_end - offset;
2448 				offset = old_end;
2449 				goto again;
2450 			} else if (old_end == offset + bytes) {
2451 				/* all done */
2452 				goto out_lock;
2453 			}
2454 			spin_unlock(&ctl->tree_lock);
2455 
2456 			ret = btrfs_add_free_space(block_group, offset + bytes,
2457 						   old_end - (offset + bytes));
2458 			WARN_ON(ret);
2459 			goto out;
2460 		}
2461 	}
2462 
2463 	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
2464 	if (ret == -EAGAIN) {
2465 		re_search = true;
2466 		goto again;
2467 	}
2468 out_lock:
2469 	spin_unlock(&ctl->tree_lock);
2470 out:
2471 	return ret;
2472 }
2473 
2474 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
2475 			   u64 bytes)
2476 {
2477 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2478 	struct btrfs_free_space *info;
2479 	struct rb_node *n;
2480 	int count = 0;
2481 
2482 	spin_lock(&ctl->tree_lock);
2483 	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
2484 		info = rb_entry(n, struct btrfs_free_space, offset_index);
2485 		if (info->bytes >= bytes && !block_group->ro)
2486 			count++;
2487 		btrfs_crit(block_group->fs_info,
2488 			   "entry offset %llu, bytes %llu, bitmap %s",
2489 			   info->offset, info->bytes,
2490 		       (info->bitmap) ? "yes" : "no");
2491 	}
2492 	spin_unlock(&ctl->tree_lock);
2493 	btrfs_info(block_group->fs_info, "block group has cluster?: %s",
2494 	       list_empty(&block_group->cluster_list) ? "no" : "yes");
2495 	btrfs_info(block_group->fs_info,
2496 		   "%d blocks of free space at or bigger than bytes is", count);
2497 }
2498 
2499 void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
2500 {
2501 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2502 
2503 	spin_lock_init(&ctl->tree_lock);
2504 	ctl->unit = block_group->sectorsize;
2505 	ctl->start = block_group->key.objectid;
2506 	ctl->private = block_group;
2507 	ctl->op = &free_space_op;
2508 	INIT_LIST_HEAD(&ctl->trimming_ranges);
2509 	mutex_init(&ctl->cache_writeout_mutex);
2510 
2511 	/*
2512 	 * we only want to have 32k of ram per block group for keeping
2513 	 * track of free space, and if we pass 1/2 of that we want to
2514 	 * start converting things over to using bitmaps
2515 	 */
2516 	ctl->extents_thresh = ((1024 * 32) / 2) /
2517 				sizeof(struct btrfs_free_space);
2518 }
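
/*
 * Worked example (illustrative; sizeof(struct btrfs_free_space) is
 * assumed to be 72 bytes on a 64-bit build): the initial threshold is
 * (32K / 2) / 72 = 227 extent entries before we start converting
 * things over to bitmaps.
 */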
2519 
2520 /*
2521  * for a given cluster, put all of its extents back into the free
2522  * space cache.  If the block group passed doesn't match the block group
2523  * pointed to by the cluster, someone else raced in and freed the
2524  * cluster already.  In that case, we just return without changing anything
2525  */
2526 static int
2527 __btrfs_return_cluster_to_free_space(
2528 			     struct btrfs_block_group_cache *block_group,
2529 			     struct btrfs_free_cluster *cluster)
2530 {
2531 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2532 	struct btrfs_free_space *entry;
2533 	struct rb_node *node;
2534 
2535 	spin_lock(&cluster->lock);
2536 	if (cluster->block_group != block_group)
2537 		goto out;
2538 
2539 	cluster->block_group = NULL;
2540 	cluster->window_start = 0;
2541 	list_del_init(&cluster->block_group_list);
2542 
2543 	node = rb_first(&cluster->root);
2544 	while (node) {
2545 		bool bitmap;
2546 
2547 		entry = rb_entry(node, struct btrfs_free_space, offset_index);
2548 		node = rb_next(&entry->offset_index);
2549 		rb_erase(&entry->offset_index, &cluster->root);
2550 		RB_CLEAR_NODE(&entry->offset_index);
2551 
2552 		bitmap = (entry->bitmap != NULL);
2553 		if (!bitmap) {
2554 			try_merge_free_space(ctl, entry, false);
2555 			steal_from_bitmap(ctl, entry, false);
2556 		}
2557 		tree_insert_offset(&ctl->free_space_offset,
2558 				   entry->offset, &entry->offset_index, bitmap);
2559 	}
2560 	cluster->root = RB_ROOT;
2561 
2562 out:
2563 	spin_unlock(&cluster->lock);
2564 	btrfs_put_block_group(block_group);
2565 	return 0;
2566 }
2567 
2568 static void __btrfs_remove_free_space_cache_locked(
2569 				struct btrfs_free_space_ctl *ctl)
2570 {
2571 	struct btrfs_free_space *info;
2572 	struct rb_node *node;
2573 
2574 	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
2575 		info = rb_entry(node, struct btrfs_free_space, offset_index);
2576 		if (!info->bitmap) {
2577 			unlink_free_space(ctl, info);
2578 			kmem_cache_free(btrfs_free_space_cachep, info);
2579 		} else {
2580 			free_bitmap(ctl, info);
2581 		}
2582 
2583 		cond_resched_lock(&ctl->tree_lock);
2584 	}
2585 }
2586 
2587 void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
2588 {
2589 	spin_lock(&ctl->tree_lock);
2590 	__btrfs_remove_free_space_cache_locked(ctl);
2591 	spin_unlock(&ctl->tree_lock);
2592 }
2593 
2594 void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
2595 {
2596 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2597 	struct btrfs_free_cluster *cluster;
2598 	struct list_head *head;
2599 
2600 	spin_lock(&ctl->tree_lock);
2601 	while ((head = block_group->cluster_list.next) !=
2602 	       &block_group->cluster_list) {
2603 		cluster = list_entry(head, struct btrfs_free_cluster,
2604 				     block_group_list);
2605 
2606 		WARN_ON(cluster->block_group != block_group);
2607 		__btrfs_return_cluster_to_free_space(block_group, cluster);
2608 
2609 		cond_resched_lock(&ctl->tree_lock);
2610 	}
2611 	__btrfs_remove_free_space_cache_locked(ctl);
2612 	spin_unlock(&ctl->tree_lock);
2613 
2614 }
2615 
2616 u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
2617 			       u64 offset, u64 bytes, u64 empty_size,
2618 			       u64 *max_extent_size)
2619 {
2620 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2621 	struct btrfs_free_space *entry = NULL;
2622 	u64 bytes_search = bytes + empty_size;
2623 	u64 ret = 0;
2624 	u64 align_gap = 0;
2625 	u64 align_gap_len = 0;
2626 
2627 	spin_lock(&ctl->tree_lock);
2628 	entry = find_free_space(ctl, &offset, &bytes_search,
2629 				block_group->full_stripe_len, max_extent_size);
2630 	if (!entry)
2631 		goto out;
2632 
2633 	ret = offset;
2634 	if (entry->bitmap) {
2635 		bitmap_clear_bits(ctl, entry, offset, bytes);
2636 		if (!entry->bytes)
2637 			free_bitmap(ctl, entry);
2638 	} else {
2639 		unlink_free_space(ctl, entry);
2640 		align_gap_len = offset - entry->offset;
2641 		align_gap = entry->offset;
2642 
2643 		entry->offset = offset + bytes;
2644 		WARN_ON(entry->bytes < bytes + align_gap_len);
2645 
2646 		entry->bytes -= bytes + align_gap_len;
2647 		if (!entry->bytes)
2648 			kmem_cache_free(btrfs_free_space_cachep, entry);
2649 		else
2650 			link_free_space(ctl, entry);
2651 	}
2652 out:
2653 	spin_unlock(&ctl->tree_lock);
2654 
2655 	if (align_gap_len)
2656 		__btrfs_add_free_space(ctl, align_gap, align_gap_len);
2657 	return ret;
2658 }
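
/*
 * Note on the align_gap handling above (descriptive, not in the
 * original): when alignment pushes the returned offset past the start
 * of an extent entry, the skipped-over head of the extent is handed
 * back via __btrfs_add_free_space() after the lock is dropped, so
 * aligned allocations never leak the gap.
 */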
2659 
2660 /*
2661  * given a cluster, put all of its extents back into the free space
2662  * cache.  If a block group is passed, this function will only free
2663  * a cluster that belongs to the passed block group.
2664  *
2665  * Otherwise, it'll get a reference on the block group pointed to by the
2666  * cluster and remove the cluster from it.
2667  */
2668 int btrfs_return_cluster_to_free_space(
2669 			       struct btrfs_block_group_cache *block_group,
2670 			       struct btrfs_free_cluster *cluster)
2671 {
2672 	struct btrfs_free_space_ctl *ctl;
2673 	int ret;
2674 
2675 	/* first, get a safe pointer to the block group */
2676 	spin_lock(&cluster->lock);
2677 	if (!block_group) {
2678 		block_group = cluster->block_group;
2679 		if (!block_group) {
2680 			spin_unlock(&cluster->lock);
2681 			return 0;
2682 		}
2683 	} else if (cluster->block_group != block_group) {
2684 		/* someone else has already freed it, don't redo their work */
2685 		spin_unlock(&cluster->lock);
2686 		return 0;
2687 	}
2688 	atomic_inc(&block_group->count);
2689 	spin_unlock(&cluster->lock);
2690 
2691 	ctl = block_group->free_space_ctl;
2692 
2693 	/* now return any extents the cluster had on it */
2694 	spin_lock(&ctl->tree_lock);
2695 	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
2696 	spin_unlock(&ctl->tree_lock);
2697 
2698 	/* finally drop our ref */
2699 	btrfs_put_block_group(block_group);
2700 	return ret;
2701 }
2702 
2703 static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
2704 				   struct btrfs_free_cluster *cluster,
2705 				   struct btrfs_free_space *entry,
2706 				   u64 bytes, u64 min_start,
2707 				   u64 *max_extent_size)
2708 {
2709 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2710 	int err;
2711 	u64 search_start = cluster->window_start;
2712 	u64 search_bytes = bytes;
2713 	u64 ret = 0;
2714 
2715 	search_start = min_start;
2716 	search_bytes = bytes;
2717 
2718 	err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
2719 	if (err) {
2720 		*max_extent_size = max(get_max_extent_size(entry),
2721 				       *max_extent_size);
2722 		return 0;
2723 	}
2724 
2725 	ret = search_start;
2726 	__bitmap_clear_bits(ctl, entry, ret, bytes);
2727 
2728 	return ret;
2729 }
2730 
2731 /*
2732  * given a cluster, try to allocate 'bytes' from it, returns 0
2733  * if it couldn't find anything suitably large, or a logical disk offset
2734  * if things worked out
2735  */
2736 u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
2737 			     struct btrfs_free_cluster *cluster, u64 bytes,
2738 			     u64 min_start, u64 *max_extent_size)
2739 {
2740 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2741 	struct btrfs_free_space *entry = NULL;
2742 	struct rb_node *node;
2743 	u64 ret = 0;
2744 
2745 	spin_lock(&cluster->lock);
2746 	if (bytes > cluster->max_size)
2747 		goto out;
2748 
2749 	if (cluster->block_group != block_group)
2750 		goto out;
2751 
2752 	node = rb_first(&cluster->root);
2753 	if (!node)
2754 		goto out;
2755 
2756 	entry = rb_entry(node, struct btrfs_free_space, offset_index);
2757 	while (1) {
2758 		if (entry->bytes < bytes)
2759 			*max_extent_size = max(get_max_extent_size(entry),
2760 					       *max_extent_size);
2761 
2762 		if (entry->bytes < bytes ||
2763 		    (!entry->bitmap && entry->offset < min_start)) {
2764 			node = rb_next(&entry->offset_index);
2765 			if (!node)
2766 				break;
2767 			entry = rb_entry(node, struct btrfs_free_space,
2768 					 offset_index);
2769 			continue;
2770 		}
2771 
2772 		if (entry->bitmap) {
2773 			ret = btrfs_alloc_from_bitmap(block_group,
2774 						      cluster, entry, bytes,
2775 						      cluster->window_start,
2776 						      max_extent_size);
2777 			if (ret == 0) {
2778 				node = rb_next(&entry->offset_index);
2779 				if (!node)
2780 					break;
2781 				entry = rb_entry(node, struct btrfs_free_space,
2782 						 offset_index);
2783 				continue;
2784 			}
2785 			cluster->window_start += bytes;
2786 		} else {
2787 			ret = entry->offset;
2788 
2789 			entry->offset += bytes;
2790 			entry->bytes -= bytes;
2791 		}
2792 
2793 		if (entry->bytes == 0)
2794 			rb_erase(&entry->offset_index, &cluster->root);
2795 		break;
2796 	}
2797 out:
2798 	spin_unlock(&cluster->lock);
2799 
2800 	if (!ret)
2801 		return 0;
2802 
2803 	spin_lock(&ctl->tree_lock);
2804 
2805 	ctl->free_space -= bytes;
2806 	if (entry->bytes == 0) {
2807 		ctl->free_extents--;
2808 		if (entry->bitmap) {
2809 			kfree(entry->bitmap);
2810 			ctl->total_bitmaps--;
2811 			ctl->op->recalc_thresholds(ctl);
2812 		}
2813 		kmem_cache_free(btrfs_free_space_cachep, entry);
2814 	}
2815 
2816 	spin_unlock(&ctl->tree_lock);
2817 
2818 	return ret;
2819 }
2820 
2821 static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
2822 				struct btrfs_free_space *entry,
2823 				struct btrfs_free_cluster *cluster,
2824 				u64 offset, u64 bytes,
2825 				u64 cont1_bytes, u64 min_bytes)
2826 {
2827 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2828 	unsigned long next_zero;
2829 	unsigned long i;
2830 	unsigned long want_bits;
2831 	unsigned long min_bits;
2832 	unsigned long found_bits;
2833 	unsigned long max_bits = 0;
2834 	unsigned long start = 0;
2835 	unsigned long total_found = 0;
2836 	int ret;
2837 
2838 	i = offset_to_bit(entry->offset, ctl->unit,
2839 			  max_t(u64, offset, entry->offset));
2840 	want_bits = bytes_to_bits(bytes, ctl->unit);
2841 	min_bits = bytes_to_bits(min_bytes, ctl->unit);
2842 
2843 	/*
2844 	 * Don't bother looking for a cluster in this bitmap if it's heavily
2845 	 * fragmented.
2846 	 */
2847 	if (entry->max_extent_size &&
2848 	    entry->max_extent_size < cont1_bytes)
2849 		return -ENOSPC;
2850 again:
2851 	found_bits = 0;
2852 	for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
2853 		next_zero = find_next_zero_bit(entry->bitmap,
2854 					       BITS_PER_BITMAP, i);
2855 		if (next_zero - i >= min_bits) {
2856 			found_bits = next_zero - i;
2857 			if (found_bits > max_bits)
2858 				max_bits = found_bits;
2859 			break;
2860 		}
2861 		if (next_zero - i > max_bits)
2862 			max_bits = next_zero - i;
2863 		i = next_zero;
2864 	}
2865 
2866 	if (!found_bits) {
2867 		entry->max_extent_size = (u64)max_bits * ctl->unit;
2868 		return -ENOSPC;
2869 	}
2870 
2871 	if (!total_found) {
2872 		start = i;
2873 		cluster->max_size = 0;
2874 	}
2875 
2876 	total_found += found_bits;
2877 
2878 	if (cluster->max_size < found_bits * ctl->unit)
2879 		cluster->max_size = found_bits * ctl->unit;
2880 
2881 	if (total_found < want_bits || cluster->max_size < cont1_bytes) {
2882 		i = next_zero + 1;
2883 		goto again;
2884 	}
2885 
2886 	cluster->window_start = start * ctl->unit + entry->offset;
2887 	rb_erase(&entry->offset_index, &ctl->free_space_offset);
2888 	ret = tree_insert_offset(&cluster->root, entry->offset,
2889 				 &entry->offset_index, 1);
2890 	ASSERT(!ret); /* -EEXIST; Logic error */
2891 
2892 	trace_btrfs_setup_cluster(block_group, cluster,
2893 				  total_found * ctl->unit, 1);
2894 	return 0;
2895 }
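
/*
 * Worked example (illustrative): asked for a 1M cluster with
 * cont1_bytes = 256K and min_bytes = 64K, the 'again' loop above may
 * accumulate, say, 256K + 512K + 256K from three separate runs; it
 * succeeds once total_found covers want_bits and the largest single
 * run recorded in cluster->max_size has reached cont1_bytes.
 */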
2896 
2897 /*
2898  * This searches the block group for just extents to fill the cluster with.
2899  * Try to find a cluster with at least bytes total bytes, at least one
2900  * extent of cont1_bytes, and other clusters of at least min_bytes.
2901  */
2902 static noinline int
2903 setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2904 			struct btrfs_free_cluster *cluster,
2905 			struct list_head *bitmaps, u64 offset, u64 bytes,
2906 			u64 cont1_bytes, u64 min_bytes)
2907 {
2908 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2909 	struct btrfs_free_space *first = NULL;
2910 	struct btrfs_free_space *entry = NULL;
2911 	struct btrfs_free_space *last;
2912 	struct rb_node *node;
2913 	u64 window_free;
2914 	u64 max_extent;
2915 	u64 total_size = 0;
2916 
2917 	entry = tree_search_offset(ctl, offset, 0, 1);
2918 	if (!entry)
2919 		return -ENOSPC;
2920 
2921 	/*
2922 	 * We don't want bitmaps, so just move along until we find a normal
2923 	 * extent entry.
2924 	 */
2925 	while (entry->bitmap || entry->bytes < min_bytes) {
2926 		if (entry->bitmap && list_empty(&entry->list))
2927 			list_add_tail(&entry->list, bitmaps);
2928 		node = rb_next(&entry->offset_index);
2929 		if (!node)
2930 			return -ENOSPC;
2931 		entry = rb_entry(node, struct btrfs_free_space, offset_index);
2932 	}
2933 
2934 	window_free = entry->bytes;
2935 	max_extent = entry->bytes;
2936 	first = entry;
2937 	last = entry;
2938 
2939 	for (node = rb_next(&entry->offset_index); node;
2940 	     node = rb_next(&entry->offset_index)) {
2941 		entry = rb_entry(node, struct btrfs_free_space, offset_index);
2942 
2943 		if (entry->bitmap) {
2944 			if (list_empty(&entry->list))
2945 				list_add_tail(&entry->list, bitmaps);
2946 			continue;
2947 		}
2948 
2949 		if (entry->bytes < min_bytes)
2950 			continue;
2951 
2952 		last = entry;
2953 		window_free += entry->bytes;
2954 		if (entry->bytes > max_extent)
2955 			max_extent = entry->bytes;
2956 	}
2957 
2958 	if (window_free < bytes || max_extent < cont1_bytes)
2959 		return -ENOSPC;
2960 
2961 	cluster->window_start = first->offset;
2962 
2963 	node = &first->offset_index;
2964 
2965 	/*
2966 	 * now we've found our entries, pull them out of the free space
2967 	 * cache and put them into the cluster rbtree
2968 	 */
2969 	do {
2970 		int ret;
2971 
2972 		entry = rb_entry(node, struct btrfs_free_space, offset_index);
2973 		node = rb_next(&entry->offset_index);
2974 		if (entry->bitmap || entry->bytes < min_bytes)
2975 			continue;
2976 
2977 		rb_erase(&entry->offset_index, &ctl->free_space_offset);
2978 		ret = tree_insert_offset(&cluster->root, entry->offset,
2979 					 &entry->offset_index, 0);
2980 		total_size += entry->bytes;
2981 		ASSERT(!ret); /* -EEXIST; Logic error */
2982 	} while (node && entry != last);
2983 
2984 	cluster->max_size = max_extent;
2985 	trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
2986 	return 0;
2987 }
2988 
2989 /*
2990  * This specifically looks for bitmaps that may work in the cluster, we assume
2991  * that we have already failed to find extents that will work.
2992  */
2993 static noinline int
2994 setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2995 		     struct btrfs_free_cluster *cluster,
2996 		     struct list_head *bitmaps, u64 offset, u64 bytes,
2997 		     u64 cont1_bytes, u64 min_bytes)
2998 {
2999 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3000 	struct btrfs_free_space *entry = NULL;
3001 	int ret = -ENOSPC;
3002 	u64 bitmap_offset = offset_to_bitmap(ctl, offset);
3003 
3004 	if (ctl->total_bitmaps == 0)
3005 		return -ENOSPC;
3006 
3007 	/*
3008 	 * The bitmap that covers offset won't be in the list unless offset
3009 	 * is just its start offset.
3010 	 */
3011 	if (!list_empty(bitmaps))
3012 		entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
3013 
3014 	if (!entry || entry->offset != bitmap_offset) {
3015 		entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
3016 		if (entry && list_empty(&entry->list))
3017 			list_add(&entry->list, bitmaps);
3018 	}
3019 
3020 	list_for_each_entry(entry, bitmaps, list) {
3021 		if (entry->bytes < bytes)
3022 			continue;
3023 		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
3024 					   bytes, cont1_bytes, min_bytes);
3025 		if (!ret)
3026 			return 0;
3027 	}
3028 
3029 	/*
3030 	 * The bitmaps list has all the bitmaps that record free space
3031 	 * starting after offset, so no more search is required.
3032 	 */
3033 	return -ENOSPC;
3034 }
3035 
3036 /*
3037  * here we try to find a cluster of blocks in a block group.  The goal
3038  * is to find at least bytes+empty_size.
3039  * We might not find them all in one contiguous area.
3040  *
3041  * returns zero and sets up cluster if things worked out, otherwise
3042  * it returns -ENOSPC
3043  */
3044 int btrfs_find_space_cluster(struct btrfs_root *root,
3045 			     struct btrfs_block_group_cache *block_group,
3046 			     struct btrfs_free_cluster *cluster,
3047 			     u64 offset, u64 bytes, u64 empty_size)
3048 {
3049 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3050 	struct btrfs_free_space *entry, *tmp;
3051 	LIST_HEAD(bitmaps);
3052 	u64 min_bytes;
3053 	u64 cont1_bytes;
3054 	int ret;
3055 
3056 	/*
3057 	 * Choose the minimum extent size we'll require for this
3058 	 * cluster.  For SSD_SPREAD, don't allow any fragmentation.
3059 	 * For metadata, allow allocations with smaller extents.  For
3060 	 * data, keep it dense.
3061 	 */
3062 	if (btrfs_test_opt(root, SSD_SPREAD)) {
3063 		cont1_bytes = min_bytes = bytes + empty_size;
3064 	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
3065 		cont1_bytes = bytes;
3066 		min_bytes = block_group->sectorsize;
3067 	} else {
3068 		cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
3069 		min_bytes = block_group->sectorsize;
3070 	}
3071 
3072 	spin_lock(&ctl->tree_lock);
3073 
3074 	/*
3075 	 * If we know we don't have enough space to make a cluster don't even
3076 	 * bother doing all the work to try and find one.
3077 	 */
3078 	if (ctl->free_space < bytes) {
3079 		spin_unlock(&ctl->tree_lock);
3080 		return -ENOSPC;
3081 	}
3082 
3083 	spin_lock(&cluster->lock);
3084 
3085 	/* someone already found a cluster, hooray */
3086 	if (cluster->block_group) {
3087 		ret = 0;
3088 		goto out;
3089 	}
3090 
3091 	trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
3092 				 min_bytes);
3093 
3094 	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
3095 				      bytes + empty_size,
3096 				      cont1_bytes, min_bytes);
3097 	if (ret)
3098 		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
3099 					   offset, bytes + empty_size,
3100 					   cont1_bytes, min_bytes);
3101 
3102 	/* Clear our temporary list */
3103 	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
3104 		list_del_init(&entry->list);
3105 
3106 	if (!ret) {
3107 		atomic_inc(&block_group->count);
3108 		list_add_tail(&cluster->block_group_list,
3109 			      &block_group->cluster_list);
3110 		cluster->block_group = block_group;
3111 	} else {
3112 		trace_btrfs_failed_cluster_setup(block_group);
3113 	}
3114 out:
3115 	spin_unlock(&cluster->lock);
3116 	spin_unlock(&ctl->tree_lock);
3117 
3118 	return ret;
3119 }
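
/*
 * Worked example of the sizing policy above (illustrative): a 256K data
 * allocation with empty_size = 3840K requires cont1_bytes =
 * max(256K, 4M >> 2) = 1M of contiguous space while accepting other
 * extents down to one sector; the same request against a metadata block
 * group needs only a 256K contiguous extent, and with SSD_SPREAD the
 * whole 4M must be one unfragmented chunk.
 */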
3120 
3121 /*
3122  * simple code to zero out a cluster
3123  */
3124 void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
3125 {
3126 	spin_lock_init(&cluster->lock);
3127 	spin_lock_init(&cluster->refill_lock);
3128 	cluster->root = RB_ROOT;
3129 	cluster->max_size = 0;
3130 	cluster->fragmented = false;
3131 	INIT_LIST_HEAD(&cluster->block_group_list);
3132 	cluster->block_group = NULL;
3133 }
3134 
3135 static int do_trimming(struct btrfs_block_group_cache *block_group,
3136 		       u64 *total_trimmed, u64 start, u64 bytes,
3137 		       u64 reserved_start, u64 reserved_bytes,
3138 		       struct btrfs_trim_range *trim_entry)
3139 {
3140 	struct btrfs_space_info *space_info = block_group->space_info;
3141 	struct btrfs_fs_info *fs_info = block_group->fs_info;
3142 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3143 	int ret;
3144 	int update = 0;
3145 	u64 trimmed = 0;
3146 
3147 	spin_lock(&space_info->lock);
3148 	spin_lock(&block_group->lock);
3149 	if (!block_group->ro) {
3150 		block_group->reserved += reserved_bytes;
3151 		space_info->bytes_reserved += reserved_bytes;
3152 		update = 1;
3153 	}
3154 	spin_unlock(&block_group->lock);
3155 	spin_unlock(&space_info->lock);
3156 
3157 	ret = btrfs_discard_extent(fs_info->extent_root,
3158 				   start, bytes, &trimmed);
3159 	if (!ret)
3160 		*total_trimmed += trimmed;
3161 
3162 	mutex_lock(&ctl->cache_writeout_mutex);
3163 	btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
3164 	list_del(&trim_entry->list);
3165 	mutex_unlock(&ctl->cache_writeout_mutex);
3166 
3167 	if (update) {
3168 		spin_lock(&space_info->lock);
3169 		spin_lock(&block_group->lock);
3170 		if (block_group->ro)
3171 			space_info->bytes_readonly += reserved_bytes;
3172 		block_group->reserved -= reserved_bytes;
3173 		space_info->bytes_reserved -= reserved_bytes;
3174 		spin_unlock(&space_info->lock);
3175 		spin_unlock(&block_group->lock);
3176 	}
3177 
3178 	return ret;
3179 }
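
/*
 * Note (descriptive, not in the original): the range being discarded
 * was already removed from the free space cache by the caller and sits
 * on ctl->trimming_ranges, so a concurrent cache writeout can tell it
 * is in flight; only after the discard completes is it re-added, under
 * cache_writeout_mutex.
 */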
3180 
3181 static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
3182 			  u64 *total_trimmed, u64 start, u64 end, u64 minlen)
3183 {
3184 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3185 	struct btrfs_free_space *entry;
3186 	struct rb_node *node;
3187 	int ret = 0;
3188 	u64 extent_start;
3189 	u64 extent_bytes;
3190 	u64 bytes;
3191 
3192 	while (start < end) {
3193 		struct btrfs_trim_range trim_entry;
3194 
3195 		mutex_lock(&ctl->cache_writeout_mutex);
3196 		spin_lock(&ctl->tree_lock);
3197 
3198 		if (ctl->free_space < minlen) {
3199 			spin_unlock(&ctl->tree_lock);
3200 			mutex_unlock(&ctl->cache_writeout_mutex);
3201 			break;
3202 		}
3203 
3204 		entry = tree_search_offset(ctl, start, 0, 1);
3205 		if (!entry) {
3206 			spin_unlock(&ctl->tree_lock);
3207 			mutex_unlock(&ctl->cache_writeout_mutex);
3208 			break;
3209 		}
3210 
3211 		/* skip bitmaps */
3212 		while (entry->bitmap) {
3213 			node = rb_next(&entry->offset_index);
3214 			if (!node) {
3215 				spin_unlock(&ctl->tree_lock);
3216 				mutex_unlock(&ctl->cache_writeout_mutex);
3217 				goto out;
3218 			}
3219 			entry = rb_entry(node, struct btrfs_free_space,
3220 					 offset_index);
3221 		}
3222 
3223 		if (entry->offset >= end) {
3224 			spin_unlock(&ctl->tree_lock);
3225 			mutex_unlock(&ctl->cache_writeout_mutex);
3226 			break;
3227 		}
3228 
3229 		extent_start = entry->offset;
3230 		extent_bytes = entry->bytes;
3231 		start = max(start, extent_start);
3232 		bytes = min(extent_start + extent_bytes, end) - start;
3233 		if (bytes < minlen) {
3234 			spin_unlock(&ctl->tree_lock);
3235 			mutex_unlock(&ctl->cache_writeout_mutex);
3236 			goto next;
3237 		}
3238 
3239 		unlink_free_space(ctl, entry);
3240 		kmem_cache_free(btrfs_free_space_cachep, entry);
3241 
3242 		spin_unlock(&ctl->tree_lock);
3243 		trim_entry.start = extent_start;
3244 		trim_entry.bytes = extent_bytes;
3245 		list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
3246 		mutex_unlock(&ctl->cache_writeout_mutex);
3247 
3248 		ret = do_trimming(block_group, total_trimmed, start, bytes,
3249 				  extent_start, extent_bytes, &trim_entry);
3250 		if (ret)
3251 			break;
3252 next:
3253 		start += bytes;
3254 
3255 		if (fatal_signal_pending(current)) {
3256 			ret = -ERESTARTSYS;
3257 			break;
3258 		}
3259 
3260 		cond_resched();
3261 	}
3262 out:
3263 	return ret;
3264 }
3265 
3266 static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
3267 			u64 *total_trimmed, u64 start, u64 end, u64 minlen)
3268 {
3269 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3270 	struct btrfs_free_space *entry;
3271 	int ret = 0;
3272 	int ret2;
3273 	u64 bytes;
3274 	u64 offset = offset_to_bitmap(ctl, start);
3275 
3276 	while (offset < end) {
3277 		bool next_bitmap = false;
3278 		struct btrfs_trim_range trim_entry;
3279 
3280 		mutex_lock(&ctl->cache_writeout_mutex);
3281 		spin_lock(&ctl->tree_lock);
3282 
3283 		if (ctl->free_space < minlen) {
3284 			spin_unlock(&ctl->tree_lock);
3285 			mutex_unlock(&ctl->cache_writeout_mutex);
3286 			break;
3287 		}
3288 
3289 		entry = tree_search_offset(ctl, offset, 1, 0);
3290 		if (!entry) {
3291 			spin_unlock(&ctl->tree_lock);
3292 			mutex_unlock(&ctl->cache_writeout_mutex);
3293 			next_bitmap = true;
3294 			goto next;
3295 		}
3296 
3297 		bytes = minlen;
3298 		ret2 = search_bitmap(ctl, entry, &start, &bytes, false);
3299 		if (ret2 || start >= end) {
3300 			spin_unlock(&ctl->tree_lock);
3301 			mutex_unlock(&ctl->cache_writeout_mutex);
3302 			next_bitmap = true;
3303 			goto next;
3304 		}
3305 
3306 		bytes = min(bytes, end - start);
3307 		if (bytes < minlen) {
3308 			spin_unlock(&ctl->tree_lock);
3309 			mutex_unlock(&ctl->cache_writeout_mutex);
3310 			goto next;
3311 		}
3312 
3313 		bitmap_clear_bits(ctl, entry, start, bytes);
3314 		if (entry->bytes == 0)
3315 			free_bitmap(ctl, entry);
3316 
3317 		spin_unlock(&ctl->tree_lock);
3318 		trim_entry.start = start;
3319 		trim_entry.bytes = bytes;
3320 		list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
3321 		mutex_unlock(&ctl->cache_writeout_mutex);
3322 
3323 		ret = do_trimming(block_group, total_trimmed, start, bytes,
3324 				  start, bytes, &trim_entry);
3325 		if (ret)
3326 			break;
3327 next:
3328 		if (next_bitmap) {
3329 			offset += BITS_PER_BITMAP * ctl->unit;
3330 		} else {
3331 			start += bytes;
3332 			if (start >= offset + BITS_PER_BITMAP * ctl->unit)
3333 				offset += BITS_PER_BITMAP * ctl->unit;
3334 		}
3335 
3336 		if (fatal_signal_pending(current)) {
3337 			ret = -ERESTARTSYS;
3338 			break;
3339 		}
3340 
3341 		cond_resched();
3342 	}
3343 
3344 	return ret;
3345 }
3346 
3347 void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache)
3348 {
3349 	atomic_inc(&cache->trimming);
3350 }
3351 
3352 void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *block_group)
3353 {
3354 	struct extent_map_tree *em_tree;
3355 	struct extent_map *em;
3356 	bool cleanup;
3357 
3358 	spin_lock(&block_group->lock);
3359 	cleanup = (atomic_dec_and_test(&block_group->trimming) &&
3360 		   block_group->removed);
3361 	spin_unlock(&block_group->lock);
3362 
3363 	if (cleanup) {
3364 		lock_chunks(block_group->fs_info->chunk_root);
3365 		em_tree = &block_group->fs_info->mapping_tree.map_tree;
3366 		write_lock(&em_tree->lock);
3367 		em = lookup_extent_mapping(em_tree, block_group->key.objectid,
3368 					   1);
3369 		BUG_ON(!em); /* logic error, can't happen */
3370 		/*
3371 		 * remove_extent_mapping() will delete us from the pinned_chunks
3372 		 * list, which is protected by the chunk mutex.
3373 		 */
3374 		remove_extent_mapping(em_tree, em);
3375 		write_unlock(&em_tree->lock);
3376 		unlock_chunks(block_group->fs_info->chunk_root);
3377 
3378 		/* once for us and once for the tree */
3379 		free_extent_map(em);
3380 		free_extent_map(em);
3381 
3382 		/*
3383 		 * We've left one free space entry and other tasks trimming
3384 		 * this block group have left 1 entry each one. Free them.
3385 		 */
3386 		__btrfs_remove_free_space_cache(block_group->free_space_ctl);
3387 	}
3388 }
3389 
3390 int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
3391 			   u64 *trimmed, u64 start, u64 end, u64 minlen)
3392 {
3393 	int ret;
3394 
3395 	*trimmed = 0;
3396 
3397 	spin_lock(&block_group->lock);
3398 	if (block_group->removed) {
3399 		spin_unlock(&block_group->lock);
3400 		return 0;
3401 	}
3402 	btrfs_get_block_group_trimming(block_group);
3403 	spin_unlock(&block_group->lock);
3404 
3405 	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
3406 	if (ret)
3407 		goto out;
3408 
3409 	ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
3410 out:
3411 	btrfs_put_block_group_trimming(block_group);
3412 	return ret;
3413 }
3414 
3415 /*
3416  * Find the left-most item in the cache tree, and then return the
3417  * smallest inode number in the item.
3418  *
3419  * Note: the returned inode number may not be the smallest one in
3420  * the tree, if the left-most item is a bitmap.
3421  */
3422 u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
3423 {
3424 	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
3425 	struct btrfs_free_space *entry = NULL;
3426 	u64 ino = 0;
3427 
3428 	spin_lock(&ctl->tree_lock);
3429 
3430 	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
3431 		goto out;
3432 
3433 	entry = rb_entry(rb_first(&ctl->free_space_offset),
3434 			 struct btrfs_free_space, offset_index);
3435 
3436 	if (!entry->bitmap) {
3437 		ino = entry->offset;
3438 
3439 		unlink_free_space(ctl, entry);
3440 		entry->offset++;
3441 		entry->bytes--;
3442 		if (!entry->bytes)
3443 			kmem_cache_free(btrfs_free_space_cachep, entry);
3444 		else
3445 			link_free_space(ctl, entry);
3446 	} else {
3447 		u64 offset = 0;
3448 		u64 count = 1;
3449 		int ret;
3450 
3451 		ret = search_bitmap(ctl, entry, &offset, &count, true);
3452 		/* Logic error; Should be empty if it can't find anything */
3453 		ASSERT(!ret);
3454 
3455 		ino = offset;
3456 		bitmap_clear_bits(ctl, entry, offset, 1);
3457 		if (entry->bytes == 0)
3458 			free_bitmap(ctl, entry);
3459 	}
3460 out:
3461 	spin_unlock(&ctl->tree_lock);
3462 
3463 	return ino;
3464 }
3465 
3466 struct inode *lookup_free_ino_inode(struct btrfs_root *root,
3467 				    struct btrfs_path *path)
3468 {
3469 	struct inode *inode = NULL;
3470 
3471 	spin_lock(&root->ino_cache_lock);
3472 	if (root->ino_cache_inode)
3473 		inode = igrab(root->ino_cache_inode);
3474 	spin_unlock(&root->ino_cache_lock);
3475 	if (inode)
3476 		return inode;
3477 
3478 	inode = __lookup_free_space_inode(root, path, 0);
3479 	if (IS_ERR(inode))
3480 		return inode;
3481 
3482 	spin_lock(&root->ino_cache_lock);
3483 	if (!btrfs_fs_closing(root->fs_info))
3484 		root->ino_cache_inode = igrab(inode);
3485 	spin_unlock(&root->ino_cache_lock);
3486 
3487 	return inode;
3488 }

int create_free_ino_inode(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_path *path)
{
	return __create_free_space_inode(root, trans, path,
					 BTRFS_FREE_INO_OBJECTID, 0);
}

int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	int ret = 0;
	u64 root_gen = btrfs_root_generation(&root->root_item);

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	/*
	 * If we're unmounting, just return: this searches the normal root
	 * rather than the commit root, so we could deadlock.
	 */
	if (btrfs_fs_closing(fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		goto out;

	if (root_gen != BTRFS_I(inode)->generation)
		goto out_put;

	ret = __load_free_space_cache(root, inode, ctl, path, 0);

	if (ret < 0)
		btrfs_err(fs_info,
			"failed to load free ino cache for root %llu",
			root->root_key.objectid);
out_put:
	iput(inode);
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_write_out_ino_cache(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path,
			      struct inode *inode)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	int ret;
	struct btrfs_io_ctl io_ctl;
	bool release_metadata = true;

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	memset(&io_ctl, 0, sizeof(io_ctl));
	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl,
				      trans, path, 0);
	if (!ret) {
		/*
		 * At this point writepages() didn't error out, so our
		 * metadata reservation is released when the writeback
		 * finishes, at inode.c:btrfs_finish_ordered_io(),
		 * regardless of whether it finishes with or without an
		 * error.
		 */
		release_metadata = false;
		ret = btrfs_wait_cache_io(root, trans, NULL, &io_ctl, path, 0);
	}

	if (ret) {
		if (release_metadata)
			btrfs_delalloc_release_metadata(inode, inode->i_size);
#ifdef DEBUG
		btrfs_err(root->fs_info,
			"failed to write free ino cache for root %llu",
			root->root_key.objectid);
#endif
	}

	return ret;
}
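
/*
 * Hypothetical usage sketch, not part of the original file: a commit-time
 * caller hands the cache inode to the writeout and tolerates failure,
 * since the ino cache is only an optimization and is rebuilt when it is
 * missing or stale.  Everything here except btrfs_write_out_ino_cache()
 * is an illustrative assumption.
 */
static void example_save_ino_cache(struct btrfs_root *root,
				   struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct inode *inode)
{
	int ret;

	ret = btrfs_write_out_ino_cache(root, trans, path, inode);
	if (ret)
		btrfs_warn(root->fs_info,
			   "failed to write ino cache for root %llu: %d",
			   root->root_key.objectid, ret);
}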

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/*
 * Use this if you need to make a bitmap or extent entry specifically.  It
 * doesn't do any of the merging that add_free_space() does; it acts much
 * like the free space cache loading code, so you can create really weird
 * configurations.
 */
int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
			      u64 offset, u64 bytes, bool bitmap)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info = NULL, *bitmap_info;
	void *map = NULL;
	u64 bytes_added;
	int ret;

again:
	if (!info) {
		info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
		if (!info)
			return -ENOMEM;
	}

	if (!bitmap) {
		spin_lock(&ctl->tree_lock);
		info->offset = offset;
		info->bytes = bytes;
		info->max_extent_size = 0;
		ret = link_free_space(ctl, info);
		spin_unlock(&ctl->tree_lock);
		if (ret)
			kmem_cache_free(btrfs_free_space_cachep, info);
		return ret;
	}

	if (!map) {
		map = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
		if (!map) {
			kmem_cache_free(btrfs_free_space_cachep, info);
			return -ENOMEM;
		}
	}

	spin_lock(&ctl->tree_lock);
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		info->bitmap = map;
		map = NULL;
		add_new_bitmap(ctl, info, offset);
		bitmap_info = info;
		info = NULL;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);

	bytes -= bytes_added;
	offset += bytes_added;
	spin_unlock(&ctl->tree_lock);

	if (bytes)
		goto again;

	if (info)
		kmem_cache_free(btrfs_free_space_cachep, info);
	kfree(map);	/* kfree(NULL) is a no-op */
	return 0;
}
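
/*
 * Illustrative sketch, not part of the original file: a sanity test might
 * use the helper above to build a deliberately odd layout, e.g. an extent
 * entry butted up against a bitmap covering the adjacent region, which
 * add_free_space() would normally merge.  The offsets below are arbitrary
 * example values and the function name is hypothetical.
 */
static int example_build_weird_layout(struct btrfs_block_group_cache *cache)
{
	int ret;

	/* A plain 4M extent entry at offset 0. */
	ret = test_add_free_space_entry(cache, 0, 4 * 1024 * 1024, false);
	if (ret)
		return ret;

	/* A bitmap entry describing the next 1M, left unmerged. */
	return test_add_free_space_entry(cache, 4 * 1024 * 1024,
					 1 * 1024 * 1024, true);
}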

/*
 * Checks to see if the given range is in the free space cache.  This is
 * really just used to check for the absence of space, so if there is any
 * free space in the range at all we will return 1.
 */
int test_check_exists(struct btrfs_block_group_cache *cache,
		      u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info;
	int ret = 0;

	spin_lock(&ctl->tree_lock);
	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info)
			goto out;
	}

have_info:
	if (info->bitmap) {
		u64 bit_off, bit_bytes;
		struct rb_node *n;
		struct btrfs_free_space *tmp;

		bit_off = offset;
		bit_bytes = ctl->unit;
		ret = search_bitmap(ctl, info, &bit_off, &bit_bytes, false);
		if (!ret) {
			if (bit_off == offset) {
				ret = 1;
				goto out;
			} else if (bit_off > offset &&
				   offset + bytes > bit_off) {
				ret = 1;
				goto out;
			}
		}

		n = rb_prev(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (tmp->offset + tmp->bytes < offset)
				break;
			if (offset + bytes < tmp->offset) {
				/* step past tmp, not info, or we loop forever */
				n = rb_prev(&tmp->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		n = rb_next(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (offset + bytes < tmp->offset)
				break;
			if (tmp->offset + tmp->bytes < offset) {
				/* step past tmp, not info, or we loop forever */
				n = rb_next(&tmp->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		ret = 0;
		goto out;
	}

	if (info->offset == offset) {
		ret = 1;
		goto out;
	}

	if (offset > info->offset && offset < info->offset + info->bytes)
		ret = 1;
out:
	spin_unlock(&ctl->tree_lock);
	return ret;
}
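
/*
 * Illustrative sketch, not part of the original file: paired with
 * test_add_free_space_entry(), a test can assert that space it expects
 * to have been consumed is really gone.  The helper name and error code
 * are hypothetical.
 */
static int example_assert_range_absent(struct btrfs_block_group_cache *cache,
				       u64 offset, u64 bytes)
{
	/* test_check_exists() returns 1 if any free space overlaps. */
	if (test_check_exists(cache, offset, bytes))
		return -EEXIST;
	return 0;
}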
#endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */