// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Red Hat. All rights reserved.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include <linux/error-injection.h>
#include <linux/sched/mm.h>
#include "misc.h"
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "volumes.h"
#include "space-info.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "discard.h"
#include "subpage.h"
#include "inode-item.h"

#define BITS_PER_BITMAP		(PAGE_SIZE * 8UL)
#define MAX_CACHE_BYTES_PER_GIG	SZ_64K
#define FORCE_EXTENT_THRESHOLD	SZ_1M
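
/*
 * A quick sizing note (derived from the constants above, assuming 4KiB
 * pages): BITS_PER_BITMAP is 32768, so with a 4KiB sectorsize unit a
 * single bitmap entry covers 32768 * 4KiB = 128MiB of a block group.
 */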

struct btrfs_trim_range {
	u64 start;
	u64 bytes;
	struct list_head list;
};

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, bool update_stat);
static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes, bool for_alloc);
static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info);
static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes, bool update_stats);

static void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *node;

	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!info->bitmap) {
			unlink_free_space(ctl, info, true);
			kmem_cache_free(btrfs_free_space_cachep, info);
		} else {
			free_bitmap(ctl, info);
		}

		cond_resched_lock(&ctl->tree_lock);
	}
}

static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	unsigned nofs_flag;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	/*
	 * We are often under a trans handle at this point, so we need to make
	 * sure NOFS is set to keep us from deadlocking.
	 */
	nofs_flag = memalloc_nofs_save();
	inode = btrfs_iget_path(fs_info->sb, location.objectid, root, path);
	btrfs_release_path(path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(inode))
		return inode;

	mapping_set_gfp_mask(inode->i_mapping,
			mapping_gfp_constraint(inode->i_mapping,
			~(__GFP_FS | __GFP_HIGHMEM)));

	return inode;
}

struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
		struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct inode *inode = NULL;
	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(fs_info->tree_root, path,
					  block_group->start);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if (!((BTRFS_I(inode)->flags & flags) == flags)) {
		btrfs_info(fs_info, "Old style space inode found, converting.");
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
			BTRFS_INODE_NODATACOW;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!test_and_set_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags))
		block_group->inode = igrab(inode);
	spin_unlock(&block_group->lock);

	return inode;
}

static int __create_free_space_inode(struct btrfs_root *root,
				     struct btrfs_trans_handle *trans,
				     struct btrfs_path *path,
				     u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	/* We inline CRCs for the free disk space cache */
	const u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC |
			  BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memzero_extent_buffer(leaf, (unsigned long)inode_item,
			      sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, flags);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(path);
		return ret;
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memzero_extent_buffer(leaf, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;
}

int create_free_space_inode(struct btrfs_trans_handle *trans,
			    struct btrfs_block_group *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_get_free_objectid(trans->fs_info->tree_root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(trans->fs_info->tree_root, trans, path,
					 ino, block_group->start);
}

/*
 * inode is an optional sink: if it is NULL, btrfs_remove_free_space_inode
 * handles lookup, otherwise it takes ownership and iputs the inode.
 * Don't reuse an inode pointer after passing it into this function.
 */
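/*
 * Illustrative call patterns (a sketch, not part of the original code):
 *
 *	btrfs_remove_free_space_inode(trans, NULL, block_group);
 *		- let the function look the inode up itself
 *	btrfs_remove_free_space_inode(trans, inode, block_group);
 *		- hand over the reference held in 'inode'
 */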
int btrfs_remove_free_space_inode(struct btrfs_trans_handle *trans,
				  struct inode *inode,
				  struct btrfs_block_group *block_group)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!inode)
		inode = lookup_free_space_inode(block_group, path);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) != -ENOENT)
			ret = PTR_ERR(inode);
		goto out;
	}
	ret = btrfs_orphan_add(trans, BTRFS_I(inode));
	if (ret) {
		btrfs_add_delayed_iput(inode);
		goto out;
	}
	clear_nlink(inode);
	/* One for the block groups ref */
	spin_lock(&block_group->lock);
	if (test_and_clear_bit(BLOCK_GROUP_FLAG_IREF, &block_group->runtime_flags)) {
		block_group->inode = NULL;
		spin_unlock(&block_group->lock);
		iput(inode);
	} else {
		spin_unlock(&block_group->lock);
	}
	/* One for the lookup ref */
	btrfs_add_delayed_iput(inode);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.type = 0;
	key.offset = block_group->start;
	ret = btrfs_search_slot(trans, trans->fs_info->tree_root, &key, path,
				-1, 1);
	if (ret) {
		if (ret > 0)
			ret = 0;
		goto out;
	}
	ret = btrfs_del_item(trans, trans->fs_info->tree_root, path);
out:
	btrfs_free_path(path);
	return ret;
}

int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *rsv)
{
	u64 needed_bytes;
	int ret;

	/* 1 for slack space, 1 for updating the inode */
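	/*
	 * As a rough worked example (assuming the usual definitions of the
	 * btrfs_calc_*_size() helpers and the default 16KiB nodesize): the
	 * insert reservation is 16K * BTRFS_MAX_LEVEL * 2 = 256KiB and the
	 * update reservation is 16K * BTRFS_MAX_LEVEL = 128KiB, so
	 * needed_bytes comes to roughly 384KiB.
	 */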
	needed_bytes = btrfs_calc_insert_metadata_size(fs_info, 1) +
		       btrfs_calc_metadata_size(fs_info, 1);

	spin_lock(&rsv->lock);
	if (rsv->reserved < needed_bytes)
		ret = -ENOSPC;
	else
		ret = 0;
	spin_unlock(&rsv->lock);
	return ret;
}

int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
				    struct btrfs_block_group *block_group,
				    struct inode *vfs_inode)
{
	struct btrfs_truncate_control control = {
		.inode = BTRFS_I(vfs_inode),
		.new_size = 0,
		.ino = btrfs_ino(BTRFS_I(vfs_inode)),
		.min_type = BTRFS_EXTENT_DATA_KEY,
		.clear_extent_range = true,
	};
	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
	struct btrfs_root *root = inode->root;
	struct extent_state *cached_state = NULL;
	int ret = 0;
	bool locked = false;

	if (block_group) {
		struct btrfs_path *path = btrfs_alloc_path();

		if (!path) {
			ret = -ENOMEM;
			goto fail;
		}
		locked = true;
		mutex_lock(&trans->transaction->cache_write_mutex);
		if (!list_empty(&block_group->io_list)) {
			list_del_init(&block_group->io_list);

			btrfs_wait_cache_io(trans, block_group, path);
			btrfs_put_block_group(block_group);
		}

		/*
		 * Now that we've truncated the cache away, it's no longer
		 * set up or written.
		 */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
	}

	btrfs_i_size_write(inode, 0);
	truncate_pagecache(vfs_inode, 0);

	lock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
	btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);

	/*
	 * We skip the throttling logic for free space cache inodes, so we don't
	 * need to check for -EAGAIN.
	 */
	ret = btrfs_truncate_inode_items(trans, root, &control);

	inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
	btrfs_inode_safe_disk_i_size_write(inode, control.last_size);

	unlock_extent(&inode->io_tree, 0, (u64)-1, &cached_state);
	if (ret)
		goto fail;

	ret = btrfs_update_inode(trans, root, inode);

fail:
	if (locked)
		mutex_unlock(&trans->transaction->cache_write_mutex);
	if (ret)
		btrfs_abort_transaction(trans, ret);

	return ret;
}

static void readahead_cache(struct inode *inode)
{
	struct file_ra_state ra;
	unsigned long last_index;

	file_ra_state_init(&ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, &ra, NULL, 0, last_index);
}

static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode,
		       int write)
{
	int num_pages;

	num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	/* Make sure we can fit our crcs and generation into the first page */
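	/*
	 * With 4KiB pages this caps a writable cache file at
	 * (4096 - 8) / 4 = 1022 pages, i.e. just under 4MiB (a derived
	 * bound, assuming 4KiB pages).
	 */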
	if (write && (num_pages * sizeof(u32) + sizeof(u64)) > PAGE_SIZE)
		return -ENOSPC;

	memset(io_ctl, 0, sizeof(struct btrfs_io_ctl));

	io_ctl->pages = kcalloc(num_pages, sizeof(struct page *), GFP_NOFS);
	if (!io_ctl->pages)
		return -ENOMEM;

	io_ctl->num_pages = num_pages;
	io_ctl->fs_info = btrfs_sb(inode->i_sb);
	io_ctl->inode = inode;

	return 0;
}
ALLOW_ERROR_INJECTION(io_ctl_init, ERRNO);

static void io_ctl_free(struct btrfs_io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
	io_ctl->pages = NULL;
}

static void io_ctl_unmap_page(struct btrfs_io_ctl *io_ctl)
{
	if (io_ctl->cur) {
		io_ctl->cur = NULL;
		io_ctl->orig = NULL;
	}
}

static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear)
{
	ASSERT(io_ctl->index < io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = page_address(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_SIZE;
	if (clear)
		clear_page(io_ctl->cur);
}

static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl)
{
	int i;

	io_ctl_unmap_page(io_ctl);

	for (i = 0; i < io_ctl->num_pages; i++) {
		if (io_ctl->pages[i]) {
			btrfs_page_clear_checked(io_ctl->fs_info,
					io_ctl->pages[i],
					page_offset(io_ctl->pages[i]),
					PAGE_SIZE);
			unlock_page(io_ctl->pages[i]);
			put_page(io_ctl->pages[i]);
		}
	}
}

static int io_ctl_prepare_pages(struct btrfs_io_ctl *io_ctl, bool uptodate)
{
	struct page *page;
	struct inode *inode = io_ctl->inode;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int i;

	for (i = 0; i < io_ctl->num_pages; i++) {
		int ret;

		page = find_or_create_page(inode->i_mapping, i, mask);
		if (!page) {
			io_ctl_drop_pages(io_ctl);
			return -ENOMEM;
		}

		ret = set_page_extent_mapped(page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			io_ctl_drop_pages(io_ctl);
			return ret;
		}

		io_ctl->pages[i] = page;
		if (uptodate && !PageUptodate(page)) {
			btrfs_read_folio(NULL, page_folio(page));
			lock_page(page);
			if (page->mapping != inode->i_mapping) {
				btrfs_err(BTRFS_I(inode)->root->fs_info,
					  "free space cache page truncated");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
			if (!PageUptodate(page)) {
				btrfs_err(BTRFS_I(inode)->root->fs_info,
					  "error reading free space cache");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
		}
	}

	for (i = 0; i < io_ctl->num_pages; i++)
		clear_page_dirty_for_io(io_ctl->pages[i]);

	return 0;
}

static void io_ctl_set_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	io_ctl_map_page(io_ctl, 1);

	/*
	 * Skip the csum areas.  If we don't check crcs then we just have a
	 * 64bit chunk at the front of the first page.
	 */
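	/*
	 * Sketch of the resulting first-page layout (derived from the code
	 * here, not a separate on-disk format description):
	 *
	 *	[u32 crc, one per page][u64 generation][entries ...]
	 */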
	io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
	io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);

	put_unaligned_le64(generation, io_ctl->cur);
	io_ctl->cur += sizeof(u64);
}

static int io_ctl_check_generation(struct btrfs_io_ctl *io_ctl, u64 generation)
{
	u64 cache_gen;

	/*
	 * Skip the crc area.  If we don't check crcs then we just have a 64bit
	 * chunk at the front of the first page.
	 */
	io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
	io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);

	cache_gen = get_unaligned_le64(io_ctl->cur);
	if (cache_gen != generation) {
		btrfs_err_rl(io_ctl->fs_info,
			"space cache generation (%llu) does not match inode (%llu)",
			cache_gen, generation);
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}
	io_ctl->cur += sizeof(u64);
	return 0;
}

static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
	btrfs_crc32c_final(crc, (u8 *)&crc);
	io_ctl_unmap_page(io_ctl);
	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	*tmp = crc;
}

static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index)
{
	u32 *tmp, val;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	tmp = page_address(io_ctl->pages[0]);
	tmp += index;
	val = *tmp;

	io_ctl_map_page(io_ctl, 0);
	crc = btrfs_crc32c(crc, io_ctl->orig + offset, PAGE_SIZE - offset);
	btrfs_crc32c_final(crc, (u8 *)&crc);
	if (val != crc) {
		btrfs_err_rl(io_ctl->fs_info,
			"csum mismatch on free space cache");
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}

	return 0;
}

static int io_ctl_add_entry(struct btrfs_io_ctl *io_ctl, u64 offset, u64 bytes,
			    void *bitmap)
{
	struct btrfs_free_space_entry *entry;

	if (!io_ctl->cur)
		return -ENOSPC;

	entry = io_ctl->cur;
	put_unaligned_le64(offset, &entry->offset);
	put_unaligned_le64(bytes, &entry->bytes);
	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
		BTRFS_FREE_SPACE_EXTENT;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_set_crc(io_ctl, io_ctl->index - 1);

	/* No more pages to map */
	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	/* map the next page */
	io_ctl_map_page(io_ctl, 1);
	return 0;
}

static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap)
{
	if (!io_ctl->cur)
		return -ENOSPC;

	/*
	 * If we aren't at the start of the current page, unmap this one and
	 * map the next one if there is any left.
	 */
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
		if (io_ctl->index >= io_ctl->num_pages)
			return -ENOSPC;
		io_ctl_map_page(io_ctl, 0);
	}

	copy_page(io_ctl->cur, bitmap);
	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
	return 0;
}

static void io_ctl_zero_remaining_pages(struct btrfs_io_ctl *io_ctl)
{
	/*
	 * If we're not on the boundary we know we've modified the page and we
	 * need to crc the page.
	 */
	if (io_ctl->cur != io_ctl->orig)
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	else
		io_ctl_unmap_page(io_ctl);

	while (io_ctl->index < io_ctl->num_pages) {
		io_ctl_map_page(io_ctl, 1);
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	}
}

static int io_ctl_read_entry(struct btrfs_io_ctl *io_ctl,
			     struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;
	int ret;

	if (!io_ctl->cur) {
		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
		if (ret)
			return ret;
	}

	e = io_ctl->cur;
	entry->offset = get_unaligned_le64(&e->offset);
	entry->bytes = get_unaligned_le64(&e->bytes);
	*type = e->type;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_unmap_page(io_ctl);

	return 0;
}

static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space *entry)
{
	int ret;

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
	if (ret)
		return ret;

	copy_page(entry->bitmap, io_ctl->cur);
	io_ctl_unmap_page(io_ctl);

	return 0;
}

static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group *block_group = ctl->block_group;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->length;
	u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
	u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	max_bitmaps = max_t(u64, max_bitmaps, 1);

	if (ctl->total_bitmaps > max_bitmaps)
		btrfs_err(block_group->fs_info,
"invalid free space control: bg start=%llu len=%llu total_bitmaps=%u unit=%u max_bitmaps=%llu bytes_per_bg=%llu",
			  block_group->start, block_group->length,
			  ctl->total_bitmaps, ctl->unit, max_bitmaps,
			  bytes_per_bg);
	ASSERT(ctl->total_bitmaps <= max_bitmaps);

	/*
	 * We are trying to keep the total amount of memory used per 1GiB of
	 * space to be MAX_CACHE_BYTES_PER_GIG.  However, with a reclamation
	 * mechanism of pulling extents >= FORCE_EXTENT_THRESHOLD out of
	 * bitmaps, we may end up using more memory than this.
	 */
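	/*
	 * E.g. a 4GiB block group is allowed about 4 * 64KiB = 256KiB of
	 * cached entries here (a worked example of the math below).
	 */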
	if (size < SZ_1G)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG * div_u64(size, SZ_1G);

	bitmap_bytes = ctl->total_bitmaps * ctl->unit;

	/*
	 * We want the extent entry threshold to always be at most half of the
	 * max bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, max_bytes >> 1);

	ctl->extents_thresh =
		div_u64(extent_bytes, sizeof(struct btrfs_free_space));
}

static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_path *path, u64 offset)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct btrfs_io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_free_space *e, *n;
	LIST_HEAD(bitmaps);
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u8 type;
	int ret = 0;

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		return 0;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return 0;
	else if (ret > 0) {
		btrfs_release_path(path);
		return 0;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	if (!BTRFS_I(inode)->generation) {
		btrfs_info(fs_info,
			   "the free space cache file (%llu) is invalid, skip it",
			   offset);
		return 0;
	}

	if (BTRFS_I(inode)->generation != generation) {
		btrfs_err(fs_info,
			  "free space inode generation (%llu) did not match free space cache generation (%llu)",
			  BTRFS_I(inode)->generation, generation);
		return 0;
	}

	if (!num_entries)
		return 0;

	ret = io_ctl_init(&io_ctl, inode, 0);
	if (ret)
		return ret;

	readahead_cache(inode);

	ret = io_ctl_prepare_pages(&io_ctl, true);
	if (ret)
		goto out;

	ret = io_ctl_check_crc(&io_ctl, 0);
	if (ret)
		goto free_cache;

	ret = io_ctl_check_generation(&io_ctl, generation);
	if (ret)
		goto free_cache;

	while (num_entries) {
		e = kmem_cache_zalloc(btrfs_free_space_cachep,
				      GFP_NOFS);
		if (!e) {
			ret = -ENOMEM;
			goto free_cache;
		}

		ret = io_ctl_read_entry(&io_ctl, e, &type);
		if (ret) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (!e->bytes) {
			ret = -1;
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				btrfs_err(fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
		} else {
			ASSERT(num_bitmaps);
			num_bitmaps--;
			e->bitmap = kmem_cache_zalloc(
					btrfs_free_space_bitmap_cachep, GFP_NOFS);
			if (!e->bitmap) {
				ret = -ENOMEM;
				kmem_cache_free(
					btrfs_free_space_cachep, e);
				goto free_cache;
			}
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			if (ret) {
				spin_unlock(&ctl->tree_lock);
				btrfs_err(fs_info,
					"Duplicate entries in free space cache, dumping");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
			ctl->total_bitmaps++;
			recalculate_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
			list_add_tail(&e->list, &bitmaps);
		}

		num_entries--;
	}

	io_ctl_unmap_page(&io_ctl);

	/*
	 * The bitmap pages are stored after all the entries, in the same
	 * order in which the bitmap entries were added to the cache, so
	 * read them back in that order.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		ret = io_ctl_read_bitmap(&io_ctl, e);
		if (ret)
			goto free_cache;
	}

	io_ctl_drop_pages(&io_ctl);
	ret = 1;
out:
	io_ctl_free(&io_ctl);
	return ret;
free_cache:
	io_ctl_drop_pages(&io_ctl);

	spin_lock(&ctl->tree_lock);
	__btrfs_remove_free_space_cache(ctl);
	spin_unlock(&ctl->tree_lock);
	goto out;
}

static int copy_free_space_cache(struct btrfs_block_group *block_group,
				 struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *n;
	int ret = 0;

	while (!ret && (n = rb_first(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (!info->bitmap) {
			unlink_free_space(ctl, info, true);
			ret = btrfs_add_free_space(block_group, info->offset,
						   info->bytes);
			kmem_cache_free(btrfs_free_space_cachep, info);
		} else {
			u64 offset = info->offset;
			u64 bytes = ctl->unit;

			while (search_bitmap(ctl, info, &offset, &bytes,
					     false) == 0) {
				ret = btrfs_add_free_space(block_group, offset,
							   bytes);
				if (ret)
					break;
				bitmap_clear_bits(ctl, info, offset, bytes, true);
				offset = info->offset;
				bytes = ctl->unit;
			}
			free_bitmap(ctl, info);
		}
		cond_resched();
	}
	return ret;
}

static struct lock_class_key btrfs_free_space_inode_key;

int load_free_space_cache(struct btrfs_block_group *block_group)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space_ctl tmp_ctl = {};
	struct inode *inode;
	struct btrfs_path *path;
	int ret = 0;
	bool matched;
	u64 used = block_group->used;

	/*
	 * Because we could potentially discard our loaded free space, we want
	 * to load everything into a temporary structure first, and then if it's
	 * valid copy it all into the actual free space ctl.
	 */
	btrfs_init_free_space_ctl(block_group, &tmp_ctl);

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	/*
	 * We must pass a path with search_commit_root set to btrfs_iget in
	 * order to avoid a deadlock when allocating extents for the tree root.
	 *
	 * When we are COWing an extent buffer from the tree root, when looking
	 * for a free extent, at extent-tree.c:find_free_extent(), we can find
	 * a block group without its free space cache loaded. When we find one
	 * we must load its space cache which requires reading its free space
	 * cache's inode item from the root tree. If this inode item is located
	 * in the same leaf that we started COWing before, then we end up in
	 * deadlock on the extent buffer (trying to read lock it when we
	 * previously write locked it).
	 *
	 * It's safe to read the inode item using the commit root because
	 * block groups, once loaded, stay in memory forever (until they are
	 * removed) as well as their space caches once loaded. New block groups
	 * once created get their ->cached field set to BTRFS_CACHE_FINISHED so
	 * we will never try to read their inode item while the fs is mounted.
	 */
	inode = lookup_free_space_inode(block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* We may have converted the inode and made the cache invalid. */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
		goto out;
	}
	spin_unlock(&block_group->lock);

	/*
	 * Reinitialize the class of struct inode's mapping->invalidate_lock for
	 * free space inodes to prevent false positives related to locks for normal
	 * inodes.
	 */
	lockdep_set_class(&(&inode->i_data)->invalidate_lock,
			  &btrfs_free_space_inode_key);

	ret = __load_free_space_cache(fs_info->tree_root, inode, &tmp_ctl,
				      path, block_group->start);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	matched = (tmp_ctl.free_space == (block_group->length - used -
					  block_group->bytes_super));

	if (matched) {
		ret = copy_free_space_cache(block_group, &tmp_ctl);
		/*
		 * ret == 1 means we successfully loaded the free space cache,
		 * so we need to re-set it here.
		 */
		if (ret == 0)
			ret = 1;
	} else {
		/*
		 * We need to call the _locked variant so we don't try to update
		 * the discard counters.
		 */
		spin_lock(&tmp_ctl.tree_lock);
		__btrfs_remove_free_space_cache(&tmp_ctl);
		spin_unlock(&tmp_ctl.tree_lock);
		btrfs_warn(fs_info,
			   "block group %llu has wrong amount of free space",
			   block_group->start);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		btrfs_warn(fs_info,
			   "failed to load free space cache for block group %llu, rebuilding it now",
			   block_group->start);
	}

	spin_lock(&ctl->tree_lock);
	btrfs_discard_update_discardable(block_group);
	spin_unlock(&ctl->tree_lock);
	iput(inode);
	return ret;
}

static noinline_for_stack
int write_cache_extent_entries(struct btrfs_io_ctl *io_ctl,
			      struct btrfs_free_space_ctl *ctl,
			      struct btrfs_block_group *block_group,
			      int *entries, int *bitmaps,
			      struct list_head *bitmap_list)
{
	int ret;
	struct btrfs_free_cluster *cluster = NULL;
	struct btrfs_free_cluster *cluster_locked = NULL;
	struct rb_node *node = rb_first(&ctl->free_space_offset);
	struct btrfs_trim_range *trim_entry;

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
	}

	if (!node && cluster) {
		cluster_locked = cluster;
		spin_lock(&cluster_locked->lock);
		node = rb_first(&cluster->root);
		cluster = NULL;
	}

	/* Write out the extent entries */
	while (node) {
		struct btrfs_free_space *e;

		e = rb_entry(node, struct btrfs_free_space, offset_index);
		*entries += 1;

		ret = io_ctl_add_entry(io_ctl, e->offset, e->bytes,
				       e->bitmap);
		if (ret)
			goto fail;

		if (e->bitmap) {
			list_add_tail(&e->list, bitmap_list);
			*bitmaps += 1;
		}
		node = rb_next(node);
		if (!node && cluster) {
			node = rb_first(&cluster->root);
			cluster_locked = cluster;
			spin_lock(&cluster_locked->lock);
			cluster = NULL;
		}
	}
	if (cluster_locked) {
		spin_unlock(&cluster_locked->lock);
		cluster_locked = NULL;
	}

	/*
	 * Make sure we don't miss any range that was removed from our rbtree
	 * because trimming is running. Otherwise after a umount+mount (or crash
	 * after committing the transaction) we would leak free space and get
	 * an inconsistent free space cache report from fsck.
	 */
	list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) {
		ret = io_ctl_add_entry(io_ctl, trim_entry->start,
				       trim_entry->bytes, NULL);
		if (ret)
			goto fail;
		*entries += 1;
	}

	return 0;
fail:
	if (cluster_locked)
		spin_unlock(&cluster_locked->lock);
	return -ENOSPC;
}

static noinline_for_stack int
update_cache_item(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct inode *inode,
		  struct btrfs_path *path, u64 offset,
		  int entries, int bitmaps)
{
	struct btrfs_key key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DELALLOC, NULL);
		goto fail;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		ASSERT(path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
					 inode->i_size - 1, EXTENT_DELALLOC,
					 NULL);
			btrfs_release_path(path);
			goto fail;
		}
	}

	BTRFS_I(inode)->generation = trans->transid;
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;

fail:
	return -1;
}

static noinline_for_stack int write_pinned_extent_entries(
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group *block_group,
			    struct btrfs_io_ctl *io_ctl,
			    int *entries)
{
	u64 start, extent_start, extent_end, len;
	struct extent_io_tree *unpin = NULL;
	int ret;

	if (!block_group)
		return 0;

	/*
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space
	 *
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one
	 */
	unpin = &trans->transaction->pinned_extents;

	start = block_group->start;

	while (start < block_group->start + block_group->length) {
		ret = find_first_extent_bit(unpin, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY, NULL);
		if (ret)
			return 0;

		/* This pinned extent is out of our range */
		if (extent_start >= block_group->start + block_group->length)
			return 0;

		extent_start = max(extent_start, start);
		extent_end = min(block_group->start + block_group->length,
				 extent_end + 1);
		len = extent_end - extent_start;

		*entries += 1;
		ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL);
		if (ret)
			return -ENOSPC;

		start = extent_end;
	}

	return 0;
}

static noinline_for_stack int
write_bitmap_entries(struct btrfs_io_ctl *io_ctl, struct list_head *bitmap_list)
{
	struct btrfs_free_space *entry, *next;
	int ret;

	/* Write out the bitmaps */
	list_for_each_entry_safe(entry, next, bitmap_list, list) {
		ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
		if (ret)
			return -ENOSPC;
		list_del_init(&entry->list);
	}

	return 0;
}

static int flush_dirty_cache(struct inode *inode)
{
	int ret;

	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
	if (ret)
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DELALLOC, NULL);

	return ret;
}

static void noinline_for_stack
cleanup_bitmap_list(struct list_head *bitmap_list)
{
	struct btrfs_free_space *entry, *next;

	list_for_each_entry_safe(entry, next, bitmap_list, list)
		list_del_init(&entry->list);
}

static void noinline_for_stack
cleanup_write_cache_enospc(struct inode *inode,
			   struct btrfs_io_ctl *io_ctl,
			   struct extent_state **cached_state)
{
	io_ctl_drop_pages(io_ctl);
	unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
		      cached_state);
}

static int __btrfs_wait_cache_io(struct btrfs_root *root,
				 struct btrfs_trans_handle *trans,
				 struct btrfs_block_group *block_group,
				 struct btrfs_io_ctl *io_ctl,
				 struct btrfs_path *path, u64 offset)
{
	int ret;
	struct inode *inode = io_ctl->inode;

	if (!inode)
		return 0;

	/* Flush the dirty pages in the cache file. */
	ret = flush_dirty_cache(inode);
	if (ret)
		goto out;

	/* Update the cache item to tell everyone this cache file is valid. */
	ret = update_cache_item(trans, root, inode, path, offset,
				io_ctl->entries, io_ctl->bitmaps);
out:
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
		if (block_group)
			btrfs_debug(root->fs_info,
		  "failed to write free space cache for block group %llu error %d",
				  block_group->start, ret);
	}
	btrfs_update_inode(trans, root, BTRFS_I(inode));

	if (block_group) {
		/* the dirty list is protected by the dirty_bgs_lock */
		spin_lock(&trans->transaction->dirty_bgs_lock);

		/* the disk_cache_state is protected by the block group lock */
		spin_lock(&block_group->lock);

		/*
		 * only mark this as written if we didn't get put back on
		 * the dirty list while waiting for IO.  Otherwise our
		 * cache state won't be right, and we won't get written again
		 */
		if (!ret && list_empty(&block_group->dirty_list))
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
		else if (ret)
			block_group->disk_cache_state = BTRFS_DC_ERROR;

		spin_unlock(&block_group->lock);
		spin_unlock(&trans->transaction->dirty_bgs_lock);
		io_ctl->inode = NULL;
		iput(inode);
	}

	return ret;
}

int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
			struct btrfs_block_group *block_group,
			struct btrfs_path *path)
{
	return __btrfs_wait_cache_io(block_group->fs_info->tree_root, trans,
				     block_group, &block_group->io_ctl,
				     path, block_group->start);
}

/**
 * Write out cached info to an inode
 *
 * @root:        root the inode belongs to
 * @inode:       freespace inode we are writing out
 * @ctl:         free space cache we are going to write out
 * @block_group: block_group for this cache if it belongs to a block_group
 * @io_ctl:      holds context for the io
 * @trans:       the trans handle
 *
 * This function writes out a free space cache struct to disk for quick recovery
 * on mount.  This will return 0 if it was successful in writing the cache out,
 * or an errno if it was not.
 */
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
				   struct btrfs_free_space_ctl *ctl,
				   struct btrfs_block_group *block_group,
				   struct btrfs_io_ctl *io_ctl,
				   struct btrfs_trans_handle *trans)
{
	struct extent_state *cached_state = NULL;
	LIST_HEAD(bitmap_list);
	int entries = 0;
	int bitmaps = 0;
	int ret;
	int must_iput = 0;

	if (!i_size_read(inode))
		return -EIO;

	WARN_ON(io_ctl->pages);
	ret = io_ctl_init(io_ctl, inode, 1);
	if (ret)
		return ret;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
		down_write(&block_group->data_rwsem);
		spin_lock(&block_group->lock);
		if (block_group->delalloc_bytes) {
			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
			spin_unlock(&block_group->lock);
			up_write(&block_group->data_rwsem);
			BTRFS_I(inode)->generation = 0;
			ret = 0;
			must_iput = 1;
			goto out;
		}
		spin_unlock(&block_group->lock);
	}

	/* Lock all pages first so we can lock the extent safely. */
	ret = io_ctl_prepare_pages(io_ctl, false);
	if (ret)
		goto out_unlock;

	lock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
		    &cached_state);

	io_ctl_set_generation(io_ctl, trans->transid);

	mutex_lock(&ctl->cache_writeout_mutex);
	/* Write out the extent entries in the free space cache */
	spin_lock(&ctl->tree_lock);
	ret = write_cache_extent_entries(io_ctl, ctl,
					 block_group, &entries, &bitmaps,
					 &bitmap_list);
	if (ret)
		goto out_nospc_locked;

	/*
	 * Some spaces that are freed in the current transaction are pinned,
	 * they will be added into free space cache after the transaction is
	 * committed, we shouldn't lose them.
	 *
	 * If this changes while we are working we'll get added back to
	 * the dirty list and redo it.  No locking needed
	 */
	ret = write_pinned_extent_entries(trans, block_group, io_ctl, &entries);
	if (ret)
		goto out_nospc_locked;

	/*
	 * At last, we write out all the bitmaps and keep cache_writeout_mutex
	 * locked while doing it because a concurrent trim can be manipulating
	 * or freeing the bitmap.
	 */
	ret = write_bitmap_entries(io_ctl, &bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);
	if (ret)
		goto out_nospc;

	/* Zero out the rest of the pages just to make sure */
	io_ctl_zero_remaining_pages(io_ctl);

	/* Everything is written out, now we dirty the pages in the file. */
	ret = btrfs_dirty_pages(BTRFS_I(inode), io_ctl->pages,
				io_ctl->num_pages, 0, i_size_read(inode),
				&cached_state, false);
	if (ret)
		goto out_nospc;

	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);
	/*
	 * Release the pages and unlock the extent; we will flush
	 * them out later.
	 */
	io_ctl_drop_pages(io_ctl);
	io_ctl_free(io_ctl);

	unlock_extent(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
		      &cached_state);

	/*
	 * At this point the pages are under IO and we're happy.
	 * The caller is responsible for waiting on them and updating
	 * the cache and the inode.
	 */
	io_ctl->entries = entries;
	io_ctl->bitmaps = bitmaps;

	ret = btrfs_fdatawrite_range(inode, 0, (u64)-1);
	if (ret)
		goto out;

	return 0;

out_nospc_locked:
	cleanup_bitmap_list(&bitmap_list);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);

out_nospc:
	cleanup_write_cache_enospc(inode, io_ctl, &cached_state);

out_unlock:
	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
		up_write(&block_group->data_rwsem);

out:
	io_ctl->inode = NULL;
	io_ctl_free(io_ctl);
	if (ret) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, BTRFS_I(inode));
	if (must_iput)
		iput(inode);
	return ret;
}

int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
			  struct btrfs_block_group *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(fs_info->tree_root, inode, ctl,
				      block_group, &block_group->io_ctl, trans);
	if (ret) {
		btrfs_debug(fs_info,
		  "failed to write free space cache for block group %llu error %d",
			  block_group->start, ret);
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);

		block_group->io_ctl.inode = NULL;
		iput(inode);
	}

	/*
	 * if ret == 0 the caller is expected to call btrfs_wait_cache_io
	 * to wait for IO and put the inode
	 */

	return ret;
}

static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	ASSERT(offset >= bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}

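/*
 * A worked example (assuming 4KiB pages and a 4KiB unit, so each bitmap
 * spans 32768 * 4KiB = 128MiB): an offset of ctl->start + 200MiB rounds
 * down to a bitmap starting at ctl->start + 128MiB.
 */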
static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}

static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset.  If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap.  So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically.  If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
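			 *
			 * So for two entries at the same offset the extent
			 * entry always sorts before the bitmap entry;
			 * tree_search_offset() relies on this when it steps
			 * rb_next() past an extent entry in its bitmap_only
			 * case.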
			 */
			if (bitmap) {
				if (info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap) {
					WARN_ON_ONCE(1);
					return -EEXIST;
				}
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}

/*
 * This is a little subtle.  We *only* have ->max_extent_size set if we actually
 * searched through the bitmap and figured out the largest ->max_extent_size,
 * otherwise it's 0.  In the case that it's 0 we don't want to tell the
 * allocator the wrong thing, we want to use the actual real max_extent_size
 * we've found already if it's larger, or we want to use ->bytes.
 *
 * This matters because find_free_space() will skip entries whose ->bytes is
 * less than the required bytes.  So if we didn't search down this bitmap, we
 * may pick some previous entry that has a smaller ->max_extent_size than we
 * have.  For example, assume we have two entries, one that has
 * ->max_extent_size set to 4K and ->bytes set to 1M.  A second entry hasn't set
 * ->max_extent_size yet, has ->bytes set to 8K and it's contiguous.  We will
 * call into find_free_space(), and return with max_extent_size == 4K, because
 * that first bitmap entry had ->max_extent_size set, but the second one did
 * not.  If instead we returned 8K we'd come in searching for 8K, and find the
 * 8K contiguous range.
 *
 * Consider the other case, we have 2 8K chunks in that second entry and still
 * don't have ->max_extent_size set.  We'll return 16K, and the next time the
 * allocator comes in it'll fully search our second bitmap, and this time it'll
 * get an uptodate value of 8K as the maximum chunk size.  Then we'll get the
 * right allocation the next loop through.
 */
static inline u64 get_max_extent_size(const struct btrfs_free_space *entry)
{
	if (entry->bitmap && entry->max_extent_size)
		return entry->max_extent_size;
	return entry->bytes;
}

/*
 * We want the largest entry to be leftmost, so this is inverted from what you'd
 * normally expect.
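 *
 * Since rb_add_cached() maintains a cached pointer to the leftmost node, this
 * inverted ordering also makes the largest entry available in O(1) via
 * rb_first_cached().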
 */
static bool entry_less(struct rb_node *node, const struct rb_node *parent)
{
	const struct btrfs_free_space *entry, *exist;

	entry = rb_entry(node, struct btrfs_free_space, bytes_index);
	exist = rb_entry(parent, struct btrfs_free_space, bytes_index);
	return get_max_extent_size(exist) < get_max_extent_size(entry);
}

/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
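 *
 * For example (illustrative): with free extents [0, 4K) and [8K, 12K), a
 * fuzzy search for offset 5K returns the [8K, 12K) entry, while a non-fuzzy
 * search returns NULL.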
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry = NULL, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (n) {
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;

		entry = NULL;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = rb_prev(&entry->offset_index);
			if (n) {
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap &&
				    prev->offset + prev->bytes > offset)
					entry = prev;
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					 offset_index);
			ASSERT(entry->offset <= offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap &&
			    prev->offset + prev->bytes > offset)
				return prev;
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}
	}
	return entry;
}
1803
unlink_free_space(struct btrfs_free_space_ctl * ctl,struct btrfs_free_space * info,bool update_stat)1804 static inline void unlink_free_space(struct btrfs_free_space_ctl *ctl,
1805 struct btrfs_free_space *info,
1806 bool update_stat)
1807 {
1808 rb_erase(&info->offset_index, &ctl->free_space_offset);
1809 rb_erase_cached(&info->bytes_index, &ctl->free_space_bytes);
1810 ctl->free_extents--;
1811
1812 if (!info->bitmap && !btrfs_free_space_trimmed(info)) {
1813 ctl->discardable_extents[BTRFS_STAT_CURR]--;
1814 ctl->discardable_bytes[BTRFS_STAT_CURR] -= info->bytes;
1815 }
1816
1817 if (update_stat)
1818 ctl->free_space -= info->bytes;
1819 }
1820
1821 static int link_free_space(struct btrfs_free_space_ctl *ctl,
1822 struct btrfs_free_space *info)
1823 {
1824 int ret = 0;
1825
1826 ASSERT(info->bytes || info->bitmap);
1827 ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
1828 &info->offset_index, (info->bitmap != NULL));
1829 if (ret)
1830 return ret;
1831
1832 rb_add_cached(&info->bytes_index, &ctl->free_space_bytes, entry_less);
1833
1834 if (!info->bitmap && !btrfs_free_space_trimmed(info)) {
1835 ctl->discardable_extents[BTRFS_STAT_CURR]++;
1836 ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes;
1837 }
1838
1839 ctl->free_space += info->bytes;
1840 ctl->free_extents++;
1841 return ret;
1842 }
1843
1844 static void relink_bitmap_entry(struct btrfs_free_space_ctl *ctl,
1845 struct btrfs_free_space *info)
1846 {
1847 ASSERT(info->bitmap);
1848
1849 /*
1850 * If our entry is empty it's because we're on a cluster and we don't
1851 * want to re-link it into our ctl bytes index.
1852 */
1853 if (RB_EMPTY_NODE(&info->bytes_index))
1854 return;
1855
1856 rb_erase_cached(&info->bytes_index, &ctl->free_space_bytes);
1857 rb_add_cached(&info->bytes_index, &ctl->free_space_bytes, entry_less);
1858 }
1859
1860 static inline void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
1861 struct btrfs_free_space *info,
1862 u64 offset, u64 bytes, bool update_stat)
1863 {
1864 unsigned long start, count, end;
1865 int extent_delta = -1;
1866
1867 start = offset_to_bit(info->offset, ctl->unit, offset);
1868 count = bytes_to_bits(bytes, ctl->unit);
1869 end = start + count;
1870 ASSERT(end <= BITS_PER_BITMAP);
1871
1872 bitmap_clear(info->bitmap, start, count);
1873
1874 info->bytes -= bytes;
1875 if (info->max_extent_size > ctl->unit)
1876 info->max_extent_size = 0;
1877
1878 relink_bitmap_entry(ctl, info);
1879
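/*
 * Clearing a run of bits removes one free extent (extent_delta starts
 * at -1), but any set bit still bordering the cleared run splits an
 * extent and adds one back. E.g. clearing the middle of 111111 leaves
 * 11..11: two extents where there was one, a net delta of +1. The
 * mirror image of this accounting lives in bitmap_set_bits().
 */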
1880 if (start && test_bit(start - 1, info->bitmap))
1881 extent_delta++;
1882
1883 if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap))
1884 extent_delta++;
1885
1886 info->bitmap_extents += extent_delta;
1887 if (!btrfs_free_space_trimmed(info)) {
1888 ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta;
1889 ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes;
1890 }
1891
1892 if (update_stat)
1893 ctl->free_space -= bytes;
1894 }
1895
1896 static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
1897 struct btrfs_free_space *info, u64 offset,
1898 u64 bytes)
1899 {
1900 unsigned long start, count, end;
1901 int extent_delta = 1;
1902
1903 start = offset_to_bit(info->offset, ctl->unit, offset);
1904 count = bytes_to_bits(bytes, ctl->unit);
1905 end = start + count;
1906 ASSERT(end <= BITS_PER_BITMAP);
1907
1908 bitmap_set(info->bitmap, start, count);
1909
1910 /*
1911 * We set some bytes, so we no longer know what the max extent
1912 * size is.
1913 */
1914 info->max_extent_size = 0;
1915 info->bytes += bytes;
1916 ctl->free_space += bytes;
1917
1918 relink_bitmap_entry(ctl, info);
1919
1920 if (start && test_bit(start - 1, info->bitmap))
1921 extent_delta--;
1922
1923 if (end < BITS_PER_BITMAP && test_bit(end, info->bitmap))
1924 extent_delta--;
1925
1926 info->bitmap_extents += extent_delta;
1927 if (!btrfs_free_space_trimmed(info)) {
1928 ctl->discardable_extents[BTRFS_STAT_CURR] += extent_delta;
1929 ctl->discardable_bytes[BTRFS_STAT_CURR] += bytes;
1930 }
1931 }
1932
1933 /*
1934 * If we cannot find a suitable extent, we use *bytes to record the
1935 * size of the largest extent we did find.
1936 */
1937 static int search_bitmap(struct btrfs_free_space_ctl *ctl,
1938 struct btrfs_free_space *bitmap_info, u64 *offset,
1939 u64 *bytes, bool for_alloc)
1940 {
1941 unsigned long found_bits = 0;
1942 unsigned long max_bits = 0;
1943 unsigned long bits, i;
1944 unsigned long next_zero;
1945 unsigned long extent_bits;
1946
1947 /*
1948 * Skip searching the bitmap if we don't have a contiguous section that
1949 * is large enough for this allocation.
1950 */
1951 if (for_alloc &&
1952 bitmap_info->max_extent_size &&
1953 bitmap_info->max_extent_size < *bytes) {
1954 *bytes = bitmap_info->max_extent_size;
1955 return -1;
1956 }
1957
1958 i = offset_to_bit(bitmap_info->offset, ctl->unit,
1959 max_t(u64, *offset, bitmap_info->offset));
1960 bits = bytes_to_bits(*bytes, ctl->unit);
1961
1962 for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
1963 if (for_alloc && bits == 1) {
1964 found_bits = 1;
1965 break;
1966 }
1967 next_zero = find_next_zero_bit(bitmap_info->bitmap,
1968 BITS_PER_BITMAP, i);
1969 extent_bits = next_zero - i;
1970 if (extent_bits >= bits) {
1971 found_bits = extent_bits;
1972 break;
1973 } else if (extent_bits > max_bits) {
1974 max_bits = extent_bits;
1975 }
1976 i = next_zero;
1977 }
1978
1979 if (found_bits) {
1980 *offset = (u64)(i * ctl->unit) + bitmap_info->offset;
1981 *bytes = (u64)(found_bits) * ctl->unit;
1982 return 0;
1983 }
1984
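/*
 * No run of set bits was big enough. Report the largest run we did find
 * through *bytes and cache it in max_extent_size so the fast-path check
 * at the top can skip this bitmap for future allocations of this size.
 */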
1985 *bytes = (u64)(max_bits) * ctl->unit;
1986 bitmap_info->max_extent_size = *bytes;
1987 relink_bitmap_entry(ctl, bitmap_info);
1988 return -1;
1989 }
1990
1991 /* Cache the size of the max extent in bytes */
1992 static struct btrfs_free_space *
1993 find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
1994 unsigned long align, u64 *max_extent_size, bool use_bytes_index)
1995 {
1996 struct btrfs_free_space *entry;
1997 struct rb_node *node;
1998 u64 tmp;
1999 u64 align_off;
2000 int ret;
2001
2002 if (!ctl->free_space_offset.rb_node)
2003 goto out;
2004 again:
2005 if (use_bytes_index) {
2006 node = rb_first_cached(&ctl->free_space_bytes);
2007 } else {
2008 entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset),
2009 0, 1);
2010 if (!entry)
2011 goto out;
2012 node = &entry->offset_index;
2013 }
2014
2015 for (; node; node = rb_next(node)) {
2016 if (use_bytes_index)
2017 entry = rb_entry(node, struct btrfs_free_space,
2018 bytes_index);
2019 else
2020 entry = rb_entry(node, struct btrfs_free_space,
2021 offset_index);
2022
2023 /*
2024 * If we are using the bytes index then all subsequent entries
2025 * in this tree are going to be < bytes, so simply set the max
2026 * extent size and exit the loop.
2027 *
2028 * If we're using the offset index then we need to keep going
2029 * through the rest of the tree.
2030 */
2031 if (entry->bytes < *bytes) {
2032 *max_extent_size = max(get_max_extent_size(entry),
2033 *max_extent_size);
2034 if (use_bytes_index)
2035 break;
2036 continue;
2037 }
2038
2039 /* Make sure the space returned is big enough to match our
2040 * requested alignment.
2041 */
2042 if (*bytes >= align) {
2043 tmp = entry->offset - ctl->start + align - 1;
2044 tmp = div64_u64(tmp, align);
2045 tmp = tmp * align + ctl->start;
2046 align_off = tmp - entry->offset;
2047 } else {
2048 align_off = 0;
2049 tmp = entry->offset;
2050 }
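/*
 * Worked example: with ctl->start == 0, align == SZ_64K (0x10000) and
 * entry->offset == 0x11000, tmp rounds up to 0x20000 and align_off is
 * 0xf000 -- the gap that must also fit inside the entry, hence the
 * "*bytes + align_off" check below.
 */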
2051
2052 /*
2053 * We don't break here if we're using the bytes index because we
2054 * may have another entry that has the correct alignment that is
2055 * the right size, so we don't want to miss that possibility.
2056 * At worst this adds another loop through the logic, but if we
2057 * broke here we could prematurely ENOSPC.
2058 */
2059 if (entry->bytes < *bytes + align_off) {
2060 *max_extent_size = max(get_max_extent_size(entry),
2061 *max_extent_size);
2062 continue;
2063 }
2064
2065 if (entry->bitmap) {
2066 struct rb_node *old_next = rb_next(node);
2067 u64 size = *bytes;
2068
2069 ret = search_bitmap(ctl, entry, &tmp, &size, true);
2070 if (!ret) {
2071 *offset = tmp;
2072 *bytes = size;
2073 return entry;
2074 } else {
2075 *max_extent_size =
2076 max(get_max_extent_size(entry),
2077 *max_extent_size);
2078 }
2079
2080 /*
2081 * The bitmap may have gotten re-arranged in the space
2082 * index here because the max_extent_size may have been
2083 * updated. Start from the beginning again if this
2084 * happened.
2085 */
2086 if (use_bytes_index && old_next != rb_next(node))
2087 goto again;
2088 continue;
2089 }
2090
2091 *offset = tmp;
2092 *bytes = entry->bytes - align_off;
2093 return entry;
2094 }
2095 out:
2096 return NULL;
2097 }
2098
2099 static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
2100 struct btrfs_free_space *info, u64 offset)
2101 {
2102 info->offset = offset_to_bitmap(ctl, offset);
2103 info->bytes = 0;
2104 info->bitmap_extents = 0;
2105 INIT_LIST_HEAD(&info->list);
2106 link_free_space(ctl, info);
2107 ctl->total_bitmaps++;
2108 recalculate_thresholds(ctl);
2109 }
2110
2111 static void free_bitmap(struct btrfs_free_space_ctl *ctl,
2112 struct btrfs_free_space *bitmap_info)
2113 {
2114 /*
2115 * Normally when this is called, the bitmap is completely empty. However,
2116 * if we are blowing up the free space cache for one reason or another
2117 * via __btrfs_remove_free_space_cache(), then it may not be empty and
2118 * we would leave stats on the table.
2119 */
2120 if (bitmap_info->bytes && !btrfs_free_space_trimmed(bitmap_info)) {
2121 ctl->discardable_extents[BTRFS_STAT_CURR] -=
2122 bitmap_info->bitmap_extents;
2123 ctl->discardable_bytes[BTRFS_STAT_CURR] -= bitmap_info->bytes;
2125 }
2126 unlink_free_space(ctl, bitmap_info, true);
2127 kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap);
2128 kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
2129 ctl->total_bitmaps--;
2130 recalculate_thresholds(ctl);
2131 }
2132
2133 static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
2134 struct btrfs_free_space *bitmap_info,
2135 u64 *offset, u64 *bytes)
2136 {
2137 u64 end;
2138 u64 search_start, search_bytes;
2139 int ret;
2140
2141 again:
2142 end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
2143
2144 /*
2145 * We need to search for bits in this bitmap. We may only cover some
2146 * of the extent in this bitmap thanks to how we add space, so we need
2147 * to search for as much of it as we can and clear that amount, and then
2148 * go searching for the next bit.
2149 */
2150 search_start = *offset;
2151 search_bytes = ctl->unit;
2152 search_bytes = min(search_bytes, end - search_start + 1);
2153 ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes,
2154 false);
2155 if (ret < 0 || search_start != *offset)
2156 return -EINVAL;
2157
2158 /* We may have found more bits than what we need */
2159 search_bytes = min(search_bytes, *bytes);
2160
2161 /* Cannot clear past the end of the bitmap */
2162 search_bytes = min(search_bytes, end - search_start + 1);
2163
2164 bitmap_clear_bits(ctl, bitmap_info, search_start, search_bytes, true);
2165 *offset += search_bytes;
2166 *bytes -= search_bytes;
2167
2168 if (*bytes) {
2169 struct rb_node *next = rb_next(&bitmap_info->offset_index);
2170 if (!bitmap_info->bytes)
2171 free_bitmap(ctl, bitmap_info);
2172
2173 /*
2174 * no entry after this bitmap, but we still have bytes to
2175 * remove, so something has gone wrong.
2176 */
2177 if (!next)
2178 return -EINVAL;
2179
2180 bitmap_info = rb_entry(next, struct btrfs_free_space,
2181 offset_index);
2182
2183 /*
2184 * if the next entry isn't a bitmap we need to return to let the
2185 * extent stuff do its work.
2186 */
2187 if (!bitmap_info->bitmap)
2188 return -EAGAIN;
2189
2190 /*
2191 * Ok the next item is a bitmap, but it may not actually hold
2192 * the information for the rest of this free space stuff, so
2193 * look for it, and if we don't find it return so we can try
2194 * everything over again.
2195 */
2196 search_start = *offset;
2197 search_bytes = ctl->unit;
2198 ret = search_bitmap(ctl, bitmap_info, &search_start,
2199 &search_bytes, false);
2200 if (ret < 0 || search_start != *offset)
2201 return -EAGAIN;
2202
2203 goto again;
2204 } else if (!bitmap_info->bytes)
2205 free_bitmap(ctl, bitmap_info);
2206
2207 return 0;
2208 }
2209
2210 static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
2211 struct btrfs_free_space *info, u64 offset,
2212 u64 bytes, enum btrfs_trim_state trim_state)
2213 {
2214 u64 bytes_to_set = 0;
2215 u64 end;
2216
2217 /*
2218 * This is a tradeoff to make bitmap trim state minimal. We mark the
2219 * whole bitmap untrimmed if at any point we add untrimmed regions.
2220 */
2221 if (trim_state == BTRFS_TRIM_STATE_UNTRIMMED) {
2222 if (btrfs_free_space_trimmed(info)) {
2223 ctl->discardable_extents[BTRFS_STAT_CURR] +=
2224 info->bitmap_extents;
2225 ctl->discardable_bytes[BTRFS_STAT_CURR] += info->bytes;
2226 }
2227 info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
2228 }
2229
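/*
 * A single add may extend past the end of this bitmap. Set what fits and
 * return how much was consumed so the caller can loop on the remainder
 * with the next bitmap (see the "again" loop in insert_into_bitmap()).
 */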
2230 end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
2231
2232 bytes_to_set = min(end - offset, bytes);
2233
2234 bitmap_set_bits(ctl, info, offset, bytes_to_set);
2235
2236 return bytes_to_set;
2238 }
2239
2240 static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
2241 struct btrfs_free_space *info)
2242 {
2243 struct btrfs_block_group *block_group = ctl->block_group;
2244 struct btrfs_fs_info *fs_info = block_group->fs_info;
2245 bool forced = false;
2246
2247 #ifdef CONFIG_BTRFS_DEBUG
2248 if (btrfs_should_fragment_free_space(block_group))
2249 forced = true;
2250 #endif
2251
2252 /* This is a way to reclaim large regions from the bitmaps. */
2253 if (!forced && info->bytes >= FORCE_EXTENT_THRESHOLD)
2254 return false;
2255
2256 /*
2257 * If we are below the extents threshold then we can add this as an
2258 * extent, and don't have to deal with the bitmap
2259 */
2260 if (!forced && ctl->free_extents < ctl->extents_thresh) {
2261 /*
2262 * If this block group has some small extents we don't want to
2263 * use up all of our free slots in the cache with them, we want
2264 * to reserve those slots for larger extents. However, if we have
2265 * plenty of cache left then go ahead and add them; no sense in
2266 * adding the overhead of a bitmap if we don't have to.
2267 */
2268 if (info->bytes <= fs_info->sectorsize * 8) {
2269 if (ctl->free_extents * 3 <= ctl->extents_thresh)
2270 return false;
2271 } else {
2272 return false;
2273 }
2274 }
2275
2276 /*
2277 * The original block groups from mkfs can be really small, like 8
2278 * megabytes, so don't bother with a bitmap for those entries. However
2279 * some block groups can be smaller than what a bitmap would cover but
2280 * are still large enough that they could overflow the 32k memory limit,
2281 * so still allow those block groups to have a bitmap entry.
2283 */
2284 if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->length)
2285 return false;
2286
2287 return true;
2288 }
2289
2290 static const struct btrfs_free_space_op free_space_op = {
2291 .use_bitmap = use_bitmap,
2292 };
2293
2294 static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
2295 struct btrfs_free_space *info)
2296 {
2297 struct btrfs_free_space *bitmap_info;
2298 struct btrfs_block_group *block_group = NULL;
2299 int added = 0;
2300 u64 bytes, offset, bytes_added;
2301 enum btrfs_trim_state trim_state;
2302 int ret;
2303
2304 bytes = info->bytes;
2305 offset = info->offset;
2306 trim_state = info->trim_state;
2307
2308 if (!ctl->op->use_bitmap(ctl, info))
2309 return 0;
2310
2311 if (ctl->op == &free_space_op)
2312 block_group = ctl->block_group;
2313 again:
2314 /*
2315 * Since we link bitmaps right into the cluster we need to see if we
2316 * have a cluster here, and if so and it has our bitmap we need to add
2317 * the free space to that bitmap.
2318 */
2319 if (block_group && !list_empty(&block_group->cluster_list)) {
2320 struct btrfs_free_cluster *cluster;
2321 struct rb_node *node;
2322 struct btrfs_free_space *entry;
2323
2324 cluster = list_entry(block_group->cluster_list.next,
2325 struct btrfs_free_cluster,
2326 block_group_list);
2327 spin_lock(&cluster->lock);
2328 node = rb_first(&cluster->root);
2329 if (!node) {
2330 spin_unlock(&cluster->lock);
2331 goto no_cluster_bitmap;
2332 }
2333
2334 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2335 if (!entry->bitmap) {
2336 spin_unlock(&cluster->lock);
2337 goto no_cluster_bitmap;
2338 }
2339
2340 if (entry->offset == offset_to_bitmap(ctl, offset)) {
2341 bytes_added = add_bytes_to_bitmap(ctl, entry, offset,
2342 bytes, trim_state);
2343 bytes -= bytes_added;
2344 offset += bytes_added;
2345 }
2346 spin_unlock(&cluster->lock);
2347 if (!bytes) {
2348 ret = 1;
2349 goto out;
2350 }
2351 }
2352
2353 no_cluster_bitmap:
2354 bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
2355 1, 0);
2356 if (!bitmap_info) {
2357 ASSERT(added == 0);
2358 goto new_bitmap;
2359 }
2360
2361 bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes,
2362 trim_state);
2363 bytes -= bytes_added;
2364 offset += bytes_added;
2365 added = 0;
2366
2367 if (!bytes) {
2368 ret = 1;
2369 goto out;
2370 } else
2371 goto again;
2372
2373 new_bitmap:
2374 if (info && info->bitmap) {
2375 add_new_bitmap(ctl, info, offset);
2376 added = 1;
2377 info = NULL;
2378 goto again;
2379 } else {
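/*
 * Drop the tree lock across the allocations below: GFP_NOFS
 * allocations may sleep. "again" revalidates the tree once the
 * lock is retaken.
 */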
2380 spin_unlock(&ctl->tree_lock);
2381
2382 /* no pre-allocated info, allocate a new one */
2383 if (!info) {
2384 info = kmem_cache_zalloc(btrfs_free_space_cachep,
2385 GFP_NOFS);
2386 if (!info) {
2387 spin_lock(&ctl->tree_lock);
2388 ret = -ENOMEM;
2389 goto out;
2390 }
2391 }
2392
2393 /* allocate the bitmap */
2394 info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep,
2395 GFP_NOFS);
2396 info->trim_state = BTRFS_TRIM_STATE_TRIMMED;
2397 spin_lock(&ctl->tree_lock);
2398 if (!info->bitmap) {
2399 ret = -ENOMEM;
2400 goto out;
2401 }
2402 goto again;
2403 }
2404
2405 out:
2406 if (info) {
2407 if (info->bitmap)
2408 kmem_cache_free(btrfs_free_space_bitmap_cachep,
2409 info->bitmap);
2410 kmem_cache_free(btrfs_free_space_cachep, info);
2411 }
2412
2413 return ret;
2414 }
2415
2416 /*
2417 * Free space merging rules:
2418 * 1) Merge trimmed areas together
2419 * 2) Let untrimmed areas coalesce with trimmed areas
2420 * 3) Always pull neighboring regions from bitmaps
2421 *
2422 * The above rules are for when we merge free space based on btrfs_trim_state.
2423 * Rules 2 and 3 are subtle because they are suboptimal, but are done for the
2424 * same reason: to promote larger extent regions, which makes life easier for
2425 * find_free_extent(). Rule 2 enables coalescing on the common path of
2426 * returning free space from btrfs_finish_extent_commit(); without it, once
2427 * free space is trimmed, rule 1 alone would prevent aggregating a newly
2428 * trimmed region with untrimmed regions in the rb_tree. Rule 3 is purely to
2429 * obtain larger extents and provide find_free_extent() with the largest
2430 * extents possible, hoping for the reuse path.
2431 */
2432 static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
2433 struct btrfs_free_space *info, bool update_stat)
2434 {
2435 struct btrfs_free_space *left_info = NULL;
2436 struct btrfs_free_space *right_info;
2437 bool merged = false;
2438 u64 offset = info->offset;
2439 u64 bytes = info->bytes;
2440 const bool is_trimmed = btrfs_free_space_trimmed(info);
2441
2442 /*
2443 * First we want to see if there is free space adjacent to the range we
2444 * are adding. If there is, remove that struct and add a new one to
2445 * cover the entire range.
2446 */
2447 right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
2448 if (right_info && rb_prev(&right_info->offset_index))
2449 left_info = rb_entry(rb_prev(&right_info->offset_index),
2450 struct btrfs_free_space, offset_index);
2451 else if (!right_info)
2452 left_info = tree_search_offset(ctl, offset - 1, 0, 0);
2453
2454 /* See the free space merging rules comment above. */
2455 if (right_info && !right_info->bitmap &&
2456 (!is_trimmed || btrfs_free_space_trimmed(right_info))) {
2457 unlink_free_space(ctl, right_info, update_stat);
2458 info->bytes += right_info->bytes;
2459 kmem_cache_free(btrfs_free_space_cachep, right_info);
2460 merged = true;
2461 }
2462
2463 /* See the free space merging rules comment above. */
2464 if (left_info && !left_info->bitmap &&
2465 left_info->offset + left_info->bytes == offset &&
2466 (!is_trimmed || btrfs_free_space_trimmed(left_info))) {
2467 unlink_free_space(ctl, left_info, update_stat);
2468 info->offset = left_info->offset;
2469 info->bytes += left_info->bytes;
2470 kmem_cache_free(btrfs_free_space_cachep, left_info);
2471 merged = true;
2472 }
2473
2474 return merged;
2475 }
2476
2477 static bool steal_from_bitmap_to_end(struct btrfs_free_space_ctl *ctl,
2478 struct btrfs_free_space *info,
2479 bool update_stat)
2480 {
2481 struct btrfs_free_space *bitmap;
2482 unsigned long i;
2483 unsigned long j;
2484 const u64 end = info->offset + info->bytes;
2485 const u64 bitmap_offset = offset_to_bitmap(ctl, end);
2486 u64 bytes;
2487
2488 bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
2489 if (!bitmap)
2490 return false;
2491
2492 i = offset_to_bit(bitmap->offset, ctl->unit, end);
2493 j = find_next_zero_bit(bitmap->bitmap, BITS_PER_BITMAP, i);
2494 if (j == i)
2495 return false;
2496 bytes = (j - i) * ctl->unit;
2497 info->bytes += bytes;
2498
2499 /* See try_merge_free_space() comment. */
2500 if (!btrfs_free_space_trimmed(bitmap))
2501 info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
2502
2503 bitmap_clear_bits(ctl, bitmap, end, bytes, update_stat);
2504
2505 if (!bitmap->bytes)
2506 free_bitmap(ctl, bitmap);
2507
2508 return true;
2509 }
2510
2511 static bool steal_from_bitmap_to_front(struct btrfs_free_space_ctl *ctl,
2512 struct btrfs_free_space *info,
2513 bool update_stat)
2514 {
2515 struct btrfs_free_space *bitmap;
2516 u64 bitmap_offset;
2517 unsigned long i;
2518 unsigned long j;
2519 unsigned long prev_j;
2520 u64 bytes;
2521
2522 bitmap_offset = offset_to_bitmap(ctl, info->offset);
2523 /* If we're on a boundary, try the previous logical bitmap. */
2524 if (bitmap_offset == info->offset) {
2525 if (info->offset == 0)
2526 return false;
2527 bitmap_offset = offset_to_bitmap(ctl, info->offset - 1);
2528 }
2529
2530 bitmap = tree_search_offset(ctl, bitmap_offset, 1, 0);
2531 if (!bitmap)
2532 return false;
2533
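/*
 * Bit i is the last bit of the bitmap preceding info->offset. Scan for
 * the last clear bit at or before i; the run of set bits after it, up
 * to and including i, is free space immediately in front of @info that
 * we can steal.
 */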
2534 i = offset_to_bit(bitmap->offset, ctl->unit, info->offset) - 1;
2535 j = 0;
2536 prev_j = (unsigned long)-1;
2537 for_each_clear_bit_from(j, bitmap->bitmap, BITS_PER_BITMAP) {
2538 if (j > i)
2539 break;
2540 prev_j = j;
2541 }
2542 if (prev_j == i)
2543 return false;
2544
2545 if (prev_j == (unsigned long)-1)
2546 bytes = (i + 1) * ctl->unit;
2547 else
2548 bytes = (i - prev_j) * ctl->unit;
2549
2550 info->offset -= bytes;
2551 info->bytes += bytes;
2552
2553 /* See try_merge_free_space() comment. */
2554 if (!btrfs_free_space_trimmed(bitmap))
2555 info->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
2556
2557 bitmap_clear_bits(ctl, bitmap, info->offset, bytes, update_stat);
2558
2559 if (!bitmap->bytes)
2560 free_bitmap(ctl, bitmap);
2561
2562 return true;
2563 }
2564
2565 /*
2566 * We always prefer to allocate from extent entries, both for clustered and
2567 * non-clustered allocation requests. So when attempting to add a new extent
2568 * entry, try to see if there's adjacent free space in bitmap entries, and if
2569 * there is, migrate that space from the bitmaps to the extent.
2570 * Like this we get better chances of satisfying space allocation requests
2571 * because we attempt to satisfy them based on a single cache entry, and never
2572 * on 2 or more entries - even if the entries represent a contiguous free space
2573 * region (e.g. 1 extent entry + 1 bitmap entry starting where the extent entry
2574 * ends).
2575 */
2576 static void steal_from_bitmap(struct btrfs_free_space_ctl *ctl,
2577 struct btrfs_free_space *info,
2578 bool update_stat)
2579 {
2580 /*
2581 * Only work with disconnected entries, as we can change their offset,
2582 * and must be extent entries.
2583 */
2584 ASSERT(!info->bitmap);
2585 ASSERT(RB_EMPTY_NODE(&info->offset_index));
2586
2587 if (ctl->total_bitmaps > 0) {
2588 bool stole_end;
2589 bool stole_front = false;
2590
2591 stole_end = steal_from_bitmap_to_end(ctl, info, update_stat);
2592 if (ctl->total_bitmaps > 0)
2593 stole_front = steal_from_bitmap_to_front(ctl, info,
2594 update_stat);
2595
2596 if (stole_end || stole_front)
2597 try_merge_free_space(ctl, info, update_stat);
2598 }
2599 }
2600
2601 int __btrfs_add_free_space(struct btrfs_block_group *block_group,
2602 u64 offset, u64 bytes,
2603 enum btrfs_trim_state trim_state)
2604 {
2605 struct btrfs_fs_info *fs_info = block_group->fs_info;
2606 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2607 struct btrfs_free_space *info;
2608 int ret = 0;
2609 u64 filter_bytes = bytes;
2610
2611 ASSERT(!btrfs_is_zoned(fs_info));
2612
2613 info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
2614 if (!info)
2615 return -ENOMEM;
2616
2617 info->offset = offset;
2618 info->bytes = bytes;
2619 info->trim_state = trim_state;
2620 RB_CLEAR_NODE(&info->offset_index);
2621 RB_CLEAR_NODE(&info->bytes_index);
2622
2623 spin_lock(&ctl->tree_lock);
2624
2625 if (try_merge_free_space(ctl, info, true))
2626 goto link;
2627
2628 /*
2629 * There was no extent directly to the left or right of this new
2630 * extent, so we know we're going to have to allocate a new extent;
2631 * before we do that, see if we need to drop this into a bitmap.
2632 */
2633 ret = insert_into_bitmap(ctl, info);
2634 if (ret < 0) {
2635 goto out;
2636 } else if (ret) {
2637 ret = 0;
2638 goto out;
2639 }
2640 link:
2641 /*
2642 * Only steal free space from adjacent bitmaps if we're sure we're not
2643 * going to add the new free space to existing bitmap entries - because
2644 * that would mean unnecessary work that would be reverted. Therefore
2645 * attempt to steal space from bitmaps if we're adding an extent entry.
2646 */
2647 steal_from_bitmap(ctl, info, true);
2648
2649 filter_bytes = max(filter_bytes, info->bytes);
2650
2651 ret = link_free_space(ctl, info);
2652 if (ret)
2653 kmem_cache_free(btrfs_free_space_cachep, info);
2654 out:
2655 btrfs_discard_update_discardable(block_group);
2656 spin_unlock(&ctl->tree_lock);
2657
2658 if (ret) {
2659 btrfs_crit(fs_info, "unable to add free space: %d", ret);
2660 ASSERT(ret != -EEXIST);
2661 }
2662
2663 if (trim_state != BTRFS_TRIM_STATE_TRIMMED) {
2664 btrfs_discard_check_filter(block_group, filter_bytes);
2665 btrfs_discard_queue_work(&fs_info->discard_ctl, block_group);
2666 }
2667
2668 return ret;
2669 }
2670
2671 static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
2672 u64 bytenr, u64 size, bool used)
2673 {
2674 struct btrfs_space_info *sinfo = block_group->space_info;
2675 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2676 u64 offset = bytenr - block_group->start;
2677 u64 to_free, to_unusable;
2678 int bg_reclaim_threshold = 0;
2679 bool initial = (size == block_group->length);
2680 u64 reclaimable_unusable;
2681
2682 WARN_ON(!initial && offset + size > block_group->zone_capacity);
2683
2684 if (!initial)
2685 bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);
2686
2687 spin_lock(&ctl->tree_lock);
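/*
 * In a sequential zone, only the space at or past the allocation offset
 * can be allocated again; freed space below it stays unusable until the
 * zone is reset. Split @size into a usable and an unusable part
 * accordingly.
 */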
2688 if (!used)
2689 to_free = size;
2690 else if (initial)
2691 to_free = block_group->zone_capacity;
2692 else if (offset >= block_group->alloc_offset)
2693 to_free = size;
2694 else if (offset + size <= block_group->alloc_offset)
2695 to_free = 0;
2696 else
2697 to_free = offset + size - block_group->alloc_offset;
2698 to_unusable = size - to_free;
2699
2700 ctl->free_space += to_free;
2701 /*
2702 * If the block group is read-only, we should account freed space into
2703 * bytes_readonly.
2704 */
2705 if (!block_group->ro)
2706 block_group->zone_unusable += to_unusable;
2707 spin_unlock(&ctl->tree_lock);
2708 if (!used) {
2709 spin_lock(&block_group->lock);
2710 block_group->alloc_offset -= size;
2711 spin_unlock(&block_group->lock);
2712 }
2713
2714 reclaimable_unusable = block_group->zone_unusable -
2715 (block_group->length - block_group->zone_capacity);
2716 /* All of the region is now unusable. Mark it as unused and reclaim. */
2717 if (block_group->zone_unusable == block_group->length) {
2718 btrfs_mark_bg_unused(block_group);
2719 } else if (bg_reclaim_threshold &&
2720 reclaimable_unusable >=
2721 div_factor_fine(block_group->zone_capacity,
2722 bg_reclaim_threshold)) {
2723 btrfs_mark_bg_to_reclaim(block_group);
2724 }
2725
2726 return 0;
2727 }
2728
2729 int btrfs_add_free_space(struct btrfs_block_group *block_group,
2730 u64 bytenr, u64 size)
2731 {
2732 enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
2733
2734 if (btrfs_is_zoned(block_group->fs_info))
2735 return __btrfs_add_free_space_zoned(block_group, bytenr, size,
2736 true);
2737
2738 if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC))
2739 trim_state = BTRFS_TRIM_STATE_TRIMMED;
2740
2741 return __btrfs_add_free_space(block_group, bytenr, size, trim_state);
2742 }
2743
2744 int btrfs_add_free_space_unused(struct btrfs_block_group *block_group,
2745 u64 bytenr, u64 size)
2746 {
2747 if (btrfs_is_zoned(block_group->fs_info))
2748 return __btrfs_add_free_space_zoned(block_group, bytenr, size,
2749 false);
2750
2751 return btrfs_add_free_space(block_group, bytenr, size);
2752 }
2753
2754 /*
2755 * This is a subtle distinction because when adding free space back in general,
2756 * we want it to be added as untrimmed so async discard can process it. But
2757 * when adding it on loading of a block group, we want to consider it trimmed.
2758 */
2759 int btrfs_add_free_space_async_trimmed(struct btrfs_block_group *block_group,
2760 u64 bytenr, u64 size)
2761 {
2762 enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
2763
2764 if (btrfs_is_zoned(block_group->fs_info))
2765 return __btrfs_add_free_space_zoned(block_group, bytenr, size,
2766 true);
2767
2768 if (btrfs_test_opt(block_group->fs_info, DISCARD_SYNC) ||
2769 btrfs_test_opt(block_group->fs_info, DISCARD_ASYNC))
2770 trim_state = BTRFS_TRIM_STATE_TRIMMED;
2771
2772 return __btrfs_add_free_space(block_group, bytenr, size, trim_state);
2773 }
2774
2775 int btrfs_remove_free_space(struct btrfs_block_group *block_group,
2776 u64 offset, u64 bytes)
2777 {
2778 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2779 struct btrfs_free_space *info;
2780 int ret;
2781 bool re_search = false;
2782
2783 if (btrfs_is_zoned(block_group->fs_info)) {
2784 /*
2785 * This can happen with conventional zones when replaying the log.
2786 * Since the allocation info of tree-log nodes is not recorded
2787 * in the extent tree, calculate_alloc_pointer() fails to
2788 * advance the allocation pointer past the last allocated tree
2789 * log node blocks.
2790 *
2791 * This function is called from
2792 * btrfs_pin_extent_for_log_replay() when replaying the log.
2793 * Advance the pointer not to overwrite the tree-log nodes.
2794 */
2795 if (block_group->start + block_group->alloc_offset <
2796 offset + bytes) {
2797 block_group->alloc_offset =
2798 offset + bytes - block_group->start;
2799 }
2800 return 0;
2801 }
2802
2803 spin_lock(&ctl->tree_lock);
2804
2805 again:
2806 ret = 0;
2807 if (!bytes)
2808 goto out_lock;
2809
2810 info = tree_search_offset(ctl, offset, 0, 0);
2811 if (!info) {
2812 /*
2813 * Oops, we didn't find an extent that matched the space we
2814 * wanted to remove; look for a bitmap instead.
2815 */
2816 info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
2817 1, 0);
2818 if (!info) {
2819 /*
2820 * If we found a partial bit of our free space in a
2821 * bitmap but then couldn't find the other part this may
2822 * be a problem, so WARN about it.
2823 */
2824 WARN_ON(re_search);
2825 goto out_lock;
2826 }
2827 }
2828
2829 re_search = false;
2830 if (!info->bitmap) {
2831 unlink_free_space(ctl, info, true);
2832 if (offset == info->offset) {
2833 u64 to_free = min(bytes, info->bytes);
2834
2835 info->bytes -= to_free;
2836 info->offset += to_free;
2837 if (info->bytes) {
2838 ret = link_free_space(ctl, info);
2839 WARN_ON(ret);
2840 } else {
2841 kmem_cache_free(btrfs_free_space_cachep, info);
2842 }
2843
2844 offset += to_free;
2845 bytes -= to_free;
2846 goto again;
2847 } else {
2848 u64 old_end = info->bytes + info->offset;
2849
2850 info->bytes = offset - info->offset;
2851 ret = link_free_space(ctl, info);
2852 WARN_ON(ret);
2853 if (ret)
2854 goto out_lock;
2855
2856 /* Not enough bytes in this entry to satisfy us */
2857 if (old_end < offset + bytes) {
2858 bytes -= old_end - offset;
2859 offset = old_end;
2860 goto again;
2861 } else if (old_end == offset + bytes) {
2862 /* all done */
2863 goto out_lock;
2864 }
2865 spin_unlock(&ctl->tree_lock);
2866
2867 ret = __btrfs_add_free_space(block_group,
2868 offset + bytes,
2869 old_end - (offset + bytes),
2870 info->trim_state);
2871 WARN_ON(ret);
2872 goto out;
2873 }
2874 }
2875
2876 ret = remove_from_bitmap(ctl, info, &offset, &bytes);
2877 if (ret == -EAGAIN) {
2878 re_search = true;
2879 goto again;
2880 }
2881 out_lock:
2882 btrfs_discard_update_discardable(block_group);
2883 spin_unlock(&ctl->tree_lock);
2884 out:
2885 return ret;
2886 }
2887
2888 void btrfs_dump_free_space(struct btrfs_block_group *block_group,
2889 u64 bytes)
2890 {
2891 struct btrfs_fs_info *fs_info = block_group->fs_info;
2892 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2893 struct btrfs_free_space *info;
2894 struct rb_node *n;
2895 int count = 0;
2896
2897 /*
2898 * Zoned btrfs does not use the free space tree or clusters. Just print
2899 * out the free space after the allocation offset.
2900 */
2901 if (btrfs_is_zoned(fs_info)) {
2902 btrfs_info(fs_info, "free space %llu active %d",
2903 block_group->zone_capacity - block_group->alloc_offset,
2904 test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
2905 &block_group->runtime_flags));
2906 return;
2907 }
2908
2909 spin_lock(&ctl->tree_lock);
2910 for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
2911 info = rb_entry(n, struct btrfs_free_space, offset_index);
2912 if (info->bytes >= bytes && !block_group->ro)
2913 count++;
2914 btrfs_crit(fs_info, "entry offset %llu, bytes %llu, bitmap %s",
2915 info->offset, info->bytes,
2916 (info->bitmap) ? "yes" : "no");
2917 }
2918 spin_unlock(&ctl->tree_lock);
2919 btrfs_info(fs_info, "block group has cluster?: %s",
2920 list_empty(&block_group->cluster_list) ? "no" : "yes");
2921 btrfs_info(fs_info,
2922 "%d free space entries at or bigger than %llu bytes", count, bytes);
2923 }
2924
2925 void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group,
2926 struct btrfs_free_space_ctl *ctl)
2927 {
2928 struct btrfs_fs_info *fs_info = block_group->fs_info;
2929
2930 spin_lock_init(&ctl->tree_lock);
2931 ctl->unit = fs_info->sectorsize;
2932 ctl->start = block_group->start;
2933 ctl->block_group = block_group;
2934 ctl->op = &free_space_op;
2935 ctl->free_space_bytes = RB_ROOT_CACHED;
2936 INIT_LIST_HEAD(&ctl->trimming_ranges);
2937 mutex_init(&ctl->cache_writeout_mutex);
2938
2939 /*
2940 * we only want to have 32k of ram per block group for keeping
2941 * track of free space, and if we pass 1/2 of that we want to
2942 * start converting things over to using bitmaps
2943 */
2944 ctl->extents_thresh = (SZ_32K / 2) / sizeof(struct btrfs_free_space);
2945 }
2946
2947 /*
2948 * for a given cluster, put all of its extents back into the free
2949 * space cache. If the block group passed doesn't match the block group
2950 * pointed to by the cluster, someone else raced in and freed the
2951 * cluster already. In that case, we just return without changing anything
2952 */
2953 static void __btrfs_return_cluster_to_free_space(
2954 struct btrfs_block_group *block_group,
2955 struct btrfs_free_cluster *cluster)
2956 {
2957 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2958 struct btrfs_free_space *entry;
2959 struct rb_node *node;
2960
2961 spin_lock(&cluster->lock);
2962 if (cluster->block_group != block_group) {
2963 spin_unlock(&cluster->lock);
2964 return;
2965 }
2966
2967 cluster->block_group = NULL;
2968 cluster->window_start = 0;
2969 list_del_init(&cluster->block_group_list);
2970
2971 node = rb_first(&cluster->root);
2972 while (node) {
2973 bool bitmap;
2974
2975 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2976 node = rb_next(&entry->offset_index);
2977 rb_erase(&entry->offset_index, &cluster->root);
2978 RB_CLEAR_NODE(&entry->offset_index);
2979
2980 bitmap = (entry->bitmap != NULL);
2981 if (!bitmap) {
2982 /* Merging treats extents as if they were new */
2983 if (!btrfs_free_space_trimmed(entry)) {
2984 ctl->discardable_extents[BTRFS_STAT_CURR]--;
2985 ctl->discardable_bytes[BTRFS_STAT_CURR] -=
2986 entry->bytes;
2987 }
2988
2989 try_merge_free_space(ctl, entry, false);
2990 steal_from_bitmap(ctl, entry, false);
2991
2992 /* As we insert directly, update these statistics */
2993 if (!btrfs_free_space_trimmed(entry)) {
2994 ctl->discardable_extents[BTRFS_STAT_CURR]++;
2995 ctl->discardable_bytes[BTRFS_STAT_CURR] +=
2996 entry->bytes;
2997 }
2998 }
2999 tree_insert_offset(&ctl->free_space_offset,
3000 entry->offset, &entry->offset_index, bitmap);
3001 rb_add_cached(&entry->bytes_index, &ctl->free_space_bytes,
3002 entry_less);
3003 }
3004 cluster->root = RB_ROOT;
3005 spin_unlock(&cluster->lock);
3006 btrfs_put_block_group(block_group);
3007 }
3008
3009 void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group)
3010 {
3011 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3012 struct btrfs_free_cluster *cluster;
3013 struct list_head *head;
3014
3015 spin_lock(&ctl->tree_lock);
3016 while ((head = block_group->cluster_list.next) !=
3017 &block_group->cluster_list) {
3018 cluster = list_entry(head, struct btrfs_free_cluster,
3019 block_group_list);
3020
3021 WARN_ON(cluster->block_group != block_group);
3022 __btrfs_return_cluster_to_free_space(block_group, cluster);
3023
3024 cond_resched_lock(&ctl->tree_lock);
3025 }
3026 __btrfs_remove_free_space_cache(ctl);
3027 btrfs_discard_update_discardable(block_group);
3028 spin_unlock(&ctl->tree_lock);
3030 }
3031
3032 /**
3033 * btrfs_is_free_space_trimmed - see if everything is trimmed
3034 * @block_group: block_group of interest
3035 *
3036 * Walk @block_group's free space rb_tree to determine if everything is trimmed.
3037 */
3038 bool btrfs_is_free_space_trimmed(struct btrfs_block_group *block_group)
3039 {
3040 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3041 struct btrfs_free_space *info;
3042 struct rb_node *node;
3043 bool ret = true;
3044
3045 spin_lock(&ctl->tree_lock);
3046 node = rb_first(&ctl->free_space_offset);
3047
3048 while (node) {
3049 info = rb_entry(node, struct btrfs_free_space, offset_index);
3050
3051 if (!btrfs_free_space_trimmed(info)) {
3052 ret = false;
3053 break;
3054 }
3055
3056 node = rb_next(node);
3057 }
3058
3059 spin_unlock(&ctl->tree_lock);
3060 return ret;
3061 }
3062
3063 u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
3064 u64 offset, u64 bytes, u64 empty_size,
3065 u64 *max_extent_size)
3066 {
3067 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3068 struct btrfs_discard_ctl *discard_ctl =
3069 &block_group->fs_info->discard_ctl;
3070 struct btrfs_free_space *entry = NULL;
3071 u64 bytes_search = bytes + empty_size;
3072 u64 ret = 0;
3073 u64 align_gap = 0;
3074 u64 align_gap_len = 0;
3075 enum btrfs_trim_state align_gap_trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
3076 bool use_bytes_index = (offset == block_group->start);
3077
3078 ASSERT(!btrfs_is_zoned(block_group->fs_info));
3079
3080 spin_lock(&ctl->tree_lock);
3081 entry = find_free_space(ctl, &offset, &bytes_search,
3082 block_group->full_stripe_len, max_extent_size,
3083 use_bytes_index);
3084 if (!entry)
3085 goto out;
3086
3087 ret = offset;
3088 if (entry->bitmap) {
3089 bitmap_clear_bits(ctl, entry, offset, bytes, true);
3090
3091 if (!btrfs_free_space_trimmed(entry))
3092 atomic64_add(bytes, &discard_ctl->discard_bytes_saved);
3093
3094 if (!entry->bytes)
3095 free_bitmap(ctl, entry);
3096 } else {
3097 unlink_free_space(ctl, entry, true);
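/*
 * Stripe alignment may leave a gap at the front of this extent
 * entry; remember it so it can be re-added as free space once the
 * locks are dropped (see the __btrfs_add_free_space() call below).
 */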
3098 align_gap_len = offset - entry->offset;
3099 align_gap = entry->offset;
3100 align_gap_trim_state = entry->trim_state;
3101
3102 if (!btrfs_free_space_trimmed(entry))
3103 atomic64_add(bytes, &discard_ctl->discard_bytes_saved);
3104
3105 entry->offset = offset + bytes;
3106 WARN_ON(entry->bytes < bytes + align_gap_len);
3107
3108 entry->bytes -= bytes + align_gap_len;
3109 if (!entry->bytes)
3110 kmem_cache_free(btrfs_free_space_cachep, entry);
3111 else
3112 link_free_space(ctl, entry);
3113 }
3114 out:
3115 btrfs_discard_update_discardable(block_group);
3116 spin_unlock(&ctl->tree_lock);
3117
3118 if (align_gap_len)
3119 __btrfs_add_free_space(block_group, align_gap, align_gap_len,
3120 align_gap_trim_state);
3121 return ret;
3122 }
3123
3124 /*
3125 * given a cluster, put all of its extents back into the free space
3126 * cache. If a block group is passed, this function will only free
3127 * a cluster that belongs to the passed block group.
3128 *
3129 * Otherwise, it'll get a reference on the block group pointed to by the
3130 * cluster and remove the cluster from it.
3131 */
3132 void btrfs_return_cluster_to_free_space(
3133 struct btrfs_block_group *block_group,
3134 struct btrfs_free_cluster *cluster)
3135 {
3136 struct btrfs_free_space_ctl *ctl;
3137
3138 /* first, get a safe pointer to the block group */
3139 spin_lock(&cluster->lock);
3140 if (!block_group) {
3141 block_group = cluster->block_group;
3142 if (!block_group) {
3143 spin_unlock(&cluster->lock);
3144 return;
3145 }
3146 } else if (cluster->block_group != block_group) {
3147 /* someone else has already freed it, don't redo their work */
3148 spin_unlock(&cluster->lock);
3149 return;
3150 }
3151 btrfs_get_block_group(block_group);
3152 spin_unlock(&cluster->lock);
3153
3154 ctl = block_group->free_space_ctl;
3155
3156 /* now return any extents the cluster had on it */
3157 spin_lock(&ctl->tree_lock);
3158 __btrfs_return_cluster_to_free_space(block_group, cluster);
3159 spin_unlock(&ctl->tree_lock);
3160
3161 btrfs_discard_queue_work(&block_group->fs_info->discard_ctl, block_group);
3162
3163 /* finally drop our ref */
3164 btrfs_put_block_group(block_group);
3165 }
3166
3167 static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group *block_group,
3168 struct btrfs_free_cluster *cluster,
3169 struct btrfs_free_space *entry,
3170 u64 bytes, u64 min_start,
3171 u64 *max_extent_size)
3172 {
3173 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3174 int err;
3175 u64 search_start = min_start;
3176 u64 search_bytes = bytes;
3177 u64 ret = 0;
3181
3182 err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
3183 if (err) {
3184 *max_extent_size = max(get_max_extent_size(entry),
3185 *max_extent_size);
3186 return 0;
3187 }
3188
3189 ret = search_start;
3190 bitmap_clear_bits(ctl, entry, ret, bytes, false);
3191
3192 return ret;
3193 }
3194
3195 /*
3196 * given a cluster, try to allocate 'bytes' from it, returns 0
3197 * if it couldn't find anything suitably large, or a logical disk offset
3198 * if things worked out
3199 */
3200 u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group,
3201 struct btrfs_free_cluster *cluster, u64 bytes,
3202 u64 min_start, u64 *max_extent_size)
3203 {
3204 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3205 struct btrfs_discard_ctl *discard_ctl =
3206 &block_group->fs_info->discard_ctl;
3207 struct btrfs_free_space *entry = NULL;
3208 struct rb_node *node;
3209 u64 ret = 0;
3210
3211 ASSERT(!btrfs_is_zoned(block_group->fs_info));
3212
3213 spin_lock(&cluster->lock);
3214 if (bytes > cluster->max_size)
3215 goto out;
3216
3217 if (cluster->block_group != block_group)
3218 goto out;
3219
3220 node = rb_first(&cluster->root);
3221 if (!node)
3222 goto out;
3223
3224 entry = rb_entry(node, struct btrfs_free_space, offset_index);
3225 while (1) {
3226 if (entry->bytes < bytes)
3227 *max_extent_size = max(get_max_extent_size(entry),
3228 *max_extent_size);
3229
3230 if (entry->bytes < bytes ||
3231 (!entry->bitmap && entry->offset < min_start)) {
3232 node = rb_next(&entry->offset_index);
3233 if (!node)
3234 break;
3235 entry = rb_entry(node, struct btrfs_free_space,
3236 offset_index);
3237 continue;
3238 }
3239
3240 if (entry->bitmap) {
3241 ret = btrfs_alloc_from_bitmap(block_group,
3242 cluster, entry, bytes,
3243 cluster->window_start,
3244 max_extent_size);
3245 if (ret == 0) {
3246 node = rb_next(&entry->offset_index);
3247 if (!node)
3248 break;
3249 entry = rb_entry(node, struct btrfs_free_space,
3250 offset_index);
3251 continue;
3252 }
3253 cluster->window_start += bytes;
3254 } else {
3255 ret = entry->offset;
3256
3257 entry->offset += bytes;
3258 entry->bytes -= bytes;
3259 }
3260
3261 break;
3262 }
3263 out:
3264 spin_unlock(&cluster->lock);
3265
3266 if (!ret)
3267 return 0;
3268
3269 spin_lock(&ctl->tree_lock);
3270
3271 if (!btrfs_free_space_trimmed(entry))
3272 atomic64_add(bytes, &discard_ctl->discard_bytes_saved);
3273
3274 ctl->free_space -= bytes;
3275 if (!entry->bitmap && !btrfs_free_space_trimmed(entry))
3276 ctl->discardable_bytes[BTRFS_STAT_CURR] -= bytes;
3277
3278 spin_lock(&cluster->lock);
3279 if (entry->bytes == 0) {
3280 rb_erase(&entry->offset_index, &cluster->root);
3281 ctl->free_extents--;
3282 if (entry->bitmap) {
3283 kmem_cache_free(btrfs_free_space_bitmap_cachep,
3284 entry->bitmap);
3285 ctl->total_bitmaps--;
3286 recalculate_thresholds(ctl);
3287 } else if (!btrfs_free_space_trimmed(entry)) {
3288 ctl->discardable_extents[BTRFS_STAT_CURR]--;
3289 }
3290 kmem_cache_free(btrfs_free_space_cachep, entry);
3291 }
3292
3293 spin_unlock(&cluster->lock);
3294 spin_unlock(&ctl->tree_lock);
3295
3296 return ret;
3297 }
3298
3299 static int btrfs_bitmap_cluster(struct btrfs_block_group *block_group,
3300 struct btrfs_free_space *entry,
3301 struct btrfs_free_cluster *cluster,
3302 u64 offset, u64 bytes,
3303 u64 cont1_bytes, u64 min_bytes)
3304 {
3305 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3306 unsigned long next_zero;
3307 unsigned long i;
3308 unsigned long want_bits;
3309 unsigned long min_bits;
3310 unsigned long found_bits;
3311 unsigned long max_bits = 0;
3312 unsigned long start = 0;
3313 unsigned long total_found = 0;
3314 int ret;
3315
3316 i = offset_to_bit(entry->offset, ctl->unit,
3317 max_t(u64, offset, entry->offset));
3318 want_bits = bytes_to_bits(bytes, ctl->unit);
3319 min_bits = bytes_to_bits(min_bytes, ctl->unit);
3320
3321 /*
3322 * Don't bother looking for a cluster in this bitmap if it's heavily
3323 * fragmented.
3324 */
3325 if (entry->max_extent_size &&
3326 entry->max_extent_size < cont1_bytes)
3327 return -ENOSPC;
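/*
 * Grow the window one run of set bits at a time until it holds at least
 * want_bits in total and contains a single run of at least cont1_bytes.
 */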
3328 again:
3329 found_bits = 0;
3330 for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
3331 next_zero = find_next_zero_bit(entry->bitmap,
3332 BITS_PER_BITMAP, i);
3333 if (next_zero - i >= min_bits) {
3334 found_bits = next_zero - i;
3335 if (found_bits > max_bits)
3336 max_bits = found_bits;
3337 break;
3338 }
3339 if (next_zero - i > max_bits)
3340 max_bits = next_zero - i;
3341 i = next_zero;
3342 }
3343
3344 if (!found_bits) {
3345 entry->max_extent_size = (u64)max_bits * ctl->unit;
3346 return -ENOSPC;
3347 }
3348
3349 if (!total_found) {
3350 start = i;
3351 cluster->max_size = 0;
3352 }
3353
3354 total_found += found_bits;
3355
3356 if (cluster->max_size < found_bits * ctl->unit)
3357 cluster->max_size = found_bits * ctl->unit;
3358
3359 if (total_found < want_bits || cluster->max_size < cont1_bytes) {
3360 i = next_zero + 1;
3361 goto again;
3362 }
3363
3364 cluster->window_start = start * ctl->unit + entry->offset;
3365 rb_erase(&entry->offset_index, &ctl->free_space_offset);
3366 rb_erase_cached(&entry->bytes_index, &ctl->free_space_bytes);
3367
3368 /*
3369 * We need to know if we're currently on the normal space index when we
3370 * manipulate the bitmap so that we know we need to remove and re-insert
3371 * it into the space_index tree. Clear the bytes_index node here so the
3372 * bitmap manipulation helpers know not to mess with the space_index
3373 * until this bitmap entry is added back into the normal cache.
3374 */
3375 RB_CLEAR_NODE(&entry->bytes_index);
3376
3377 ret = tree_insert_offset(&cluster->root, entry->offset,
3378 &entry->offset_index, 1);
3379 ASSERT(!ret); /* -EEXIST; Logic error */
3380
3381 trace_btrfs_setup_cluster(block_group, cluster,
3382 total_found * ctl->unit, 1);
3383 return 0;
3384 }
3385
3386 /*
3387 * This searches the block group for just extents to fill the cluster with.
3388 * Try to find a cluster with at least bytes total bytes, at least one
3389 * extent of cont1_bytes, and other clusters of at least min_bytes.
3390 */
3391 static noinline int
3392 setup_cluster_no_bitmap(struct btrfs_block_group *block_group,
3393 struct btrfs_free_cluster *cluster,
3394 struct list_head *bitmaps, u64 offset, u64 bytes,
3395 u64 cont1_bytes, u64 min_bytes)
3396 {
3397 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3398 struct btrfs_free_space *first = NULL;
3399 struct btrfs_free_space *entry = NULL;
3400 struct btrfs_free_space *last;
3401 struct rb_node *node;
3402 u64 window_free;
3403 u64 max_extent;
3404 u64 total_size = 0;
3405
3406 entry = tree_search_offset(ctl, offset, 0, 1);
3407 if (!entry)
3408 return -ENOSPC;
3409
3410 /*
3411 * We don't want bitmaps, so just move along until we find a normal
3412 * extent entry.
3413 */
3414 while (entry->bitmap || entry->bytes < min_bytes) {
3415 if (entry->bitmap && list_empty(&entry->list))
3416 list_add_tail(&entry->list, bitmaps);
3417 node = rb_next(&entry->offset_index);
3418 if (!node)
3419 return -ENOSPC;
3420 entry = rb_entry(node, struct btrfs_free_space, offset_index);
3421 }
3422
3423 window_free = entry->bytes;
3424 max_extent = entry->bytes;
3425 first = entry;
3426 last = entry;
3427
3428 for (node = rb_next(&entry->offset_index); node;
3429 node = rb_next(&entry->offset_index)) {
3430 entry = rb_entry(node, struct btrfs_free_space, offset_index);
3431
3432 if (entry->bitmap) {
3433 if (list_empty(&entry->list))
3434 list_add_tail(&entry->list, bitmaps);
3435 continue;
3436 }
3437
3438 if (entry->bytes < min_bytes)
3439 continue;
3440
3441 last = entry;
3442 window_free += entry->bytes;
3443 if (entry->bytes > max_extent)
3444 max_extent = entry->bytes;
3445 }
3446
3447 if (window_free < bytes || max_extent < cont1_bytes)
3448 return -ENOSPC;
3449
3450 cluster->window_start = first->offset;
3451
3452 node = &first->offset_index;
3453
3454 /*
3455 * now we've found our entries, pull them out of the free space
3456 * cache and put them into the cluster rbtree
3457 */
3458 do {
3459 int ret;
3460
3461 entry = rb_entry(node, struct btrfs_free_space, offset_index);
3462 node = rb_next(&entry->offset_index);
3463 if (entry->bitmap || entry->bytes < min_bytes)
3464 continue;
3465
3466 rb_erase(&entry->offset_index, &ctl->free_space_offset);
3467 rb_erase_cached(&entry->bytes_index, &ctl->free_space_bytes);
3468 ret = tree_insert_offset(&cluster->root, entry->offset,
3469 &entry->offset_index, 0);
3470 total_size += entry->bytes;
3471 ASSERT(!ret); /* -EEXIST; Logic error */
3472 } while (node && entry != last);
3473
3474 cluster->max_size = max_extent;
3475 trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
3476 return 0;
3477 }
3478
3479 /*
3480 * This specifically looks for bitmaps that may work in the cluster, we assume
3481 * that we have already failed to find extents that will work.
3482 */
3483 static noinline int
3484 setup_cluster_bitmap(struct btrfs_block_group *block_group,
3485 struct btrfs_free_cluster *cluster,
3486 struct list_head *bitmaps, u64 offset, u64 bytes,
3487 u64 cont1_bytes, u64 min_bytes)
3488 {
3489 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
3490 struct btrfs_free_space *entry = NULL;
3491 int ret = -ENOSPC;
3492 u64 bitmap_offset = offset_to_bitmap(ctl, offset);
3493
3494 if (ctl->total_bitmaps == 0)
3495 return -ENOSPC;
3496
3497 /*
3498 * The bitmap that covers offset won't be in the list unless offset
3499 * is just its start offset.
3500 */
3501 if (!list_empty(bitmaps))
3502 entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
3503
3504 if (!entry || entry->offset != bitmap_offset) {
3505 entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
3506 if (entry && list_empty(&entry->list))
3507 list_add(&entry->list, bitmaps);
3508 }
3509
3510 list_for_each_entry(entry, bitmaps, list) {
3511 if (entry->bytes < bytes)
3512 continue;
3513 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
3514 bytes, cont1_bytes, min_bytes);
3515 if (!ret)
3516 return 0;
3517 }
3518
3519 /*
3520 * The bitmaps list has all the bitmaps that record free space
3521 * starting after offset, so no more search is required.
3522 */
3523 return -ENOSPC;
3524 }
3525
/*
 * Here we try to find a cluster of blocks in a block group. The goal
 * is to find at least bytes+empty_size.
 * We might not find them all in one contiguous area.
 *
 * Returns zero and sets up the cluster if things worked out, otherwise
 * it returns -ENOSPC.
 */
int btrfs_find_space_cluster(struct btrfs_block_group *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry, *tmp;
	LIST_HEAD(bitmaps);
	u64 min_bytes;
	u64 cont1_bytes;
	int ret;

	/*
	 * Choose the minimum extent size we'll require for this
	 * cluster. For SSD_SPREAD, don't allow any fragmentation.
	 * For metadata, allow allocations with smaller extents. For
	 * data, keep it dense.
	 */
	if (btrfs_test_opt(fs_info, SSD_SPREAD)) {
		cont1_bytes = bytes + empty_size;
		min_bytes = cont1_bytes;
	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		cont1_bytes = bytes;
		min_bytes = fs_info->sectorsize;
	} else {
		cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
		min_bytes = fs_info->sectorsize;
	}
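
	/*
	 * Worked example (illustrative numbers): a data block group asked for
	 * bytes = 1M with empty_size = 3M gets cont1_bytes =
	 * max(1M, (1M + 3M) >> 2) = 1M and min_bytes = sectorsize, i.e. the
	 * cluster needs at least one 1M extent but may be padded out with
	 * any free extent of at least one sector.
	 */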

	spin_lock(&ctl->tree_lock);

	/*
	 * If we know we don't have enough space to make a cluster don't even
	 * bother doing all the work to try and find one.
	 */
	if (ctl->free_space < bytes) {
		spin_unlock(&ctl->tree_lock);
		return -ENOSPC;
	}

	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}

	trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
				 min_bytes);

	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
				      bytes + empty_size,
				      cont1_bytes, min_bytes);
	if (ret)
		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
					   offset, bytes + empty_size,
					   cont1_bytes, min_bytes);

	/* Clear our temporary list */
	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
		list_del_init(&entry->list);

	if (!ret) {
		btrfs_get_block_group(block_group);
		list_add_tail(&cluster->block_group_list,
			      &block_group->cluster_list);
		cluster->block_group = block_group;
	} else {
		trace_btrfs_failed_cluster_setup(block_group);
	}
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&ctl->tree_lock);

	return ret;
}
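
/*
 * Illustrative sketch only (not part of the driver code): one way a caller
 * could set up a cluster and request space with the helpers above.  The
 * helper name and the zero empty_size are assumptions for the example; real
 * callers keep long lived clusters in fs_info and hold cluster->refill_lock
 * around refills.
 */
static int __maybe_unused demo_request_cluster(struct btrfs_block_group *bg,
					       struct btrfs_free_cluster *cluster,
					       u64 offset, u64 bytes)
{
	/* A cluster must be zeroed out once before its first use. */
	btrfs_init_free_cluster(cluster);

	/*
	 * Gather at least @bytes of free space near @offset; on success the
	 * cluster pins a reference on @bg until the cluster is released.
	 */
	return btrfs_find_space_cluster(bg, cluster, offset, bytes, 0);
}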

/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root = RB_ROOT;
	cluster->max_size = 0;
	cluster->fragmented = false;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}

static int do_trimming(struct btrfs_block_group *block_group,
		       u64 *total_trimmed, u64 start, u64 bytes,
		       u64 reserved_start, u64 reserved_bytes,
		       enum btrfs_trim_state reserved_trim_state,
		       struct btrfs_trim_range *trim_entry)
{
	struct btrfs_space_info *space_info = block_group->space_info;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int ret;
	int update = 0;
	const u64 end = start + bytes;
	const u64 reserved_end = reserved_start + reserved_bytes;
	enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
	u64 trimmed = 0;

	spin_lock(&space_info->lock);
	spin_lock(&block_group->lock);
	if (!block_group->ro) {
		block_group->reserved += reserved_bytes;
		space_info->bytes_reserved += reserved_bytes;
		update = 1;
	}
	spin_unlock(&block_group->lock);
	spin_unlock(&space_info->lock);

	ret = btrfs_discard_extent(fs_info, start, bytes, &trimmed);
	if (!ret) {
		*total_trimmed += trimmed;
		trim_state = BTRFS_TRIM_STATE_TRIMMED;
	}

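	/*
	 * The reserved range can be wider than what was actually trimmed
	 * (reserved_start <= start and end <= reserved_end).  Return the
	 * leading and trailing slack with its original trim state, and
	 * re-add the trimmed chunk itself with the state set above.
	 */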
	mutex_lock(&ctl->cache_writeout_mutex);
	if (reserved_start < start)
		__btrfs_add_free_space(block_group, reserved_start,
				       start - reserved_start,
				       reserved_trim_state);
	if (end < reserved_end)
		__btrfs_add_free_space(block_group, end, reserved_end - end,
				       reserved_trim_state);
	__btrfs_add_free_space(block_group, start, bytes, trim_state);
	list_del(&trim_entry->list);
	mutex_unlock(&ctl->cache_writeout_mutex);

	if (update) {
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);
		if (block_group->ro)
			space_info->bytes_readonly += reserved_bytes;
		block_group->reserved -= reserved_bytes;
		space_info->bytes_reserved -= reserved_bytes;
		spin_unlock(&block_group->lock);
		spin_unlock(&space_info->lock);
	}

	return ret;
}

/*
 * If @async is set, then we will trim 1 region and return.
 */
static int trim_no_bitmap(struct btrfs_block_group *block_group,
			  u64 *total_trimmed, u64 start, u64 end, u64 minlen,
			  bool async)
{
	struct btrfs_discard_ctl *discard_ctl =
					&block_group->fs_info->discard_ctl;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret = 0;
	u64 extent_start;
	u64 extent_bytes;
	enum btrfs_trim_state extent_trim_state;
	u64 bytes;
	const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size);

	while (start < end) {
		struct btrfs_trim_range trim_entry;

		mutex_lock(&ctl->cache_writeout_mutex);
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen)
			goto out_unlock;

		entry = tree_search_offset(ctl, start, 0, 1);
		if (!entry)
			goto out_unlock;

		/* Skip bitmaps and if async, already trimmed entries */
		while (entry->bitmap ||
		       (async && btrfs_free_space_trimmed(entry))) {
			node = rb_next(&entry->offset_index);
			if (!node)
				goto out_unlock;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
		}

		if (entry->offset >= end)
			goto out_unlock;

		extent_start = entry->offset;
		extent_bytes = entry->bytes;
		extent_trim_state = entry->trim_state;
		if (async) {
			start = entry->offset;
			bytes = entry->bytes;
			if (bytes < minlen) {
				spin_unlock(&ctl->tree_lock);
				mutex_unlock(&ctl->cache_writeout_mutex);
				goto next;
			}
			unlink_free_space(ctl, entry, true);
			/*
			 * Let bytes = BTRFS_MAX_DISCARD_SIZE + X.
			 * If X < BTRFS_ASYNC_DISCARD_MIN_FILTER, we won't trim
			 * X when we come back around. So trim it now.
			 */
			if (max_discard_size &&
			    bytes >= (max_discard_size +
				      BTRFS_ASYNC_DISCARD_MIN_FILTER)) {
				bytes = max_discard_size;
				extent_bytes = max_discard_size;
				entry->offset += max_discard_size;
				entry->bytes -= max_discard_size;
				link_free_space(ctl, entry);
			} else {
				kmem_cache_free(btrfs_free_space_cachep, entry);
			}
		} else {
			start = max(start, extent_start);
			bytes = min(extent_start + extent_bytes, end) - start;
			if (bytes < minlen) {
				spin_unlock(&ctl->tree_lock);
				mutex_unlock(&ctl->cache_writeout_mutex);
				goto next;
			}

			unlink_free_space(ctl, entry, true);
			kmem_cache_free(btrfs_free_space_cachep, entry);
		}

		spin_unlock(&ctl->tree_lock);
		trim_entry.start = extent_start;
		trim_entry.bytes = extent_bytes;
		list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
		mutex_unlock(&ctl->cache_writeout_mutex);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  extent_start, extent_bytes, extent_trim_state,
				  &trim_entry);
		if (ret) {
			block_group->discard_cursor = start + bytes;
			break;
		}
next:
		start += bytes;
		block_group->discard_cursor = start;
		if (async && *total_trimmed)
			break;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;

out_unlock:
	block_group->discard_cursor = btrfs_block_group_end(block_group);
	spin_unlock(&ctl->tree_lock);
	mutex_unlock(&ctl->cache_writeout_mutex);

	return ret;
}

/*
 * If we break out of trimming a bitmap prematurely, we should reset the
 * trimming bit. In a rather contrived case, it's possible to race here so
 * reset the state to BTRFS_TRIM_STATE_UNTRIMMED.
 *
 * start = start of bitmap
 * end = near end of bitmap
 *
 * Thread 1:			Thread 2:
 * trim_bitmaps(start)
 *				trim_bitmaps(end)
 *				end_trimming_bitmap()
 * reset_trimming_bitmap()
 */
static void reset_trimming_bitmap(struct btrfs_free_space_ctl *ctl, u64 offset)
{
	struct btrfs_free_space *entry;

	spin_lock(&ctl->tree_lock);
	entry = tree_search_offset(ctl, offset, 1, 0);
	if (entry) {
		if (btrfs_free_space_trimmed(entry)) {
			ctl->discardable_extents[BTRFS_STAT_CURR] +=
				entry->bitmap_extents;
			ctl->discardable_bytes[BTRFS_STAT_CURR] += entry->bytes;
		}
		entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
	}

	spin_unlock(&ctl->tree_lock);
}

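/*
 * The counterpart of reset_trimming_bitmap() above: once a full scan of a
 * bitmap finishes, mark it trimmed and drop its extents and bytes from the
 * discardable statistics.
 */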
static void end_trimming_bitmap(struct btrfs_free_space_ctl *ctl,
				struct btrfs_free_space *entry)
{
	if (btrfs_free_space_trimming_bitmap(entry)) {
		entry->trim_state = BTRFS_TRIM_STATE_TRIMMED;
		ctl->discardable_extents[BTRFS_STAT_CURR] -=
			entry->bitmap_extents;
		ctl->discardable_bytes[BTRFS_STAT_CURR] -= entry->bytes;
	}
}

/*
 * If @async is set, then we will trim 1 region and return.
 */
static int trim_bitmaps(struct btrfs_block_group *block_group,
			u64 *total_trimmed, u64 start, u64 end, u64 minlen,
			u64 maxlen, bool async)
{
	struct btrfs_discard_ctl *discard_ctl =
					&block_group->fs_info->discard_ctl;
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	int ret = 0;
	int ret2;
	u64 bytes;
	u64 offset = offset_to_bitmap(ctl, start);
	const u64 max_discard_size = READ_ONCE(discard_ctl->max_discard_size);

	while (offset < end) {
		bool next_bitmap = false;
		struct btrfs_trim_range trim_entry;

		mutex_lock(&ctl->cache_writeout_mutex);
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			block_group->discard_cursor =
				btrfs_block_group_end(block_group);
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			break;
		}

		entry = tree_search_offset(ctl, offset, 1, 0);
		/*
		 * Bitmaps are marked trimmed lossily now to prevent constant
		 * discarding of the same bitmap (the reason why we are bound
		 * by the filters). So, retrim the block group bitmaps when we
		 * are preparing to punt to the unused_bgs list. This uses
		 * @minlen to determine if we are in BTRFS_DISCARD_INDEX_UNUSED
		 * which is the only discard index which sets minlen to 0.
		 */
		if (!entry || (async && minlen && start == offset &&
			       btrfs_free_space_trimmed(entry))) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			next_bitmap = true;
			goto next;
		}

		/*
		 * Async discard bitmap trimming begins by setting the start
		 * to key.objectid, and offset_to_bitmap() aligns to the
		 * start of the bitmap. This lets us know we are fully
		 * scanning the bitmap rather than only some portion of it.
		 */
		if (start == offset)
			entry->trim_state = BTRFS_TRIM_STATE_TRIMMING;

		bytes = minlen;
		ret2 = search_bitmap(ctl, entry, &start, &bytes, false);
		if (ret2 || start >= end) {
			/*
			 * We lossily consider a bitmap trimmed if we only skip
			 * over regions <= BTRFS_ASYNC_DISCARD_MIN_FILTER.
			 */
			if (ret2 && minlen <= BTRFS_ASYNC_DISCARD_MIN_FILTER)
				end_trimming_bitmap(ctl, entry);
			else
				entry->trim_state = BTRFS_TRIM_STATE_UNTRIMMED;
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			next_bitmap = true;
			goto next;
		}

		/*
		 * We already trimmed a region, but are using the locking above
		 * to reset the trim_state.
		 */
		if (async && *total_trimmed) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			goto out;
		}

		bytes = min(bytes, end - start);
		if (bytes < minlen || (async && maxlen && bytes > maxlen)) {
			spin_unlock(&ctl->tree_lock);
			mutex_unlock(&ctl->cache_writeout_mutex);
			goto next;
		}

		/*
		 * Let bytes = BTRFS_MAX_DISCARD_SIZE + X.
		 * If X < @minlen, we won't trim X when we come back around.
		 * So trim it now. We differ here from trimming extents as we
		 * don't keep individual state per bit.
		 */
		if (async &&
		    max_discard_size &&
		    bytes > (max_discard_size + minlen))
			bytes = max_discard_size;

		bitmap_clear_bits(ctl, entry, start, bytes, true);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);

		spin_unlock(&ctl->tree_lock);
		trim_entry.start = start;
		trim_entry.bytes = bytes;
		list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
		mutex_unlock(&ctl->cache_writeout_mutex);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  start, bytes, BTRFS_TRIM_STATE_UNTRIMMED,
				  &trim_entry);
		if (ret) {
			reset_trimming_bitmap(ctl, offset);
			block_group->discard_cursor =
				btrfs_block_group_end(block_group);
			break;
		}
next:
		if (next_bitmap) {
			offset += BITS_PER_BITMAP * ctl->unit;
			start = offset;
		} else {
			start += bytes;
		}
		block_group->discard_cursor = start;

		if (fatal_signal_pending(current)) {
			if (start != offset)
				reset_trimming_bitmap(ctl, offset);
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	if (offset >= end)
		block_group->discard_cursor = end;

out:
	return ret;
}

int btrfs_trim_block_group(struct btrfs_block_group *block_group,
			   u64 *trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int ret;
	u64 rem = 0;

	ASSERT(!btrfs_is_zoned(block_group->fs_info));

	*trimmed = 0;

	spin_lock(&block_group->lock);
	if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	btrfs_freeze_block_group(block_group);
	spin_unlock(&block_group->lock);

	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, false);
	if (ret)
		goto out;

	ret = trim_bitmaps(block_group, trimmed, start, end, minlen, 0, false);
	div64_u64_rem(end, BITS_PER_BITMAP * ctl->unit, &rem);
	/* If we ended in the middle of a bitmap, reset the trimming flag */
	if (rem)
		reset_trimming_bitmap(ctl, offset_to_bitmap(ctl, end));
out:
	btrfs_unfreeze_block_group(block_group);
	return ret;
}
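
/*
 * Illustrative sketch only (hypothetical helper): trimming one whole block
 * group the way an fstrim style ioctl would.  The 64K minimum length is an
 * arbitrary example value.
 */
static int __maybe_unused demo_trim_whole_block_group(struct btrfs_block_group *bg)
{
	u64 trimmed = 0;
	const u64 start = bg->start;
	const u64 end = bg->start + bg->length;

	/* Trims extent entries first, then bitmaps; @trimmed reports bytes. */
	return btrfs_trim_block_group(bg, &trimmed, start, end, SZ_64K);
}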

int btrfs_trim_block_group_extents(struct btrfs_block_group *block_group,
				   u64 *trimmed, u64 start, u64 end, u64 minlen,
				   bool async)
{
	int ret;

	*trimmed = 0;

	spin_lock(&block_group->lock);
	if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	btrfs_freeze_block_group(block_group);
	spin_unlock(&block_group->lock);

	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen, async);
	btrfs_unfreeze_block_group(block_group);

	return ret;
}
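
/*
 * Illustrative sketch only (hypothetical helper): a single async discard
 * step over the extent entries, resuming from the cursor the last pass left
 * behind.  The minlen filter value is an example; the real filters live in
 * the discard workqueue code.
 */
static int __maybe_unused demo_async_trim_step(struct btrfs_block_group *bg)
{
	u64 trimmed = 0;
	const u64 end = bg->start + bg->length;

	/* async=true: trim at most one region, then let the worker reschedule. */
	return btrfs_trim_block_group_extents(bg, &trimmed, bg->discard_cursor,
					      end, BTRFS_ASYNC_DISCARD_MIN_FILTER,
					      true);
}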

int btrfs_trim_block_group_bitmaps(struct btrfs_block_group *block_group,
				   u64 *trimmed, u64 start, u64 end, u64 minlen,
				   u64 maxlen, bool async)
{
	int ret;

	*trimmed = 0;

	spin_lock(&block_group->lock);
	if (test_bit(BLOCK_GROUP_FLAG_REMOVED, &block_group->runtime_flags)) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	btrfs_freeze_block_group(block_group);
	spin_unlock(&block_group->lock);

	ret = trim_bitmaps(block_group, trimmed, start, end, minlen, maxlen,
			   async);

	btrfs_unfreeze_block_group(block_group);

	return ret;
}

bool btrfs_free_space_cache_v1_active(struct btrfs_fs_info *fs_info)
{
	return btrfs_super_cache_generation(fs_info->super_copy);
}

static int cleanup_free_space_cache_v1(struct btrfs_fs_info *fs_info,
				       struct btrfs_trans_handle *trans)
{
	struct btrfs_block_group *block_group;
	struct rb_node *node;
	int ret = 0;

	btrfs_info(fs_info, "cleaning free space cache v1");

	node = rb_first_cached(&fs_info->block_group_cache_tree);
	while (node) {
		block_group = rb_entry(node, struct btrfs_block_group, cache_node);
		ret = btrfs_remove_free_space_inode(trans, NULL, block_group);
		if (ret)
			goto out;
		node = rb_next(node);
	}
out:
	return ret;
}

int btrfs_set_free_space_cache_v1_active(struct btrfs_fs_info *fs_info, bool active)
{
	struct btrfs_trans_handle *trans;
	int ret;

	/*
	 * update_super_roots will appropriately set or unset
	 * super_copy->cache_generation based on SPACE_CACHE and
	 * BTRFS_FS_CLEANUP_SPACE_CACHE_V1. For this reason, we need a
	 * transaction commit whether we are enabling space cache v1 and don't
	 * have any other work to do, or are disabling it and removing free
	 * space inodes.
	 */
	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	if (!active) {
		set_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags);
		ret = cleanup_free_space_cache_v1(fs_info, trans);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			btrfs_end_transaction(trans);
			goto out;
		}
	}

	ret = btrfs_commit_transaction(trans);
out:
	clear_bit(BTRFS_FS_CLEANUP_SPACE_CACHE_V1, &fs_info->flags);

	return ret;
}
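
/*
 * Illustrative sketch only (hypothetical helper): how remount handling might
 * turn the v1 cache off when the space_cache mount option changes.  Error
 * handling is elided.
 */
static int __maybe_unused demo_disable_space_cache_v1(struct btrfs_fs_info *fs_info)
{
	if (!btrfs_free_space_cache_v1_active(fs_info))
		return 0;

	/* Commits a transaction internally, so don't call this under one. */
	return btrfs_set_free_space_cache_v1_active(fs_info, false);
}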

#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/*
 * Use this if you need to make a bitmap or extent entry specifically. It
 * doesn't do any of the merging that add_free_space does; it acts a lot like
 * how the free space cache loading stuff works, so you can get really weird
 * configurations.
 */
int test_add_free_space_entry(struct btrfs_block_group *cache,
			      u64 offset, u64 bytes, bool bitmap)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info = NULL, *bitmap_info;
	void *map = NULL;
	enum btrfs_trim_state trim_state = BTRFS_TRIM_STATE_TRIMMED;
	u64 bytes_added;
	int ret;

again:
	if (!info) {
		info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
		if (!info)
			return -ENOMEM;
	}

	if (!bitmap) {
		spin_lock(&ctl->tree_lock);
		info->offset = offset;
		info->bytes = bytes;
		info->max_extent_size = 0;
		ret = link_free_space(ctl, info);
		spin_unlock(&ctl->tree_lock);
		if (ret)
			kmem_cache_free(btrfs_free_space_cachep, info);
		return ret;
	}

	if (!map) {
		map = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS);
		if (!map) {
			kmem_cache_free(btrfs_free_space_cachep, info);
			return -ENOMEM;
		}
	}

	spin_lock(&ctl->tree_lock);
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		info->bitmap = map;
		map = NULL;
		add_new_bitmap(ctl, info, offset);
		bitmap_info = info;
		info = NULL;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes,
					  trim_state);

	bytes -= bytes_added;
	offset += bytes_added;
	spin_unlock(&ctl->tree_lock);

	if (bytes)
		goto again;

	if (info)
		kmem_cache_free(btrfs_free_space_cachep, info);
	if (map)
		kmem_cache_free(btrfs_free_space_bitmap_cachep, map);
	return 0;
}

/*
 * Checks to see if the given range is in the free space cache. This is
 * really just used to check for the absence of space, so if there is free
 * space in the range at all we will return 1.
 */
int test_check_exists(struct btrfs_block_group *cache,
		      u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = cache->free_space_ctl;
	struct btrfs_free_space *info;
	int ret = 0;

	spin_lock(&ctl->tree_lock);
	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info)
			goto out;
	}

have_info:
	if (info->bitmap) {
		u64 bit_off, bit_bytes;
		struct rb_node *n;
		struct btrfs_free_space *tmp;

		bit_off = offset;
		bit_bytes = ctl->unit;
		ret = search_bitmap(ctl, info, &bit_off, &bit_bytes, false);
		if (!ret) {
			if (bit_off == offset) {
				ret = 1;
				goto out;
			} else if (bit_off > offset &&
				   offset + bytes > bit_off) {
				ret = 1;
				goto out;
			}
		}

		n = rb_prev(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (tmp->offset + tmp->bytes < offset)
				break;
			if (offset + bytes < tmp->offset) {
				n = rb_prev(&tmp->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		n = rb_next(&info->offset_index);
		while (n) {
			tmp = rb_entry(n, struct btrfs_free_space,
				       offset_index);
			if (offset + bytes < tmp->offset)
				break;
			if (tmp->offset + tmp->bytes < offset) {
				n = rb_next(&tmp->offset_index);
				continue;
			}
			info = tmp;
			goto have_info;
		}

		ret = 0;
		goto out;
	}

	if (info->offset == offset) {
		ret = 1;
		goto out;
	}

	if (offset > info->offset && offset < info->offset + info->bytes)
		ret = 1;
out:
	spin_unlock(&ctl->tree_lock);
	return ret;
}
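
/*
 * Illustrative sketch only (hypothetical test helper): force a bitmap entry
 * with test_add_free_space_entry() and probe it with test_check_exists().
 * The 1M offset and 4K size are example values; a real test derives them
 * from the block group under test.
 */
static int __maybe_unused demo_bitmap_roundtrip(struct btrfs_block_group *cache)
{
	int ret;

	/* Record 4K of free space at offset 1M, forced into a bitmap. */
	ret = test_add_free_space_entry(cache, SZ_1M, SZ_4K, true);
	if (ret)
		return ret;

	/* Expect 1: the range overlaps recorded free space. */
	return test_check_exists(cache, SZ_1M, SZ_4K) == 1 ? 0 : -EINVAL;
}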
#endif /* CONFIG_BTRFS_FS_RUN_SANITY_TESTS */