1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4 *
5 * Uses a block device as cache for other block devices; optimized for SSDs.
6 * All allocation is done in buckets, which should match the erase block size
7 * of the device.
8 *
9 * Buckets containing cached data are kept on a heap sorted by priority;
10 * bucket priority is increased on cache hit, and periodically all the buckets
11 * on the heap have their priority scaled down. This currently is just used as
12 * an LRU but in the future should allow for more intelligent heuristics.
13 *
14 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
15 * counter. Garbage collection is used to remove stale pointers.
16 *
17 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
18 * as keys are inserted we only sort the pages that have not yet been written.
19 * When garbage collection is run, we resort the entire node.
20 *
21 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
22 */
23
24 #include "bcache.h"
25 #include "btree.h"
26 #include "debug.h"
27 #include "extents.h"
28
29 #include <linux/slab.h>
30 #include <linux/bitops.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/prefetch.h>
34 #include <linux/random.h>
35 #include <linux/rcupdate.h>
36 #include <linux/sched/clock.h>
37 #include <linux/rculist.h>
38 #include <linux/delay.h>
39 #include <trace/events/bcache.h>
40
41 /*
42 * Todo:
43 * register_bcache: Return errors out to userspace correctly
44 *
45 * Writeback: don't undirty key until after a cache flush
46 *
47 * Create an iterator for key pointers
48 *
49 * On btree write error, mark bucket such that it won't be freed from the cache
50 *
51 * Journalling:
52 * Check for bad keys in replay
53 * Propagate barriers
54 * Refcount journal entries in journal_replay
55 *
56 * Garbage collection:
57 * Finish incremental gc
58 * Gc should free old UUIDs, data for invalid UUIDs
59 *
60 * Provide a way to list backing device UUIDs we have data cached for, and
61 * probably how long it's been since we've seen them, and a way to invalidate
62 * dirty data for devices that will never be attached again
63 *
64 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
65 * that based on that and how much dirty data we have we can keep writeback
66 * from being starved
67 *
68 * Add a tracepoint or somesuch to watch for writeback starvation
69 *
70 * When btree depth > 1 and splitting an interior node, we have to make sure
71 * alloc_bucket() cannot fail. This should be true but is not completely
72 * obvious.
73 *
74 * Plugging?
75 *
76 * If data write is less than hard sector size of ssd, round up offset in open
77 * bucket to the next whole sector
78 *
79 * Superblock needs to be fleshed out for multiple cache devices
80 *
81 * Add a sysfs tunable for the number of writeback IOs in flight
82 *
83 * Add a sysfs tunable for the number of open data buckets
84 *
85 * IO tracking: Can we track when one process is doing io on behalf of another?
86 * IO tracking: Don't use just an average, weigh more recent stuff higher
87 *
88 * Test module load/unload
89 */
90
91 #define MAX_NEED_GC 64
92 #define MAX_SAVE_PRIO 72
93 #define MAX_GC_TIMES 100
94 #define MIN_GC_NODES 100
95 #define GC_SLEEP_MS 100
96
97 #define PTR_DIRTY_BIT (((uint64_t) 1 << 36))
98
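/*
 * Hash table key for a cached btree node: the bucket-granular offset of
 * the node's first pointer combined with that pointer's generation, so
 * mca_hash()/mca_find() below can look nodes up by their on-disk location.
 */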
99 #define PTR_HASH(c, k) \
100 (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
101
102 static struct workqueue_struct *btree_io_wq;
103
104 #define insert_lock(s, b) ((b)->level <= (s)->lock)
105
106
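/*
 * Address of the first block of the node's buffer that has not yet been
 * written to disk; new bsets are started here (b->written counts the
 * blocks already on disk).
 */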
107 static inline struct bset *write_block(struct btree *b)
108 {
109 return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache);
110 }
111
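/*
 * Called after a node write: sort the in-memory sets (a full sort for
 * interior nodes with multiple sets, a lazy sort otherwise) and, if there
 * is room left in the node, start a fresh bset at write_block() for new
 * keys.
 */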
112 static void bch_btree_init_next(struct btree *b)
113 {
114 /* If not a leaf node, always sort */
115 if (b->level && b->keys.nsets)
116 bch_btree_sort(&b->keys, &b->c->sort);
117 else
118 bch_btree_sort_lazy(&b->keys, &b->c->sort);
119
120 if (b->written < btree_blocks(b))
121 bch_bset_init_next(&b->keys, write_block(b),
122 bset_magic(&b->c->cache->sb));
123
124 }
125
126 /* Btree key manipulation */
127
128 void bkey_put(struct cache_set *c, struct bkey *k)
129 {
130 unsigned int i;
131
132 for (i = 0; i < KEY_PTRS(k); i++)
133 if (ptr_available(c, k, i))
134 atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
135 }
136
137 /* Btree IO */
138
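/*
 * Checksum of a bset: crc64 over everything after the 8-byte csum field
 * itself, seeded with the node's first pointer (tying it to the node's
 * on-disk location) and then inverted.
 */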
139 static uint64_t btree_csum_set(struct btree *b, struct bset *i)
140 {
141 uint64_t crc = b->key.ptr[0];
142 void *data = (void *) i + 8, *end = bset_bkey_last(i);
143
144 crc = bch_crc64_update(crc, data, end - data);
145 return crc ^ 0xffffffffffffffffULL;
146 }
147
148 void bch_btree_node_read_done(struct btree *b)
149 {
150 const char *err = "bad btree header";
151 struct bset *i = btree_bset_first(b);
152 struct btree_iter *iter;
153
154 /*
155 * c->fill_iter can allocate an iterator with more memory space
156 * than static MAX_BSETS.
157 * See the comment around cache_set->fill_iter.
158 */
159 iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
160 iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
161 iter->used = 0;
162
163 #ifdef CONFIG_BCACHE_DEBUG
164 iter->b = &b->keys;
165 #endif
166
167 if (!i->seq)
168 goto err;
169
170 for (;
171 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
172 i = write_block(b)) {
173 err = "unsupported bset version";
174 if (i->version > BCACHE_BSET_VERSION)
175 goto err;
176
177 err = "bad btree header";
178 if (b->written + set_blocks(i, block_bytes(b->c->cache)) >
179 btree_blocks(b))
180 goto err;
181
182 err = "bad magic";
183 if (i->magic != bset_magic(&b->c->cache->sb))
184 goto err;
185
186 err = "bad checksum";
187 switch (i->version) {
188 case 0:
189 if (i->csum != csum_set(i))
190 goto err;
191 break;
192 case BCACHE_BSET_VERSION:
193 if (i->csum != btree_csum_set(b, i))
194 goto err;
195 break;
196 }
197
198 err = "empty set";
199 if (i != b->keys.set[0].data && !i->keys)
200 goto err;
201
202 bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
203
204 b->written += set_blocks(i, block_bytes(b->c->cache));
205 }
206
207 err = "corrupted btree";
208 for (i = write_block(b);
209 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
210 i = ((void *) i) + block_bytes(b->c->cache))
211 if (i->seq == b->keys.set[0].data->seq)
212 goto err;
213
214 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
215
216 i = b->keys.set[0].data;
217 err = "short btree key";
218 if (b->keys.set[0].size &&
219 bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
220 goto err;
221
222 if (b->written < btree_blocks(b))
223 bch_bset_init_next(&b->keys, write_block(b),
224 bset_magic(&b->c->cache->sb));
225 out:
226 mempool_free(iter, &b->c->fill_iter);
227 return;
228 err:
229 set_btree_node_io_error(b);
230 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
231 err, PTR_BUCKET_NR(b->c, &b->key, 0),
232 bset_block_offset(b, i), i->keys);
233 goto out;
234 }
235
236 static void btree_node_read_endio(struct bio *bio)
237 {
238 struct closure *cl = bio->bi_private;
239
240 closure_put(cl);
241 }
242
243 static void bch_btree_node_read(struct btree *b)
244 {
245 uint64_t start_time = local_clock();
246 struct closure cl;
247 struct bio *bio;
248
249 trace_bcache_btree_read(b);
250
251 closure_init_stack(&cl);
252
253 bio = bch_bbio_alloc(b->c);
254 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
255 bio->bi_end_io = btree_node_read_endio;
256 bio->bi_private = &cl;
257 bio->bi_opf = REQ_OP_READ | REQ_META;
258
259 bch_bio_map(bio, b->keys.set[0].data);
260
261 bch_submit_bbio(bio, b->c, &b->key, 0);
262 closure_sync(&cl);
263
264 if (bio->bi_status)
265 set_btree_node_io_error(b);
266
267 bch_bbio_free(bio, b->c);
268
269 if (btree_node_io_error(b))
270 goto err;
271
272 bch_btree_node_read_done(b);
273 bch_time_stats_update(&b->c->btree_read_time, start_time);
274
275 return;
276 err:
277 bch_cache_set_error(b->c, "io error reading bucket %zu",
278 PTR_BUCKET_NR(b->c, &b->key, 0));
279 }
280
281 static void btree_complete_write(struct btree *b, struct btree_write *w)
282 {
283 if (w->prio_blocked &&
284 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
285 wake_up_allocators(b->c);
286
287 if (w->journal) {
288 atomic_dec_bug(w->journal);
289 __closure_wake_up(&b->c->journal.wait);
290 }
291
292 w->prio_blocked = 0;
293 w->journal = NULL;
294 }
295
296 static void btree_node_write_unlock(struct closure *cl)
297 {
298 struct btree *b = container_of(cl, struct btree, io);
299
300 up(&b->io_mutex);
301 }
302
303 static void __btree_node_write_done(struct closure *cl)
304 {
305 struct btree *b = container_of(cl, struct btree, io);
306 struct btree_write *w = btree_prev_write(b);
307
308 bch_bbio_free(b->bio, b->c);
309 b->bio = NULL;
310 btree_complete_write(b, w);
311
312 if (btree_node_dirty(b))
313 queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
314
315 closure_return_with_destructor(cl, btree_node_write_unlock);
316 }
317
318 static void btree_node_write_done(struct closure *cl)
319 {
320 struct btree *b = container_of(cl, struct btree, io);
321
322 bio_free_pages(b->bio);
323 __btree_node_write_done(cl);
324 }
325
326 static void btree_node_write_endio(struct bio *bio)
327 {
328 struct closure *cl = bio->bi_private;
329 struct btree *b = container_of(cl, struct btree, io);
330
331 if (bio->bi_status)
332 set_btree_node_io_error(b);
333
334 bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
335 closure_put(cl);
336 }
337
338 static void do_btree_node_write(struct btree *b)
339 {
340 struct closure *cl = &b->io;
341 struct bset *i = btree_bset_last(b);
342 BKEY_PADDED(key) k;
343
344 i->version = BCACHE_BSET_VERSION;
345 i->csum = btree_csum_set(b, i);
346
347 BUG_ON(b->bio);
348 b->bio = bch_bbio_alloc(b->c);
349
350 b->bio->bi_end_io = btree_node_write_endio;
351 b->bio->bi_private = cl;
352 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c->cache));
353 b->bio->bi_opf = REQ_OP_WRITE | REQ_META | REQ_FUA;
354 bch_bio_map(b->bio, i);
355
356 /*
357 * If we're appending to a leaf node, we don't technically need FUA -
358 * this write just needs to be persisted before the next journal write,
359 * which will be marked FLUSH|FUA.
360 *
361 * Similarly if we're writing a new btree root - the pointer is going to
362 * be in the next journal entry.
363 *
364 * But if we're writing a new btree node (that isn't a root) or
365 * appending to a non leaf btree node, we need either FUA or a flush
366 * when we write the parent with the new pointer. FUA is cheaper than a
367 * flush, and writes appending to leaf nodes aren't blocking anything so
368 * just make all btree node writes FUA to keep things sane.
369 */
370
371 bkey_copy(&k.key, &b->key);
372 SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
373 bset_sector_offset(&b->keys, i));
374
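/*
 * Two submission paths: if we can allocate bounce pages, copy the bset
 * into them and complete the write asynchronously; if the allocation
 * fails, map the bset memory directly and wait for the write
 * synchronously before continuing.
 */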
375 if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
376 struct bio_vec *bv;
377 void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
378 struct bvec_iter_all iter_all;
379
380 bio_for_each_segment_all(bv, b->bio, iter_all) {
381 memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
382 addr += PAGE_SIZE;
383 }
384
385 bch_submit_bbio(b->bio, b->c, &k.key, 0);
386
387 continue_at(cl, btree_node_write_done, NULL);
388 } else {
389 /*
390 * No problem for multipage bvec since the bio is
391 * just allocated
392 */
393 b->bio->bi_vcnt = 0;
394 bch_bio_map(b->bio, i);
395
396 bch_submit_bbio(b->bio, b->c, &k.key, 0);
397
398 closure_sync(cl);
399 continue_at_nobarrier(cl, __btree_node_write_done, NULL);
400 }
401 }
402
403 void __bch_btree_node_write(struct btree *b, struct closure *parent)
404 {
405 struct bset *i = btree_bset_last(b);
406
407 lockdep_assert_held(&b->write_lock);
408
409 trace_bcache_btree_write(b);
410
411 BUG_ON(current->bio_list);
412 BUG_ON(b->written >= btree_blocks(b));
413 BUG_ON(b->written && !i->keys);
414 BUG_ON(btree_bset_first(b)->seq != i->seq);
415 bch_check_keys(&b->keys, "writing");
416
417 cancel_delayed_work(&b->work);
418
419 /* If caller isn't waiting for write, parent refcount is cache set */
420 down(&b->io_mutex);
421 closure_init(&b->io, parent ?: &b->c->cl);
422
423 clear_bit(BTREE_NODE_dirty, &b->flags);
424 change_bit(BTREE_NODE_write_idx, &b->flags);
425
426 do_btree_node_write(b);
427
428 atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size,
429 &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
430
431 b->written += set_blocks(i, block_bytes(b->c->cache));
432 }
433
434 void bch_btree_node_write(struct btree *b, struct closure *parent)
435 {
436 unsigned int nsets = b->keys.nsets;
437
438 lockdep_assert_held(&b->lock);
439
440 __bch_btree_node_write(b, parent);
441
442 /*
443 * do verify if there was more than one set initially (i.e. we did a
444 * sort) and we sorted down to a single set:
445 */
446 if (nsets && !b->keys.nsets)
447 bch_btree_verify(b);
448
449 bch_btree_init_next(b);
450 }
451
452 static void bch_btree_node_write_sync(struct btree *b)
453 {
454 struct closure cl;
455
456 closure_init_stack(&cl);
457
458 mutex_lock(&b->write_lock);
459 bch_btree_node_write(b, &cl);
460 mutex_unlock(&b->write_lock);
461
462 closure_sync(&cl);
463 }
464
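/*
 * Delayed-work callback: writes out dirty btree nodes roughly 30 seconds
 * after they were queued (see the 30 * HZ queue_delayed_work() calls), if
 * nothing else has written them in the meantime.
 */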
465 static void btree_node_write_work(struct work_struct *w)
466 {
467 struct btree *b = container_of(to_delayed_work(w), struct btree, work);
468
469 mutex_lock(&b->write_lock);
470 if (btree_node_dirty(b))
471 __bch_btree_node_write(b, NULL);
472 mutex_unlock(&b->write_lock);
473 }
474
475 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
476 {
477 struct bset *i = btree_bset_last(b);
478 struct btree_write *w = btree_current_write(b);
479
480 lockdep_assert_held(&b->write_lock);
481
482 BUG_ON(!b->written);
483 BUG_ON(!i->keys);
484
485 if (!btree_node_dirty(b))
486 queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
487
488 set_btree_node_dirty(b);
489
490 /*
491 * w->journal is always the oldest journal pin of all bkeys
492 * in the leaf node, to make sure the oldest jset seq won't
493 * be increased before this btree node is flushed.
494 */
495 if (journal_ref) {
496 if (w->journal &&
497 journal_pin_cmp(b->c, w->journal, journal_ref)) {
498 atomic_dec_bug(w->journal);
499 w->journal = NULL;
500 }
501
502 if (!w->journal) {
503 w->journal = journal_ref;
504 atomic_inc(w->journal);
505 }
506 }
507
508 /* Force write if set is too big */
509 if (set_bytes(i) > PAGE_SIZE - 48 &&
510 !current->bio_list)
511 bch_btree_node_write(b, NULL);
512 }
513
514 /*
515 * Btree in memory cache - allocation/freeing
516 * mca -> memory cache
517 */
518
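/*
 * mca_reserve() is the number of in-memory btree nodes we must always keep
 * around (8 per level of the tree plus 16) so node allocation for splits
 * and inserts can make forward progress; mca_can_free() is how many nodes
 * above that reserve the shrinker is allowed to free.
 */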
519 #define mca_reserve(c) (((!IS_ERR_OR_NULL(c->root) && c->root->level) \
520 ? c->root->level : 1) * 8 + 16)
521 #define mca_can_free(c) \
522 max_t(int, 0, c->btree_cache_used - mca_reserve(c))
523
524 static void mca_data_free(struct btree *b)
525 {
526 BUG_ON(b->io_mutex.count != 1);
527
528 bch_btree_keys_free(&b->keys);
529
530 b->c->btree_cache_used--;
531 list_move(&b->list, &b->c->btree_cache_freed);
532 }
533
534 static void mca_bucket_free(struct btree *b)
535 {
536 BUG_ON(btree_node_dirty(b));
537
538 b->key.ptr[0] = 0;
539 hlist_del_init_rcu(&b->hash);
540 list_move(&b->list, &b->c->btree_cache_freeable);
541 }
542
543 static unsigned int btree_order(struct bkey *k)
544 {
545 return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
546 }
547
548 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
549 {
550 if (!bch_btree_keys_alloc(&b->keys,
551 max_t(unsigned int,
552 ilog2(b->c->btree_pages),
553 btree_order(k)),
554 gfp)) {
555 b->c->btree_cache_used++;
556 list_move(&b->list, &b->c->btree_cache);
557 } else {
558 list_move(&b->list, &b->c->btree_cache_freed);
559 }
560 }
561
562 static struct btree *mca_bucket_alloc(struct cache_set *c,
563 struct bkey *k, gfp_t gfp)
564 {
565 /*
566 * kzalloc() is necessary here for initialization,
567 * see code comments in bch_btree_keys_init().
568 */
569 struct btree *b = kzalloc(sizeof(struct btree), gfp);
570
571 if (!b)
572 return NULL;
573
574 init_rwsem(&b->lock);
575 lockdep_set_novalidate_class(&b->lock);
576 mutex_init(&b->write_lock);
577 lockdep_set_novalidate_class(&b->write_lock);
578 INIT_LIST_HEAD(&b->list);
579 INIT_DELAYED_WORK(&b->work, btree_node_write_work);
580 b->c = c;
581 sema_init(&b->io_mutex, 1);
582
583 mca_data_alloc(b, k, gfp);
584 return b;
585 }
586
587 static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
588 {
589 struct closure cl;
590
591 closure_init_stack(&cl);
592 lockdep_assert_held(&b->c->bucket_lock);
593
594 if (!down_write_trylock(&b->lock))
595 return -ENOMEM;
596
597 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
598
599 if (b->keys.page_order < min_order)
600 goto out_unlock;
601
602 if (!flush) {
603 if (btree_node_dirty(b))
604 goto out_unlock;
605
606 if (down_trylock(&b->io_mutex))
607 goto out_unlock;
608 up(&b->io_mutex);
609 }
610
611 retry:
612 /*
613 * BTREE_NODE_dirty might be cleared in btree_flush_write() by
614 * __bch_btree_node_write(). To avoid an extra flush, acquire
615 * b->write_lock before checking BTREE_NODE_dirty bit.
616 */
617 mutex_lock(&b->write_lock);
618 /*
619 * If this btree node is selected in btree_flush_write() by journal
620 * code, delay and retry until the node is flushed by journal code
621 * and BTREE_NODE_journal_flush bit cleared by btree_flush_write().
622 */
623 if (btree_node_journal_flush(b)) {
624 pr_debug("bnode %p is flushing by journal, retry\n", b);
625 mutex_unlock(&b->write_lock);
626 udelay(1);
627 goto retry;
628 }
629
630 if (btree_node_dirty(b))
631 __bch_btree_node_write(b, &cl);
632 mutex_unlock(&b->write_lock);
633
634 closure_sync(&cl);
635
636 /* wait for any in flight btree write */
637 down(&b->io_mutex);
638 up(&b->io_mutex);
639
640 return 0;
641 out_unlock:
642 rw_unlock(true, b);
643 return -ENOMEM;
644 }
645
646 static unsigned long bch_mca_scan(struct shrinker *shrink,
647 struct shrink_control *sc)
648 {
649 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
650 struct btree *b, *t;
651 unsigned long i, nr = sc->nr_to_scan;
652 unsigned long freed = 0;
653 unsigned int btree_cache_used;
654
655 if (c->shrinker_disabled)
656 return SHRINK_STOP;
657
658 if (c->btree_cache_alloc_lock)
659 return SHRINK_STOP;
660
661 /* Return -1 if we can't do anything right now */
662 if (sc->gfp_mask & __GFP_IO)
663 mutex_lock(&c->bucket_lock);
664 else if (!mutex_trylock(&c->bucket_lock))
665 return -1;
666
667 /*
668 * It's _really_ critical that we don't free too many btree nodes - we
669 * have to always leave ourselves a reserve. The reserve is how we
670 * guarantee that allocating memory for a new btree node can always
671 * succeed, so that inserting keys into the btree can always succeed and
672 * IO can always make forward progress:
673 */
674 nr /= c->btree_pages;
675 if (nr == 0)
676 nr = 1;
677 nr = min_t(unsigned long, nr, mca_can_free(c));
678
679 i = 0;
680 btree_cache_used = c->btree_cache_used;
681 list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) {
682 if (nr <= 0)
683 goto out;
684
685 if (!mca_reap(b, 0, false)) {
686 mca_data_free(b);
687 rw_unlock(true, b);
688 freed++;
689 }
690 nr--;
691 i++;
692 }
693
694 list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
695 if (nr <= 0 || i >= btree_cache_used)
696 goto out;
697
698 if (!mca_reap(b, 0, false)) {
699 mca_bucket_free(b);
700 mca_data_free(b);
701 rw_unlock(true, b);
702 freed++;
703 }
704
705 nr--;
706 i++;
707 }
708 out:
709 mutex_unlock(&c->bucket_lock);
710 return freed * c->btree_pages;
711 }
712
713 static unsigned long bch_mca_count(struct shrinker *shrink,
714 struct shrink_control *sc)
715 {
716 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
717
718 if (c->shrinker_disabled)
719 return 0;
720
721 if (c->btree_cache_alloc_lock)
722 return 0;
723
724 return mca_can_free(c) * c->btree_pages;
725 }
726
727 void bch_btree_cache_free(struct cache_set *c)
728 {
729 struct btree *b;
730 struct closure cl;
731
732 closure_init_stack(&cl);
733
734 if (c->shrink.list.next)
735 unregister_shrinker(&c->shrink);
736
737 mutex_lock(&c->bucket_lock);
738
739 #ifdef CONFIG_BCACHE_DEBUG
740 if (c->verify_data)
741 list_move(&c->verify_data->list, &c->btree_cache);
742
743 free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb)));
744 #endif
745
746 list_splice(&c->btree_cache_freeable,
747 &c->btree_cache);
748
749 while (!list_empty(&c->btree_cache)) {
750 b = list_first_entry(&c->btree_cache, struct btree, list);
751
752 /*
753 * This function is called by cache_set_free(); there is no I/O
754 * request on the cache at this point, so it is unnecessary to
755 * acquire b->write_lock before clearing BTREE_NODE_dirty.
756 */
757 if (btree_node_dirty(b)) {
758 btree_complete_write(b, btree_current_write(b));
759 clear_bit(BTREE_NODE_dirty, &b->flags);
760 }
761 mca_data_free(b);
762 }
763
764 while (!list_empty(&c->btree_cache_freed)) {
765 b = list_first_entry(&c->btree_cache_freed,
766 struct btree, list);
767 list_del(&b->list);
768 cancel_delayed_work_sync(&b->work);
769 kfree(b);
770 }
771
772 mutex_unlock(&c->bucket_lock);
773 }
774
775 int bch_btree_cache_alloc(struct cache_set *c)
776 {
777 unsigned int i;
778
779 for (i = 0; i < mca_reserve(c); i++)
780 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
781 return -ENOMEM;
782
783 list_splice_init(&c->btree_cache,
784 &c->btree_cache_freeable);
785
786 #ifdef CONFIG_BCACHE_DEBUG
787 mutex_init(&c->verify_lock);
788
789 c->verify_ondisk = (void *)
790 __get_free_pages(GFP_KERNEL|__GFP_COMP,
791 ilog2(meta_bucket_pages(&c->cache->sb)));
792 if (!c->verify_ondisk) {
793 /*
794 * Don't worry about the mca_reserve() buckets
795 * allocated in the previous for-loop; they will be
796 * handled properly in bch_cache_set_unregister().
797 */
798 return -ENOMEM;
799 }
800
801 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
802
803 if (c->verify_data &&
804 c->verify_data->keys.set->data)
805 list_del_init(&c->verify_data->list);
806 else
807 c->verify_data = NULL;
808 #endif
809
810 c->shrink.count_objects = bch_mca_count;
811 c->shrink.scan_objects = bch_mca_scan;
812 c->shrink.seeks = 4;
813 c->shrink.batch = c->btree_pages * 2;
814
815 if (register_shrinker(&c->shrink))
816 pr_warn("bcache: %s: could not register shrinker\n",
817 __func__);
818
819 return 0;
820 }
821
822 /* Btree in memory cache - hash table */
823
824 static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
825 {
826 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
827 }
828
829 static struct btree *mca_find(struct cache_set *c, struct bkey *k)
830 {
831 struct btree *b;
832
833 rcu_read_lock();
834 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
835 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
836 goto out;
837 b = NULL;
838 out:
839 rcu_read_unlock();
840 return b;
841 }
842
843 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
844 {
845 spin_lock(&c->btree_cannibalize_lock);
846 if (likely(c->btree_cache_alloc_lock == NULL)) {
847 c->btree_cache_alloc_lock = current;
848 } else if (c->btree_cache_alloc_lock != current) {
849 if (op)
850 prepare_to_wait(&c->btree_cache_wait, &op->wait,
851 TASK_UNINTERRUPTIBLE);
852 spin_unlock(&c->btree_cannibalize_lock);
853 return -EINTR;
854 }
855 spin_unlock(&c->btree_cannibalize_lock);
856
857 return 0;
858 }
859
860 static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
861 struct bkey *k)
862 {
863 struct btree *b;
864
865 trace_bcache_btree_cache_cannibalize(c);
866
867 if (mca_cannibalize_lock(c, op))
868 return ERR_PTR(-EINTR);
869
870 list_for_each_entry_reverse(b, &c->btree_cache, list)
871 if (!mca_reap(b, btree_order(k), false))
872 return b;
873
874 list_for_each_entry_reverse(b, &c->btree_cache, list)
875 if (!mca_reap(b, btree_order(k), true))
876 return b;
877
878 WARN(1, "btree cache cannibalize failed\n");
879 return ERR_PTR(-ENOMEM);
880 }
881
882 /*
883 * We can only have one thread cannibalizing other cached btree nodes at a time,
884 * or we'll deadlock. We use an open coded mutex to ensure that, which
885 * mca_cannibalize_lock() takes. This means every time we unlock the root of
886 * the btree, we need to release this lock if we have it held.
887 */
888 void bch_cannibalize_unlock(struct cache_set *c)
889 {
890 spin_lock(&c->btree_cannibalize_lock);
891 if (c->btree_cache_alloc_lock == current) {
892 c->btree_cache_alloc_lock = NULL;
893 wake_up(&c->btree_cache_wait);
894 }
895 spin_unlock(&c->btree_cannibalize_lock);
896 }
897
898 static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
899 struct bkey *k, int level)
900 {
901 struct btree *b;
902
903 BUG_ON(current->bio_list);
904
905 lockdep_assert_held(&c->bucket_lock);
906
907 if (mca_find(c, k))
908 return NULL;
909
910 /* btree_node_free() doesn't free memory; it sticks the node on the end of
911 * the freeable list. Check if there are any freed nodes there:
912 */
913 list_for_each_entry(b, &c->btree_cache_freeable, list)
914 if (!mca_reap(b, btree_order(k), false))
915 goto out;
916
917 /* We never free struct btree itself, just the memory that holds the on
918 * disk node. Check the freed list before allocating a new one:
919 */
920 list_for_each_entry(b, &c->btree_cache_freed, list)
921 if (!mca_reap(b, 0, false)) {
922 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
923 if (!b->keys.set[0].data)
924 goto err;
925 else
926 goto out;
927 }
928
929 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
930 if (!b)
931 goto err;
932
933 BUG_ON(!down_write_trylock(&b->lock));
934 if (!b->keys.set->data)
935 goto err;
936 out:
937 BUG_ON(b->io_mutex.count != 1);
938
939 bkey_copy(&b->key, k);
940 list_move(&b->list, &c->btree_cache);
941 hlist_del_init_rcu(&b->hash);
942 hlist_add_head_rcu(&b->hash, mca_hash(c, k));
943
944 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
945 b->parent = (void *) ~0UL;
946 b->flags = 0;
947 b->written = 0;
948 b->level = level;
949
950 if (!b->level)
951 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
952 &b->c->expensive_debug_checks);
953 else
954 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
955 &b->c->expensive_debug_checks);
956
957 return b;
958 err:
959 if (b)
960 rw_unlock(true, b);
961
962 b = mca_cannibalize(c, op, k);
963 if (!IS_ERR(b))
964 goto out;
965
966 return b;
967 }
968
969 /*
970 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
971 * in from disk if necessary.
972 *
973 * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN.
974 *
975 * The btree node will have either a read or a write lock held, depending on
976 * level and op->lock.
977 */
978 struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
979 struct bkey *k, int level, bool write,
980 struct btree *parent)
981 {
982 int i = 0;
983 struct btree *b;
984
985 BUG_ON(level < 0);
986 retry:
987 b = mca_find(c, k);
988
989 if (!b) {
990 if (current->bio_list)
991 return ERR_PTR(-EAGAIN);
992
993 mutex_lock(&c->bucket_lock);
994 b = mca_alloc(c, op, k, level);
995 mutex_unlock(&c->bucket_lock);
996
997 if (!b)
998 goto retry;
999 if (IS_ERR(b))
1000 return b;
1001
1002 bch_btree_node_read(b);
1003
1004 if (!write)
1005 downgrade_write(&b->lock);
1006 } else {
1007 rw_lock(write, b, level);
1008 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
1009 rw_unlock(write, b);
1010 goto retry;
1011 }
1012 BUG_ON(b->level != level);
1013 }
1014
1015 if (btree_node_io_error(b)) {
1016 rw_unlock(write, b);
1017 return ERR_PTR(-EIO);
1018 }
1019
1020 BUG_ON(!b->written);
1021
1022 b->parent = parent;
1023
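/*
 * Prefetch the auxiliary search trees and bset data for the sets we are
 * about to search, then the data for any remaining sets.
 */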
1024 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1025 prefetch(b->keys.set[i].tree);
1026 prefetch(b->keys.set[i].data);
1027 }
1028
1029 for (; i <= b->keys.nsets; i++)
1030 prefetch(b->keys.set[i].data);
1031
1032 return b;
1033 }
1034
1035 static void btree_node_prefetch(struct btree *parent, struct bkey *k)
1036 {
1037 struct btree *b;
1038
1039 mutex_lock(&parent->c->bucket_lock);
1040 b = mca_alloc(parent->c, NULL, k, parent->level - 1);
1041 mutex_unlock(&parent->c->bucket_lock);
1042
1043 if (!IS_ERR_OR_NULL(b)) {
1044 b->parent = parent;
1045 bch_btree_node_read(b);
1046 rw_unlock(true, b);
1047 }
1048 }
1049
1050 /* Btree alloc */
1051
1052 static void btree_node_free(struct btree *b)
1053 {
1054 trace_bcache_btree_node_free(b);
1055
1056 BUG_ON(b == b->c->root);
1057
1058 retry:
1059 mutex_lock(&b->write_lock);
1060 /*
1061 * If the btree node is selected and being flushed in btree_flush_write(),
1062 * delay and retry until the BTREE_NODE_journal_flush bit is cleared;
1063 * only then is it safe to free the btree node here. Otherwise this btree
1064 * node would race with the journal flush.
1065 */
1066 if (btree_node_journal_flush(b)) {
1067 mutex_unlock(&b->write_lock);
1068 pr_debug("bnode %p journal_flush set, retry\n", b);
1069 udelay(1);
1070 goto retry;
1071 }
1072
1073 if (btree_node_dirty(b)) {
1074 btree_complete_write(b, btree_current_write(b));
1075 clear_bit(BTREE_NODE_dirty, &b->flags);
1076 }
1077
1078 mutex_unlock(&b->write_lock);
1079
1080 cancel_delayed_work(&b->work);
1081
1082 mutex_lock(&b->c->bucket_lock);
1083 bch_bucket_free(b->c, &b->key);
1084 mca_bucket_free(b);
1085 mutex_unlock(&b->c->bucket_lock);
1086 }
1087
1088 struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
1089 int level, bool wait,
1090 struct btree *parent)
1091 {
1092 BKEY_PADDED(key) k;
1093 struct btree *b;
1094
1095 mutex_lock(&c->bucket_lock);
1096 retry:
1097 /* return ERR_PTR(-EAGAIN) when it fails */
1098 b = ERR_PTR(-EAGAIN);
1099 if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
1100 goto err;
1101
1102 bkey_put(c, &k.key);
1103 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1104
1105 b = mca_alloc(c, op, &k.key, level);
1106 if (IS_ERR(b))
1107 goto err_free;
1108
1109 if (!b) {
1110 cache_bug(c,
1111 "Tried to allocate bucket that was in btree cache");
1112 goto retry;
1113 }
1114
1115 b->parent = parent;
1116 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));
1117
1118 mutex_unlock(&c->bucket_lock);
1119
1120 trace_bcache_btree_node_alloc(b);
1121 return b;
1122 err_free:
1123 bch_bucket_free(c, &k.key);
1124 err:
1125 mutex_unlock(&c->bucket_lock);
1126
1127 trace_bcache_btree_node_alloc_fail(c);
1128 return b;
1129 }
1130
1131 static struct btree *bch_btree_node_alloc(struct cache_set *c,
1132 struct btree_op *op, int level,
1133 struct btree *parent)
1134 {
1135 return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1136 }
1137
1138 static struct btree *btree_node_alloc_replacement(struct btree *b,
1139 struct btree_op *op)
1140 {
1141 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1142
1143 if (!IS_ERR(n)) {
1144 mutex_lock(&n->write_lock);
1145 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1146 bkey_copy_key(&n->key, &b->key);
1147 mutex_unlock(&n->write_lock);
1148 }
1149
1150 return n;
1151 }
1152
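/*
 * Build the key that frees a btree node: copy the node's pointers, zero
 * the key itself (so it compares equal to ZERO_KEY) and bump each
 * bucket's generation so existing pointers to the old node go stale.
 */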
1153 static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1154 {
1155 unsigned int i;
1156
1157 mutex_lock(&b->c->bucket_lock);
1158
1159 atomic_inc(&b->c->prio_blocked);
1160
1161 bkey_copy(k, &b->key);
1162 bkey_copy_key(k, &ZERO_KEY);
1163
1164 for (i = 0; i < KEY_PTRS(k); i++)
1165 SET_PTR_GEN(k, i,
1166 bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
1167 PTR_BUCKET(b->c, &b->key, i)));
1168
1169 mutex_unlock(&b->c->bucket_lock);
1170 }
1171
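/*
 * Check that the btree-node bucket reserve holds enough free buckets
 * (roughly enough to split every node on the path from b up to the root:
 * two per level, plus one); otherwise queue the caller on
 * btree_cache_wait and return -EINTR so the operation is retried.
 */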
1172 static int btree_check_reserve(struct btree *b, struct btree_op *op)
1173 {
1174 struct cache_set *c = b->c;
1175 struct cache *ca = c->cache;
1176 unsigned int reserve = (c->root->level - b->level) * 2 + 1;
1177
1178 mutex_lock(&c->bucket_lock);
1179
1180 if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1181 if (op)
1182 prepare_to_wait(&c->btree_cache_wait, &op->wait,
1183 TASK_UNINTERRUPTIBLE);
1184 mutex_unlock(&c->bucket_lock);
1185 return -EINTR;
1186 }
1187
1188 mutex_unlock(&c->bucket_lock);
1189
1190 return mca_cannibalize_lock(b->c, op);
1191 }
1192
1193 /* Garbage collection */
1194
1195 static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1196 struct bkey *k)
1197 {
1198 uint8_t stale = 0;
1199 unsigned int i;
1200 struct bucket *g;
1201
1202 /*
1203 * ptr_invalid() can't return true for the keys that mark btree nodes as
1204 * freed, but since ptr_bad() returns true we'll never actually use them
1205 * for anything and thus we don't want to mark their pointers here
1206 */
1207 if (!bkey_cmp(k, &ZERO_KEY))
1208 return stale;
1209
1210 for (i = 0; i < KEY_PTRS(k); i++) {
1211 if (!ptr_available(c, k, i))
1212 continue;
1213
1214 g = PTR_BUCKET(c, k, i);
1215
1216 if (gen_after(g->last_gc, PTR_GEN(k, i)))
1217 g->last_gc = PTR_GEN(k, i);
1218
1219 if (ptr_stale(c, k, i)) {
1220 stale = max(stale, ptr_stale(c, k, i));
1221 continue;
1222 }
1223
1224 cache_bug_on(GC_MARK(g) &&
1225 (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1226 c, "inconsistent ptrs: mark = %llu, level = %i",
1227 GC_MARK(g), level);
1228
1229 if (level)
1230 SET_GC_MARK(g, GC_MARK_METADATA);
1231 else if (KEY_DIRTY(k))
1232 SET_GC_MARK(g, GC_MARK_DIRTY);
1233 else if (!GC_MARK(g))
1234 SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
1235
1236 /* guard against overflow */
1237 SET_GC_SECTORS_USED(g, min_t(unsigned int,
1238 GC_SECTORS_USED(g) + KEY_SIZE(k),
1239 MAX_GC_SECTORS_USED));
1240
1241 BUG_ON(!GC_SECTORS_USED(g));
1242 }
1243
1244 return stale;
1245 }
1246
1247 #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
1248
1249 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1250 {
1251 unsigned int i;
1252
1253 for (i = 0; i < KEY_PTRS(k); i++)
1254 if (ptr_available(c, k, i) &&
1255 !ptr_stale(c, k, i)) {
1256 struct bucket *b = PTR_BUCKET(c, k, i);
1257
1258 b->gen = PTR_GEN(k, i);
1259
1260 if (level && bkey_cmp(k, &ZERO_KEY))
1261 b->prio = BTREE_PRIO;
1262 else if (!level && b->prio == BTREE_PRIO)
1263 b->prio = INITIAL_PRIO;
1264 }
1265
1266 __bch_btree_mark_key(c, level, k);
1267 }
1268
1269 void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
1270 {
1271 stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
1272 }
1273
1274 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1275 {
1276 uint8_t stale = 0;
1277 unsigned int keys = 0, good_keys = 0;
1278 struct bkey *k;
1279 struct btree_iter iter;
1280 struct bset_tree *t;
1281
1282 gc->nodes++;
1283
1284 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1285 stale = max(stale, btree_mark_key(b, k));
1286 keys++;
1287
1288 if (bch_ptr_bad(&b->keys, k))
1289 continue;
1290
1291 gc->key_bytes += bkey_u64s(k);
1292 gc->nkeys++;
1293 good_keys++;
1294
1295 gc->data += KEY_SIZE(k);
1296 }
1297
1298 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1299 btree_bug_on(t->size &&
1300 bset_written(&b->keys, t) &&
1301 bkey_cmp(&b->key, &t->end) < 0,
1302 b, "found short btree key in gc");
1303
1304 if (b->c->gc_always_rewrite)
1305 return true;
1306
1307 if (stale > 10)
1308 return true;
1309
1310 if ((keys - good_keys) * 2 > keys)
1311 return true;
1312
1313 return false;
1314 }
1315
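/*
 * While walking a level during gc we remember the last GC_MERGE_NODES
 * child nodes (and their key counts) in a sliding window, so
 * btree_gc_coalesce() can merge runs of sparsely populated siblings.
 */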
1316 #define GC_MERGE_NODES 4U
1317
1318 struct gc_merge_info {
1319 struct btree *b;
1320 unsigned int keys;
1321 };
1322
1323 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
1324 struct keylist *insert_keys,
1325 atomic_t *journal_ref,
1326 struct bkey *replace_key);
1327
1328 static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1329 struct gc_stat *gc, struct gc_merge_info *r)
1330 {
1331 unsigned int i, nodes = 0, keys = 0, blocks;
1332 struct btree *new_nodes[GC_MERGE_NODES];
1333 struct keylist keylist;
1334 struct closure cl;
1335 struct bkey *k;
1336
1337 bch_keylist_init(&keylist);
1338
1339 if (btree_check_reserve(b, NULL))
1340 return 0;
1341
1342 memset(new_nodes, 0, sizeof(new_nodes));
1343 closure_init_stack(&cl);
1344
1345 while (nodes < GC_MERGE_NODES && !IS_ERR(r[nodes].b))
1346 keys += r[nodes++].keys;
1347
1348 blocks = btree_default_blocks(b->c) * 2 / 3;
1349
1350 if (nodes < 2 ||
1351 __set_blocks(b->keys.set[0].data, keys,
1352 block_bytes(b->c->cache)) > blocks * (nodes - 1))
1353 return 0;
1354
1355 for (i = 0; i < nodes; i++) {
1356 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1357 if (IS_ERR(new_nodes[i]))
1358 goto out_nocoalesce;
1359 }
1360
1361 /*
1362 * We have to check the reserve here, after we've allocated our new
1363 * nodes, to make sure the insert below will succeed - we also check
1364 * before as an optimization to potentially avoid a bunch of expensive
1365 * allocs/sorts
1366 */
1367 if (btree_check_reserve(b, NULL))
1368 goto out_nocoalesce;
1369
1370 for (i = 0; i < nodes; i++)
1371 mutex_lock(&new_nodes[i]->write_lock);
1372
1373 for (i = nodes - 1; i > 0; --i) {
1374 struct bset *n1 = btree_bset_first(new_nodes[i]);
1375 struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1376 struct bkey *k, *last = NULL;
1377
1378 keys = 0;
1379
1380 if (i > 1) {
1381 for (k = n2->start;
1382 k < bset_bkey_last(n2);
1383 k = bkey_next(k)) {
1384 if (__set_blocks(n1, n1->keys + keys +
1385 bkey_u64s(k),
1386 block_bytes(b->c->cache)) > blocks)
1387 break;
1388
1389 last = k;
1390 keys += bkey_u64s(k);
1391 }
1392 } else {
1393 /*
1394 * Last node we're not getting rid of - we're getting
1395 * rid of the node at r[0]. Have to try and fit all of
1396 * the remaining keys into this node; we can't ensure
1397 * they will always fit due to rounding and variable
1398 * length keys (shouldn't be possible in practice,
1399 * though)
1400 */
1401 if (__set_blocks(n1, n1->keys + n2->keys,
1402 block_bytes(b->c->cache)) >
1403 btree_blocks(new_nodes[i]))
1404 goto out_unlock_nocoalesce;
1405
1406 keys = n2->keys;
1407 /* Take the key of the node we're getting rid of */
1408 last = &r->b->key;
1409 }
1410
1411 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) >
1412 btree_blocks(new_nodes[i]));
1413
1414 if (last)
1415 bkey_copy_key(&new_nodes[i]->key, last);
1416
1417 memcpy(bset_bkey_last(n1),
1418 n2->start,
1419 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1420
1421 n1->keys += keys;
1422 r[i].keys = n1->keys;
1423
1424 memmove(n2->start,
1425 bset_bkey_idx(n2, keys),
1426 (void *) bset_bkey_last(n2) -
1427 (void *) bset_bkey_idx(n2, keys));
1428
1429 n2->keys -= keys;
1430
1431 if (__bch_keylist_realloc(&keylist,
1432 bkey_u64s(&new_nodes[i]->key)))
1433 goto out_unlock_nocoalesce;
1434
1435 bch_btree_node_write(new_nodes[i], &cl);
1436 bch_keylist_add(&keylist, &new_nodes[i]->key);
1437 }
1438
1439 for (i = 0; i < nodes; i++)
1440 mutex_unlock(&new_nodes[i]->write_lock);
1441
1442 closure_sync(&cl);
1443
1444 /* We emptied out this node */
1445 BUG_ON(btree_bset_first(new_nodes[0])->keys);
1446 btree_node_free(new_nodes[0]);
1447 rw_unlock(true, new_nodes[0]);
1448 new_nodes[0] = NULL;
1449
1450 for (i = 0; i < nodes; i++) {
1451 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1452 goto out_nocoalesce;
1453
1454 make_btree_freeing_key(r[i].b, keylist.top);
1455 bch_keylist_push(&keylist);
1456 }
1457
1458 bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1459 BUG_ON(!bch_keylist_empty(&keylist));
1460
1461 for (i = 0; i < nodes; i++) {
1462 btree_node_free(r[i].b);
1463 rw_unlock(true, r[i].b);
1464
1465 r[i].b = new_nodes[i];
1466 }
1467
1468 memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1469 r[nodes - 1].b = ERR_PTR(-EINTR);
1470
1471 trace_bcache_btree_gc_coalesce(nodes);
1472 gc->nodes--;
1473
1474 bch_keylist_free(&keylist);
1475
1476 /* Invalidated our iterator */
1477 return -EINTR;
1478
1479 out_unlock_nocoalesce:
1480 for (i = 0; i < nodes; i++)
1481 mutex_unlock(&new_nodes[i]->write_lock);
1482
1483 out_nocoalesce:
1484 closure_sync(&cl);
1485
1486 while ((k = bch_keylist_pop(&keylist)))
1487 if (!bkey_cmp(k, &ZERO_KEY))
1488 atomic_dec(&b->c->prio_blocked);
1489 bch_keylist_free(&keylist);
1490
1491 for (i = 0; i < nodes; i++)
1492 if (!IS_ERR(new_nodes[i])) {
1493 btree_node_free(new_nodes[i]);
1494 rw_unlock(true, new_nodes[i]);
1495 }
1496 return 0;
1497 }
1498
1499 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1500 struct btree *replace)
1501 {
1502 struct keylist keys;
1503 struct btree *n;
1504
1505 if (btree_check_reserve(b, NULL))
1506 return 0;
1507
1508 n = btree_node_alloc_replacement(replace, NULL);
1509
1510 /* recheck reserve after allocating replacement node */
1511 if (btree_check_reserve(b, NULL)) {
1512 btree_node_free(n);
1513 rw_unlock(true, n);
1514 return 0;
1515 }
1516
1517 bch_btree_node_write_sync(n);
1518
1519 bch_keylist_init(&keys);
1520 bch_keylist_add(&keys, &n->key);
1521
1522 make_btree_freeing_key(replace, keys.top);
1523 bch_keylist_push(&keys);
1524
1525 bch_btree_insert_node(b, op, &keys, NULL, NULL);
1526 BUG_ON(!bch_keylist_empty(&keys));
1527
1528 btree_node_free(replace);
1529 rw_unlock(true, n);
1530
1531 /* Invalidated our iterator */
1532 return -EINTR;
1533 }
1534
1535 static unsigned int btree_gc_count_keys(struct btree *b)
1536 {
1537 struct bkey *k;
1538 struct btree_iter iter;
1539 unsigned int ret = 0;
1540
1541 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1542 ret += bkey_u64s(k);
1543
1544 return ret;
1545 }
1546
1547 static size_t btree_gc_min_nodes(struct cache_set *c)
1548 {
1549 size_t min_nodes;
1550
1551 /*
1552 * Incremental GC pauses for 100ms whenever front-side I/O
1553 * arrives. If GC only processed a constant number (100) of
1554 * nodes per pass, then with many btree nodes GC would take a
1555 * very long time, and the front-side I/Os would run out of
1556 * buckets (no new bucket can be allocated during GC) and be
1557 * blocked again. So instead of a constant batch size, GC
1558 * processes a number of nodes proportional to the total btree
1559 * node count, by dividing the whole GC into a constant (100)
1560 * number of passes: with many btree nodes GC handles more
1561 * nodes per pass, otherwise fewer (but never less than
1562 * MIN_GC_NODES).
1564 */
1565 min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
1566 if (min_nodes < MIN_GC_NODES)
1567 min_nodes = MIN_GC_NODES;
1568
1569 return min_nodes;
1570 }
1571
1572
1573 static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1574 struct closure *writes, struct gc_stat *gc)
1575 {
1576 int ret = 0;
1577 bool should_rewrite;
1578 struct bkey *k;
1579 struct btree_iter iter;
1580 struct gc_merge_info r[GC_MERGE_NODES];
1581 struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1582
1583 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1584
1585 for (i = r; i < r + ARRAY_SIZE(r); i++)
1586 i->b = ERR_PTR(-EINTR);
1587
1588 while (1) {
1589 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1590 if (k) {
1591 r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
1592 true, b);
1593 if (IS_ERR(r->b)) {
1594 ret = PTR_ERR(r->b);
1595 break;
1596 }
1597
1598 r->keys = btree_gc_count_keys(r->b);
1599
1600 ret = btree_gc_coalesce(b, op, gc, r);
1601 if (ret)
1602 break;
1603 }
1604
1605 if (!last->b)
1606 break;
1607
1608 if (!IS_ERR(last->b)) {
1609 should_rewrite = btree_gc_mark_node(last->b, gc);
1610 if (should_rewrite) {
1611 ret = btree_gc_rewrite_node(b, op, last->b);
1612 if (ret)
1613 break;
1614 }
1615
1616 if (last->b->level) {
1617 ret = btree_gc_recurse(last->b, op, writes, gc);
1618 if (ret)
1619 break;
1620 }
1621
1622 bkey_copy_key(&b->c->gc_done, &last->b->key);
1623
1624 /*
1625 * Must flush leaf nodes before gc ends, since replace
1626 * operations aren't journalled
1627 */
1628 mutex_lock(&last->b->write_lock);
1629 if (btree_node_dirty(last->b))
1630 bch_btree_node_write(last->b, writes);
1631 mutex_unlock(&last->b->write_lock);
1632 rw_unlock(true, last->b);
1633 }
1634
1635 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1636 r->b = NULL;
1637
1638 if (atomic_read(&b->c->search_inflight) &&
1639 gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
1640 gc->nodes_pre = gc->nodes;
1641 ret = -EAGAIN;
1642 break;
1643 }
1644
1645 if (need_resched()) {
1646 ret = -EAGAIN;
1647 break;
1648 }
1649 }
1650
1651 for (i = r; i < r + ARRAY_SIZE(r); i++)
1652 if (!IS_ERR_OR_NULL(i->b)) {
1653 mutex_lock(&i->b->write_lock);
1654 if (btree_node_dirty(i->b))
1655 bch_btree_node_write(i->b, writes);
1656 mutex_unlock(&i->b->write_lock);
1657 rw_unlock(true, i->b);
1658 }
1659
1660 return ret;
1661 }
1662
1663 static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1664 struct closure *writes, struct gc_stat *gc)
1665 {
1666 struct btree *n = NULL;
1667 int ret = 0;
1668 bool should_rewrite;
1669
1670 should_rewrite = btree_gc_mark_node(b, gc);
1671 if (should_rewrite) {
1672 n = btree_node_alloc_replacement(b, NULL);
1673
1674 if (!IS_ERR(n)) {
1675 bch_btree_node_write_sync(n);
1676
1677 bch_btree_set_root(n);
1678 btree_node_free(b);
1679 rw_unlock(true, n);
1680
1681 return -EINTR;
1682 }
1683 }
1684
1685 __bch_btree_mark_key(b->c, b->level + 1, &b->key);
1686
1687 if (b->level) {
1688 ret = btree_gc_recurse(b, op, writes, gc);
1689 if (ret)
1690 return ret;
1691 }
1692
1693 bkey_copy_key(&b->c->gc_done, &b->key);
1694
1695 return ret;
1696 }
1697
1698 static void btree_gc_start(struct cache_set *c)
1699 {
1700 struct cache *ca;
1701 struct bucket *b;
1702
1703 if (!c->gc_mark_valid)
1704 return;
1705
1706 mutex_lock(&c->bucket_lock);
1707
1708 c->gc_mark_valid = 0;
1709 c->gc_done = ZERO_KEY;
1710
1711 ca = c->cache;
1712 for_each_bucket(b, ca) {
1713 b->last_gc = b->gen;
1714 if (!atomic_read(&b->pin)) {
1715 SET_GC_MARK(b, 0);
1716 SET_GC_SECTORS_USED(b, 0);
1717 }
1718 }
1719
1720 mutex_unlock(&c->bucket_lock);
1721 }
1722
1723 static void bch_btree_gc_finish(struct cache_set *c)
1724 {
1725 struct bucket *b;
1726 struct cache *ca;
1727 unsigned int i, j;
1728 uint64_t *k;
1729
1730 mutex_lock(&c->bucket_lock);
1731
1732 set_gc_sectors(c);
1733 c->gc_mark_valid = 1;
1734 c->need_gc = 0;
1735
1736 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1737 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1738 GC_MARK_METADATA);
1739
1740 /* don't reclaim buckets to which writeback keys point */
1741 rcu_read_lock();
1742 for (i = 0; i < c->devices_max_used; i++) {
1743 struct bcache_device *d = c->devices[i];
1744 struct cached_dev *dc;
1745 struct keybuf_key *w, *n;
1746
1747 if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1748 continue;
1749 dc = container_of(d, struct cached_dev, disk);
1750
1751 spin_lock(&dc->writeback_keys.lock);
1752 rbtree_postorder_for_each_entry_safe(w, n,
1753 &dc->writeback_keys.keys, node)
1754 for (j = 0; j < KEY_PTRS(&w->key); j++)
1755 SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1756 GC_MARK_DIRTY);
1757 spin_unlock(&dc->writeback_keys.lock);
1758 }
1759 rcu_read_unlock();
1760
1761 c->avail_nbuckets = 0;
1762
1763 ca = c->cache;
1764 ca->invalidate_needs_gc = 0;
1765
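/* buckets listed in the superblock (journal) and prio bucket arrays are metadata */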
1766 for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
1767 SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
1768
1769 for (k = ca->prio_buckets;
1770 k < ca->prio_buckets + prio_buckets(ca) * 2; k++)
1771 SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
1772
1773 for_each_bucket(b, ca) {
1774 c->need_gc = max(c->need_gc, bucket_gc_gen(b));
1775
1776 if (atomic_read(&b->pin))
1777 continue;
1778
1779 BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1780
1781 if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1782 c->avail_nbuckets++;
1783 }
1784
1785 mutex_unlock(&c->bucket_lock);
1786 }
1787
1788 static void bch_btree_gc(struct cache_set *c)
1789 {
1790 int ret;
1791 struct gc_stat stats;
1792 struct closure writes;
1793 struct btree_op op;
1794 uint64_t start_time = local_clock();
1795
1796 trace_bcache_gc_start(c);
1797
1798 memset(&stats, 0, sizeof(struct gc_stat));
1799 closure_init_stack(&writes);
1800 bch_btree_op_init(&op, SHRT_MAX);
1801
1802 btree_gc_start(c);
1803
1804 /* if CACHE_SET_IO_DISABLE set, gc thread should stop too */
1805 do {
1806 ret = bcache_btree_root(gc_root, c, &op, &writes, &stats);
1807 closure_sync(&writes);
1808 cond_resched();
1809
1810 if (ret == -EAGAIN)
1811 schedule_timeout_interruptible(msecs_to_jiffies
1812 (GC_SLEEP_MS));
1813 else if (ret)
1814 pr_warn("gc failed!\n");
1815 } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
1816
1817 bch_btree_gc_finish(c);
1818 wake_up_allocators(c);
1819
1820 bch_time_stats_update(&c->btree_gc_time, start_time);
1821
1822 stats.key_bytes *= sizeof(uint64_t);
1823 stats.data <<= 9;
1824 bch_update_bucket_in_use(c, &stats);
1825 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1826
1827 trace_bcache_gc_end(c);
1828
1829 bch_moving_gc(c);
1830 }
1831
1832 static bool gc_should_run(struct cache_set *c)
1833 {
1834 struct cache *ca = c->cache;
1835
1836 if (ca->invalidate_needs_gc)
1837 return true;
1838
1839 if (atomic_read(&c->sectors_to_gc) < 0)
1840 return true;
1841
1842 return false;
1843 }
1844
1845 static int bch_gc_thread(void *arg)
1846 {
1847 struct cache_set *c = arg;
1848
1849 while (1) {
1850 wait_event_interruptible(c->gc_wait,
1851 kthread_should_stop() ||
1852 test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
1853 gc_should_run(c));
1854
1855 if (kthread_should_stop() ||
1856 test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1857 break;
1858
1859 set_gc_sectors(c);
1860 bch_btree_gc(c);
1861 }
1862
1863 wait_for_kthread_stop();
1864 return 0;
1865 }
1866
1867 int bch_gc_thread_start(struct cache_set *c)
1868 {
1869 c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
1870 return PTR_ERR_OR_ZERO(c->gc_thread);
1871 }
1872
1873 /* Initial partial gc */
1874
1875 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1876 {
1877 int ret = 0;
1878 struct bkey *k, *p = NULL;
1879 struct btree_iter iter;
1880
1881 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1882 bch_initial_mark_key(b->c, b->level, k);
1883
1884 bch_initial_mark_key(b->c, b->level + 1, &b->key);
1885
1886 if (b->level) {
1887 bch_btree_iter_init(&b->keys, &iter, NULL);
1888
1889 do {
1890 k = bch_btree_iter_next_filter(&iter, &b->keys,
1891 bch_ptr_bad);
1892 if (k) {
1893 btree_node_prefetch(b, k);
1894 /*
1895 * initialize c->gc_stats.nodes
1896 * for incremental GC
1897 */
1898 b->c->gc_stats.nodes++;
1899 }
1900
1901 if (p)
1902 ret = bcache_btree(check_recurse, p, b, op);
1903
1904 p = k;
1905 } while (p && !ret);
1906 }
1907
1908 return ret;
1909 }
1910
1911
1912 static int bch_btree_check_thread(void *arg)
1913 {
1914 int ret;
1915 struct btree_check_info *info = arg;
1916 struct btree_check_state *check_state = info->state;
1917 struct cache_set *c = check_state->c;
1918 struct btree_iter iter;
1919 struct bkey *k, *p;
1920 int cur_idx, prev_idx, skip_nr;
1921
1922 k = p = NULL;
1923 cur_idx = prev_idx = 0;
1924 ret = 0;
1925
1926 /* root node keys are checked before the check threads are created */
1927 bch_btree_iter_init(&c->root->keys, &iter, NULL);
1928 k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
1929 BUG_ON(!k);
1930
1931 p = k;
1932 while (k) {
1933 /*
1934 * Fetch a root node key index, skip the keys which
1935 * should be fetched by other threads, then check the
1936 * sub-tree indexed by the fetched key.
1937 */
1938 spin_lock(&check_state->idx_lock);
1939 cur_idx = check_state->key_idx;
1940 check_state->key_idx++;
1941 spin_unlock(&check_state->idx_lock);
1942
1943 skip_nr = cur_idx - prev_idx;
1944
1945 while (skip_nr) {
1946 k = bch_btree_iter_next_filter(&iter,
1947 &c->root->keys,
1948 bch_ptr_bad);
1949 if (k)
1950 p = k;
1951 else {
1952 /*
1953 * No more keys to check in root node,
1954 * current checking threads are enough,
1955 * stop creating more.
1956 */
1957 atomic_set(&check_state->enough, 1);
1958 /* Update check_state->enough earlier */
1959 smp_mb__after_atomic();
1960 goto out;
1961 }
1962 skip_nr--;
1963 cond_resched();
1964 }
1965
1966 if (p) {
1967 struct btree_op op;
1968
1969 btree_node_prefetch(c->root, p);
1970 c->gc_stats.nodes++;
1971 bch_btree_op_init(&op, 0);
1972 ret = bcache_btree(check_recurse, p, c->root, &op);
1973 /*
1974 * The op may have been added to the cache_set's btree_cache_wait
1975 * in mca_cannibalize(); we must ensure it is removed from that
1976 * list and that btree_cache_alloc_lock is released before the
1977 * op's memory is freed.
1978 * Otherwise, the btree_cache_wait list would be corrupted.
1979 */
1980 bch_cannibalize_unlock(c);
1981 finish_wait(&c->btree_cache_wait, &(&op)->wait);
1982 if (ret)
1983 goto out;
1984 }
1985 p = NULL;
1986 prev_idx = cur_idx;
1987 cond_resched();
1988 }
1989
1990 out:
1991 info->result = ret;
1992 /* update check_state->started among all CPUs */
1993 smp_mb__before_atomic();
1994 if (atomic_dec_and_test(&check_state->started))
1995 wake_up(&check_state->wait);
1996
1997 return ret;
1998 }
1999
2000
2001
2002 static int bch_btree_chkthread_nr(void)
2003 {
2004 int n = num_online_cpus()/2;
2005
2006 if (n == 0)
2007 n = 1;
2008 else if (n > BCH_BTR_CHKTHREAD_MAX)
2009 n = BCH_BTR_CHKTHREAD_MAX;
2010
2011 return n;
2012 }
2013
2014 int bch_btree_check(struct cache_set *c)
2015 {
2016 int ret = 0;
2017 int i;
2018 struct bkey *k = NULL;
2019 struct btree_iter iter;
2020 struct btree_check_state check_state;
2021
2022 /* check and mark root node keys */
2023 for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
2024 bch_initial_mark_key(c, c->root->level, k);
2025
2026 bch_initial_mark_key(c, c->root->level + 1, &c->root->key);
2027
2028 if (c->root->level == 0)
2029 return 0;
2030
2031 memset(&check_state, 0, sizeof(struct btree_check_state));
2032 check_state.c = c;
2033 check_state.total_threads = bch_btree_chkthread_nr();
2034 check_state.key_idx = 0;
2035 spin_lock_init(&check_state.idx_lock);
2036 atomic_set(&check_state.started, 0);
2037 atomic_set(&check_state.enough, 0);
2038 init_waitqueue_head(&check_state.wait);
2039
2040 rw_lock(0, c->root, c->root->level);
2041 /*
2042 * Run multiple threads to check btree nodes in parallel.
2043 * If check_state.enough is non-zero, the check threads
2044 * already running are enough and it is unnecessary to
2045 * create more.
2046 */
2047 for (i = 0; i < check_state.total_threads; i++) {
2048 /* fetch latest check_state.enough earlier */
2049 smp_mb__before_atomic();
2050 if (atomic_read(&check_state.enough))
2051 break;
2052
2053 check_state.infos[i].result = 0;
2054 check_state.infos[i].state = &check_state;
2055
2056 check_state.infos[i].thread =
2057 kthread_run(bch_btree_check_thread,
2058 &check_state.infos[i],
2059 "bch_btrchk[%d]", i);
2060 if (IS_ERR(check_state.infos[i].thread)) {
2061 pr_err("fails to run thread bch_btrchk[%d]\n", i);
2062 for (--i; i >= 0; i--)
2063 kthread_stop(check_state.infos[i].thread);
2064 ret = -ENOMEM;
2065 goto out;
2066 }
2067 atomic_inc(&check_state.started);
2068 }
2069
2070 /*
2071 * Must wait for all threads to stop.
2072 */
2073 wait_event(check_state.wait, atomic_read(&check_state.started) == 0);
2074
2075 for (i = 0; i < check_state.total_threads; i++) {
2076 if (check_state.infos[i].result) {
2077 ret = check_state.infos[i].result;
2078 goto out;
2079 }
2080 }
2081
2082 out:
2083 rw_unlock(0, c->root);
2084 return ret;
2085 }
2086
2087 void bch_initial_gc_finish(struct cache_set *c)
2088 {
2089 struct cache *ca = c->cache;
2090 struct bucket *b;
2091
2092 bch_btree_gc_finish(c);
2093
2094 mutex_lock(&c->bucket_lock);
2095
2096 /*
2097 * We need to put some unused buckets directly on the prio freelist in
2098 * order to get the allocator thread started - it needs freed buckets in
2099 * order to rewrite the prios and gens, and it needs to rewrite prios
2100 * and gens in order to free buckets.
2101 *
2102 * This is only safe for buckets that have no live data in them, which
2103 * there should always be some of.
2104 */
2105 for_each_bucket(b, ca) {
2106 if (fifo_full(&ca->free[RESERVE_PRIO]) &&
2107 fifo_full(&ca->free[RESERVE_BTREE]))
2108 break;
2109
2110 if (bch_can_invalidate_bucket(ca, b) &&
2111 !GC_MARK(b)) {
2112 __bch_invalidate_one_bucket(ca, b);
2113 if (!fifo_push(&ca->free[RESERVE_PRIO],
2114 b - ca->buckets))
2115 fifo_push(&ca->free[RESERVE_BTREE],
2116 b - ca->buckets);
2117 }
2118 }
2119
2120 mutex_unlock(&c->bucket_lock);
2121 }
2122
2123 /* Btree insertion */
2124
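/*
 * Insert a single key into node b via the bset layer. Returns true if the
 * key was actually inserted, false if the insert turned out to be a no-op
 * (e.g. an unsuccessful replace).
 */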
2125 static bool btree_insert_key(struct btree *b, struct bkey *k,
2126 struct bkey *replace_key)
2127 {
2128 unsigned int status;
2129
2130 BUG_ON(bkey_cmp(k, &b->key) > 0);
2131
2132 status = bch_btree_insert_key(&b->keys, k, replace_key);
2133 if (status != BTREE_INSERT_STATUS_NO_INSERT) {
2134 bch_check_keys(&b->keys, "%u for %s", status,
2135 replace_key ? "replace" : "insert");
2136
2137 trace_bcache_btree_insert_key(b, k, replace_key != NULL,
2138 status);
2139 return true;
2140 } else
2141 return false;
2142 }
2143
2144 static size_t insert_u64s_remaining(struct btree *b)
2145 {
2146 long ret = bch_btree_keys_u64s_remaining(&b->keys);
2147
2148 /*
2149 * Might land in the middle of an existing extent and have to split it
2150 */
2151 if (b->keys.ops->is_extents)
2152 ret -= KEY_MAX_U64S;
2153
2154 return max(ret, 0L);
2155 }
2156
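/*
 * Insert as many keys from insert_keys as fit into b. Keys that end within
 * b's range are inserted whole; a key that straddles b->key is cut at the
 * boundary, only the front portion is inserted here and the remainder is
 * left on the keylist for the next node. If nothing at all was inserted,
 * op->insert_collision is set.
 */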
2157 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
2158 struct keylist *insert_keys,
2159 struct bkey *replace_key)
2160 {
2161 bool ret = false;
2162 int oldsize = bch_count_data(&b->keys);
2163
2164 while (!bch_keylist_empty(insert_keys)) {
2165 struct bkey *k = insert_keys->keys;
2166
2167 if (bkey_u64s(k) > insert_u64s_remaining(b))
2168 break;
2169
2170 if (bkey_cmp(k, &b->key) <= 0) {
2171 if (!b->level)
2172 bkey_put(b->c, k);
2173
2174 ret |= btree_insert_key(b, k, replace_key);
2175 bch_keylist_pop_front(insert_keys);
2176 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
2177 BKEY_PADDED(key) temp;
2178 bkey_copy(&temp.key, insert_keys->keys);
2179
2180 bch_cut_back(&b->key, &temp.key);
2181 bch_cut_front(&b->key, insert_keys->keys);
2182
2183 ret |= btree_insert_key(b, &temp.key, replace_key);
2184 break;
2185 } else {
2186 break;
2187 }
2188 }
2189
2190 if (!ret)
2191 op->insert_collision = true;
2192
2193 BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
2194
2195 BUG_ON(bch_count_data(&b->keys) < oldsize);
2196 return ret;
2197 }
2198
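/*
 * Rewrite node b, inserting insert_keys: allocate a replacement node n1;
 * if b's existing keys already fill more than ~4/5 of a node, split the
 * result roughly 3/5 / 2/5 between n1 and a new sibling n2, allocating a
 * new root n3 one level up if b had no parent. The new nodes are written
 * out and their keys inserted into the parent (or the new root installed),
 * then the old node is freed. Returns -EAGAIN or -ENOMEM if node
 * allocation fails.
 */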
2199 static int btree_split(struct btree *b, struct btree_op *op,
2200 struct keylist *insert_keys,
2201 struct bkey *replace_key)
2202 {
2203 bool split;
2204 struct btree *n1, *n2 = NULL, *n3 = NULL;
2205 uint64_t start_time = local_clock();
2206 struct closure cl;
2207 struct keylist parent_keys;
2208
2209 closure_init_stack(&cl);
2210 bch_keylist_init(&parent_keys);
2211
2212 if (btree_check_reserve(b, op)) {
2213 if (!b->level)
2214 return -EINTR;
2215 else
2216 WARN(1, "insufficient reserve for split\n");
2217 }
2218
2219 n1 = btree_node_alloc_replacement(b, op);
2220 if (IS_ERR(n1))
2221 goto err;
2222
2223 split = set_blocks(btree_bset_first(n1),
2224 block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5;
2225
2226 if (split) {
2227 unsigned int keys = 0;
2228
2229 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
2230
2231 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
2232 if (IS_ERR(n2))
2233 goto err_free1;
2234
2235 if (!b->parent) {
2236 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
2237 if (IS_ERR(n3))
2238 goto err_free2;
2239 }
2240
2241 mutex_lock(&n1->write_lock);
2242 mutex_lock(&n2->write_lock);
2243
2244 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2245
2246 /*
2247 * Has to be a linear search because we don't have an auxiliary
2248 * search tree yet
2249 */
2250
2251 while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2252 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2253 keys));
2254
2255 bkey_copy_key(&n1->key,
2256 bset_bkey_idx(btree_bset_first(n1), keys));
2257 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2258
2259 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2260 btree_bset_first(n1)->keys = keys;
2261
2262 memcpy(btree_bset_first(n2)->start,
2263 bset_bkey_last(btree_bset_first(n1)),
2264 btree_bset_first(n2)->keys * sizeof(uint64_t));
2265
2266 bkey_copy_key(&n2->key, &b->key);
2267
2268 bch_keylist_add(&parent_keys, &n2->key);
2269 bch_btree_node_write(n2, &cl);
2270 mutex_unlock(&n2->write_lock);
2271 rw_unlock(true, n2);
2272 } else {
2273 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2274
2275 mutex_lock(&n1->write_lock);
2276 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2277 }
2278
2279 bch_keylist_add(&parent_keys, &n1->key);
2280 bch_btree_node_write(n1, &cl);
2281 mutex_unlock(&n1->write_lock);
2282
2283 if (n3) {
2284 /* Depth increases, make a new root */
2285 mutex_lock(&n3->write_lock);
2286 bkey_copy_key(&n3->key, &MAX_KEY);
2287 bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2288 bch_btree_node_write(n3, &cl);
2289 mutex_unlock(&n3->write_lock);
2290
2291 closure_sync(&cl);
2292 bch_btree_set_root(n3);
2293 rw_unlock(true, n3);
2294 } else if (!b->parent) {
2295 /* Root filled up but didn't need to be split */
2296 closure_sync(&cl);
2297 bch_btree_set_root(n1);
2298 } else {
2299 /* Split a non root node */
2300 closure_sync(&cl);
2301 make_btree_freeing_key(b, parent_keys.top);
2302 bch_keylist_push(&parent_keys);
2303
2304 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2305 BUG_ON(!bch_keylist_empty(&parent_keys));
2306 }
2307
2308 btree_node_free(b);
2309 rw_unlock(true, n1);
2310
2311 bch_time_stats_update(&b->c->btree_split_time, start_time);
2312
2313 return 0;
2314 err_free2:
2315 bkey_put(b->c, &n2->key);
2316 btree_node_free(n2);
2317 rw_unlock(true, n2);
2318 err_free1:
2319 bkey_put(b->c, &n1->key);
2320 btree_node_free(n1);
2321 rw_unlock(true, n1);
2322 err:
2323 WARN(1, "bcache: btree split failed (level %u)", b->level);
2324
2325 if (n3 == ERR_PTR(-EAGAIN) ||
2326 n2 == ERR_PTR(-EAGAIN) ||
2327 n1 == ERR_PTR(-EAGAIN))
2328 return -EAGAIN;
2329
2330 return -ENOMEM;
2331 }
2332
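/*
 * Insert keys into b, splitting the node if they don't fit. Returns 0 on
 * success; -EAGAIN if a split is needed but the caller is running under
 * current->bio_list and cannot do it here; -EINTR if the caller must retry
 * with a bigger lock (op->lock has been raised) or because a split
 * invalidated its iterators.
 */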
2333 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2334 struct keylist *insert_keys,
2335 atomic_t *journal_ref,
2336 struct bkey *replace_key)
2337 {
2338 struct closure cl;
2339
2340 BUG_ON(b->level && replace_key);
2341
2342 closure_init_stack(&cl);
2343
2344 mutex_lock(&b->write_lock);
2345
2346 if (write_block(b) != btree_bset_last(b) &&
2347 b->keys.last_set_unwritten)
2348 bch_btree_init_next(b); /* just wrote a set */
2349
2350 if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
2351 mutex_unlock(&b->write_lock);
2352 goto split;
2353 }
2354
2355 BUG_ON(write_block(b) != btree_bset_last(b));
2356
2357 if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2358 if (!b->level)
2359 bch_btree_leaf_dirty(b, journal_ref);
2360 else
2361 bch_btree_node_write(b, &cl);
2362 }
2363
2364 mutex_unlock(&b->write_lock);
2365
2366 /* wait for btree node write if necessary, after unlock */
2367 closure_sync(&cl);
2368
2369 return 0;
2370 split:
2371 if (current->bio_list) {
2372 op->lock = b->c->root->level + 1;
2373 return -EAGAIN;
2374 } else if (op->lock <= b->c->root->level) {
2375 op->lock = b->c->root->level + 1;
2376 return -EINTR;
2377 } else {
2378 /* Invalidated all iterators */
2379 int ret = btree_split(b, op, insert_keys, replace_key);
2380
2381 if (bch_keylist_empty(insert_keys))
2382 return 0;
2383 else if (!ret)
2384 return -EINTR;
2385 return ret;
2386 }
2387 }
2388
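/*
 * Insert a placeholder "check" key, carrying a single random pointer on
 * PTR_CHECK_DEV, used by the cache miss path to detect races. If the node
 * was only read-locked (op->lock == -1) the lock is upgraded to a write
 * lock first; -EINTR is returned if the node was modified or replaced
 * while the lock was dropped for the upgrade.
 */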
2389 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2390 struct bkey *check_key)
2391 {
2392 int ret = -EINTR;
2393 uint64_t btree_ptr = b->key.ptr[0];
2394 unsigned long seq = b->seq;
2395 struct keylist insert;
2396 bool upgrade = op->lock == -1;
2397
2398 bch_keylist_init(&insert);
2399
2400 if (upgrade) {
2401 rw_unlock(false, b);
2402 rw_lock(true, b, b->level);
2403
2404 if (b->key.ptr[0] != btree_ptr ||
2405 b->seq != seq + 1) {
2406 op->lock = b->level;
2407 goto out;
2408 }
2409 }
2410
2411 SET_KEY_PTRS(check_key, 1);
2412 get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2413
2414 SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2415
2416 bch_keylist_add(&insert, check_key);
2417
2418 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2419
2420 BUG_ON(!ret && !bch_keylist_empty(&insert));
2421 out:
2422 if (upgrade)
2423 downgrade_write(&b->lock);
2424 return ret;
2425 }
2426
2427 struct btree_insert_op {
2428 struct btree_op op;
2429 struct keylist *keys;
2430 atomic_t *journal_ref;
2431 struct bkey *replace_key;
2432 };
2433
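/*
 * Leaf-node callback for bch_btree_insert(): insert whatever fits into
 * this leaf and return MAP_DONE once the keylist is empty; if an error is
 * returned with keys still pending, propagate it to the caller.
 */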
2434 static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2435 {
2436 struct btree_insert_op *op = container_of(b_op,
2437 struct btree_insert_op, op);
2438
2439 int ret = bch_btree_insert_node(b, &op->op, op->keys,
2440 op->journal_ref, op->replace_key);
2441 if (ret && !bch_keylist_empty(op->keys))
2442 return ret;
2443 else
2444 return MAP_DONE;
2445 }
2446
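/*
 * Illustrative sketch, not part of the original file: a caller typically
 * builds a keylist and hands it to bch_btree_insert(), roughly
 *
 *	struct keylist keys;
 *
 *	bch_keylist_init(&keys);
 *	bch_keylist_add(&keys, &some_key);
 *	ret = bch_btree_insert(c, &keys, NULL, NULL);
 *
 * where "some_key" stands in for a fully formed bkey and the keylist is
 * assumed to already have room for it (real callers grow it with
 * bch_keylist_realloc()). On error the remaining keys are dropped and
 * their pointers released; -ESRCH means a replace collision rather than a
 * hard failure.
 */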
2447 int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2448 atomic_t *journal_ref, struct bkey *replace_key)
2449 {
2450 struct btree_insert_op op;
2451 int ret = 0;
2452
2453 BUG_ON(current->bio_list);
2454 BUG_ON(bch_keylist_empty(keys));
2455
2456 bch_btree_op_init(&op.op, 0);
2457 op.keys = keys;
2458 op.journal_ref = journal_ref;
2459 op.replace_key = replace_key;
2460
2461 while (!ret && !bch_keylist_empty(keys)) {
2462 op.op.lock = 0;
2463 ret = bch_btree_map_leaf_nodes(&op.op, c,
2464 &START_KEY(keys->keys),
2465 btree_insert_fn);
2466 }
2467
2468 if (ret) {
2469 struct bkey *k;
2470
2471 pr_err("error %i\n", ret);
2472
2473 while ((k = bch_keylist_pop(keys)))
2474 bkey_put(c, k);
2475 } else if (op.op.insert_collision)
2476 ret = -ESRCH;
2477
2478 return ret;
2479 }
2480
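/*
 * Point the cache set at a new root node. The node must already be written
 * and its buckets must have BTREE_PRIO priority; it is taken off the btree
 * cache's reclaim list, and a journal meta write makes the new root
 * persistent.
 */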
2481 void bch_btree_set_root(struct btree *b)
2482 {
2483 unsigned int i;
2484 struct closure cl;
2485
2486 closure_init_stack(&cl);
2487
2488 trace_bcache_btree_set_root(b);
2489
2490 BUG_ON(!b->written);
2491
2492 for (i = 0; i < KEY_PTRS(&b->key); i++)
2493 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2494
2495 mutex_lock(&b->c->bucket_lock);
2496 list_del_init(&b->list);
2497 mutex_unlock(&b->c->bucket_lock);
2498
2499 b->c->root = b;
2500
2501 bch_journal_meta(b->c, &cl);
2502 closure_sync(&cl);
2503 }
2504
2505 /* Map across nodes or keys */
2506
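/*
 * The map functions walk the btree from 'from' and call 'fn' on each node
 * or key; a return of MAP_CONTINUE keeps the walk going, anything else
 * (MAP_DONE or an error) stops it and is passed back to the caller.
 * MAP_ALL_NODES visits interior nodes as well as leaves; MAP_END_KEY also
 * passes fn a synthesized zero-size key at the end of each leaf.
 */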
2507 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2508 struct bkey *from,
2509 btree_map_nodes_fn *fn, int flags)
2510 {
2511 int ret = MAP_CONTINUE;
2512
2513 if (b->level) {
2514 struct bkey *k;
2515 struct btree_iter iter;
2516
2517 bch_btree_iter_init(&b->keys, &iter, from);
2518
2519 while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
2520 bch_ptr_bad))) {
2521 ret = bcache_btree(map_nodes_recurse, k, b,
2522 op, from, fn, flags);
2523 from = NULL;
2524
2525 if (ret != MAP_CONTINUE)
2526 return ret;
2527 }
2528 }
2529
2530 if (!b->level || flags == MAP_ALL_NODES)
2531 ret = fn(op, b);
2532
2533 return ret;
2534 }
2535
2536 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2537 struct bkey *from, btree_map_nodes_fn *fn, int flags)
2538 {
2539 return bcache_btree_root(map_nodes_recurse, c, op, from, fn, flags);
2540 }
2541
2542 int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2543 struct bkey *from, btree_map_keys_fn *fn,
2544 int flags)
2545 {
2546 int ret = MAP_CONTINUE;
2547 struct bkey *k;
2548 struct btree_iter iter;
2549
2550 bch_btree_iter_init(&b->keys, &iter, from);
2551
2552 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
2553 ret = !b->level
2554 ? fn(op, b, k)
2555 : bcache_btree(map_keys_recurse, k,
2556 b, op, from, fn, flags);
2557 from = NULL;
2558
2559 if (ret != MAP_CONTINUE)
2560 return ret;
2561 }
2562
2563 if (!b->level && (flags & MAP_END_KEY))
2564 ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2565 KEY_OFFSET(&b->key), 0));
2566
2567 return ret;
2568 }
2569
2570 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2571 struct bkey *from, btree_map_keys_fn *fn, int flags)
2572 {
2573 return bcache_btree_root(map_keys_recurse, c, op, from, fn, flags);
2574 }
2575
2576 /* Keybuf code */
2577
2578 static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2579 {
2580 /* Overlapping keys compare equal */
2581 if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2582 return -1;
2583 if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2584 return 1;
2585 return 0;
2586 }
2587
2588 static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2589 struct keybuf_key *r)
2590 {
2591 return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2592 }
2593
2594 struct refill {
2595 struct btree_op op;
2596 unsigned int nr_found;
2597 struct keybuf *buf;
2598 struct bkey *end;
2599 keybuf_pred_fn *pred;
2600 };
2601
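/*
 * Map callback for bch_refill_keybuf(): for each key up to refill->end
 * that passes refill->pred, stash a copy in the keybuf's RB tree. The scan
 * stops (MAP_DONE) once the end key is passed or the keybuf's freelist is
 * exhausted; buf->last_scanned tracks how far we got.
 */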
2602 static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2603 struct bkey *k)
2604 {
2605 struct refill *refill = container_of(op, struct refill, op);
2606 struct keybuf *buf = refill->buf;
2607 int ret = MAP_CONTINUE;
2608
2609 if (bkey_cmp(k, refill->end) > 0) {
2610 ret = MAP_DONE;
2611 goto out;
2612 }
2613
2614 if (!KEY_SIZE(k)) /* end key */
2615 goto out;
2616
2617 if (refill->pred(buf, k)) {
2618 struct keybuf_key *w;
2619
2620 spin_lock(&buf->lock);
2621
2622 w = array_alloc(&buf->freelist);
2623 if (!w) {
2624 spin_unlock(&buf->lock);
2625 return MAP_DONE;
2626 }
2627
2628 w->private = NULL;
2629 bkey_copy(&w->key, k);
2630
2631 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2632 array_free(&buf->freelist, w);
2633 else
2634 refill->nr_found++;
2635
2636 if (array_freelist_empty(&buf->freelist))
2637 ret = MAP_DONE;
2638
2639 spin_unlock(&buf->lock);
2640 }
2641 out:
2642 buf->last_scanned = *k;
2643 return ret;
2644 }
2645
2646 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
2647 struct bkey *end, keybuf_pred_fn *pred)
2648 {
2649 struct bkey start = buf->last_scanned;
2650 struct refill refill;
2651
2652 cond_resched();
2653
2654 bch_btree_op_init(&refill.op, -1);
2655 refill.nr_found = 0;
2656 refill.buf = buf;
2657 refill.end = end;
2658 refill.pred = pred;
2659
2660 bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2661 refill_keybuf_fn, MAP_END_KEY);
2662
2663 trace_bcache_keyscan(refill.nr_found,
2664 KEY_INODE(&start), KEY_OFFSET(&start),
2665 KEY_INODE(&buf->last_scanned),
2666 KEY_OFFSET(&buf->last_scanned));
2667
2668 spin_lock(&buf->lock);
2669
2670 if (!RB_EMPTY_ROOT(&buf->keys)) {
2671 struct keybuf_key *w;
2672
2673 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2674 buf->start = START_KEY(&w->key);
2675
2676 w = RB_LAST(&buf->keys, struct keybuf_key, node);
2677 buf->end = w->key;
2678 } else {
2679 buf->start = MAX_KEY;
2680 buf->end = MAX_KEY;
2681 }
2682
2683 spin_unlock(&buf->lock);
2684 }
2685
2686 static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2687 {
2688 rb_erase(&w->node, &buf->keys);
2689 array_free(&buf->freelist, w);
2690 }
2691
2692 void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2693 {
2694 spin_lock(&buf->lock);
2695 __bch_keybuf_del(buf, w);
2696 spin_unlock(&buf->lock);
2697 }
2698
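/*
 * Return true if any key in the keybuf overlapping [start, end) is still
 * in use (w->private set); unused overlapping keys are dropped from the
 * keybuf as a side effect.
 */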
2699 bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2700 struct bkey *end)
2701 {
2702 bool ret = false;
2703 struct keybuf_key *p, *w, s;
2704
2705 s.key = *start;
2706
2707 if (bkey_cmp(end, &buf->start) <= 0 ||
2708 bkey_cmp(start, &buf->end) >= 0)
2709 return false;
2710
2711 spin_lock(&buf->lock);
2712 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2713
2714 while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2715 p = w;
2716 w = RB_NEXT(w, node);
2717
2718 if (p->private)
2719 ret = true;
2720 else
2721 __bch_keybuf_del(buf, p);
2722 }
2723
2724 spin_unlock(&buf->lock);
2725 return ret;
2726 }
2727
2728 struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2729 {
2730 struct keybuf_key *w;
2731
2732 spin_lock(&buf->lock);
2733
2734 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2735
2736 while (w && w->private)
2737 w = RB_NEXT(w, node);
2738
2739 if (w)
2740 w->private = ERR_PTR(-EINTR);
2741
2742 spin_unlock(&buf->lock);
2743 return w;
2744 }
2745
2746 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2747 struct keybuf *buf,
2748 struct bkey *end,
2749 keybuf_pred_fn *pred)
2750 {
2751 struct keybuf_key *ret;
2752
2753 while (1) {
2754 ret = bch_keybuf_next(buf);
2755 if (ret)
2756 break;
2757
2758 if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2759 pr_debug("scan finished\n");
2760 break;
2761 }
2762
2763 bch_refill_keybuf(c, buf, end, pred);
2764 }
2765
2766 return ret;
2767 }
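
/*
 * Illustrative sketch, not from this file: a consumer such as writeback or
 * moving GC typically drains a keybuf with a loop along the lines of
 *
 *	while ((w = bch_keybuf_next_rescan(c, buf, &MAX_KEY, pred))) {
 *		do_something_with(w);
 *		bch_keybuf_del(buf, w);
 *	}
 *
 * where "pred" and "do_something_with" are placeholders for the caller's
 * predicate and per-key work; real users keep w->private pointing at their
 * in-flight state and delete the key only once that work completes.
 */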
2768
2769 void bch_keybuf_init(struct keybuf *buf)
2770 {
2771 buf->last_scanned = MAX_KEY;
2772 buf->keys = RB_ROOT;
2773
2774 spin_lock_init(&buf->lock);
2775 array_allocator_init(&buf->freelist);
2776 }
2777
2778 void bch_btree_exit(void)
2779 {
2780 if (btree_io_wq)
2781 destroy_workqueue(btree_io_wq);
2782 }
2783
2784 int __init bch_btree_init(void)
2785 {
2786 btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0);
2787 if (!btree_io_wq)
2788 return -ENOMEM;
2789
2790 return 0;
2791 }
2792