1 /*
2 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
3 *
4 * Uses a block device as cache for other block devices; optimized for SSDs.
5 * All allocation is done in buckets, which should match the erase block size
6 * of the device.
7 *
8 * Buckets containing cached data are kept on a heap sorted by priority;
9 * bucket priority is increased on cache hit, and periodically all the buckets
10 * on the heap have their priority scaled down. Currently this is used only as
11 * an LRU, but in the future it should allow for more intelligent heuristics.
12 *
13 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
14 * counter. Garbage collection is used to remove stale pointers.
15 *
16 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
17 * as keys are inserted we only sort the pages that have not yet been written.
18 * When garbage collection is run, we resort the entire node.
19 *
20 * All configuration is done via sysfs; see Documentation/bcache.txt.
21 */
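
/*
 * A sketch of the generation trick described above (illustrative recap,
 * using helpers defined in bcache.h): each pointer embeds the bucket
 * generation it was created with, so bumping a bucket's gen invalidates
 * every extant pointer into it without touching the pointers themselves:
 *
 *	ptr_stale(c, k, i) == gen_after(PTR_BUCKET(c, k, i)->gen, PTR_GEN(k, i))
 *
 * Stale pointers are then skipped by bch_ptr_bad() and reaped by garbage
 * collection.
 */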
22
23 #include "bcache.h"
24 #include "btree.h"
25 #include "debug.h"
26 #include "extents.h"
27
28 #include <linux/slab.h>
29 #include <linux/bitops.h>
30 #include <linux/freezer.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/prefetch.h>
34 #include <linux/random.h>
35 #include <linux/rcupdate.h>
36 #include <trace/events/bcache.h>
37
38 /*
39 * Todo:
40 * register_bcache: Return errors out to userspace correctly
41 *
42 * Writeback: don't undirty key until after a cache flush
43 *
44 * Create an iterator for key pointers
45 *
46 * On btree write error, mark bucket such that it won't be freed from the cache
47 *
48 * Journalling:
49 * Check for bad keys in replay
50 * Propagate barriers
51 * Refcount journal entries in journal_replay
52 *
53 * Garbage collection:
54 * Finish incremental gc
55 * Gc should free old UUIDs, data for invalid UUIDs
56 *
57 * Provide a way to list backing device UUIDs we have data cached for, and
58 * probably how long it's been since we've seen them, and a way to invalidate
59 * dirty data for devices that will never be attached again
60 *
61 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
62 * that based on that and how much dirty data we have we can keep writeback
63 * from being starved
64 *
65 * Add a tracepoint or somesuch to watch for writeback starvation
66 *
67 * When btree depth > 1 and splitting an interior node, we have to make sure
68 * alloc_bucket() cannot fail. This should be true but is not completely
69 * obvious.
70 *
71 * Plugging?
72 *
73 * If a data write is smaller than the hard sector size of the ssd, round up
74 * the offset in the open bucket to the next whole sector
75 *
76 * Superblock needs to be fleshed out for multiple cache devices
77 *
78 * Add a sysfs tunable for the number of writeback IOs in flight
79 *
80 * Add a sysfs tunable for the number of open data buckets
81 *
82 * IO tracking: Can we track when one process is doing io on behalf of another?
83 * IO tracking: Don't use just an average, weigh more recent stuff higher
84 *
85 * Test module load/unload
86 */
87
88 #define MAX_NEED_GC 64
89 #define MAX_SAVE_PRIO 72
90
91 #define PTR_DIRTY_BIT (((uint64_t) 1 << 36))
92
93 #define PTR_HASH(c, k) \
94 (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
95
96 #define insert_lock(s, b) ((b)->level <= (s)->lock)
97
98 /*
99 * These macros are for recursing down the btree - they handle the details of
100 * locking and looking up nodes in the cache for you. They're best treated as
101 * mere syntax when reading code that uses them.
102 *
103 * op->lock determines whether we take a read or a write lock at a given depth.
104 * If you've got a read lock and find that you need a write lock (i.e. you're
105 * going to have to split), set op->lock and return -EINTR; btree_root() will
106 * call you again and you'll have the correct lock.
107 */
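
/*
 * Illustrative sketch (not part of the build) of the retry convention,
 * assuming a hypothetical traversal callback bch_btree_frob():
 *
 *	static int bch_btree_frob(struct btree *b, struct btree_op *op)
 *	{
 *		if (gonna_split && !insert_lock(op, b)) {
 *			op->lock = b->c->root->level + 1;
 *			return -EINTR;
 *		}
 *		...
 *	}
 *
 * bch_btree_op_init(&op, -1) starts out taking read locks everywhere; on
 * -EINTR, btree_root(frob, c, &op) (which expands to bch_btree_frob())
 * retries from the top and, with op->lock now raised, takes write locks on
 * the way back down.
 */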
108
109 /**
110 * btree - recurse down the btree on a specified key
111 * @fn: function to call, which will be passed the child node
112 * @key: key to recurse on
113 * @b: parent btree node
114 * @op: pointer to struct btree_op
115 */
116 #define btree(fn, key, b, op, ...) \
117 ({ \
118 int _r, l = (b)->level - 1; \
119 bool _w = l <= (op)->lock; \
120 struct btree *_child = bch_btree_node_get((b)->c, op, key, l, \
121 _w, b); \
122 if (!IS_ERR(_child)) { \
123 _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__); \
124 rw_unlock(_w, _child); \
125 } else \
126 _r = PTR_ERR(_child); \
127 _r; \
128 })
129
130 /**
131 * btree_root - call a function on the root of the btree
132 * @fn: function to call, which will be passed the root node
133 * @c: cache set
134 * @op: pointer to struct btree_op
135 */
136 #define btree_root(fn, c, op, ...) \
137 ({ \
138 int _r = -EINTR; \
139 do { \
140 struct btree *_b = (c)->root; \
141 bool _w = insert_lock(op, _b); \
142 rw_lock(_w, _b, _b->level); \
143 if (_b == (c)->root && \
144 _w == insert_lock(op, _b)) { \
145 _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__); \
146 } \
147 rw_unlock(_w, _b); \
148 bch_cannibalize_unlock(c); \
149 if (_r == -EINTR) \
150 schedule(); \
151 } while (_r == -EINTR); \
152 \
153 finish_wait(&(c)->btree_cache_wait, &(op)->wait); \
154 _r; \
155 })
156
157 static inline struct bset *write_block(struct btree *b)
158 {
159 return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
160 }
161
162 static void bch_btree_init_next(struct btree *b)
163 {
164 /* If not a leaf node, always sort */
165 if (b->level && b->keys.nsets)
166 bch_btree_sort(&b->keys, &b->c->sort);
167 else
168 bch_btree_sort_lazy(&b->keys, &b->c->sort);
169
170 if (b->written < btree_blocks(b))
171 bch_bset_init_next(&b->keys, write_block(b),
172 bset_magic(&b->c->sb));
173
174 }
175
176 /* Btree key manipulation */
177
178 void bkey_put(struct cache_set *c, struct bkey *k)
179 {
180 unsigned i;
181
182 for (i = 0; i < KEY_PTRS(k); i++)
183 if (ptr_available(c, k, i))
184 atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
185 }
186
187 /* Btree IO */
188
189 static uint64_t btree_csum_set(struct btree *b, struct bset *i)
190 {
191 uint64_t crc = b->key.ptr[0];
192 void *data = (void *) i + 8, *end = bset_bkey_last(i);
193
194 crc = bch_crc64_update(crc, data, end - data);
195 return crc ^ 0xffffffffffffffffULL;
196 }
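
/*
 * Layout recap (not new behaviour): the on disk bset begins with the 64 bit
 * csum field itself, which is why the checksum above starts 8 bytes in;
 * seeding the crc with the node's first pointer ties the checksum to the
 * bucket the bset was written to.
 */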
197
198 void bch_btree_node_read_done(struct btree *b)
199 {
200 const char *err = "bad btree header";
201 struct bset *i = btree_bset_first(b);
202 struct btree_iter *iter;
203
204 iter = mempool_alloc(b->c->fill_iter, GFP_NOIO);
205 iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
206 iter->used = 0;
207
208 #ifdef CONFIG_BCACHE_DEBUG
209 iter->b = &b->keys;
210 #endif
211
212 if (!i->seq)
213 goto err;
214
215 for (;
216 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
217 i = write_block(b)) {
218 err = "unsupported bset version";
219 if (i->version > BCACHE_BSET_VERSION)
220 goto err;
221
222 err = "bad btree header";
223 if (b->written + set_blocks(i, block_bytes(b->c)) >
224 btree_blocks(b))
225 goto err;
226
227 err = "bad magic";
228 if (i->magic != bset_magic(&b->c->sb))
229 goto err;
230
231 err = "bad checksum";
232 switch (i->version) {
233 case 0:
234 if (i->csum != csum_set(i))
235 goto err;
236 break;
237 case BCACHE_BSET_VERSION:
238 if (i->csum != btree_csum_set(b, i))
239 goto err;
240 break;
241 }
242
243 err = "empty set";
244 if (i != b->keys.set[0].data && !i->keys)
245 goto err;
246
247 bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
248
249 b->written += set_blocks(i, block_bytes(b->c));
250 }
251
252 err = "corrupted btree";
253 for (i = write_block(b);
254 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
255 i = ((void *) i) + block_bytes(b->c))
256 if (i->seq == b->keys.set[0].data->seq)
257 goto err;
258
259 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
260
261 i = b->keys.set[0].data;
262 err = "short btree key";
263 if (b->keys.set[0].size &&
264 bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
265 goto err;
266
267 if (b->written < btree_blocks(b))
268 bch_bset_init_next(&b->keys, write_block(b),
269 bset_magic(&b->c->sb));
270 out:
271 mempool_free(iter, b->c->fill_iter);
272 return;
273 err:
274 set_btree_node_io_error(b);
275 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
276 err, PTR_BUCKET_NR(b->c, &b->key, 0),
277 bset_block_offset(b, i), i->keys);
278 goto out;
279 }
280
281 static void btree_node_read_endio(struct bio *bio, int error)
282 {
283 struct closure *cl = bio->bi_private;
284 closure_put(cl);
285 }
286
287 static void bch_btree_node_read(struct btree *b)
288 {
289 uint64_t start_time = local_clock();
290 struct closure cl;
291 struct bio *bio;
292
293 trace_bcache_btree_read(b);
294
295 closure_init_stack(&cl);
296
297 bio = bch_bbio_alloc(b->c);
298 bio->bi_rw = REQ_META|READ_SYNC;
299 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
300 bio->bi_end_io = btree_node_read_endio;
301 bio->bi_private = &cl;
302
303 bch_bio_map(bio, b->keys.set[0].data);
304
305 bch_submit_bbio(bio, b->c, &b->key, 0);
306 closure_sync(&cl);
307
308 if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
309 set_btree_node_io_error(b);
310
311 bch_bbio_free(bio, b->c);
312
313 if (btree_node_io_error(b))
314 goto err;
315
316 bch_btree_node_read_done(b);
317 bch_time_stats_update(&b->c->btree_read_time, start_time);
318
319 return;
320 err:
321 bch_cache_set_error(b->c, "io error reading bucket %zu",
322 PTR_BUCKET_NR(b->c, &b->key, 0));
323 }
324
325 static void btree_complete_write(struct btree *b, struct btree_write *w)
326 {
327 if (w->prio_blocked &&
328 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
329 wake_up_allocators(b->c);
330
331 if (w->journal) {
332 atomic_dec_bug(w->journal);
333 __closure_wake_up(&b->c->journal.wait);
334 }
335
336 w->prio_blocked = 0;
337 w->journal = NULL;
338 }
339
340 static void btree_node_write_unlock(struct closure *cl)
341 {
342 struct btree *b = container_of(cl, struct btree, io);
343
344 up(&b->io_mutex);
345 }
346
347 static void __btree_node_write_done(struct closure *cl)
348 {
349 struct btree *b = container_of(cl, struct btree, io);
350 struct btree_write *w = btree_prev_write(b);
351
352 bch_bbio_free(b->bio, b->c);
353 b->bio = NULL;
354 btree_complete_write(b, w);
355
356 if (btree_node_dirty(b))
357 schedule_delayed_work(&b->work, 30 * HZ);
358
359 closure_return_with_destructor(cl, btree_node_write_unlock);
360 }
361
362 static void btree_node_write_done(struct closure *cl)
363 {
364 struct btree *b = container_of(cl, struct btree, io);
365 struct bio_vec *bv;
366 int n;
367
368 bio_for_each_segment_all(bv, b->bio, n)
369 __free_page(bv->bv_page);
370
371 __btree_node_write_done(cl);
372 }
373
374 static void btree_node_write_endio(struct bio *bio, int error)
375 {
376 struct closure *cl = bio->bi_private;
377 struct btree *b = container_of(cl, struct btree, io);
378
379 if (error)
380 set_btree_node_io_error(b);
381
382 bch_bbio_count_io_errors(b->c, bio, error, "writing btree");
383 closure_put(cl);
384 }
385
386 static void do_btree_node_write(struct btree *b)
387 {
388 struct closure *cl = &b->io;
389 struct bset *i = btree_bset_last(b);
390 BKEY_PADDED(key) k;
391
392 i->version = BCACHE_BSET_VERSION;
393 i->csum = btree_csum_set(b, i);
394
395 BUG_ON(b->bio);
396 b->bio = bch_bbio_alloc(b->c);
397
398 b->bio->bi_end_io = btree_node_write_endio;
399 b->bio->bi_private = cl;
400 b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
401 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
402 bch_bio_map(b->bio, i);
403
404 /*
405 * If we're appending to a leaf node, we don't technically need FUA -
406 * this write just needs to be persisted before the next journal write,
407 * which will be marked FLUSH|FUA.
408 *
409 * Similarly if we're writing a new btree root - the pointer is going to
410 * be in the next journal entry.
411 *
412 * But if we're writing a new btree node (that isn't a root) or
413 * appending to a non leaf btree node, we need either FUA or a flush
414 * when we write the parent with the new pointer. FUA is cheaper than a
415 * flush, and writes appending to leaf nodes aren't blocking anything so
416 * just make all btree node writes FUA to keep things sane.
417 */
418
419 bkey_copy(&k.key, &b->key);
420 SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
421 bset_sector_offset(&b->keys, i));
422
423 if (!bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
424 int j;
425 struct bio_vec *bv;
426 void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
427
428 bio_for_each_segment_all(bv, b->bio, j)
429 memcpy(page_address(bv->bv_page),
430 base + j * PAGE_SIZE, PAGE_SIZE);
431
432 bch_submit_bbio(b->bio, b->c, &k.key, 0);
433
434 continue_at(cl, btree_node_write_done, NULL);
435 } else {
436 b->bio->bi_vcnt = 0;
437 bch_bio_map(b->bio, i);
438
439 bch_submit_bbio(b->bio, b->c, &k.key, 0);
440
441 closure_sync(cl);
442 continue_at_nobarrier(cl, __btree_node_write_done, NULL);
443 }
444 }
445
446 void __bch_btree_node_write(struct btree *b, struct closure *parent)
447 {
448 struct bset *i = btree_bset_last(b);
449
450 lockdep_assert_held(&b->write_lock);
451
452 trace_bcache_btree_write(b);
453
454 BUG_ON(current->bio_list);
455 BUG_ON(b->written >= btree_blocks(b));
456 BUG_ON(b->written && !i->keys);
457 BUG_ON(btree_bset_first(b)->seq != i->seq);
458 bch_check_keys(&b->keys, "writing");
459
460 cancel_delayed_work(&b->work);
461
462 /* If caller isn't waiting for write, parent refcount is cache set */
463 down(&b->io_mutex);
464 closure_init(&b->io, parent ?: &b->c->cl);
465
466 clear_bit(BTREE_NODE_dirty, &b->flags);
467 change_bit(BTREE_NODE_write_idx, &b->flags);
468
469 do_btree_node_write(b);
470
471 atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
472 &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
473
474 b->written += set_blocks(i, block_bytes(b->c));
475 }
476
477 void bch_btree_node_write(struct btree *b, struct closure *parent)
478 {
479 unsigned nsets = b->keys.nsets;
480
481 lockdep_assert_held(&b->lock);
482
483 __bch_btree_node_write(b, parent);
484
485 /*
486 * do verify if there was more than one set initially (i.e. we did a
487 * sort) and we sorted down to a single set:
488 */
489 if (nsets && !b->keys.nsets)
490 bch_btree_verify(b);
491
492 bch_btree_init_next(b);
493 }
494
495 static void bch_btree_node_write_sync(struct btree *b)
496 {
497 struct closure cl;
498
499 closure_init_stack(&cl);
500
501 mutex_lock(&b->write_lock);
502 bch_btree_node_write(b, &cl);
503 mutex_unlock(&b->write_lock);
504
505 closure_sync(&cl);
506 }
507
508 static void btree_node_write_work(struct work_struct *w)
509 {
510 struct btree *b = container_of(to_delayed_work(w), struct btree, work);
511
512 mutex_lock(&b->write_lock);
513 if (btree_node_dirty(b))
514 __bch_btree_node_write(b, NULL);
515 mutex_unlock(&b->write_lock);
516 }
517
518 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
519 {
520 struct bset *i = btree_bset_last(b);
521 struct btree_write *w = btree_current_write(b);
522
523 lockdep_assert_held(&b->write_lock);
524
525 BUG_ON(!b->written);
526 BUG_ON(!i->keys);
527
528 if (!btree_node_dirty(b))
529 schedule_delayed_work(&b->work, 30 * HZ);
530
531 set_btree_node_dirty(b);
532
533 if (journal_ref) {
534 if (w->journal &&
535 journal_pin_cmp(b->c, w->journal, journal_ref)) {
536 atomic_dec_bug(w->journal);
537 w->journal = NULL;
538 }
539
540 if (!w->journal) {
541 w->journal = journal_ref;
542 atomic_inc(w->journal);
543 }
544 }
545
546 /* Force write if set is too big */
547 if (set_bytes(i) > PAGE_SIZE - 48 &&
548 !current->bio_list)
549 bch_btree_node_write(b, NULL);
550 }
551
552 /*
553 * Btree in memory cache - allocation/freeing
554 * mca -> memory cache
555 */
556
557 #define mca_reserve(c) (((c->root && c->root->level) \
558 ? c->root->level : 1) * 8 + 16)
559 #define mca_can_free(c) \
560 max_t(int, 0, c->btree_cache_used - mca_reserve(c))
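
/*
 * Worked example (illustrative): with a three level btree (root->level == 2)
 * mca_reserve() is 2 * 8 + 16 = 32 nodes; if 40 nodes are cached,
 * mca_can_free() lets the shrinker reclaim at most 40 - 32 = 8 of them.
 */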
561
562 static void mca_data_free(struct btree *b)
563 {
564 BUG_ON(b->io_mutex.count != 1);
565
566 bch_btree_keys_free(&b->keys);
567
568 b->c->btree_cache_used--;
569 list_move(&b->list, &b->c->btree_cache_freed);
570 }
571
572 static void mca_bucket_free(struct btree *b)
573 {
574 BUG_ON(btree_node_dirty(b));
575
576 b->key.ptr[0] = 0;
577 hlist_del_init_rcu(&b->hash);
578 list_move(&b->list, &b->c->btree_cache_freeable);
579 }
580
581 static unsigned btree_order(struct bkey *k)
582 {
583 return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
584 }
585
586 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
587 {
588 if (!bch_btree_keys_alloc(&b->keys,
589 max_t(unsigned,
590 ilog2(b->c->btree_pages),
591 btree_order(k)),
592 gfp)) {
593 b->c->btree_cache_used++;
594 list_move(&b->list, &b->c->btree_cache);
595 } else {
596 list_move(&b->list, &b->c->btree_cache_freed);
597 }
598 }
599
600 static struct btree *mca_bucket_alloc(struct cache_set *c,
601 struct bkey *k, gfp_t gfp)
602 {
603 struct btree *b = kzalloc(sizeof(struct btree), gfp);
604 if (!b)
605 return NULL;
606
607 init_rwsem(&b->lock);
608 lockdep_set_novalidate_class(&b->lock);
609 mutex_init(&b->write_lock);
610 lockdep_set_novalidate_class(&b->write_lock);
611 INIT_LIST_HEAD(&b->list);
612 INIT_DELAYED_WORK(&b->work, btree_node_write_work);
613 b->c = c;
614 sema_init(&b->io_mutex, 1);
615
616 mca_data_alloc(b, k, gfp);
617 return b;
618 }
619
620 static int mca_reap(struct btree *b, unsigned min_order, bool flush)
621 {
622 struct closure cl;
623
624 closure_init_stack(&cl);
625 lockdep_assert_held(&b->c->bucket_lock);
626
627 if (!down_write_trylock(&b->lock))
628 return -ENOMEM;
629
630 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
631
632 if (b->keys.page_order < min_order)
633 goto out_unlock;
634
635 if (!flush) {
636 if (btree_node_dirty(b))
637 goto out_unlock;
638
639 if (down_trylock(&b->io_mutex))
640 goto out_unlock;
641 up(&b->io_mutex);
642 }
643
644 mutex_lock(&b->write_lock);
645 if (btree_node_dirty(b))
646 __bch_btree_node_write(b, &cl);
647 mutex_unlock(&b->write_lock);
648
649 closure_sync(&cl);
650
651 /* wait for any in flight btree write */
652 down(&b->io_mutex);
653 up(&b->io_mutex);
654
655 return 0;
656 out_unlock:
657 rw_unlock(true, b);
658 return -ENOMEM;
659 }
660
661 static unsigned long bch_mca_scan(struct shrinker *shrink,
662 struct shrink_control *sc)
663 {
664 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
665 struct btree *b, *t;
666 unsigned long i, nr = sc->nr_to_scan;
667 unsigned long freed = 0;
668
669 if (c->shrinker_disabled)
670 return SHRINK_STOP;
671
672 if (c->btree_cache_alloc_lock)
673 return SHRINK_STOP;
674
675 /* Return -1 if we can't do anything right now */
676 if (sc->gfp_mask & __GFP_IO)
677 mutex_lock(&c->bucket_lock);
678 else if (!mutex_trylock(&c->bucket_lock))
679 return -1;
680
681 /*
682 * It's _really_ critical that we don't free too many btree nodes - we
683 * have to always leave ourselves a reserve. The reserve is how we
684 * guarantee that allocating memory for a new btree node can always
685 * succeed, so that inserting keys into the btree can always succeed and
686 * IO can always make forward progress:
687 */
688 nr /= c->btree_pages; /* nr_to_scan is in pages; convert to btree nodes */
689 nr = min_t(unsigned long, nr, mca_can_free(c));
690
691 i = 0;
692 list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
693 if (freed >= nr)
694 break;
695
696 if (++i > 3 &&
697 !mca_reap(b, 0, false)) {
698 mca_data_free(b);
699 rw_unlock(true, b);
700 freed++;
701 }
702 }
703
704 for (i = 0; (nr--) && i < c->btree_cache_used; i++) {
705 if (list_empty(&c->btree_cache))
706 goto out;
707
708 b = list_first_entry(&c->btree_cache, struct btree, list);
709 list_rotate_left(&c->btree_cache);
710
711 if (!b->accessed &&
712 !mca_reap(b, 0, false)) {
713 mca_bucket_free(b);
714 mca_data_free(b);
715 rw_unlock(true, b);
716 freed++;
717 } else
718 b->accessed = 0;
719 }
720 out:
721 mutex_unlock(&c->bucket_lock);
722 return freed;
723 }
724
725 static unsigned long bch_mca_count(struct shrinker *shrink,
726 struct shrink_control *sc)
727 {
728 struct cache_set *c = container_of(shrink, struct cache_set, shrink);
729
730 if (c->shrinker_disabled)
731 return 0;
732
733 if (c->btree_cache_alloc_lock)
734 return 0;
735
736 return mca_can_free(c) * c->btree_pages;
737 }
738
739 void bch_btree_cache_free(struct cache_set *c)
740 {
741 struct btree *b;
742 struct closure cl;
743 closure_init_stack(&cl);
744
745 if (c->shrink.list.next)
746 unregister_shrinker(&c->shrink);
747
748 mutex_lock(&c->bucket_lock);
749
750 #ifdef CONFIG_BCACHE_DEBUG
751 if (c->verify_data)
752 list_move(&c->verify_data->list, &c->btree_cache);
753
754 free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
755 #endif
756
757 list_splice(&c->btree_cache_freeable,
758 &c->btree_cache);
759
760 while (!list_empty(&c->btree_cache)) {
761 b = list_first_entry(&c->btree_cache, struct btree, list);
762
763 if (btree_node_dirty(b))
764 btree_complete_write(b, btree_current_write(b));
765 clear_bit(BTREE_NODE_dirty, &b->flags);
766
767 mca_data_free(b);
768 }
769
770 while (!list_empty(&c->btree_cache_freed)) {
771 b = list_first_entry(&c->btree_cache_freed,
772 struct btree, list);
773 list_del(&b->list);
774 cancel_delayed_work_sync(&b->work);
775 kfree(b);
776 }
777
778 mutex_unlock(&c->bucket_lock);
779 }
780
781 int bch_btree_cache_alloc(struct cache_set *c)
782 {
783 unsigned i;
784
785 for (i = 0; i < mca_reserve(c); i++)
786 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
787 return -ENOMEM;
788
789 list_splice_init(&c->btree_cache,
790 &c->btree_cache_freeable);
791
792 #ifdef CONFIG_BCACHE_DEBUG
793 mutex_init(&c->verify_lock);
794
795 c->verify_ondisk = (void *)
796 __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
797
798 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
799
800 if (c->verify_data &&
801 c->verify_data->keys.set->data)
802 list_del_init(&c->verify_data->list);
803 else
804 c->verify_data = NULL;
805 #endif
806
807 c->shrink.count_objects = bch_mca_count;
808 c->shrink.scan_objects = bch_mca_scan;
809 c->shrink.seeks = 4;
810 c->shrink.batch = c->btree_pages * 2;
811
812 if (register_shrinker(&c->shrink))
813 pr_warn("bcache: %s: could not register shrinker",
814 __func__);
815
816 return 0;
817 }
818
819 /* Btree in memory cache - hash table */
820
821 static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
822 {
823 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
824 }
825
826 static struct btree *mca_find(struct cache_set *c, struct bkey *k)
827 {
828 struct btree *b;
829
830 rcu_read_lock();
831 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
832 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
833 goto out;
834 b = NULL;
835 out:
836 rcu_read_unlock();
837 return b;
838 }
839
840 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
841 {
842 struct task_struct *old;
843
844 old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
845 if (old && old != current) {
846 if (op)
847 prepare_to_wait(&c->btree_cache_wait, &op->wait,
848 TASK_UNINTERRUPTIBLE);
849 return -EINTR;
850 }
851
852 return 0;
853 }
854
855 static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
856 struct bkey *k)
857 {
858 struct btree *b;
859
860 trace_bcache_btree_cache_cannibalize(c);
861
862 if (mca_cannibalize_lock(c, op))
863 return ERR_PTR(-EINTR);
864
865 list_for_each_entry_reverse(b, &c->btree_cache, list)
866 if (!mca_reap(b, btree_order(k), false))
867 return b;
868
869 list_for_each_entry_reverse(b, &c->btree_cache, list)
870 if (!mca_reap(b, btree_order(k), true))
871 return b;
872
873 WARN(1, "btree cache cannibalize failed\n");
874 return ERR_PTR(-ENOMEM);
875 }
876
877 /*
878 * We can only have one thread cannibalizing other cached btree nodes at a time,
879 * or we'll deadlock. We use an open coded mutex to ensure that, which
880 * mca_cannibalize_lock() will take. This means every time we unlock the root of
881 * the btree, we need to release this lock if we have it held.
882 */
883 static void bch_cannibalize_unlock(struct cache_set *c)
884 {
885 if (c->btree_cache_alloc_lock == current) {
886 c->btree_cache_alloc_lock = NULL;
887 wake_up(&c->btree_cache_wait);
888 }
889 }
890
891 static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
892 struct bkey *k, int level)
893 {
894 struct btree *b;
895
896 BUG_ON(current->bio_list);
897
898 lockdep_assert_held(&c->bucket_lock);
899
900 if (mca_find(c, k))
901 return NULL;
902
903 /* btree_node_free() doesn't free memory; it sticks the node on the end of
904 * the list. Check if there's any freed nodes there:
905 */
906 list_for_each_entry(b, &c->btree_cache_freeable, list)
907 if (!mca_reap(b, btree_order(k), false))
908 goto out;
909
910 /* We never free struct btree itself, just the memory that holds the on
911 * disk node. Check the freed list before allocating a new one:
912 */
913 list_for_each_entry(b, &c->btree_cache_freed, list)
914 if (!mca_reap(b, 0, false)) {
915 mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
916 if (!b->keys.set[0].data)
917 goto err;
918 else
919 goto out;
920 }
921
922 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
923 if (!b)
924 goto err;
925
926 BUG_ON(!down_write_trylock(&b->lock));
927 if (!b->keys.set->data)
928 goto err;
929 out:
930 BUG_ON(b->io_mutex.count != 1);
931
932 bkey_copy(&b->key, k);
933 list_move(&b->list, &c->btree_cache);
934 hlist_del_init_rcu(&b->hash);
935 hlist_add_head_rcu(&b->hash, mca_hash(c, k));
936
937 lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
938 b->parent = (void *) ~0UL;
939 b->flags = 0;
940 b->written = 0;
941 b->level = level;
942
943 if (!b->level)
944 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
945 &b->c->expensive_debug_checks);
946 else
947 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
948 &b->c->expensive_debug_checks);
949
950 return b;
951 err:
952 if (b)
953 rw_unlock(true, b);
954
955 b = mca_cannibalize(c, op, k);
956 if (!IS_ERR(b))
957 goto out;
958
959 return b;
960 }
961
962 /**
963 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
964 * in from disk if necessary.
965 *
966 * If IO is necessary and running under generic_make_request, returns -EAGAIN.
967 *
968 * The btree node will have either a read or a write lock held, depending on
969 * level and op->lock.
970 */
971 struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
972 struct bkey *k, int level, bool write,
973 struct btree *parent)
974 {
975 int i = 0;
976 struct btree *b;
977
978 BUG_ON(level < 0);
979 retry:
980 b = mca_find(c, k);
981
982 if (!b) {
983 if (current->bio_list)
984 return ERR_PTR(-EAGAIN);
985
986 mutex_lock(&c->bucket_lock);
987 b = mca_alloc(c, op, k, level);
988 mutex_unlock(&c->bucket_lock);
989
990 if (!b)
991 goto retry;
992 if (IS_ERR(b))
993 return b;
994
995 bch_btree_node_read(b);
996
997 if (!write)
998 downgrade_write(&b->lock);
999 } else {
1000 rw_lock(write, b, level);
1001 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
1002 rw_unlock(write, b);
1003 goto retry;
1004 }
1005 BUG_ON(b->level != level);
1006 }
1007
1008 b->parent = parent;
1009 b->accessed = 1;
1010
1011 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1012 prefetch(b->keys.set[i].tree);
1013 prefetch(b->keys.set[i].data);
1014 }
1015
1016 for (; i <= b->keys.nsets; i++)
1017 prefetch(b->keys.set[i].data);
1018
1019 if (btree_node_io_error(b)) {
1020 rw_unlock(write, b);
1021 return ERR_PTR(-EIO);
1022 }
1023
1024 BUG_ON(!b->written);
1025
1026 return b;
1027 }
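
/*
 * Illustrative usage of bch_btree_node_get() (a sketch mirroring the
 * btree() macro above), fetching a child node for a read only descent:
 *
 *	struct btree *child = bch_btree_node_get(b->c, op, k, b->level - 1,
 *						 false, b);
 *	if (IS_ERR(child))
 *		return PTR_ERR(child);
 *	...
 *	rw_unlock(false, child);
 */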
1028
1029 static void btree_node_prefetch(struct btree *parent, struct bkey *k)
1030 {
1031 struct btree *b;
1032
1033 mutex_lock(&parent->c->bucket_lock);
1034 b = mca_alloc(parent->c, NULL, k, parent->level - 1);
1035 mutex_unlock(&parent->c->bucket_lock);
1036
1037 if (!IS_ERR_OR_NULL(b)) {
1038 b->parent = parent;
1039 bch_btree_node_read(b);
1040 rw_unlock(true, b);
1041 }
1042 }
1043
1044 /* Btree alloc */
1045
1046 static void btree_node_free(struct btree *b)
1047 {
1048 trace_bcache_btree_node_free(b);
1049
1050 BUG_ON(b == b->c->root);
1051
1052 mutex_lock(&b->write_lock);
1053
1054 if (btree_node_dirty(b))
1055 btree_complete_write(b, btree_current_write(b));
1056 clear_bit(BTREE_NODE_dirty, &b->flags);
1057
1058 mutex_unlock(&b->write_lock);
1059
1060 cancel_delayed_work(&b->work);
1061
1062 mutex_lock(&b->c->bucket_lock);
1063 bch_bucket_free(b->c, &b->key);
1064 mca_bucket_free(b);
1065 mutex_unlock(&b->c->bucket_lock);
1066 }
1067
1068 struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
1069 int level, bool wait,
1070 struct btree *parent)
1071 {
1072 BKEY_PADDED(key) k;
1073 struct btree *b = ERR_PTR(-EAGAIN);
1074
1075 mutex_lock(&c->bucket_lock);
1076 retry:
1077 if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
1078 goto err;
1079
1080 bkey_put(c, &k.key);
1081 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1082
1083 b = mca_alloc(c, op, &k.key, level);
1084 if (IS_ERR(b))
1085 goto err_free;
1086
1087 if (!b) {
1088 cache_bug(c,
1089 "Tried to allocate bucket that was in btree cache");
1090 goto retry;
1091 }
1092
1093 b->accessed = 1;
1094 b->parent = parent;
1095 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
1096
1097 mutex_unlock(&c->bucket_lock);
1098
1099 trace_bcache_btree_node_alloc(b);
1100 return b;
1101 err_free:
1102 bch_bucket_free(c, &k.key);
1103 err:
1104 mutex_unlock(&c->bucket_lock);
1105
1106 trace_bcache_btree_node_alloc_fail(c);
1107 return b;
1108 }
1109
1110 static struct btree *bch_btree_node_alloc(struct cache_set *c,
1111 struct btree_op *op, int level,
1112 struct btree *parent)
1113 {
1114 return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1115 }
1116
1117 static struct btree *btree_node_alloc_replacement(struct btree *b,
1118 struct btree_op *op)
1119 {
1120 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1121 if (!IS_ERR_OR_NULL(n)) {
1122 mutex_lock(&n->write_lock);
1123 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1124 bkey_copy_key(&n->key, &b->key);
1125 mutex_unlock(&n->write_lock);
1126 }
1127
1128 return n;
1129 }
1130
1131 static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1132 {
1133 unsigned i;
1134
1135 mutex_lock(&b->c->bucket_lock);
1136
1137 atomic_inc(&b->c->prio_blocked);
1138
1139 bkey_copy(k, &b->key);
1140 bkey_copy_key(k, &ZERO_KEY);
1141
1142 for (i = 0; i < KEY_PTRS(k); i++)
1143 SET_PTR_GEN(k, i,
1144 bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
1145 PTR_BUCKET(b->c, &b->key, i)));
1146
1147 mutex_unlock(&b->c->bucket_lock);
1148 }
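
/*
 * Note (illustrative): the key built above compares equal to ZERO_KEY (only
 * the inode/offset fields are zeroed) while still carrying pointers with
 * bumped gens; the out_nocoalesce path in btree_gc_coalesce() below relies
 * on this to spot freeing keys whose prio_blocked reference must be dropped.
 */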
1149
1150 static int btree_check_reserve(struct btree *b, struct btree_op *op)
1151 {
1152 struct cache_set *c = b->c;
1153 struct cache *ca;
1154 unsigned i, reserve = (c->root->level - b->level) * 2 + 1;
1155
1156 mutex_lock(&c->bucket_lock);
1157
1158 for_each_cache(ca, c, i)
1159 if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1160 if (op)
1161 prepare_to_wait(&c->btree_cache_wait, &op->wait,
1162 TASK_UNINTERRUPTIBLE);
1163 mutex_unlock(&c->bucket_lock);
1164 return -EINTR;
1165 }
1166
1167 mutex_unlock(&c->bucket_lock);
1168
1169 return mca_cannibalize_lock(b->c, op);
1170 }
1171
1172 /* Garbage collection */
1173
1174 static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1175 struct bkey *k)
1176 {
1177 uint8_t stale = 0;
1178 unsigned i;
1179 struct bucket *g;
1180
1181 /*
1182 * ptr_invalid() can't return true for the keys that mark btree nodes as
1183 * freed, but since ptr_bad() returns true we'll never actually use them
1184 * for anything and thus we don't want to mark their pointers here
1185 */
1186 if (!bkey_cmp(k, &ZERO_KEY))
1187 return stale;
1188
1189 for (i = 0; i < KEY_PTRS(k); i++) {
1190 if (!ptr_available(c, k, i))
1191 continue;
1192
1193 g = PTR_BUCKET(c, k, i);
1194
1195 if (gen_after(g->last_gc, PTR_GEN(k, i)))
1196 g->last_gc = PTR_GEN(k, i);
1197
1198 if (ptr_stale(c, k, i)) {
1199 stale = max(stale, ptr_stale(c, k, i));
1200 continue;
1201 }
1202
1203 cache_bug_on(GC_MARK(g) &&
1204 (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1205 c, "inconsistent ptrs: mark = %llu, level = %i",
1206 GC_MARK(g), level);
1207
1208 if (level)
1209 SET_GC_MARK(g, GC_MARK_METADATA);
1210 else if (KEY_DIRTY(k))
1211 SET_GC_MARK(g, GC_MARK_DIRTY);
1212 else if (!GC_MARK(g))
1213 SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
1214
1215 /* guard against overflow */
1216 SET_GC_SECTORS_USED(g, min_t(unsigned,
1217 GC_SECTORS_USED(g) + KEY_SIZE(k),
1218 MAX_GC_SECTORS_USED));
1219
1220 BUG_ON(!GC_SECTORS_USED(g));
1221 }
1222
1223 return stale;
1224 }
1225
1226 #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
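
/*
 * Summary of the GC mark states set above (recap, not new behaviour):
 *
 *	GC_MARK_METADATA	bucket holds btree node data (level != 0)
 *	GC_MARK_DIRTY		bucket holds dirty cached data
 *	GC_MARK_RECLAIMABLE	bucket holds only clean data and is a
 *				candidate for invalidation
 */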
1227
1228 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1229 {
1230 unsigned i;
1231
1232 for (i = 0; i < KEY_PTRS(k); i++)
1233 if (ptr_available(c, k, i) &&
1234 !ptr_stale(c, k, i)) {
1235 struct bucket *b = PTR_BUCKET(c, k, i);
1236
1237 b->gen = PTR_GEN(k, i);
1238
1239 if (level && bkey_cmp(k, &ZERO_KEY))
1240 b->prio = BTREE_PRIO;
1241 else if (!level && b->prio == BTREE_PRIO)
1242 b->prio = INITIAL_PRIO;
1243 }
1244
1245 __bch_btree_mark_key(c, level, k);
1246 }
1247
1248 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1249 {
1250 uint8_t stale = 0;
1251 unsigned keys = 0, good_keys = 0;
1252 struct bkey *k;
1253 struct btree_iter iter;
1254 struct bset_tree *t;
1255
1256 gc->nodes++;
1257
1258 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1259 stale = max(stale, btree_mark_key(b, k));
1260 keys++;
1261
1262 if (bch_ptr_bad(&b->keys, k))
1263 continue;
1264
1265 gc->key_bytes += bkey_u64s(k);
1266 gc->nkeys++;
1267 good_keys++;
1268
1269 gc->data += KEY_SIZE(k);
1270 }
1271
1272 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1273 btree_bug_on(t->size &&
1274 bset_written(&b->keys, t) &&
1275 bkey_cmp(&b->key, &t->end) < 0,
1276 b, "found short btree key in gc");
1277
1278 if (b->c->gc_always_rewrite)
1279 return true;
1280
1281 if (stale > 10)
1282 return true;
1283
1284 if ((keys - good_keys) * 2 > keys)
1285 return true;
1286
1287 return false;
1288 }
1289
1290 #define GC_MERGE_NODES 4U
1291
1292 struct gc_merge_info {
1293 struct btree *b;
1294 unsigned keys;
1295 };
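
/*
 * Sketch of the coalesce window (illustrative): btree_gc_recurse() slides
 * the last GC_MERGE_NODES sibling nodes through r[], newest at r[0];
 * btree_gc_coalesce() only proceeds when all their keys would fit into
 * nodes - 1 new nodes, each filled to at most 2/3 of the default node size.
 */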
1296
1297 static int bch_btree_insert_node(struct btree *, struct btree_op *,
1298 struct keylist *, atomic_t *, struct bkey *);
1299
1300 static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1301 struct gc_stat *gc, struct gc_merge_info *r)
1302 {
1303 unsigned i, nodes = 0, keys = 0, blocks;
1304 struct btree *new_nodes[GC_MERGE_NODES];
1305 struct keylist keylist;
1306 struct closure cl;
1307 struct bkey *k;
1308
1309 bch_keylist_init(&keylist);
1310
1311 if (btree_check_reserve(b, NULL))
1312 return 0;
1313
1314 memset(new_nodes, 0, sizeof(new_nodes));
1315 closure_init_stack(&cl);
1316
1317 while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
1318 keys += r[nodes++].keys;
1319
1320 blocks = btree_default_blocks(b->c) * 2 / 3;
1321
1322 if (nodes < 2 ||
1323 __set_blocks(b->keys.set[0].data, keys,
1324 block_bytes(b->c)) > blocks * (nodes - 1))
1325 return 0;
1326
1327 for (i = 0; i < nodes; i++) {
1328 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1329 if (IS_ERR_OR_NULL(new_nodes[i]))
1330 goto out_nocoalesce;
1331 }
1332
1333 /*
1334 * We have to check the reserve here, after we've allocated our new
1335 * nodes, to make sure the insert below will succeed - we also check
1336 * before as an optimization to potentially avoid a bunch of expensive
1337 * allocs/sorts
1338 */
1339 if (btree_check_reserve(b, NULL))
1340 goto out_nocoalesce;
1341
1342 for (i = 0; i < nodes; i++)
1343 mutex_lock(&new_nodes[i]->write_lock);
1344
1345 for (i = nodes - 1; i > 0; --i) {
1346 struct bset *n1 = btree_bset_first(new_nodes[i]);
1347 struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1348 struct bkey *k, *last = NULL;
1349
1350 keys = 0;
1351
1352 if (i > 1) {
1353 for (k = n2->start;
1354 k < bset_bkey_last(n2);
1355 k = bkey_next(k)) {
1356 if (__set_blocks(n1, n1->keys + keys +
1357 bkey_u64s(k),
1358 block_bytes(b->c)) > blocks)
1359 break;
1360
1361 last = k;
1362 keys += bkey_u64s(k);
1363 }
1364 } else {
1365 /*
1366 * Last node we're not getting rid of - we're getting
1367 * rid of the node at r[0]. Have to try and fit all of
1368 * the remaining keys into this node; we can't ensure
1369 * they will always fit due to rounding and variable
1370 * length keys (shouldn't be possible in practice,
1371 * though)
1372 */
1373 if (__set_blocks(n1, n1->keys + n2->keys,
1374 block_bytes(b->c)) >
1375 btree_blocks(new_nodes[i]))
1376 goto out_nocoalesce;
1377
1378 keys = n2->keys;
1379 /* Take the key of the node we're getting rid of */
1380 last = &r->b->key;
1381 }
1382
1383 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
1384 btree_blocks(new_nodes[i]));
1385
1386 if (last)
1387 bkey_copy_key(&new_nodes[i]->key, last);
1388
1389 memcpy(bset_bkey_last(n1),
1390 n2->start,
1391 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1392
1393 n1->keys += keys;
1394 r[i].keys = n1->keys;
1395
1396 memmove(n2->start,
1397 bset_bkey_idx(n2, keys),
1398 (void *) bset_bkey_last(n2) -
1399 (void *) bset_bkey_idx(n2, keys));
1400
1401 n2->keys -= keys;
1402
1403 if (__bch_keylist_realloc(&keylist,
1404 bkey_u64s(&new_nodes[i]->key)))
1405 goto out_nocoalesce;
1406
1407 bch_btree_node_write(new_nodes[i], &cl);
1408 bch_keylist_add(&keylist, &new_nodes[i]->key);
1409 }
1410
1411 for (i = 0; i < nodes; i++)
1412 mutex_unlock(&new_nodes[i]->write_lock);
1413
1414 closure_sync(&cl);
1415
1416 /* We emptied out this node */
1417 BUG_ON(btree_bset_first(new_nodes[0])->keys);
1418 btree_node_free(new_nodes[0]);
1419 rw_unlock(true, new_nodes[0]);
1420 new_nodes[0] = NULL;
1421
1422 for (i = 0; i < nodes; i++) {
1423 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1424 goto out_nocoalesce;
1425
1426 make_btree_freeing_key(r[i].b, keylist.top);
1427 bch_keylist_push(&keylist);
1428 }
1429
1430 bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1431 BUG_ON(!bch_keylist_empty(&keylist));
1432
1433 for (i = 0; i < nodes; i++) {
1434 btree_node_free(r[i].b);
1435 rw_unlock(true, r[i].b);
1436
1437 r[i].b = new_nodes[i];
1438 }
1439
1440 memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1441 r[nodes - 1].b = ERR_PTR(-EINTR);
1442
1443 trace_bcache_btree_gc_coalesce(nodes);
1444 gc->nodes--;
1445
1446 bch_keylist_free(&keylist);
1447
1448 /* Invalidated our iterator */
1449 return -EINTR;
1450
1451 out_nocoalesce:
1452 closure_sync(&cl);
1453 /* drop the freeing keys' prio_blocked refs before freeing the keylist */
1454 while ((k = bch_keylist_pop(&keylist)))
1455 if (!bkey_cmp(k, &ZERO_KEY))
1456 atomic_dec(&b->c->prio_blocked);
1457 bch_keylist_free(&keylist);
1458
1459 for (i = 0; i < nodes; i++)
1460 if (!IS_ERR_OR_NULL(new_nodes[i])) {
1461 btree_node_free(new_nodes[i]);
1462 rw_unlock(true, new_nodes[i]);
1463 }
1464 return 0;
1465 }
1466
1467 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1468 struct btree *replace)
1469 {
1470 struct keylist keys;
1471 struct btree *n;
1472
1473 if (btree_check_reserve(b, NULL))
1474 return 0;
1475
1476 n = btree_node_alloc_replacement(replace, NULL);
1477
1478 /* recheck reserve after allocating replacement node */
1479 if (btree_check_reserve(b, NULL)) {
1480 btree_node_free(n);
1481 rw_unlock(true, n);
1482 return 0;
1483 }
1484
1485 bch_btree_node_write_sync(n);
1486
1487 bch_keylist_init(&keys);
1488 bch_keylist_add(&keys, &n->key);
1489
1490 make_btree_freeing_key(replace, keys.top);
1491 bch_keylist_push(&keys);
1492
1493 bch_btree_insert_node(b, op, &keys, NULL, NULL);
1494 BUG_ON(!bch_keylist_empty(&keys));
1495
1496 btree_node_free(replace);
1497 rw_unlock(true, n);
1498
1499 /* Invalidated our iterator */
1500 return -EINTR;
1501 }
1502
1503 static unsigned btree_gc_count_keys(struct btree *b)
1504 {
1505 struct bkey *k;
1506 struct btree_iter iter;
1507 unsigned ret = 0;
1508
1509 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1510 ret += bkey_u64s(k);
1511
1512 return ret;
1513 }
1514
1515 static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1516 struct closure *writes, struct gc_stat *gc)
1517 {
1518 int ret = 0;
1519 bool should_rewrite;
1520 struct bkey *k;
1521 struct btree_iter iter;
1522 struct gc_merge_info r[GC_MERGE_NODES];
1523 struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1524
1525 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1526
1527 for (i = r; i < r + ARRAY_SIZE(r); i++)
1528 i->b = ERR_PTR(-EINTR);
1529
1530 while (1) {
1531 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1532 if (k) {
1533 r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
1534 true, b);
1535 if (IS_ERR(r->b)) {
1536 ret = PTR_ERR(r->b);
1537 break;
1538 }
1539
1540 r->keys = btree_gc_count_keys(r->b);
1541
1542 ret = btree_gc_coalesce(b, op, gc, r);
1543 if (ret)
1544 break;
1545 }
1546
1547 if (!last->b)
1548 break;
1549
1550 if (!IS_ERR(last->b)) {
1551 should_rewrite = btree_gc_mark_node(last->b, gc);
1552 if (should_rewrite) {
1553 ret = btree_gc_rewrite_node(b, op, last->b);
1554 if (ret)
1555 break;
1556 }
1557
1558 if (last->b->level) {
1559 ret = btree_gc_recurse(last->b, op, writes, gc);
1560 if (ret)
1561 break;
1562 }
1563
1564 bkey_copy_key(&b->c->gc_done, &last->b->key);
1565
1566 /*
1567 * Must flush leaf nodes before gc ends, since replace
1568 * operations aren't journalled
1569 */
1570 mutex_lock(&last->b->write_lock);
1571 if (btree_node_dirty(last->b))
1572 bch_btree_node_write(last->b, writes);
1573 mutex_unlock(&last->b->write_lock);
1574 rw_unlock(true, last->b);
1575 }
1576
1577 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1578 r->b = NULL;
1579
1580 if (need_resched()) {
1581 ret = -EAGAIN;
1582 break;
1583 }
1584 }
1585
1586 for (i = r; i < r + ARRAY_SIZE(r); i++)
1587 if (!IS_ERR_OR_NULL(i->b)) {
1588 mutex_lock(&i->b->write_lock);
1589 if (btree_node_dirty(i->b))
1590 bch_btree_node_write(i->b, writes);
1591 mutex_unlock(&i->b->write_lock);
1592 rw_unlock(true, i->b);
1593 }
1594
1595 return ret;
1596 }
1597
1598 static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1599 struct closure *writes, struct gc_stat *gc)
1600 {
1601 struct btree *n = NULL;
1602 int ret = 0;
1603 bool should_rewrite;
1604
1605 should_rewrite = btree_gc_mark_node(b, gc);
1606 if (should_rewrite) {
1607 n = btree_node_alloc_replacement(b, NULL);
1608
1609 if (!IS_ERR_OR_NULL(n)) {
1610 bch_btree_node_write_sync(n);
1611
1612 bch_btree_set_root(n);
1613 btree_node_free(b);
1614 rw_unlock(true, n);
1615
1616 return -EINTR;
1617 }
1618 }
1619
1620 __bch_btree_mark_key(b->c, b->level + 1, &b->key);
1621
1622 if (b->level) {
1623 ret = btree_gc_recurse(b, op, writes, gc);
1624 if (ret)
1625 return ret;
1626 }
1627
1628 bkey_copy_key(&b->c->gc_done, &b->key);
1629
1630 return ret;
1631 }
1632
1633 static void btree_gc_start(struct cache_set *c)
1634 {
1635 struct cache *ca;
1636 struct bucket *b;
1637 unsigned i;
1638
1639 if (!c->gc_mark_valid)
1640 return;
1641
1642 mutex_lock(&c->bucket_lock);
1643
1644 c->gc_mark_valid = 0;
1645 c->gc_done = ZERO_KEY;
1646
1647 for_each_cache(ca, c, i)
1648 for_each_bucket(b, ca) {
1649 b->last_gc = b->gen;
1650 if (!atomic_read(&b->pin)) {
1651 SET_GC_MARK(b, 0);
1652 SET_GC_SECTORS_USED(b, 0);
1653 }
1654 }
1655
1656 mutex_unlock(&c->bucket_lock);
1657 }
1658
1659 static size_t bch_btree_gc_finish(struct cache_set *c)
1660 {
1661 size_t available = 0;
1662 struct bucket *b;
1663 struct cache *ca;
1664 unsigned i;
1665
1666 mutex_lock(&c->bucket_lock);
1667
1668 set_gc_sectors(c);
1669 c->gc_mark_valid = 1;
1670 c->need_gc = 0;
1671
1672 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1673 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1674 GC_MARK_METADATA);
1675
1676 /* don't reclaim buckets to which writeback keys point */
1677 rcu_read_lock();
1678 for (i = 0; i < c->nr_uuids; i++) {
1679 struct bcache_device *d = c->devices[i];
1680 struct cached_dev *dc;
1681 struct keybuf_key *w, *n;
1682 unsigned j;
1683
1684 if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1685 continue;
1686 dc = container_of(d, struct cached_dev, disk);
1687
1688 spin_lock(&dc->writeback_keys.lock);
1689 rbtree_postorder_for_each_entry_safe(w, n,
1690 &dc->writeback_keys.keys, node)
1691 for (j = 0; j < KEY_PTRS(&w->key); j++)
1692 SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1693 GC_MARK_DIRTY);
1694 spin_unlock(&dc->writeback_keys.lock);
1695 }
1696 rcu_read_unlock();
1697
1698 for_each_cache(ca, c, i) {
1699 uint64_t *i;
1700
1701 ca->invalidate_needs_gc = 0;
1702
1703 for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
1704 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1705
1706 for (i = ca->prio_buckets;
1707 i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
1708 SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1709
1710 for_each_bucket(b, ca) {
1711 c->need_gc = max(c->need_gc, bucket_gc_gen(b));
1712
1713 if (atomic_read(&b->pin))
1714 continue;
1715
1716 BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1717
1718 if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1719 available++;
1720 }
1721 }
1722
1723 mutex_unlock(&c->bucket_lock);
1724 return available;
1725 }
1726
1727 static void bch_btree_gc(struct cache_set *c)
1728 {
1729 int ret;
1730 unsigned long available;
1731 struct gc_stat stats;
1732 struct closure writes;
1733 struct btree_op op;
1734 uint64_t start_time = local_clock();
1735
1736 trace_bcache_gc_start(c);
1737
1738 memset(&stats, 0, sizeof(struct gc_stat));
1739 closure_init_stack(&writes);
1740 bch_btree_op_init(&op, SHRT_MAX);
1741
1742 btree_gc_start(c);
1743
1744 do {
1745 ret = btree_root(gc_root, c, &op, &writes, &stats);
1746 closure_sync(&writes);
1747 cond_resched();
1748
1749 if (ret && ret != -EAGAIN)
1750 pr_warn("gc failed!");
1751 } while (ret);
1752
1753 available = bch_btree_gc_finish(c);
1754 wake_up_allocators(c);
1755
1756 bch_time_stats_update(&c->btree_gc_time, start_time);
1757
1758 stats.key_bytes *= sizeof(uint64_t);
1759 stats.data <<= 9;
1760 stats.in_use = (c->nbuckets - available) * 100 / c->nbuckets;
1761 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1762
1763 trace_bcache_gc_end(c);
1764
1765 bch_moving_gc(c);
1766 }
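
/*
 * GC pass summary (recap of the above): btree_gc_start() resets the old
 * marks, btree_root(gc_root, ...) re-marks every reachable bucket (the loop
 * retries if the traversal was interrupted), bch_btree_gc_finish() marks
 * the metadata buckets and counts what is reclaimable, and bch_moving_gc()
 * then compacts mostly empty buckets.
 */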
1767
1768 static int bch_gc_thread(void *arg)
1769 {
1770 struct cache_set *c = arg;
1771 struct cache *ca;
1772 unsigned i;
1773
1774 while (1) {
1775 again:
1776 bch_btree_gc(c);
1777
1778 set_current_state(TASK_INTERRUPTIBLE);
1779 if (kthread_should_stop())
1780 break;
1781
1782 mutex_lock(&c->bucket_lock);
1783
1784 for_each_cache(ca, c, i)
1785 if (ca->invalidate_needs_gc) {
1786 mutex_unlock(&c->bucket_lock);
1787 set_current_state(TASK_RUNNING);
1788 goto again;
1789 }
1790
1791 mutex_unlock(&c->bucket_lock);
1792
1793 try_to_freeze();
1794 schedule();
1795 }
1796
1797 return 0;
1798 }
1799
1800 int bch_gc_thread_start(struct cache_set *c)
1801 {
1802 c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
1803 if (IS_ERR(c->gc_thread))
1804 return PTR_ERR(c->gc_thread);
1805
1806 set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
1807 return 0;
1808 }
1809
1810 /* Initial partial gc */
1811
1812 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1813 {
1814 int ret = 0;
1815 struct bkey *k, *p = NULL;
1816 struct btree_iter iter;
1817
1818 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1819 bch_initial_mark_key(b->c, b->level, k);
1820
1821 bch_initial_mark_key(b->c, b->level + 1, &b->key);
1822
1823 if (b->level) {
1824 bch_btree_iter_init(&b->keys, &iter, NULL);
1825
1826 do {
1827 k = bch_btree_iter_next_filter(&iter, &b->keys,
1828 bch_ptr_bad);
1829 if (k)
1830 btree_node_prefetch(b, k);
1831
1832 if (p)
1833 ret = btree(check_recurse, p, b, op);
1834
1835 p = k;
1836 } while (p && !ret);
1837 }
1838
1839 return ret;
1840 }
1841
1842 int bch_btree_check(struct cache_set *c)
1843 {
1844 struct btree_op op;
1845
1846 bch_btree_op_init(&op, SHRT_MAX);
1847
1848 return btree_root(check_recurse, c, &op);
1849 }
1850
1851 void bch_initial_gc_finish(struct cache_set *c)
1852 {
1853 struct cache *ca;
1854 struct bucket *b;
1855 unsigned i;
1856
1857 bch_btree_gc_finish(c);
1858
1859 mutex_lock(&c->bucket_lock);
1860
1861 /*
1862 * We need to put some unused buckets directly on the prio freelist in
1863 * order to get the allocator thread started - it needs freed buckets in
1864 * order to rewrite the prios and gens, and it needs to rewrite prios
1865 * and gens in order to free buckets.
1866 *
1867 * This is only safe for buckets that have no live data in them, which
1868 * there should always be some of.
1869 */
1870 for_each_cache(ca, c, i) {
1871 for_each_bucket(b, ca) {
1872 if (fifo_full(&ca->free[RESERVE_PRIO]))
1873 break;
1874
1875 if (bch_can_invalidate_bucket(ca, b) &&
1876 !GC_MARK(b)) {
1877 __bch_invalidate_one_bucket(ca, b);
1878 fifo_push(&ca->free[RESERVE_PRIO],
1879 b - ca->buckets);
1880 }
1881 }
1882 }
1883
1884 mutex_unlock(&c->bucket_lock);
1885 }
1886
1887 /* Btree insertion */
1888
1889 static bool btree_insert_key(struct btree *b, struct bkey *k,
1890 struct bkey *replace_key)
1891 {
1892 unsigned status;
1893
1894 BUG_ON(bkey_cmp(k, &b->key) > 0);
1895
1896 status = bch_btree_insert_key(&b->keys, k, replace_key);
1897 if (status != BTREE_INSERT_STATUS_NO_INSERT) {
1898 bch_check_keys(&b->keys, "%u for %s", status,
1899 replace_key ? "replace" : "insert");
1900
1901 trace_bcache_btree_insert_key(b, k, replace_key != NULL,
1902 status);
1903 return true;
1904 } else
1905 return false;
1906 }
1907
1908 static size_t insert_u64s_remaining(struct btree *b)
1909 {
1910 long ret = bch_btree_keys_u64s_remaining(&b->keys);
1911
1912 /*
1913 * Might land in the middle of an existing extent and have to split it
1914 */
1915 if (b->keys.ops->is_extents)
1916 ret -= KEY_MAX_U64S;
1917
1918 return max(ret, 0L);
1919 }
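
/*
 * Worked example (illustrative): on an extent btree an insert can land in
 * the middle of an existing extent and split it in two, so a node with room
 * for n u64s only safely accepts new keys while n - KEY_MAX_U64S u64s
 * remain, reserving space for the extra piece such a split creates.
 */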
1920
1921 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
1922 struct keylist *insert_keys,
1923 struct bkey *replace_key)
1924 {
1925 bool ret = false;
1926 int oldsize = bch_count_data(&b->keys);
1927
1928 while (!bch_keylist_empty(insert_keys)) {
1929 struct bkey *k = insert_keys->keys;
1930
1931 if (bkey_u64s(k) > insert_u64s_remaining(b))
1932 break;
1933
1934 if (bkey_cmp(k, &b->key) <= 0) {
1935 if (!b->level)
1936 bkey_put(b->c, k);
1937
1938 ret |= btree_insert_key(b, k, replace_key);
1939 bch_keylist_pop_front(insert_keys);
1940 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
1941 BKEY_PADDED(key) temp;
1942 bkey_copy(&temp.key, insert_keys->keys);
1943
1944 bch_cut_back(&b->key, &temp.key);
1945 bch_cut_front(&b->key, insert_keys->keys);
1946
1947 ret |= btree_insert_key(b, &temp.key, replace_key);
1948 break;
1949 } else {
1950 break;
1951 }
1952 }
1953
1954 if (!ret)
1955 op->insert_collision = true;
1956
1957 BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
1958
1959 BUG_ON(bch_count_data(&b->keys) < oldsize);
1960 return ret;
1961 }
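
/*
 * Worked example (illustrative) of the boundary case above: inserting the
 * extent 0..100 into a leaf whose key space ends at 60 inserts a 0..60
 * piece here (temp.key, trimmed with bch_cut_back()) and leaves 60..100 on
 * insert_keys (trimmed with bch_cut_front()) for the caller to route to
 * the next node.
 */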
1962
1963 static int btree_split(struct btree *b, struct btree_op *op,
1964 struct keylist *insert_keys,
1965 struct bkey *replace_key)
1966 {
1967 bool split;
1968 struct btree *n1, *n2 = NULL, *n3 = NULL;
1969 uint64_t start_time = local_clock();
1970 struct closure cl;
1971 struct keylist parent_keys;
1972
1973 closure_init_stack(&cl);
1974 bch_keylist_init(&parent_keys);
1975
1976 if (btree_check_reserve(b, op)) {
1977 if (!b->level)
1978 return -EINTR;
1979 else
1980 WARN(1, "insufficient reserve for split\n");
1981 }
1982
1983 n1 = btree_node_alloc_replacement(b, op);
1984 if (IS_ERR(n1))
1985 goto err;
1986
1987 split = set_blocks(btree_bset_first(n1),
1988 block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
1989
1990 if (split) {
1991 unsigned keys = 0;
1992
1993 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
1994
1995 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1996 if (IS_ERR(n2))
1997 goto err_free1;
1998
1999 if (!b->parent) {
2000 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
2001 if (IS_ERR(n3))
2002 goto err_free2;
2003 }
2004
2005 mutex_lock(&n1->write_lock);
2006 mutex_lock(&n2->write_lock);
2007
2008 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2009
2010 /*
2011 * Has to be a linear search because we don't have an auxiliary
2012 * search tree yet
2013 */
2014
2015 while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2016 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2017 keys));
2018
2019 bkey_copy_key(&n1->key,
2020 bset_bkey_idx(btree_bset_first(n1), keys));
2021 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2022
2023 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2024 btree_bset_first(n1)->keys = keys;
2025
2026 memcpy(btree_bset_first(n2)->start,
2027 bset_bkey_last(btree_bset_first(n1)),
2028 btree_bset_first(n2)->keys * sizeof(uint64_t));
2029
2030 bkey_copy_key(&n2->key, &b->key);
2031
2032 bch_keylist_add(&parent_keys, &n2->key);
2033 bch_btree_node_write(n2, &cl);
2034 mutex_unlock(&n2->write_lock);
2035 rw_unlock(true, n2);
2036 } else {
2037 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2038
2039 mutex_lock(&n1->write_lock);
2040 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2041 }
2042
2043 bch_keylist_add(&parent_keys, &n1->key);
2044 bch_btree_node_write(n1, &cl);
2045 mutex_unlock(&n1->write_lock);
2046
2047 if (n3) {
2048 /* Depth increases, make a new root */
2049 mutex_lock(&n3->write_lock);
2050 bkey_copy_key(&n3->key, &MAX_KEY);
2051 bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2052 bch_btree_node_write(n3, &cl);
2053 mutex_unlock(&n3->write_lock);
2054
2055 closure_sync(&cl);
2056 bch_btree_set_root(n3);
2057 rw_unlock(true, n3);
2058 } else if (!b->parent) {
2059 /* Root filled up but didn't need to be split */
2060 closure_sync(&cl);
2061 bch_btree_set_root(n1);
2062 } else {
2063 /* Split a non root node */
2064 closure_sync(&cl);
2065 make_btree_freeing_key(b, parent_keys.top);
2066 bch_keylist_push(&parent_keys);
2067
2068 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2069 BUG_ON(!bch_keylist_empty(&parent_keys));
2070 }
2071
2072 btree_node_free(b);
2073 rw_unlock(true, n1);
2074
2075 bch_time_stats_update(&b->c->btree_split_time, start_time);
2076
2077 return 0;
2078 err_free2:
2079 bkey_put(b->c, &n2->key);
2080 btree_node_free(n2);
2081 rw_unlock(true, n2);
2082 err_free1:
2083 bkey_put(b->c, &n1->key);
2084 btree_node_free(n1);
2085 rw_unlock(true, n1);
2086 err:
2087 WARN(1, "bcache: btree split failed (level %u)", b->level);
2088
2089 if (n3 == ERR_PTR(-EAGAIN) ||
2090 n2 == ERR_PTR(-EAGAIN) ||
2091 n1 == ERR_PTR(-EAGAIN))
2092 return -EAGAIN;
2093
2094 return -ENOMEM;
2095 }
2096
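/*
 * Insert as many keys from @insert_keys as fit into @b.  Returns 0 on
 * success; -EAGAIN if a split is needed but we're running under
 * generic_make_request (current->bio_list is set), so the caller must
 * retry from a context that's allowed to block; and -EINTR if the
 * caller must retry from the root with write locks (op->lock is bumped
 * so the retry takes the intent locks it needs).
 */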
static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
				 struct keylist *insert_keys,
				 atomic_t *journal_ref,
				 struct bkey *replace_key)
{
	struct closure cl;

	BUG_ON(b->level && replace_key);

	closure_init_stack(&cl);

	mutex_lock(&b->write_lock);

	if (write_block(b) != btree_bset_last(b) &&
	    b->keys.last_set_unwritten)
		bch_btree_init_next(b); /* just wrote a set */

	if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
		mutex_unlock(&b->write_lock);
		goto split;
	}

	BUG_ON(write_block(b) != btree_bset_last(b));

	if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
		if (!b->level)
			bch_btree_leaf_dirty(b, journal_ref);
		else
			bch_btree_node_write(b, &cl);
	}

	mutex_unlock(&b->write_lock);

	/* wait for btree node write if necessary, after unlock */
	closure_sync(&cl);

	return 0;
split:
	if (current->bio_list) {
		op->lock = b->c->root->level + 1;
		return -EAGAIN;
	} else if (op->lock <= b->c->root->level) {
		op->lock = b->c->root->level + 1;
		return -EINTR;
	} else {
		/* Invalidated all iterators */
		int ret = btree_split(b, op, insert_keys, replace_key);

		if (bch_keylist_empty(insert_keys))
			return 0;
		else if (!ret)
			return -EINTR;
		return ret;
	}
}

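/*
 * Insert a placeholder "check key" (random value, pointing at the fake
 * device PTR_CHECK_DEV) covering a cache miss, so a later replace of
 * it can tell whether the range was raced with in the meantime.  If we
 * only hold a read lock, upgrade to an intent lock first; if the node
 * changed while we didn't hold the lock (pointer changed, or seq
 * advanced by more than our own lock acquisition), give up with -EINTR
 * so the caller retries.
 */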
int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
			       struct bkey *check_key)
{
	int ret = -EINTR;
	uint64_t btree_ptr = b->key.ptr[0];
	unsigned long seq = b->seq;
	struct keylist insert;
	bool upgrade = op->lock == -1;

	bch_keylist_init(&insert);

	if (upgrade) {
		rw_unlock(false, b);
		rw_lock(true, b, b->level);

		if (b->key.ptr[0] != btree_ptr ||
		    b->seq != seq + 1) {
			op->lock = b->level;
			goto out;
		}
	}

	SET_KEY_PTRS(check_key, 1);
	get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));

	SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);

	bch_keylist_add(&insert, check_key);

	ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);

	BUG_ON(!ret && !bch_keylist_empty(&insert));
out:
	if (upgrade)
		downgrade_write(&b->lock);
	return ret;
}

struct btree_insert_op {
	struct btree_op op;
	struct keylist *keys;
	atomic_t *journal_ref;
	struct bkey *replace_key;
};

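/*
 * Per-leaf callback for bch_btree_map_leaf_nodes(), from
 * bch_btree_insert(): insert what fits in this leaf and stop the
 * traversal (MAP_DONE), unless the insert failed with keys still left
 * over, in which case the error is propagated.
 */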
static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
{
	struct btree_insert_op *op = container_of(b_op,
					struct btree_insert_op, op);

	int ret = bch_btree_insert_node(b, &op->op, op->keys,
					op->journal_ref, op->replace_key);
	if (ret && !bch_keylist_empty(op->keys))
		return ret;
	else
		return MAP_DONE;
}

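/*
 * Top-level insert.  An illustrative caller (hypothetical, not from
 * this file) inserting a single key with no journalling and no replace
 * semantics would do roughly:
 *
 *	struct keylist keys;
 *
 *	bch_keylist_init(&keys);
 *	bch_keylist_add(&keys, k);
 *	ret = bch_btree_insert(c, &keys, NULL, NULL);
 *
 * -ESRCH on return means nothing was inserted because of a collision
 * (e.g. a replace whose target keys weren't found).
 */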
int bch_btree_insert(struct cache_set *c, struct keylist *keys,
		     atomic_t *journal_ref, struct bkey *replace_key)
{
	struct btree_insert_op op;
	int ret = 0;

	BUG_ON(current->bio_list);
	BUG_ON(bch_keylist_empty(keys));

	bch_btree_op_init(&op.op, 0);
	op.keys = keys;
	op.journal_ref = journal_ref;
	op.replace_key = replace_key;

	while (!ret && !bch_keylist_empty(keys)) {
		op.op.lock = 0;
		ret = bch_btree_map_leaf_nodes(&op.op, c,
					       &START_KEY(keys->keys),
					       btree_insert_fn);
	}

	if (ret) {
		struct bkey *k;

		pr_err("error %i", ret);

		while ((k = bch_keylist_pop(keys)))
			bkey_put(c, k);
	} else if (op.op.insert_collision)
		ret = -ESRCH;

	return ret;
}

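/*
 * Make @b the new root: the node must already be written, and its
 * buckets must still be at BTREE_PRIO (i.e. not reclaimable).  The
 * change isn't durable until the journal meta entry written at the end
 * of this function completes.
 */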
void bch_btree_set_root(struct btree *b)
{
	unsigned i;
	struct closure cl;

	closure_init_stack(&cl);

	trace_bcache_btree_set_root(b);

	BUG_ON(!b->written);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);

	mutex_lock(&b->c->bucket_lock);
	list_del_init(&b->list);
	mutex_unlock(&b->c->bucket_lock);

	b->c->root = b;

	bch_journal_meta(b->c, &cl);
	closure_sync(&cl);
}

/* Map across nodes or keys */

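/*
 * The map functions walk the btree from @from, calling @fn on each
 * node (or key) and stopping as soon as @fn returns anything other
 * than MAP_CONTINUE; that return value is passed back up.  With
 * MAP_ALL_NODES, interior nodes are visited as well as leaves
 * (children first).  btree_insert_fn() above is a real
 * btree_map_nodes_fn.
 */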
static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
				       struct bkey *from,
				       btree_map_nodes_fn *fn, int flags)
{
	int ret = MAP_CONTINUE;

	if (b->level) {
		struct bkey *k;
		struct btree_iter iter;

		bch_btree_iter_init(&b->keys, &iter, from);

		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
						       bch_ptr_bad))) {
			ret = btree(map_nodes_recurse, k, b,
				    op, from, fn, flags);
			from = NULL;

			if (ret != MAP_CONTINUE)
				return ret;
		}
	}

	if (!b->level || flags == MAP_ALL_NODES)
		ret = fn(op, b);

	return ret;
}

int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
{
	return btree_root(map_nodes_recurse, c, op, from, fn, flags);
}

static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
				      struct bkey *from, btree_map_keys_fn *fn,
				      int flags)
{
	int ret = MAP_CONTINUE;
	struct bkey *k;
	struct btree_iter iter;

	bch_btree_iter_init(&b->keys, &iter, from);

	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
		ret = !b->level
			? fn(op, b, k)
			: btree(map_keys_recurse, k, b, op, from, fn, flags);
		from = NULL;

		if (ret != MAP_CONTINUE)
			return ret;
	}

	if (!b->level && (flags & MAP_END_KEY))
		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
				     KEY_OFFSET(&b->key), 0));

	return ret;
}

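/*
 * With MAP_END_KEY, @fn is additionally called with a zero-size key at
 * the end of each leaf, so callers can record how far the scan got;
 * refill_keybuf_fn() below relies on this to advance last_scanned.
 */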
int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
		       struct bkey *from, btree_map_keys_fn *fn, int flags)
{
	return btree_root(map_keys_recurse, c, op, from, fn, flags);
}

/* Keybuf code */

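/*
 * A keybuf is a snapshot of keys matching a predicate, kept in an
 * rbtree so consumers (e.g. writeback) can pull them out in sorted
 * order and track in-flight ones via ->private.  A hypothetical
 * consumer (illustrative only, not from this file) looks roughly like:
 *
 *	struct keybuf_key *w;
 *
 *	while ((w = bch_keybuf_next_rescan(c, buf, &MAX_KEY, pred))) {
 *		... do the IO described by w->key ...
 *		bch_keybuf_del(buf, w);
 *	}
 */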
static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
{
	/* Overlapping keys compare equal */
	if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
		return -1;
	if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
		return 1;
	return 0;
}

static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
					    struct keybuf_key *r)
{
	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
}

struct refill {
	struct btree_op op;
	unsigned nr_found;
	struct keybuf *buf;
	struct bkey *end;
	keybuf_pred_fn *pred;
};

static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
			    struct bkey *k)
{
	struct refill *refill = container_of(op, struct refill, op);
	struct keybuf *buf = refill->buf;
	int ret = MAP_CONTINUE;

	if (bkey_cmp(k, refill->end) >= 0) {
		ret = MAP_DONE;
		goto out;
	}

	if (!KEY_SIZE(k)) /* end key */
		goto out;

	if (refill->pred(buf, k)) {
		struct keybuf_key *w;

		spin_lock(&buf->lock);

		w = array_alloc(&buf->freelist);
		if (!w) {
			spin_unlock(&buf->lock);
			return MAP_DONE;
		}

		w->private = NULL;
		bkey_copy(&w->key, k);

		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
			array_free(&buf->freelist, w);
		else
			refill->nr_found++;

		if (array_freelist_empty(&buf->freelist))
			ret = MAP_DONE;

		spin_unlock(&buf->lock);
	}
out:
	buf->last_scanned = *k;
	return ret;
}

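/*
 * Scan forward from buf->last_scanned, buffering keys that match @pred
 * until @end is reached or the freelist runs dry, then set
 * buf->start/buf->end to the range actually buffered (MAX_KEY when
 * empty, which makes bch_keybuf_check_overlapping() a no-op).
 */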
void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
		       struct bkey *end, keybuf_pred_fn *pred)
{
	struct bkey start = buf->last_scanned;
	struct refill refill;

	cond_resched();

	bch_btree_op_init(&refill.op, -1);
	refill.nr_found = 0;
	refill.buf = buf;
	refill.end = end;
	refill.pred = pred;

	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
			   refill_keybuf_fn, MAP_END_KEY);

	trace_bcache_keyscan(refill.nr_found,
			     KEY_INODE(&start), KEY_OFFSET(&start),
			     KEY_INODE(&buf->last_scanned),
			     KEY_OFFSET(&buf->last_scanned));

	spin_lock(&buf->lock);

	if (!RB_EMPTY_ROOT(&buf->keys)) {
		struct keybuf_key *w;
		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
		buf->start = START_KEY(&w->key);

		w = RB_LAST(&buf->keys, struct keybuf_key, node);
		buf->end = w->key;
	} else {
		buf->start = MAX_KEY;
		buf->end = MAX_KEY;
	}

	spin_unlock(&buf->lock);
}

static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	rb_erase(&w->node, &buf->keys);
	array_free(&buf->freelist, w);
}

void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	spin_lock(&buf->lock);
	__bch_keybuf_del(buf, w);
	spin_unlock(&buf->lock);
}

bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
				  struct bkey *end)
{
	bool ret = false;
	struct keybuf_key *p, *w, s;
	s.key = *start;

	if (bkey_cmp(end, &buf->start) <= 0 ||
	    bkey_cmp(start, &buf->end) >= 0)
		return false;

	spin_lock(&buf->lock);
	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);

	while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
		p = w;
		w = RB_NEXT(w, node);

		if (p->private)
			ret = true;
		else
			__bch_keybuf_del(buf, p);
	}

	spin_unlock(&buf->lock);
	return ret;
}

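/*
 * Hand out the first buffered key not already in flight, marking it
 * with ERR_PTR(-EINTR) in ->private to claim it.  Returns NULL when
 * every buffered key is claimed (or the buffer is empty).
 */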
struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
{
	struct keybuf_key *w;
	spin_lock(&buf->lock);

	w = RB_FIRST(&buf->keys, struct keybuf_key, node);

	while (w && w->private)
		w = RB_NEXT(w, node);

	if (w)
		w->private = ERR_PTR(-EINTR);

	spin_unlock(&buf->lock);
	return w;
}

struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
					  struct keybuf *buf,
					  struct bkey *end,
					  keybuf_pred_fn *pred)
{
	struct keybuf_key *ret;

	while (1) {
		ret = bch_keybuf_next(buf);
		if (ret)
			break;

		if (bkey_cmp(&buf->last_scanned, end) >= 0) {
			pr_debug("scan finished");
			break;
		}

		bch_refill_keybuf(c, buf, end, pred);
	}

	return ret;
}

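/*
 * Note that last_scanned starts out at MAX_KEY, i.e. "scan finished";
 * it's up to the consumer to reset it before (re)starting a scan.
 */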
void bch_keybuf_init(struct keybuf *buf)
{
	buf->last_scanned = MAX_KEY;
	buf->keys = RB_ROOT;

	spin_lock_init(&buf->lock);
	array_allocator_init(&buf->freelist);
}