1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4  *
5  * Uses a block device as cache for other block devices; optimized for SSDs.
6  * All allocation is done in buckets, which should match the erase block size
7  * of the device.
8  *
9  * Buckets containing cached data are kept on a heap sorted by priority;
10  * bucket priority is increased on cache hit, and periodically all the buckets
11  * on the heap have their priority scaled down. This currently is just used as
12  * an LRU but in the future should allow for more intelligent heuristics.
13  *
14  * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
15  * counter. Garbage collection is used to remove stale pointers.
16  *
17  * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
18  * as keys are inserted we only sort the pages that have not yet been written.
19  * When garbage collection is run, we resort the entire node.
20  *
21  * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
22  */
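/*
 * A note on the 8 bit counter mentioned above: roughly, a pointer whose
 * stored generation (PTR_GEN()) lags behind its bucket's current gen is
 * stale; see the ptr_stale() checks in the GC code below.
 */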
23 
24 #include "bcache.h"
25 #include "btree.h"
26 #include "debug.h"
27 #include "extents.h"
28 
29 #include <linux/slab.h>
30 #include <linux/bitops.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/prefetch.h>
34 #include <linux/random.h>
35 #include <linux/rcupdate.h>
36 #include <linux/sched/clock.h>
37 #include <linux/rculist.h>
38 #include <linux/delay.h>
39 #include <trace/events/bcache.h>
40 
41 /*
42  * Todo:
43  * register_bcache: Return errors out to userspace correctly
44  *
45  * Writeback: don't undirty key until after a cache flush
46  *
47  * Create an iterator for key pointers
48  *
49  * On btree write error, mark bucket such that it won't be freed from the cache
50  *
51  * Journalling:
52  *   Check for bad keys in replay
53  *   Propagate barriers
54  *   Refcount journal entries in journal_replay
55  *
56  * Garbage collection:
57  *   Finish incremental gc
58  *   Gc should free old UUIDs, data for invalid UUIDs
59  *
60  * Provide a way to list backing device UUIDs we have data cached for, and
61  * probably how long it's been since we've seen them, and a way to invalidate
62  * dirty data for devices that will never be attached again
63  *
64  * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
65  * that based on that and how much dirty data we have we can keep writeback
66  * from being starved
67  *
68  * Add a tracepoint or somesuch to watch for writeback starvation
69  *
70  * When btree depth > 1 and splitting an interior node, we have to make sure
71  * alloc_bucket() cannot fail. This should be true but is not completely
72  * obvious.
73  *
74  * Plugging?
75  *
76  * If data write is less than hard sector size of ssd, round up offset in open
77  * bucket to the next whole sector
78  *
79  * Superblock needs to be fleshed out for multiple cache devices
80  *
81  * Add a sysfs tunable for the number of writeback IOs in flight
82  *
83  * Add a sysfs tunable for the number of open data buckets
84  *
85  * IO tracking: Can we track when one process is doing io on behalf of another?
86  * IO tracking: Don't use just an average, weigh more recent stuff higher
87  *
88  * Test module load/unload
89  */
90 
91 #define MAX_NEED_GC		64
92 #define MAX_SAVE_PRIO		72
93 #define MAX_GC_TIMES		100
94 #define MIN_GC_NODES		100
95 #define GC_SLEEP_MS		100
96 
97 #define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))
98 
99 #define PTR_HASH(c, k)							\
100 	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
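/*
 * PTR_HASH() identifies the on-disk node a key points at: the bucket part
 * of ptr[0] combined with its generation. It is the lookup key for the
 * in-memory btree node hash table (see mca_hash() and mca_find() below).
 */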
101 
102 static struct workqueue_struct *btree_io_wq;
103 
104 #define insert_lock(s, b)	((b)->level <= (s)->lock)
105 
106 
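/* First block of this node's buffer that has not yet been written to disk. */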
107 static inline struct bset *write_block(struct btree *b)
108 {
109 	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache);
110 }
111 
112 static void bch_btree_init_next(struct btree *b)
113 {
114 	/* If not a leaf node, always sort */
115 	if (b->level && b->keys.nsets)
116 		bch_btree_sort(&b->keys, &b->c->sort);
117 	else
118 		bch_btree_sort_lazy(&b->keys, &b->c->sort);
119 
120 	if (b->written < btree_blocks(b))
121 		bch_bset_init_next(&b->keys, write_block(b),
122 				   bset_magic(&b->c->cache->sb));
123 
124 }
125 
126 /* Btree key manipulation */
127 
128 void bkey_put(struct cache_set *c, struct bkey *k)
129 {
130 	unsigned int i;
131 
132 	for (i = 0; i < KEY_PTRS(k); i++)
133 		if (ptr_available(c, k, i))
134 			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
135 }
136 
137 /* Btree IO */
138 
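/*
 * Checksum of a bset: a 64 bit CRC over everything past the first 8 bytes
 * (the csum field itself), seeded with the node's first pointer.
 */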
139 static uint64_t btree_csum_set(struct btree *b, struct bset *i)
140 {
141 	uint64_t crc = b->key.ptr[0];
142 	void *data = (void *) i + 8, *end = bset_bkey_last(i);
143 
144 	crc = bch_crc64_update(crc, data, end - data);
145 	return crc ^ 0xffffffffffffffffULL;
146 }
147 
148 void bch_btree_node_read_done(struct btree *b)
149 {
150 	const char *err = "bad btree header";
151 	struct bset *i = btree_bset_first(b);
152 	struct btree_iter *iter;
153 
154 	/*
155 	 * c->fill_iter can allocate an iterator with more memory space
156 	 * than static MAX_BSETS.
157 	 * See the comment around cache_set->fill_iter.
158 	 */
159 	iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
160 	iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
161 	iter->used = 0;
162 
163 #ifdef CONFIG_BCACHE_DEBUG
164 	iter->b = &b->keys;
165 #endif
166 
167 	if (!i->seq)
168 		goto err;
169 
170 	for (;
171 	     b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
172 	     i = write_block(b)) {
173 		err = "unsupported bset version";
174 		if (i->version > BCACHE_BSET_VERSION)
175 			goto err;
176 
177 		err = "bad btree header";
178 		if (b->written + set_blocks(i, block_bytes(b->c->cache)) >
179 		    btree_blocks(b))
180 			goto err;
181 
182 		err = "bad magic";
183 		if (i->magic != bset_magic(&b->c->cache->sb))
184 			goto err;
185 
186 		err = "bad checksum";
187 		switch (i->version) {
188 		case 0:
189 			if (i->csum != csum_set(i))
190 				goto err;
191 			break;
192 		case BCACHE_BSET_VERSION:
193 			if (i->csum != btree_csum_set(b, i))
194 				goto err;
195 			break;
196 		}
197 
198 		err = "empty set";
199 		if (i != b->keys.set[0].data && !i->keys)
200 			goto err;
201 
202 		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
203 
204 		b->written += set_blocks(i, block_bytes(b->c->cache));
205 	}
206 
207 	err = "corrupted btree";
208 	for (i = write_block(b);
209 	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
210 	     i = ((void *) i) + block_bytes(b->c->cache))
211 		if (i->seq == b->keys.set[0].data->seq)
212 			goto err;
213 
214 	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
215 
216 	i = b->keys.set[0].data;
217 	err = "short btree key";
218 	if (b->keys.set[0].size &&
219 	    bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
220 		goto err;
221 
222 	if (b->written < btree_blocks(b))
223 		bch_bset_init_next(&b->keys, write_block(b),
224 				   bset_magic(&b->c->cache->sb));
225 out:
226 	mempool_free(iter, &b->c->fill_iter);
227 	return;
228 err:
229 	set_btree_node_io_error(b);
230 	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
231 			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
232 			    bset_block_offset(b, i), i->keys);
233 	goto out;
234 }
235 
236 static void btree_node_read_endio(struct bio *bio)
237 {
238 	struct closure *cl = bio->bi_private;
239 
240 	closure_put(cl);
241 }
242 
243 static void bch_btree_node_read(struct btree *b)
244 {
245 	uint64_t start_time = local_clock();
246 	struct closure cl;
247 	struct bio *bio;
248 
249 	trace_bcache_btree_read(b);
250 
251 	closure_init_stack(&cl);
252 
253 	bio = bch_bbio_alloc(b->c);
254 	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
255 	bio->bi_end_io	= btree_node_read_endio;
256 	bio->bi_private	= &cl;
257 	bio->bi_opf = REQ_OP_READ | REQ_META;
258 
259 	bch_bio_map(bio, b->keys.set[0].data);
260 
261 	bch_submit_bbio(bio, b->c, &b->key, 0);
262 	closure_sync(&cl);
263 
264 	if (bio->bi_status)
265 		set_btree_node_io_error(b);
266 
267 	bch_bbio_free(bio, b->c);
268 
269 	if (btree_node_io_error(b))
270 		goto err;
271 
272 	bch_btree_node_read_done(b);
273 	bch_time_stats_update(&b->c->btree_read_time, start_time);
274 
275 	return;
276 err:
277 	bch_cache_set_error(b->c, "io error reading bucket %zu",
278 			    PTR_BUCKET_NR(b->c, &b->key, 0));
279 }
280 
281 static void btree_complete_write(struct btree *b, struct btree_write *w)
282 {
283 	if (w->prio_blocked &&
284 	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
285 		wake_up_allocators(b->c);
286 
287 	if (w->journal) {
288 		atomic_dec_bug(w->journal);
289 		__closure_wake_up(&b->c->journal.wait);
290 	}
291 
292 	w->prio_blocked	= 0;
293 	w->journal	= NULL;
294 }
295 
296 static void btree_node_write_unlock(struct closure *cl)
297 {
298 	struct btree *b = container_of(cl, struct btree, io);
299 
300 	up(&b->io_mutex);
301 }
302 
303 static void __btree_node_write_done(struct closure *cl)
304 {
305 	struct btree *b = container_of(cl, struct btree, io);
306 	struct btree_write *w = btree_prev_write(b);
307 
308 	bch_bbio_free(b->bio, b->c);
309 	b->bio = NULL;
310 	btree_complete_write(b, w);
311 
312 	if (btree_node_dirty(b))
313 		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
314 
315 	closure_return_with_destructor(cl, btree_node_write_unlock);
316 }
317 
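/*
 * Completion path for writes that copied the bset into freshly allocated
 * pages (see do_btree_node_write()): free those pages, then fall through
 * to the common completion in __btree_node_write_done().
 */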
318 static void btree_node_write_done(struct closure *cl)
319 {
320 	struct btree *b = container_of(cl, struct btree, io);
321 
322 	bio_free_pages(b->bio);
323 	__btree_node_write_done(cl);
324 }
325 
326 static void btree_node_write_endio(struct bio *bio)
327 {
328 	struct closure *cl = bio->bi_private;
329 	struct btree *b = container_of(cl, struct btree, io);
330 
331 	if (bio->bi_status)
332 		set_btree_node_io_error(b);
333 
334 	bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
335 	closure_put(cl);
336 }
337 
338 static void do_btree_node_write(struct btree *b)
339 {
340 	struct closure *cl = &b->io;
341 	struct bset *i = btree_bset_last(b);
342 	BKEY_PADDED(key) k;
343 
344 	i->version	= BCACHE_BSET_VERSION;
345 	i->csum		= btree_csum_set(b, i);
346 
347 	BUG_ON(b->bio);
348 	b->bio = bch_bbio_alloc(b->c);
349 
350 	b->bio->bi_end_io	= btree_node_write_endio;
351 	b->bio->bi_private	= cl;
352 	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c->cache));
353 	b->bio->bi_opf		= REQ_OP_WRITE | REQ_META | REQ_FUA;
354 	bch_bio_map(b->bio, i);
355 
356 	/*
357 	 * If we're appending to a leaf node, we don't technically need FUA -
358 	 * this write just needs to be persisted before the next journal write,
359 	 * which will be marked FLUSH|FUA.
360 	 *
361 	 * Similarly if we're writing a new btree root - the pointer is going to
362 	 * be in the next journal entry.
363 	 *
364 	 * But if we're writing a new btree node (that isn't a root) or
365 	 * appending to a non leaf btree node, we need either FUA or a flush
366 	 * when we write the parent with the new pointer. FUA is cheaper than a
367 	 * flush, and writes appending to leaf nodes aren't blocking anything so
368 	 * just make all btree node writes FUA to keep things sane.
369 	 */
370 
371 	bkey_copy(&k.key, &b->key);
372 	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
373 		       bset_sector_offset(&b->keys, i));
374 
375 	if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
376 		struct bio_vec *bv;
377 		void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
378 		struct bvec_iter_all iter_all;
379 
380 		bio_for_each_segment_all(bv, b->bio, iter_all) {
381 			memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
382 			addr += PAGE_SIZE;
383 		}
384 
385 		bch_submit_bbio(b->bio, b->c, &k.key, 0);
386 
387 		continue_at(cl, btree_node_write_done, NULL);
388 	} else {
389 		/*
390 		 * No problem for multipage bvec since the bio is
391 		 * just allocated
392 		 */
393 		b->bio->bi_vcnt = 0;
394 		bch_bio_map(b->bio, i);
395 
396 		bch_submit_bbio(b->bio, b->c, &k.key, 0);
397 
398 		closure_sync(cl);
399 		continue_at_nobarrier(cl, __btree_node_write_done, NULL);
400 	}
401 }
402 
403 void __bch_btree_node_write(struct btree *b, struct closure *parent)
404 {
405 	struct bset *i = btree_bset_last(b);
406 
407 	lockdep_assert_held(&b->write_lock);
408 
409 	trace_bcache_btree_write(b);
410 
411 	BUG_ON(current->bio_list);
412 	BUG_ON(b->written >= btree_blocks(b));
413 	BUG_ON(b->written && !i->keys);
414 	BUG_ON(btree_bset_first(b)->seq != i->seq);
415 	bch_check_keys(&b->keys, "writing");
416 
417 	cancel_delayed_work(&b->work);
418 
419 	/* If caller isn't waiting for write, parent refcount is cache set */
420 	down(&b->io_mutex);
421 	closure_init(&b->io, parent ?: &b->c->cl);
422 
423 	clear_bit(BTREE_NODE_dirty,	 &b->flags);
424 	change_bit(BTREE_NODE_write_idx, &b->flags);
425 
426 	do_btree_node_write(b);
427 
428 	atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size,
429 			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
430 
431 	b->written += set_blocks(i, block_bytes(b->c->cache));
432 }
433 
434 void bch_btree_node_write(struct btree *b, struct closure *parent)
435 {
436 	unsigned int nsets = b->keys.nsets;
437 
438 	lockdep_assert_held(&b->lock);
439 
440 	__bch_btree_node_write(b, parent);
441 
442 	/*
443 	 * do verify if there was more than one set initially (i.e. we did a
444 	 * sort) and we sorted down to a single set:
445 	 */
446 	if (nsets && !b->keys.nsets)
447 		bch_btree_verify(b);
448 
449 	bch_btree_init_next(b);
450 }
451 
452 static void bch_btree_node_write_sync(struct btree *b)
453 {
454 	struct closure cl;
455 
456 	closure_init_stack(&cl);
457 
458 	mutex_lock(&b->write_lock);
459 	bch_btree_node_write(b, &cl);
460 	mutex_unlock(&b->write_lock);
461 
462 	closure_sync(&cl);
463 }
464 
465 static void btree_node_write_work(struct work_struct *w)
466 {
467 	struct btree *b = container_of(to_delayed_work(w), struct btree, work);
468 
469 	mutex_lock(&b->write_lock);
470 	if (btree_node_dirty(b))
471 		__bch_btree_node_write(b, NULL);
472 	mutex_unlock(&b->write_lock);
473 }
474 
475 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
476 {
477 	struct bset *i = btree_bset_last(b);
478 	struct btree_write *w = btree_current_write(b);
479 
480 	lockdep_assert_held(&b->write_lock);
481 
482 	BUG_ON(!b->written);
483 	BUG_ON(!i->keys);
484 
485 	if (!btree_node_dirty(b))
486 		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
487 
488 	set_btree_node_dirty(b);
489 
490 	/*
491 	 * w->journal is always the oldest journal pin of all bkeys
492 	 * in the leaf node, to make sure the oldest jset seq won't
493 	 * be increased before this btree node is flushed.
494 	 */
495 	if (journal_ref) {
496 		if (w->journal &&
497 		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
498 			atomic_dec_bug(w->journal);
499 			w->journal = NULL;
500 		}
501 
502 		if (!w->journal) {
503 			w->journal = journal_ref;
504 			atomic_inc(w->journal);
505 		}
506 	}
507 
508 	/* Force write if set is too big */
509 	if (set_bytes(i) > PAGE_SIZE - 48 &&
510 	    !current->bio_list)
511 		bch_btree_node_write(b, NULL);
512 }
513 
514 /*
515  * Btree in memory cache - allocation/freeing
516  * mca -> memory cache
517  */
518 
519 #define mca_reserve(c)	(((!IS_ERR_OR_NULL(c->root) && c->root->level) \
520 			  ? c->root->level : 1) * 8 + 16)
521 #define mca_can_free(c)						\
522 	max_t(int, 0, c->btree_cache_used - mca_reserve(c))
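/*
 * mca_reserve(): 8 nodes per level of the btree (at least one level) plus
 * 16; mca_can_free() is whatever the cache holds beyond that reserve.
 */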
523 
524 static void mca_data_free(struct btree *b)
525 {
526 	BUG_ON(b->io_mutex.count != 1);
527 
528 	bch_btree_keys_free(&b->keys);
529 
530 	b->c->btree_cache_used--;
531 	list_move(&b->list, &b->c->btree_cache_freed);
532 }
533 
534 static void mca_bucket_free(struct btree *b)
535 {
536 	BUG_ON(btree_node_dirty(b));
537 
538 	b->key.ptr[0] = 0;
539 	hlist_del_init_rcu(&b->hash);
540 	list_move(&b->list, &b->c->btree_cache_freeable);
541 }
542 
543 static unsigned int btree_order(struct bkey *k)
544 {
545 	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
546 }
547 
548 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
549 {
550 	if (!bch_btree_keys_alloc(&b->keys,
551 				  max_t(unsigned int,
552 					ilog2(b->c->btree_pages),
553 					btree_order(k)),
554 				  gfp)) {
555 		b->c->btree_cache_used++;
556 		list_move(&b->list, &b->c->btree_cache);
557 	} else {
558 		list_move(&b->list, &b->c->btree_cache_freed);
559 	}
560 }
561 
562 static struct btree *mca_bucket_alloc(struct cache_set *c,
563 				      struct bkey *k, gfp_t gfp)
564 {
565 	/*
566 	 * kzalloc() is necessary here for initialization,
567 	 * see code comments in bch_btree_keys_init().
568 	 */
569 	struct btree *b = kzalloc(sizeof(struct btree), gfp);
570 
571 	if (!b)
572 		return NULL;
573 
574 	init_rwsem(&b->lock);
575 	lockdep_set_novalidate_class(&b->lock);
576 	mutex_init(&b->write_lock);
577 	lockdep_set_novalidate_class(&b->write_lock);
578 	INIT_LIST_HEAD(&b->list);
579 	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
580 	b->c = c;
581 	sema_init(&b->io_mutex, 1);
582 
583 	mca_data_alloc(b, k, gfp);
584 	return b;
585 }
586 
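/*
 * Try to take a node for reuse: grab its write lock; if flush is not set,
 * give up on dirty or busy nodes, otherwise write the node out and wait for
 * in-flight IO. Returns 0 on success, -ENOMEM if it can't be reaped now.
 */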
587 static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
588 {
589 	struct closure cl;
590 
591 	closure_init_stack(&cl);
592 	lockdep_assert_held(&b->c->bucket_lock);
593 
594 	if (!down_write_trylock(&b->lock))
595 		return -ENOMEM;
596 
597 	BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
598 
599 	if (b->keys.page_order < min_order)
600 		goto out_unlock;
601 
602 	if (!flush) {
603 		if (btree_node_dirty(b))
604 			goto out_unlock;
605 
606 		if (down_trylock(&b->io_mutex))
607 			goto out_unlock;
608 		up(&b->io_mutex);
609 	}
610 
611 retry:
612 	/*
613 	 * BTREE_NODE_dirty might be cleared in btree_flush_write() by
614 	 * __bch_btree_node_write(). To avoid an extra flush, acquire
615 	 * b->write_lock before checking the BTREE_NODE_dirty bit.
616 	 */
617 	mutex_lock(&b->write_lock);
618 	/*
619 	 * If this btree node is selected in btree_flush_write() by journal
620 	 * code, delay and retry until the node is flushed by journal code
621 	 * and BTREE_NODE_journal_flush bit cleared by btree_flush_write().
622 	 */
623 	if (btree_node_journal_flush(b)) {
624 		pr_debug("bnode %p is flushing by journal, retry\n", b);
625 		mutex_unlock(&b->write_lock);
626 		udelay(1);
627 		goto retry;
628 	}
629 
630 	if (btree_node_dirty(b))
631 		__bch_btree_node_write(b, &cl);
632 	mutex_unlock(&b->write_lock);
633 
634 	closure_sync(&cl);
635 
636 	/* wait for any in flight btree write */
637 	down(&b->io_mutex);
638 	up(&b->io_mutex);
639 
640 	return 0;
641 out_unlock:
642 	rw_unlock(true, b);
643 	return -ENOMEM;
644 }
645 
646 static unsigned long bch_mca_scan(struct shrinker *shrink,
647 				  struct shrink_control *sc)
648 {
649 	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
650 	struct btree *b, *t;
651 	unsigned long i, nr = sc->nr_to_scan;
652 	unsigned long freed = 0;
653 	unsigned int btree_cache_used;
654 
655 	if (c->shrinker_disabled)
656 		return SHRINK_STOP;
657 
658 	if (c->btree_cache_alloc_lock)
659 		return SHRINK_STOP;
660 
661 	/* Return -1 if we can't do anything right now */
662 	if (sc->gfp_mask & __GFP_IO)
663 		mutex_lock(&c->bucket_lock);
664 	else if (!mutex_trylock(&c->bucket_lock))
665 		return -1;
666 
667 	/*
668 	 * It's _really_ critical that we don't free too many btree nodes - we
669 	 * have to always leave ourselves a reserve. The reserve is how we
670 	 * guarantee that allocating memory for a new btree node can always
671 	 * succeed, so that inserting keys into the btree can always succeed and
672 	 * IO can always make forward progress:
673 	 */
674 	nr /= c->btree_pages;
675 	if (nr == 0)
676 		nr = 1;
677 	nr = min_t(unsigned long, nr, mca_can_free(c));
678 
679 	i = 0;
680 	btree_cache_used = c->btree_cache_used;
681 	list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) {
682 		if (nr <= 0)
683 			goto out;
684 
685 		if (!mca_reap(b, 0, false)) {
686 			mca_data_free(b);
687 			rw_unlock(true, b);
688 			freed++;
689 		}
690 		nr--;
691 		i++;
692 	}
693 
694 	list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
695 		if (nr <= 0 || i >= btree_cache_used)
696 			goto out;
697 
698 		if (!mca_reap(b, 0, false)) {
699 			mca_bucket_free(b);
700 			mca_data_free(b);
701 			rw_unlock(true, b);
702 			freed++;
703 		}
704 
705 		nr--;
706 		i++;
707 	}
708 out:
709 	mutex_unlock(&c->bucket_lock);
710 	return freed * c->btree_pages;
711 }
712 
713 static unsigned long bch_mca_count(struct shrinker *shrink,
714 				   struct shrink_control *sc)
715 {
716 	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
717 
718 	if (c->shrinker_disabled)
719 		return 0;
720 
721 	if (c->btree_cache_alloc_lock)
722 		return 0;
723 
724 	return mca_can_free(c) * c->btree_pages;
725 }
726 
727 void bch_btree_cache_free(struct cache_set *c)
728 {
729 	struct btree *b;
730 	struct closure cl;
731 
732 	closure_init_stack(&cl);
733 
734 	if (c->shrink.list.next)
735 		unregister_shrinker(&c->shrink);
736 
737 	mutex_lock(&c->bucket_lock);
738 
739 #ifdef CONFIG_BCACHE_DEBUG
740 	if (c->verify_data)
741 		list_move(&c->verify_data->list, &c->btree_cache);
742 
743 	free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb)));
744 #endif
745 
746 	list_splice(&c->btree_cache_freeable,
747 		    &c->btree_cache);
748 
749 	while (!list_empty(&c->btree_cache)) {
750 		b = list_first_entry(&c->btree_cache, struct btree, list);
751 
752 		/*
753 		 * This function is called by cache_set_free(); there is no
754 		 * I/O on the cache now, so it is unnecessary to acquire
755 		 * b->write_lock before clearing BTREE_NODE_dirty anymore.
756 		 */
757 		if (btree_node_dirty(b)) {
758 			btree_complete_write(b, btree_current_write(b));
759 			clear_bit(BTREE_NODE_dirty, &b->flags);
760 		}
761 		mca_data_free(b);
762 	}
763 
764 	while (!list_empty(&c->btree_cache_freed)) {
765 		b = list_first_entry(&c->btree_cache_freed,
766 				     struct btree, list);
767 		list_del(&b->list);
768 		cancel_delayed_work_sync(&b->work);
769 		kfree(b);
770 	}
771 
772 	mutex_unlock(&c->bucket_lock);
773 }
774 
775 int bch_btree_cache_alloc(struct cache_set *c)
776 {
777 	unsigned int i;
778 
779 	for (i = 0; i < mca_reserve(c); i++)
780 		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
781 			return -ENOMEM;
782 
783 	list_splice_init(&c->btree_cache,
784 			 &c->btree_cache_freeable);
785 
786 #ifdef CONFIG_BCACHE_DEBUG
787 	mutex_init(&c->verify_lock);
788 
789 	c->verify_ondisk = (void *)
790 		__get_free_pages(GFP_KERNEL|__GFP_COMP,
791 				 ilog2(meta_bucket_pages(&c->cache->sb)));
792 	if (!c->verify_ondisk) {
793 		/*
794 		 * Don't worry about the mca_reserve() buckets
795 		 * allocated in previous for-loop, they will be
796 		 * handled properly in bch_cache_set_unregister().
797 		 */
798 		return -ENOMEM;
799 	}
800 
801 	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
802 
803 	if (c->verify_data &&
804 	    c->verify_data->keys.set->data)
805 		list_del_init(&c->verify_data->list);
806 	else
807 		c->verify_data = NULL;
808 #endif
809 
810 	c->shrink.count_objects = bch_mca_count;
811 	c->shrink.scan_objects = bch_mca_scan;
812 	c->shrink.seeks = 4;
813 	c->shrink.batch = c->btree_pages * 2;
814 
815 	if (register_shrinker(&c->shrink))
816 		pr_warn("bcache: %s: could not register shrinker\n",
817 				__func__);
818 
819 	return 0;
820 }
821 
822 /* Btree in memory cache - hash table */
823 
824 static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
825 {
826 	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
827 }
828 
829 static struct btree *mca_find(struct cache_set *c, struct bkey *k)
830 {
831 	struct btree *b;
832 
833 	rcu_read_lock();
834 	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
835 		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
836 			goto out;
837 	b = NULL;
838 out:
839 	rcu_read_unlock();
840 	return b;
841 }
842 
843 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
844 {
845 	spin_lock(&c->btree_cannibalize_lock);
846 	if (likely(c->btree_cache_alloc_lock == NULL)) {
847 		c->btree_cache_alloc_lock = current;
848 	} else if (c->btree_cache_alloc_lock != current) {
849 		if (op)
850 			prepare_to_wait(&c->btree_cache_wait, &op->wait,
851 					TASK_UNINTERRUPTIBLE);
852 		spin_unlock(&c->btree_cannibalize_lock);
853 		return -EINTR;
854 	}
855 	spin_unlock(&c->btree_cannibalize_lock);
856 
857 	return 0;
858 }
859 
860 static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
861 				     struct bkey *k)
862 {
863 	struct btree *b;
864 
865 	trace_bcache_btree_cache_cannibalize(c);
866 
867 	if (mca_cannibalize_lock(c, op))
868 		return ERR_PTR(-EINTR);
869 
870 	list_for_each_entry_reverse(b, &c->btree_cache, list)
871 		if (!mca_reap(b, btree_order(k), false))
872 			return b;
873 
874 	list_for_each_entry_reverse(b, &c->btree_cache, list)
875 		if (!mca_reap(b, btree_order(k), true))
876 			return b;
877 
878 	WARN(1, "btree cache cannibalize failed\n");
879 	return ERR_PTR(-ENOMEM);
880 }
881 
882 /*
883  * We can only have one thread cannibalizing other cached btree nodes at a time,
884  * or we'll deadlock. We use an open coded mutex to ensure that, which
885  * cannibalize_bucket() will take. This means every time we unlock the root of
886  * the btree, we need to release this lock if we have it held.
887  */
888 void bch_cannibalize_unlock(struct cache_set *c)
889 {
890 	spin_lock(&c->btree_cannibalize_lock);
891 	if (c->btree_cache_alloc_lock == current) {
892 		c->btree_cache_alloc_lock = NULL;
893 		wake_up(&c->btree_cache_wait);
894 	}
895 	spin_unlock(&c->btree_cannibalize_lock);
896 }
897 
898 static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
899 			       struct bkey *k, int level)
900 {
901 	struct btree *b;
902 
903 	BUG_ON(current->bio_list);
904 
905 	lockdep_assert_held(&c->bucket_lock);
906 
907 	if (mca_find(c, k))
908 		return NULL;
909 
910 	/* btree_free() doesn't free memory; it sticks the node on the end of
911 	 * the list. Check if there are any freed nodes there:
912 	 */
913 	list_for_each_entry(b, &c->btree_cache_freeable, list)
914 		if (!mca_reap(b, btree_order(k), false))
915 			goto out;
916 
917 	/* We never free struct btree itself, just the memory that holds the on
918 	 * disk node. Check the freed list before allocating a new one:
919 	 */
920 	list_for_each_entry(b, &c->btree_cache_freed, list)
921 		if (!mca_reap(b, 0, false)) {
922 			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
923 			if (!b->keys.set[0].data)
924 				goto err;
925 			else
926 				goto out;
927 		}
928 
929 	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
930 	if (!b)
931 		goto err;
932 
933 	BUG_ON(!down_write_trylock(&b->lock));
934 	if (!b->keys.set->data)
935 		goto err;
936 out:
937 	BUG_ON(b->io_mutex.count != 1);
938 
939 	bkey_copy(&b->key, k);
940 	list_move(&b->list, &c->btree_cache);
941 	hlist_del_init_rcu(&b->hash);
942 	hlist_add_head_rcu(&b->hash, mca_hash(c, k));
943 
944 	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
945 	b->parent	= (void *) ~0UL;
946 	b->flags	= 0;
947 	b->written	= 0;
948 	b->level	= level;
949 
950 	if (!b->level)
951 		bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
952 				    &b->c->expensive_debug_checks);
953 	else
954 		bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
955 				    &b->c->expensive_debug_checks);
956 
957 	return b;
958 err:
959 	if (b)
960 		rw_unlock(true, b);
961 
962 	b = mca_cannibalize(c, op, k);
963 	if (!IS_ERR(b))
964 		goto out;
965 
966 	return b;
967 }
968 
969 /*
970  * bch_btree_node_get - find a btree node in the cache and lock it, reading it
971  * in from disk if necessary.
972  *
973  * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN.
974  *
975  * The btree node will have either a read or a write lock held, depending on
976  * level and op->lock.
977  *
978  * Note: Only an error code or a btree pointer will be returned; it is
979  *       unnecessary for callers to check for a NULL pointer.
980  */
981 struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
982 				 struct bkey *k, int level, bool write,
983 				 struct btree *parent)
984 {
985 	int i = 0;
986 	struct btree *b;
987 
988 	BUG_ON(level < 0);
989 retry:
990 	b = mca_find(c, k);
991 
992 	if (!b) {
993 		if (current->bio_list)
994 			return ERR_PTR(-EAGAIN);
995 
996 		mutex_lock(&c->bucket_lock);
997 		b = mca_alloc(c, op, k, level);
998 		mutex_unlock(&c->bucket_lock);
999 
1000 		if (!b)
1001 			goto retry;
1002 		if (IS_ERR(b))
1003 			return b;
1004 
1005 		bch_btree_node_read(b);
1006 
1007 		if (!write)
1008 			downgrade_write(&b->lock);
1009 	} else {
1010 		rw_lock(write, b, level);
1011 		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
1012 			rw_unlock(write, b);
1013 			goto retry;
1014 		}
1015 		BUG_ON(b->level != level);
1016 	}
1017 
1018 	if (btree_node_io_error(b)) {
1019 		rw_unlock(write, b);
1020 		return ERR_PTR(-EIO);
1021 	}
1022 
1023 	BUG_ON(!b->written);
1024 
1025 	b->parent = parent;
1026 
1027 	for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1028 		prefetch(b->keys.set[i].tree);
1029 		prefetch(b->keys.set[i].data);
1030 	}
1031 
1032 	for (; i <= b->keys.nsets; i++)
1033 		prefetch(b->keys.set[i].data);
1034 
1035 	return b;
1036 }
1037 
1038 static void btree_node_prefetch(struct btree *parent, struct bkey *k)
1039 {
1040 	struct btree *b;
1041 
1042 	mutex_lock(&parent->c->bucket_lock);
1043 	b = mca_alloc(parent->c, NULL, k, parent->level - 1);
1044 	mutex_unlock(&parent->c->bucket_lock);
1045 
1046 	if (!IS_ERR_OR_NULL(b)) {
1047 		b->parent = parent;
1048 		bch_btree_node_read(b);
1049 		rw_unlock(true, b);
1050 	}
1051 }
1052 
1053 /* Btree alloc */
1054 
1055 static void btree_node_free(struct btree *b)
1056 {
1057 	trace_bcache_btree_node_free(b);
1058 
1059 	BUG_ON(b == b->c->root);
1060 
1061 retry:
1062 	mutex_lock(&b->write_lock);
1063 	/*
1064 	 * If the btree node is selected and flushing in btree_flush_write(),
1065 	 * delay and retry until the BTREE_NODE_journal_flush bit cleared,
1066 	 * then it is safe to free the btree node here. Otherwise this btree
1067 	 * node would race with btree_flush_write().
1068 	 */
1069 	if (btree_node_journal_flush(b)) {
1070 		mutex_unlock(&b->write_lock);
1071 		pr_debug("bnode %p journal_flush set, retry\n", b);
1072 		udelay(1);
1073 		goto retry;
1074 	}
1075 
1076 	if (btree_node_dirty(b)) {
1077 		btree_complete_write(b, btree_current_write(b));
1078 		clear_bit(BTREE_NODE_dirty, &b->flags);
1079 	}
1080 
1081 	mutex_unlock(&b->write_lock);
1082 
1083 	cancel_delayed_work(&b->work);
1084 
1085 	mutex_lock(&b->c->bucket_lock);
1086 	bch_bucket_free(b->c, &b->key);
1087 	mca_bucket_free(b);
1088 	mutex_unlock(&b->c->bucket_lock);
1089 }
1090 
1091 /*
1092  * Only an error code or a btree pointer will be returned; it is unnecessary
1093  * for callers to check for a NULL pointer.
1094  */
1095 struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
1096 				     int level, bool wait,
1097 				     struct btree *parent)
1098 {
1099 	BKEY_PADDED(key) k;
1100 	struct btree *b;
1101 
1102 	mutex_lock(&c->bucket_lock);
1103 retry:
1104 	/* return ERR_PTR(-EAGAIN) when it fails */
1105 	b = ERR_PTR(-EAGAIN);
1106 	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
1107 		goto err;
1108 
1109 	bkey_put(c, &k.key);
1110 	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1111 
1112 	b = mca_alloc(c, op, &k.key, level);
1113 	if (IS_ERR(b))
1114 		goto err_free;
1115 
1116 	if (!b) {
1117 		cache_bug(c,
1118 			"Tried to allocate bucket that was in btree cache");
1119 		goto retry;
1120 	}
1121 
1122 	b->parent = parent;
1123 	bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));
1124 
1125 	mutex_unlock(&c->bucket_lock);
1126 
1127 	trace_bcache_btree_node_alloc(b);
1128 	return b;
1129 err_free:
1130 	bch_bucket_free(c, &k.key);
1131 err:
1132 	mutex_unlock(&c->bucket_lock);
1133 
1134 	trace_bcache_btree_node_alloc_fail(c);
1135 	return b;
1136 }
1137 
1138 static struct btree *bch_btree_node_alloc(struct cache_set *c,
1139 					  struct btree_op *op, int level,
1140 					  struct btree *parent)
1141 {
1142 	return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1143 }
1144 
1145 static struct btree *btree_node_alloc_replacement(struct btree *b,
1146 						  struct btree_op *op)
1147 {
1148 	struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1149 
1150 	if (!IS_ERR(n)) {
1151 		mutex_lock(&n->write_lock);
1152 		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1153 		bkey_copy_key(&n->key, &b->key);
1154 		mutex_unlock(&n->write_lock);
1155 	}
1156 
1157 	return n;
1158 }
1159 
1160 static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1161 {
1162 	unsigned int i;
1163 
1164 	mutex_lock(&b->c->bucket_lock);
1165 
1166 	atomic_inc(&b->c->prio_blocked);
1167 
1168 	bkey_copy(k, &b->key);
1169 	bkey_copy_key(k, &ZERO_KEY);
1170 
1171 	for (i = 0; i < KEY_PTRS(k); i++)
1172 		SET_PTR_GEN(k, i,
1173 			    bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
1174 					PTR_BUCKET(b->c, &b->key, i)));
1175 
1176 	mutex_unlock(&b->c->bucket_lock);
1177 }
1178 
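/*
 * Check that the RESERVE_BTREE freelist can cover a split propagating up
 * from this node: presumably two new nodes per level between here and the
 * root plus one more, hence (root->level - b->level) * 2 + 1.
 */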
1179 static int btree_check_reserve(struct btree *b, struct btree_op *op)
1180 {
1181 	struct cache_set *c = b->c;
1182 	struct cache *ca = c->cache;
1183 	unsigned int reserve = (c->root->level - b->level) * 2 + 1;
1184 
1185 	mutex_lock(&c->bucket_lock);
1186 
1187 	if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1188 		if (op)
1189 			prepare_to_wait(&c->btree_cache_wait, &op->wait,
1190 					TASK_UNINTERRUPTIBLE);
1191 		mutex_unlock(&c->bucket_lock);
1192 		return -EINTR;
1193 	}
1194 
1195 	mutex_unlock(&c->bucket_lock);
1196 
1197 	return mca_cannibalize_lock(b->c, op);
1198 }
1199 
1200 /* Garbage collection */
1201 
1202 static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1203 				    struct bkey *k)
1204 {
1205 	uint8_t stale = 0;
1206 	unsigned int i;
1207 	struct bucket *g;
1208 
1209 	/*
1210 	 * ptr_invalid() can't return true for the keys that mark btree nodes as
1211 	 * freed, but since ptr_bad() returns true we'll never actually use them
1212 	 * for anything and thus we don't want mark their pointers here
1213 	 */
1214 	if (!bkey_cmp(k, &ZERO_KEY))
1215 		return stale;
1216 
1217 	for (i = 0; i < KEY_PTRS(k); i++) {
1218 		if (!ptr_available(c, k, i))
1219 			continue;
1220 
1221 		g = PTR_BUCKET(c, k, i);
1222 
1223 		if (gen_after(g->last_gc, PTR_GEN(k, i)))
1224 			g->last_gc = PTR_GEN(k, i);
1225 
1226 		if (ptr_stale(c, k, i)) {
1227 			stale = max(stale, ptr_stale(c, k, i));
1228 			continue;
1229 		}
1230 
1231 		cache_bug_on(GC_MARK(g) &&
1232 			     (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1233 			     c, "inconsistent ptrs: mark = %llu, level = %i",
1234 			     GC_MARK(g), level);
1235 
1236 		if (level)
1237 			SET_GC_MARK(g, GC_MARK_METADATA);
1238 		else if (KEY_DIRTY(k))
1239 			SET_GC_MARK(g, GC_MARK_DIRTY);
1240 		else if (!GC_MARK(g))
1241 			SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
1242 
1243 		/* guard against overflow */
1244 		SET_GC_SECTORS_USED(g, min_t(unsigned int,
1245 					     GC_SECTORS_USED(g) + KEY_SIZE(k),
1246 					     MAX_GC_SECTORS_USED));
1247 
1248 		BUG_ON(!GC_SECTORS_USED(g));
1249 	}
1250 
1251 	return stale;
1252 }
1253 
1254 #define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k)
1255 
1256 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1257 {
1258 	unsigned int i;
1259 
1260 	for (i = 0; i < KEY_PTRS(k); i++)
1261 		if (ptr_available(c, k, i) &&
1262 		    !ptr_stale(c, k, i)) {
1263 			struct bucket *b = PTR_BUCKET(c, k, i);
1264 
1265 			b->gen = PTR_GEN(k, i);
1266 
1267 			if (level && bkey_cmp(k, &ZERO_KEY))
1268 				b->prio = BTREE_PRIO;
1269 			else if (!level && b->prio == BTREE_PRIO)
1270 				b->prio = INITIAL_PRIO;
1271 		}
1272 
1273 	__bch_btree_mark_key(c, level, k);
1274 }
1275 
1276 void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
1277 {
1278 	stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
1279 }
1280 
1281 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1282 {
1283 	uint8_t stale = 0;
1284 	unsigned int keys = 0, good_keys = 0;
1285 	struct bkey *k;
1286 	struct btree_iter iter;
1287 	struct bset_tree *t;
1288 
1289 	gc->nodes++;
1290 
1291 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1292 		stale = max(stale, btree_mark_key(b, k));
1293 		keys++;
1294 
1295 		if (bch_ptr_bad(&b->keys, k))
1296 			continue;
1297 
1298 		gc->key_bytes += bkey_u64s(k);
1299 		gc->nkeys++;
1300 		good_keys++;
1301 
1302 		gc->data += KEY_SIZE(k);
1303 	}
1304 
1305 	for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1306 		btree_bug_on(t->size &&
1307 			     bset_written(&b->keys, t) &&
1308 			     bkey_cmp(&b->key, &t->end) < 0,
1309 			     b, "found short btree key in gc");
1310 
1311 	if (b->c->gc_always_rewrite)
1312 		return true;
1313 
1314 	if (stale > 10)
1315 		return true;
1316 
1317 	if ((keys - good_keys) * 2 > keys)
1318 		return true;
1319 
1320 	return false;
1321 }
1322 
1323 #define GC_MERGE_NODES	4U
1324 
1325 struct gc_merge_info {
1326 	struct btree	*b;
1327 	unsigned int	keys;
1328 };
1329 
1330 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
1331 				 struct keylist *insert_keys,
1332 				 atomic_t *journal_ref,
1333 				 struct bkey *replace_key);
1334 
1335 static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1336 			     struct gc_stat *gc, struct gc_merge_info *r)
1337 {
1338 	unsigned int i, nodes = 0, keys = 0, blocks;
1339 	struct btree *new_nodes[GC_MERGE_NODES];
1340 	struct keylist keylist;
1341 	struct closure cl;
1342 	struct bkey *k;
1343 
1344 	bch_keylist_init(&keylist);
1345 
1346 	if (btree_check_reserve(b, NULL))
1347 		return 0;
1348 
1349 	memset(new_nodes, 0, sizeof(new_nodes));
1350 	closure_init_stack(&cl);
1351 
1352 	while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
1353 		keys += r[nodes++].keys;
1354 
1355 	blocks = btree_default_blocks(b->c) * 2 / 3;
1356 
1357 	if (nodes < 2 ||
1358 	    __set_blocks(b->keys.set[0].data, keys,
1359 			 block_bytes(b->c->cache)) > blocks * (nodes - 1))
1360 		return 0;
1361 
1362 	for (i = 0; i < nodes; i++) {
1363 		new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1364 		if (IS_ERR(new_nodes[i]))
1365 			goto out_nocoalesce;
1366 	}
1367 
1368 	/*
1369 	 * We have to check the reserve here, after we've allocated our new
1370 	 * nodes, to make sure the insert below will succeed - we also check
1371 	 * before as an optimization to potentially avoid a bunch of expensive
1372 	 * allocs/sorts
1373 	 */
1374 	if (btree_check_reserve(b, NULL))
1375 		goto out_nocoalesce;
1376 
1377 	for (i = 0; i < nodes; i++)
1378 		mutex_lock(&new_nodes[i]->write_lock);
1379 
1380 	for (i = nodes - 1; i > 0; --i) {
1381 		struct bset *n1 = btree_bset_first(new_nodes[i]);
1382 		struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1383 		struct bkey *k, *last = NULL;
1384 
1385 		keys = 0;
1386 
1387 		if (i > 1) {
1388 			for (k = n2->start;
1389 			     k < bset_bkey_last(n2);
1390 			     k = bkey_next(k)) {
1391 				if (__set_blocks(n1, n1->keys + keys +
1392 						 bkey_u64s(k),
1393 						 block_bytes(b->c->cache)) > blocks)
1394 					break;
1395 
1396 				last = k;
1397 				keys += bkey_u64s(k);
1398 			}
1399 		} else {
1400 			/*
1401 			 * Last node we're not getting rid of - we're getting
1402 			 * rid of the node at r[0]. Have to try and fit all of
1403 			 * the remaining keys into this node; we can't ensure
1404 			 * they will always fit due to rounding and variable
1405 			 * length keys (shouldn't be possible in practice,
1406 			 * though)
1407 			 */
1408 			if (__set_blocks(n1, n1->keys + n2->keys,
1409 					 block_bytes(b->c->cache)) >
1410 			    btree_blocks(new_nodes[i]))
1411 				goto out_unlock_nocoalesce;
1412 
1413 			keys = n2->keys;
1414 			/* Take the key of the node we're getting rid of */
1415 			last = &r->b->key;
1416 		}
1417 
1418 		BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) >
1419 		       btree_blocks(new_nodes[i]));
1420 
1421 		if (last)
1422 			bkey_copy_key(&new_nodes[i]->key, last);
1423 
1424 		memcpy(bset_bkey_last(n1),
1425 		       n2->start,
1426 		       (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1427 
1428 		n1->keys += keys;
1429 		r[i].keys = n1->keys;
1430 
1431 		memmove(n2->start,
1432 			bset_bkey_idx(n2, keys),
1433 			(void *) bset_bkey_last(n2) -
1434 			(void *) bset_bkey_idx(n2, keys));
1435 
1436 		n2->keys -= keys;
1437 
1438 		if (__bch_keylist_realloc(&keylist,
1439 					  bkey_u64s(&new_nodes[i]->key)))
1440 			goto out_unlock_nocoalesce;
1441 
1442 		bch_btree_node_write(new_nodes[i], &cl);
1443 		bch_keylist_add(&keylist, &new_nodes[i]->key);
1444 	}
1445 
1446 	for (i = 0; i < nodes; i++)
1447 		mutex_unlock(&new_nodes[i]->write_lock);
1448 
1449 	closure_sync(&cl);
1450 
1451 	/* We emptied out this node */
1452 	BUG_ON(btree_bset_first(new_nodes[0])->keys);
1453 	btree_node_free(new_nodes[0]);
1454 	rw_unlock(true, new_nodes[0]);
1455 	new_nodes[0] = NULL;
1456 
1457 	for (i = 0; i < nodes; i++) {
1458 		if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1459 			goto out_nocoalesce;
1460 
1461 		make_btree_freeing_key(r[i].b, keylist.top);
1462 		bch_keylist_push(&keylist);
1463 	}
1464 
1465 	bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1466 	BUG_ON(!bch_keylist_empty(&keylist));
1467 
1468 	for (i = 0; i < nodes; i++) {
1469 		btree_node_free(r[i].b);
1470 		rw_unlock(true, r[i].b);
1471 
1472 		r[i].b = new_nodes[i];
1473 	}
1474 
1475 	memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1476 	r[nodes - 1].b = ERR_PTR(-EINTR);
1477 
1478 	trace_bcache_btree_gc_coalesce(nodes);
1479 	gc->nodes--;
1480 
1481 	bch_keylist_free(&keylist);
1482 
1483 	/* Invalidated our iterator */
1484 	return -EINTR;
1485 
1486 out_unlock_nocoalesce:
1487 	for (i = 0; i < nodes; i++)
1488 		mutex_unlock(&new_nodes[i]->write_lock);
1489 
1490 out_nocoalesce:
1491 	closure_sync(&cl);
1492 
1493 	while ((k = bch_keylist_pop(&keylist)))
1494 		if (!bkey_cmp(k, &ZERO_KEY))
1495 			atomic_dec(&b->c->prio_blocked);
1496 	bch_keylist_free(&keylist);
1497 
1498 	for (i = 0; i < nodes; i++)
1499 		if (!IS_ERR_OR_NULL(new_nodes[i])) {
1500 			btree_node_free(new_nodes[i]);
1501 			rw_unlock(true, new_nodes[i]);
1502 		}
1503 	return 0;
1504 }
1505 
1506 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1507 				 struct btree *replace)
1508 {
1509 	struct keylist keys;
1510 	struct btree *n;
1511 
1512 	if (btree_check_reserve(b, NULL))
1513 		return 0;
1514 
1515 	n = btree_node_alloc_replacement(replace, NULL);
1516 	if (IS_ERR(n))
1517 		return 0;
1518 
1519 	/* recheck reserve after allocating replacement node */
1520 	if (btree_check_reserve(b, NULL)) {
1521 		btree_node_free(n);
1522 		rw_unlock(true, n);
1523 		return 0;
1524 	}
1525 
1526 	bch_btree_node_write_sync(n);
1527 
1528 	bch_keylist_init(&keys);
1529 	bch_keylist_add(&keys, &n->key);
1530 
1531 	make_btree_freeing_key(replace, keys.top);
1532 	bch_keylist_push(&keys);
1533 
1534 	bch_btree_insert_node(b, op, &keys, NULL, NULL);
1535 	BUG_ON(!bch_keylist_empty(&keys));
1536 
1537 	btree_node_free(replace);
1538 	rw_unlock(true, n);
1539 
1540 	/* Invalidated our iterator */
1541 	return -EINTR;
1542 }
1543 
1544 static unsigned int btree_gc_count_keys(struct btree *b)
1545 {
1546 	struct bkey *k;
1547 	struct btree_iter iter;
1548 	unsigned int ret = 0;
1549 
1550 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1551 		ret += bkey_u64s(k);
1552 
1553 	return ret;
1554 }
1555 
1556 static size_t btree_gc_min_nodes(struct cache_set *c)
1557 {
1558 	size_t min_nodes;
1559 
1560 	/*
1561 	 * Incremental GC pauses for 100ms whenever front-end I/O
1562 	 * arrives, so if GC only processed a constant number (100)
1563 	 * of nodes each time, a large btree would keep GC running
1564 	 * for a long time, and the front-end I/Os could run out of
1565 	 * buckets (since no new bucket can be allocated during GC)
1566 	 * and be blocked again.
1567 	 * So GC should not process a constant number of nodes, but
1568 	 * a number that varies with the total number of btree nodes:
1569 	 * GC is divided into a constant number (100) of passes, so
1570 	 * when there are many btree nodes GC processes more nodes
1571 	 * each time, otherwise it processes fewer nodes each time
1572 	 * (but never less than MIN_GC_NODES).
1573 	 */
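	/*
	 * For example, with the values defined above (MAX_GC_TIMES 100,
	 * MIN_GC_NODES 100): 1,000,000 btree nodes give 10,000 nodes per
	 * pass, while 5,000 nodes would give only 50, so the MIN_GC_NODES
	 * floor of 100 applies instead.
	 */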
1574 	min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
1575 	if (min_nodes < MIN_GC_NODES)
1576 		min_nodes = MIN_GC_NODES;
1577 
1578 	return min_nodes;
1579 }
1580 
1581 
1582 static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1583 			    struct closure *writes, struct gc_stat *gc)
1584 {
1585 	int ret = 0;
1586 	bool should_rewrite;
1587 	struct bkey *k;
1588 	struct btree_iter iter;
1589 	struct gc_merge_info r[GC_MERGE_NODES];
1590 	struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1591 
1592 	bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1593 
1594 	for (i = r; i < r + ARRAY_SIZE(r); i++)
1595 		i->b = ERR_PTR(-EINTR);
1596 
1597 	while (1) {
1598 		k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1599 		if (k) {
1600 			r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
1601 						  true, b);
1602 			if (IS_ERR(r->b)) {
1603 				ret = PTR_ERR(r->b);
1604 				break;
1605 			}
1606 
1607 			r->keys = btree_gc_count_keys(r->b);
1608 
1609 			ret = btree_gc_coalesce(b, op, gc, r);
1610 			if (ret)
1611 				break;
1612 		}
1613 
1614 		if (!last->b)
1615 			break;
1616 
1617 		if (!IS_ERR(last->b)) {
1618 			should_rewrite = btree_gc_mark_node(last->b, gc);
1619 			if (should_rewrite) {
1620 				ret = btree_gc_rewrite_node(b, op, last->b);
1621 				if (ret)
1622 					break;
1623 			}
1624 
1625 			if (last->b->level) {
1626 				ret = btree_gc_recurse(last->b, op, writes, gc);
1627 				if (ret)
1628 					break;
1629 			}
1630 
1631 			bkey_copy_key(&b->c->gc_done, &last->b->key);
1632 
1633 			/*
1634 			 * Must flush leaf nodes before gc ends, since replace
1635 			 * operations aren't journalled
1636 			 */
1637 			mutex_lock(&last->b->write_lock);
1638 			if (btree_node_dirty(last->b))
1639 				bch_btree_node_write(last->b, writes);
1640 			mutex_unlock(&last->b->write_lock);
1641 			rw_unlock(true, last->b);
1642 		}
1643 
1644 		memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1645 		r->b = NULL;
1646 
1647 		if (atomic_read(&b->c->search_inflight) &&
1648 		    gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
1649 			gc->nodes_pre =  gc->nodes;
1650 			ret = -EAGAIN;
1651 			break;
1652 		}
1653 
1654 		if (need_resched()) {
1655 			ret = -EAGAIN;
1656 			break;
1657 		}
1658 	}
1659 
1660 	for (i = r; i < r + ARRAY_SIZE(r); i++)
1661 		if (!IS_ERR_OR_NULL(i->b)) {
1662 			mutex_lock(&i->b->write_lock);
1663 			if (btree_node_dirty(i->b))
1664 				bch_btree_node_write(i->b, writes);
1665 			mutex_unlock(&i->b->write_lock);
1666 			rw_unlock(true, i->b);
1667 		}
1668 
1669 	return ret;
1670 }
1671 
1672 static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1673 			     struct closure *writes, struct gc_stat *gc)
1674 {
1675 	struct btree *n = NULL;
1676 	int ret = 0;
1677 	bool should_rewrite;
1678 
1679 	should_rewrite = btree_gc_mark_node(b, gc);
1680 	if (should_rewrite) {
1681 		n = btree_node_alloc_replacement(b, NULL);
1682 
1683 		if (!IS_ERR(n)) {
1684 			bch_btree_node_write_sync(n);
1685 
1686 			bch_btree_set_root(n);
1687 			btree_node_free(b);
1688 			rw_unlock(true, n);
1689 
1690 			return -EINTR;
1691 		}
1692 	}
1693 
1694 	__bch_btree_mark_key(b->c, b->level + 1, &b->key);
1695 
1696 	if (b->level) {
1697 		ret = btree_gc_recurse(b, op, writes, gc);
1698 		if (ret)
1699 			return ret;
1700 	}
1701 
1702 	bkey_copy_key(&b->c->gc_done, &b->key);
1703 
1704 	return ret;
1705 }
1706 
1707 static void btree_gc_start(struct cache_set *c)
1708 {
1709 	struct cache *ca;
1710 	struct bucket *b;
1711 
1712 	if (!c->gc_mark_valid)
1713 		return;
1714 
1715 	mutex_lock(&c->bucket_lock);
1716 
1717 	c->gc_mark_valid = 0;
1718 	c->gc_done = ZERO_KEY;
1719 
1720 	ca = c->cache;
1721 	for_each_bucket(b, ca) {
1722 		b->last_gc = b->gen;
1723 		if (!atomic_read(&b->pin)) {
1724 			SET_GC_MARK(b, 0);
1725 			SET_GC_SECTORS_USED(b, 0);
1726 		}
1727 	}
1728 
1729 	mutex_unlock(&c->bucket_lock);
1730 }
1731 
1732 static void bch_btree_gc_finish(struct cache_set *c)
1733 {
1734 	struct bucket *b;
1735 	struct cache *ca;
1736 	unsigned int i, j;
1737 	uint64_t *k;
1738 
1739 	mutex_lock(&c->bucket_lock);
1740 
1741 	set_gc_sectors(c);
1742 	c->gc_mark_valid = 1;
1743 	c->need_gc	= 0;
1744 
1745 	for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1746 		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1747 			    GC_MARK_METADATA);
1748 
1749 	/* don't reclaim buckets to which writeback keys point */
1750 	rcu_read_lock();
1751 	for (i = 0; i < c->devices_max_used; i++) {
1752 		struct bcache_device *d = c->devices[i];
1753 		struct cached_dev *dc;
1754 		struct keybuf_key *w, *n;
1755 
1756 		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1757 			continue;
1758 		dc = container_of(d, struct cached_dev, disk);
1759 
1760 		spin_lock(&dc->writeback_keys.lock);
1761 		rbtree_postorder_for_each_entry_safe(w, n,
1762 					&dc->writeback_keys.keys, node)
1763 			for (j = 0; j < KEY_PTRS(&w->key); j++)
1764 				SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1765 					    GC_MARK_DIRTY);
1766 		spin_unlock(&dc->writeback_keys.lock);
1767 	}
1768 	rcu_read_unlock();
1769 
1770 	c->avail_nbuckets = 0;
1771 
1772 	ca = c->cache;
1773 	ca->invalidate_needs_gc = 0;
1774 
1775 	for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
1776 		SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
1777 
1778 	for (k = ca->prio_buckets;
1779 	     k < ca->prio_buckets + prio_buckets(ca) * 2; k++)
1780 		SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);
1781 
1782 	for_each_bucket(b, ca) {
1783 		c->need_gc	= max(c->need_gc, bucket_gc_gen(b));
1784 
1785 		if (atomic_read(&b->pin))
1786 			continue;
1787 
1788 		BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1789 
1790 		if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1791 			c->avail_nbuckets++;
1792 	}
1793 
1794 	mutex_unlock(&c->bucket_lock);
1795 }
1796 
1797 static void bch_btree_gc(struct cache_set *c)
1798 {
1799 	int ret;
1800 	struct gc_stat stats;
1801 	struct closure writes;
1802 	struct btree_op op;
1803 	uint64_t start_time = local_clock();
1804 
1805 	trace_bcache_gc_start(c);
1806 
1807 	memset(&stats, 0, sizeof(struct gc_stat));
1808 	closure_init_stack(&writes);
1809 	bch_btree_op_init(&op, SHRT_MAX);
1810 
1811 	btree_gc_start(c);
1812 
1813 	/* if CACHE_SET_IO_DISABLE set, gc thread should stop too */
1814 	do {
1815 		ret = bcache_btree_root(gc_root, c, &op, &writes, &stats);
1816 		closure_sync(&writes);
1817 		cond_resched();
1818 
1819 		if (ret == -EAGAIN)
1820 			schedule_timeout_interruptible(msecs_to_jiffies
1821 						       (GC_SLEEP_MS));
1822 		else if (ret)
1823 			pr_warn("gc failed!\n");
1824 	} while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
1825 
1826 	bch_btree_gc_finish(c);
1827 	wake_up_allocators(c);
1828 
1829 	bch_time_stats_update(&c->btree_gc_time, start_time);
1830 
1831 	stats.key_bytes *= sizeof(uint64_t);
1832 	stats.data	<<= 9;
1833 	bch_update_bucket_in_use(c, &stats);
1834 	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1835 
1836 	trace_bcache_gc_end(c);
1837 
1838 	bch_moving_gc(c);
1839 }
1840 
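/*
 * GC is kicked either when the allocator needs buckets invalidated
 * (invalidate_needs_gc) or when sectors_to_gc has counted down below zero;
 * see the wait_event_interruptible() in bch_gc_thread() below.
 */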
1841 static bool gc_should_run(struct cache_set *c)
1842 {
1843 	struct cache *ca = c->cache;
1844 
1845 	if (ca->invalidate_needs_gc)
1846 		return true;
1847 
1848 	if (atomic_read(&c->sectors_to_gc) < 0)
1849 		return true;
1850 
1851 	return false;
1852 }
1853 
1854 static int bch_gc_thread(void *arg)
1855 {
1856 	struct cache_set *c = arg;
1857 
1858 	while (1) {
1859 		wait_event_interruptible(c->gc_wait,
1860 			   kthread_should_stop() ||
1861 			   test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
1862 			   gc_should_run(c));
1863 
1864 		if (kthread_should_stop() ||
1865 		    test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1866 			break;
1867 
1868 		set_gc_sectors(c);
1869 		bch_btree_gc(c);
1870 	}
1871 
1872 	wait_for_kthread_stop();
1873 	return 0;
1874 }
1875 
1876 int bch_gc_thread_start(struct cache_set *c)
1877 {
1878 	c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
1879 	return PTR_ERR_OR_ZERO(c->gc_thread);
1880 }
1881 
1882 /* Initial partial gc */
1883 
1884 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1885 {
1886 	int ret = 0;
1887 	struct bkey *k, *p = NULL;
1888 	struct btree_iter iter;
1889 
1890 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1891 		bch_initial_mark_key(b->c, b->level, k);
1892 
1893 	bch_initial_mark_key(b->c, b->level + 1, &b->key);
1894 
1895 	if (b->level) {
1896 		bch_btree_iter_init(&b->keys, &iter, NULL);
1897 
1898 		do {
1899 			k = bch_btree_iter_next_filter(&iter, &b->keys,
1900 						       bch_ptr_bad);
1901 			if (k) {
1902 				btree_node_prefetch(b, k);
1903 				/*
1904 				 * initialize c->gc_stats.nodes
1905 				 * for incremental GC
1906 				 */
1907 				b->c->gc_stats.nodes++;
1908 			}
1909 
1910 			if (p)
1911 				ret = bcache_btree(check_recurse, p, b, op);
1912 
1913 			p = k;
1914 		} while (p && !ret);
1915 	}
1916 
1917 	return ret;
1918 }
1919 
1920 
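/*
 * Worker for the multi-threaded initial check: each thread atomically
 * claims the next unclaimed root-node key index from check_state->key_idx,
 * advances its private iterator past the keys claimed by other threads,
 * then checks the subtree below the claimed key.  When the root runs out of
 * keys, check_state->enough is set so no further threads are started.
 */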
1921 static int bch_btree_check_thread(void *arg)
1922 {
1923 	int ret;
1924 	struct btree_check_info *info = arg;
1925 	struct btree_check_state *check_state = info->state;
1926 	struct cache_set *c = check_state->c;
1927 	struct btree_iter iter;
1928 	struct bkey *k, *p;
1929 	int cur_idx, prev_idx, skip_nr;
1930 
1931 	k = p = NULL;
1932 	cur_idx = prev_idx = 0;
1933 	ret = 0;
1934 
1935 	/* root node keys are checked before the check threads are created */
1936 	bch_btree_iter_init(&c->root->keys, &iter, NULL);
1937 	k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
1938 	BUG_ON(!k);
1939 
1940 	p = k;
1941 	while (k) {
1942 		/*
1943 		 * Fetch a root node key index, skip the keys which
1944 		 * should be fetched by other threads, then check the
1945 		 * sub-tree indexed by the fetched key.
1946 		 */
1947 		spin_lock(&check_state->idx_lock);
1948 		cur_idx = check_state->key_idx;
1949 		check_state->key_idx++;
1950 		spin_unlock(&check_state->idx_lock);
1951 
1952 		skip_nr = cur_idx - prev_idx;
1953 
1954 		while (skip_nr) {
1955 			k = bch_btree_iter_next_filter(&iter,
1956 						       &c->root->keys,
1957 						       bch_ptr_bad);
1958 			if (k)
1959 				p = k;
1960 			else {
1961 				/*
1962 				 * No more keys to check in root node,
1963 				 * current checking threads are enough,
1964 				 * stop creating more.
1965 				 */
1966 				atomic_set(&check_state->enough, 1);
1967 				/* Update check_state->enough earlier */
1968 				smp_mb__after_atomic();
1969 				goto out;
1970 			}
1971 			skip_nr--;
1972 			cond_resched();
1973 		}
1974 
1975 		if (p) {
1976 			struct btree_op op;
1977 
1978 			btree_node_prefetch(c->root, p);
1979 			c->gc_stats.nodes++;
1980 			bch_btree_op_init(&op, 0);
1981 			ret = bcache_btree(check_recurse, p, c->root, &op);
1982 			/*
1983 			 * The op may have been added to cache_set's btree_cache_wait
1984 			 * in mca_cannibalize(); make sure it is removed from
1985 			 * the list and that btree_cache_alloc_lock is released
1986 			 * before the op memory is freed.
1987 			 * Otherwise the btree_cache_wait list would be corrupted.
1988 			 */
1989 			bch_cannibalize_unlock(c);
1990 			finish_wait(&c->btree_cache_wait, &(&op)->wait);
1991 			if (ret)
1992 				goto out;
1993 		}
1994 		p = NULL;
1995 		prev_idx = cur_idx;
1996 		cond_resched();
1997 	}
1998 
1999 out:
2000 	info->result = ret;
2001 	/* update check_state->started among all CPUs */
2002 	smp_mb__before_atomic();
2003 	if (atomic_dec_and_test(&check_state->started))
2004 		wake_up(&check_state->wait);
2005 
2006 	return ret;
2007 }
2008 
2009 
2010 
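/*
 * Number of check threads: half the online CPUs, at least 1 and at most
 * BCH_BTR_CHKTHREAD_MAX (e.g. 8 online CPUs give 4 threads, subject to the
 * cap).
 */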
2011 static int bch_btree_chkthread_nr(void)
2012 {
2013 	int n = num_online_cpus()/2;
2014 
2015 	if (n == 0)
2016 		n = 1;
2017 	else if (n > BCH_BTR_CHKTHREAD_MAX)
2018 		n = BCH_BTR_CHKTHREAD_MAX;
2019 
2020 	return n;
2021 }
2022 
2023 int bch_btree_check(struct cache_set *c)
2024 {
2025 	int ret = 0;
2026 	int i;
2027 	struct bkey *k = NULL;
2028 	struct btree_iter iter;
2029 	struct btree_check_state check_state;
2030 
2031 	/* check and mark root node keys */
2032 	for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
2033 		bch_initial_mark_key(c, c->root->level, k);
2034 
2035 	bch_initial_mark_key(c, c->root->level + 1, &c->root->key);
2036 
2037 	if (c->root->level == 0)
2038 		return 0;
2039 
2040 	memset(&check_state, 0, sizeof(struct btree_check_state));
2041 	check_state.c = c;
2042 	check_state.total_threads = bch_btree_chkthread_nr();
2043 	check_state.key_idx = 0;
2044 	spin_lock_init(&check_state.idx_lock);
2045 	atomic_set(&check_state.started, 0);
2046 	atomic_set(&check_state.enough, 0);
2047 	init_waitqueue_head(&check_state.wait);
2048 
2049 	rw_lock(0, c->root, c->root->level);
2050 	/*
2051 	 * Run multiple threads to check btree nodes in parallel,
2052 	 * if check_state.enough is non-zero, it means the currently
2053 	 * running check threads are enough and it is unnecessary to
2054 	 * create more.
2055 	 */
2056 	for (i = 0; i < check_state.total_threads; i++) {
2057 		/* fetch latest check_state.enough earlier */
2058 		smp_mb__before_atomic();
2059 		if (atomic_read(&check_state.enough))
2060 			break;
2061 
2062 		check_state.infos[i].result = 0;
2063 		check_state.infos[i].state = &check_state;
2064 
2065 		check_state.infos[i].thread =
2066 			kthread_run(bch_btree_check_thread,
2067 				    &check_state.infos[i],
2068 				    "bch_btrchk[%d]", i);
2069 		if (IS_ERR(check_state.infos[i].thread)) {
2070 			pr_err("failed to run thread bch_btrchk[%d]\n", i);
2071 			for (--i; i >= 0; i--)
2072 				kthread_stop(check_state.infos[i].thread);
2073 			ret = -ENOMEM;
2074 			goto out;
2075 		}
2076 		atomic_inc(&check_state.started);
2077 	}
2078 
2079 	/*
2080 	 * Must wait for all threads to stop.
2081 	 */
2082 	wait_event(check_state.wait, atomic_read(&check_state.started) == 0);
2083 
2084 	for (i = 0; i < check_state.total_threads; i++) {
2085 		if (check_state.infos[i].result) {
2086 			ret = check_state.infos[i].result;
2087 			goto out;
2088 		}
2089 	}
2090 
2091 out:
2092 	rw_unlock(0, c->root);
2093 	return ret;
2094 }
2095 
2096 void bch_initial_gc_finish(struct cache_set *c)
2097 {
2098 	struct cache *ca = c->cache;
2099 	struct bucket *b;
2100 
2101 	bch_btree_gc_finish(c);
2102 
2103 	mutex_lock(&c->bucket_lock);
2104 
2105 	/*
2106 	 * We need to put some unused buckets directly on the prio freelist in
2107 	 * order to get the allocator thread started - it needs freed buckets in
2108 	 * order to rewrite the prios and gens, and it needs to rewrite prios
2109 	 * and gens in order to free buckets.
2110 	 *
2111 	 * This is only safe for buckets that have no live data in them, which
2112 	 * there should always be some of.
2113 	 */
2114 	for_each_bucket(b, ca) {
2115 		if (fifo_full(&ca->free[RESERVE_PRIO]) &&
2116 		    fifo_full(&ca->free[RESERVE_BTREE]))
2117 			break;
2118 
2119 		if (bch_can_invalidate_bucket(ca, b) &&
2120 		    !GC_MARK(b)) {
2121 			__bch_invalidate_one_bucket(ca, b);
2122 			if (!fifo_push(&ca->free[RESERVE_PRIO],
2123 			   b - ca->buckets))
2124 				fifo_push(&ca->free[RESERVE_BTREE],
2125 					  b - ca->buckets);
2126 		}
2127 	}
2128 
2129 	mutex_unlock(&c->bucket_lock);
2130 }
2131 
2132 /* Btree insertion */
2133 
2134 static bool btree_insert_key(struct btree *b, struct bkey *k,
2135 			     struct bkey *replace_key)
2136 {
2137 	unsigned int status;
2138 
2139 	BUG_ON(bkey_cmp(k, &b->key) > 0);
2140 
2141 	status = bch_btree_insert_key(&b->keys, k, replace_key);
2142 	if (status != BTREE_INSERT_STATUS_NO_INSERT) {
2143 		bch_check_keys(&b->keys, "%u for %s", status,
2144 			       replace_key ? "replace" : "insert");
2145 
2146 		trace_bcache_btree_insert_key(b, k, replace_key != NULL,
2147 					      status);
2148 		return true;
2149 	} else
2150 		return false;
2151 }
2152 
2153 static size_t insert_u64s_remaining(struct btree *b)
2154 {
2155 	long ret = bch_btree_keys_u64s_remaining(&b->keys);
2156 
2157 	/*
2158 	 * Might land in the middle of an existing extent and have to split it
2159 	 */
2160 	if (b->keys.ops->is_extents)
2161 		ret -= KEY_MAX_U64S;
2162 
2163 	return max(ret, 0L);
2164 }
2165 
2166 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
2167 				  struct keylist *insert_keys,
2168 				  struct bkey *replace_key)
2169 {
2170 	bool ret = false;
2171 	int oldsize = bch_count_data(&b->keys);
2172 
2173 	while (!bch_keylist_empty(insert_keys)) {
2174 		struct bkey *k = insert_keys->keys;
2175 
2176 		if (bkey_u64s(k) > insert_u64s_remaining(b))
2177 			break;
2178 
2179 		if (bkey_cmp(k, &b->key) <= 0) {
2180 			if (!b->level)
2181 				bkey_put(b->c, k);
2182 
2183 			ret |= btree_insert_key(b, k, replace_key);
2184 			bch_keylist_pop_front(insert_keys);
2185 		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
2186 			BKEY_PADDED(key) temp;
2187 			bkey_copy(&temp.key, insert_keys->keys);
2188 
2189 			bch_cut_back(&b->key, &temp.key);
2190 			bch_cut_front(&b->key, insert_keys->keys);
2191 
2192 			ret |= btree_insert_key(b, &temp.key, replace_key);
2193 			break;
2194 		} else {
2195 			break;
2196 		}
2197 	}
2198 
2199 	if (!ret)
2200 		op->insert_collision = true;
2201 
2202 	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
2203 
2204 	BUG_ON(bch_count_data(&b->keys) < oldsize);
2205 	return ret;
2206 }
2207 
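/*
 * Rewrite node b with insert_keys applied.  If the compacted result would
 * still fill more than ~4/5 of a btree node it is split: roughly the first
 * 3/5 of the keyset stays in n1 and the rest goes to n2, and if b was the
 * root a new root n3 is allocated one level up to point at both halves.
 * The old node is freed once the replacement(s) are written and linked in.
 */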
2208 static int btree_split(struct btree *b, struct btree_op *op,
2209 		       struct keylist *insert_keys,
2210 		       struct bkey *replace_key)
2211 {
2212 	bool split;
2213 	struct btree *n1, *n2 = NULL, *n3 = NULL;
2214 	uint64_t start_time = local_clock();
2215 	struct closure cl;
2216 	struct keylist parent_keys;
2217 
2218 	closure_init_stack(&cl);
2219 	bch_keylist_init(&parent_keys);
2220 
2221 	if (btree_check_reserve(b, op)) {
2222 		if (!b->level)
2223 			return -EINTR;
2224 		else
2225 			WARN(1, "insufficient reserve for split\n");
2226 	}
2227 
2228 	n1 = btree_node_alloc_replacement(b, op);
2229 	if (IS_ERR(n1))
2230 		goto err;
2231 
2232 	split = set_blocks(btree_bset_first(n1),
2233 			   block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5;
2234 
2235 	if (split) {
2236 		unsigned int keys = 0;
2237 
2238 		trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
2239 
2240 		n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
2241 		if (IS_ERR(n2))
2242 			goto err_free1;
2243 
2244 		if (!b->parent) {
2245 			n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
2246 			if (IS_ERR(n3))
2247 				goto err_free2;
2248 		}
2249 
2250 		mutex_lock(&n1->write_lock);
2251 		mutex_lock(&n2->write_lock);
2252 
2253 		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2254 
2255 		/*
2256 		 * Has to be a linear search because we don't have an auxiliary
2257 		 * search tree yet
2258 		 */
2259 
2260 		while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2261 			keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2262 							keys));
2263 
2264 		bkey_copy_key(&n1->key,
2265 			      bset_bkey_idx(btree_bset_first(n1), keys));
2266 		keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2267 
2268 		btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2269 		btree_bset_first(n1)->keys = keys;
2270 
2271 		memcpy(btree_bset_first(n2)->start,
2272 		       bset_bkey_last(btree_bset_first(n1)),
2273 		       btree_bset_first(n2)->keys * sizeof(uint64_t));
2274 
2275 		bkey_copy_key(&n2->key, &b->key);
2276 
2277 		bch_keylist_add(&parent_keys, &n2->key);
2278 		bch_btree_node_write(n2, &cl);
2279 		mutex_unlock(&n2->write_lock);
2280 		rw_unlock(true, n2);
2281 	} else {
2282 		trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2283 
2284 		mutex_lock(&n1->write_lock);
2285 		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2286 	}
2287 
2288 	bch_keylist_add(&parent_keys, &n1->key);
2289 	bch_btree_node_write(n1, &cl);
2290 	mutex_unlock(&n1->write_lock);
2291 
2292 	if (n3) {
2293 		/* Depth increases, make a new root */
2294 		mutex_lock(&n3->write_lock);
2295 		bkey_copy_key(&n3->key, &MAX_KEY);
2296 		bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2297 		bch_btree_node_write(n3, &cl);
2298 		mutex_unlock(&n3->write_lock);
2299 
2300 		closure_sync(&cl);
2301 		bch_btree_set_root(n3);
2302 		rw_unlock(true, n3);
2303 	} else if (!b->parent) {
2304 		/* Root filled up but didn't need to be split */
2305 		closure_sync(&cl);
2306 		bch_btree_set_root(n1);
2307 	} else {
2308 		/* Split a non root node */
2309 		closure_sync(&cl);
2310 		make_btree_freeing_key(b, parent_keys.top);
2311 		bch_keylist_push(&parent_keys);
2312 
2313 		bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2314 		BUG_ON(!bch_keylist_empty(&parent_keys));
2315 	}
2316 
2317 	btree_node_free(b);
2318 	rw_unlock(true, n1);
2319 
2320 	bch_time_stats_update(&b->c->btree_split_time, start_time);
2321 
2322 	return 0;
2323 err_free2:
2324 	bkey_put(b->c, &n2->key);
2325 	btree_node_free(n2);
2326 	rw_unlock(true, n2);
2327 err_free1:
2328 	bkey_put(b->c, &n1->key);
2329 	btree_node_free(n1);
2330 	rw_unlock(true, n1);
2331 err:
2332 	WARN(1, "bcache: btree split failed (level %u)", b->level);
2333 
2334 	if (n3 == ERR_PTR(-EAGAIN) ||
2335 	    n2 == ERR_PTR(-EAGAIN) ||
2336 	    n1 == ERR_PTR(-EAGAIN))
2337 		return -EAGAIN;
2338 
2339 	return -ENOMEM;
2340 }
2341 
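/*
 * Insert as many keys as fit into node b, marking the leaf dirty against
 * the journal or writing interior nodes out as needed.  Returns 0 on
 * success, -EAGAIN if a split is needed while we are under an IO submission
 * path (current->bio_list) and must not block, and -EINTR if the caller has
 * to retry with locks taken higher up the tree.
 */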
2342 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2343 				 struct keylist *insert_keys,
2344 				 atomic_t *journal_ref,
2345 				 struct bkey *replace_key)
2346 {
2347 	struct closure cl;
2348 
2349 	BUG_ON(b->level && replace_key);
2350 
2351 	closure_init_stack(&cl);
2352 
2353 	mutex_lock(&b->write_lock);
2354 
2355 	if (write_block(b) != btree_bset_last(b) &&
2356 	    b->keys.last_set_unwritten)
2357 		bch_btree_init_next(b); /* just wrote a set */
2358 
2359 	if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
2360 		mutex_unlock(&b->write_lock);
2361 		goto split;
2362 	}
2363 
2364 	BUG_ON(write_block(b) != btree_bset_last(b));
2365 
2366 	if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2367 		if (!b->level)
2368 			bch_btree_leaf_dirty(b, journal_ref);
2369 		else
2370 			bch_btree_node_write(b, &cl);
2371 	}
2372 
2373 	mutex_unlock(&b->write_lock);
2374 
2375 	/* wait for btree node write if necessary, after unlock */
2376 	closure_sync(&cl);
2377 
2378 	return 0;
2379 split:
2380 	if (current->bio_list) {
2381 		op->lock = b->c->root->level + 1;
2382 		return -EAGAIN;
2383 	} else if (op->lock <= b->c->root->level) {
2384 		op->lock = b->c->root->level + 1;
2385 		return -EINTR;
2386 	} else {
2387 		/* Invalidated all iterators */
2388 		int ret = btree_split(b, op, insert_keys, replace_key);
2389 
2390 		if (bch_keylist_empty(insert_keys))
2391 			return 0;
2392 		else if (!ret)
2393 			return -EINTR;
2394 		return ret;
2395 	}
2396 }
2397 
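/*
 * Insert the caller's check key with a single random pointer on
 * PTR_CHECK_DEV.  If the node was only held with a read lock (op->lock ==
 * -1) the lock is upgraded to a write lock first; if the node no longer
 * matches what we had (key pointer changed, or the sequence number is not
 * what we expect) we give up and return -EINTR so the caller can retry.
 */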
2398 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2399 			       struct bkey *check_key)
2400 {
2401 	int ret = -EINTR;
2402 	uint64_t btree_ptr = b->key.ptr[0];
2403 	unsigned long seq = b->seq;
2404 	struct keylist insert;
2405 	bool upgrade = op->lock == -1;
2406 
2407 	bch_keylist_init(&insert);
2408 
2409 	if (upgrade) {
2410 		rw_unlock(false, b);
2411 		rw_lock(true, b, b->level);
2412 
2413 		if (b->key.ptr[0] != btree_ptr ||
2414 		    b->seq != seq + 1) {
2415 			op->lock = b->level;
2416 			goto out;
2417 		}
2418 	}
2419 
2420 	SET_KEY_PTRS(check_key, 1);
2421 	get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2422 
2423 	SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2424 
2425 	bch_keylist_add(&insert, check_key);
2426 
2427 	ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2428 
2429 	BUG_ON(!ret && !bch_keylist_empty(&insert));
2430 out:
2431 	if (upgrade)
2432 		downgrade_write(&b->lock);
2433 	return ret;
2434 }
2435 
2436 struct btree_insert_op {
2437 	struct btree_op	op;
2438 	struct keylist	*keys;
2439 	atomic_t	*journal_ref;
2440 	struct bkey	*replace_key;
2441 };
2442 
2443 static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2444 {
2445 	struct btree_insert_op *op = container_of(b_op,
2446 					struct btree_insert_op, op);
2447 
2448 	int ret = bch_btree_insert_node(b, &op->op, op->keys,
2449 					op->journal_ref, op->replace_key);
2450 	if (ret && !bch_keylist_empty(op->keys))
2451 		return ret;
2452 	else
2453 		return MAP_DONE;
2454 }
2455 
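/*
 * Top-level insert: map over the leaf nodes covering the keys and insert
 * into each, looping until the keylist is drained.  On error any keys left
 * on the list are dropped; -ESRCH reports an insert collision (nothing was
 * inserted), which matters to replace_key callers.
 *
 * Usage sketch (hypothetical caller; keylist setup elided):
 *
 *	bch_keylist_init(&keys);
 *	... add keys with bch_keylist_add() ...
 *	ret = bch_btree_insert(c, &keys, journal_ref, NULL);
 */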
2456 int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2457 		     atomic_t *journal_ref, struct bkey *replace_key)
2458 {
2459 	struct btree_insert_op op;
2460 	int ret = 0;
2461 
2462 	BUG_ON(current->bio_list);
2463 	BUG_ON(bch_keylist_empty(keys));
2464 
2465 	bch_btree_op_init(&op.op, 0);
2466 	op.keys		= keys;
2467 	op.journal_ref	= journal_ref;
2468 	op.replace_key	= replace_key;
2469 
2470 	while (!ret && !bch_keylist_empty(keys)) {
2471 		op.op.lock = 0;
2472 		ret = bch_btree_map_leaf_nodes(&op.op, c,
2473 					       &START_KEY(keys->keys),
2474 					       btree_insert_fn);
2475 	}
2476 
2477 	if (ret) {
2478 		struct bkey *k;
2479 
2480 		pr_err("error %i\n", ret);
2481 
2482 		while ((k = bch_keylist_pop(keys)))
2483 			bkey_put(c, k);
2484 	} else if (op.op.insert_collision)
2485 		ret = -ESRCH;
2486 
2487 	return ret;
2488 }
2489 
2490 void bch_btree_set_root(struct btree *b)
2491 {
2492 	unsigned int i;
2493 	struct closure cl;
2494 
2495 	closure_init_stack(&cl);
2496 
2497 	trace_bcache_btree_set_root(b);
2498 
2499 	BUG_ON(!b->written);
2500 
2501 	for (i = 0; i < KEY_PTRS(&b->key); i++)
2502 		BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2503 
2504 	mutex_lock(&b->c->bucket_lock);
2505 	list_del_init(&b->list);
2506 	mutex_unlock(&b->c->bucket_lock);
2507 
2508 	b->c->root = b;
2509 
2510 	bch_journal_meta(b->c, &cl);
2511 	closure_sync(&cl);
2512 }
2513 
2514 /* Map across nodes or keys */
2515 
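/*
 * The map helpers walk the btree from 'from' and call fn on each node (or
 * each key, below) until fn returns something other than MAP_CONTINUE;
 * MAP_DONE stops the walk.  With MAP_ALL_NODES fn also runs on interior
 * nodes, otherwise only on leaves.
 */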
2516 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2517 				       struct bkey *from,
2518 				       btree_map_nodes_fn *fn, int flags)
2519 {
2520 	int ret = MAP_CONTINUE;
2521 
2522 	if (b->level) {
2523 		struct bkey *k;
2524 		struct btree_iter iter;
2525 
2526 		bch_btree_iter_init(&b->keys, &iter, from);
2527 
2528 		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
2529 						       bch_ptr_bad))) {
2530 			ret = bcache_btree(map_nodes_recurse, k, b,
2531 				    op, from, fn, flags);
2532 			from = NULL;
2533 
2534 			if (ret != MAP_CONTINUE)
2535 				return ret;
2536 		}
2537 	}
2538 
2539 	if (!b->level || flags == MAP_ALL_NODES)
2540 		ret = fn(op, b);
2541 
2542 	return ret;
2543 }
2544 
2545 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2546 			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
2547 {
2548 	return bcache_btree_root(map_nodes_recurse, c, op, from, fn, flags);
2549 }
2550 
2551 int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2552 				      struct bkey *from, btree_map_keys_fn *fn,
2553 				      int flags)
2554 {
2555 	int ret = MAP_CONTINUE;
2556 	struct bkey *k;
2557 	struct btree_iter iter;
2558 
2559 	bch_btree_iter_init(&b->keys, &iter, from);
2560 
2561 	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
2562 		ret = !b->level
2563 			? fn(op, b, k)
2564 			: bcache_btree(map_keys_recurse, k,
2565 				       b, op, from, fn, flags);
2566 		from = NULL;
2567 
2568 		if (ret != MAP_CONTINUE)
2569 			return ret;
2570 	}
2571 
2572 	if (!b->level && (flags & MAP_END_KEY))
2573 		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2574 				     KEY_OFFSET(&b->key), 0));
2575 
2576 	return ret;
2577 }
2578 
2579 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2580 		       struct bkey *from, btree_map_keys_fn *fn, int flags)
2581 {
2582 	return bcache_btree_root(map_keys_recurse, c, op, from, fn, flags);
2583 }
2584 
2585 /* Keybuf code */
2586 
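/*
 * A keybuf caches, in an rbtree, keys matching a caller-supplied predicate
 * so they can be processed (e.g. written back or moved) without rescanning
 * the btree each time.  bch_refill_keybuf() scans from buf->last_scanned
 * towards 'end' and records the window of keys currently held in
 * buf->start/buf->end, which bch_keybuf_check_overlapping() tests against.
 */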
2587 static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2588 {
2589 	/* Overlapping keys compare equal */
2590 	if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2591 		return -1;
2592 	if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2593 		return 1;
2594 	return 0;
2595 }
2596 
2597 static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2598 					    struct keybuf_key *r)
2599 {
2600 	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2601 }
2602 
2603 struct refill {
2604 	struct btree_op	op;
2605 	unsigned int	nr_found;
2606 	struct keybuf	*buf;
2607 	struct bkey	*end;
2608 	keybuf_pred_fn	*pred;
2609 };
2610 
2611 static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2612 			    struct bkey *k)
2613 {
2614 	struct refill *refill = container_of(op, struct refill, op);
2615 	struct keybuf *buf = refill->buf;
2616 	int ret = MAP_CONTINUE;
2617 
2618 	if (bkey_cmp(k, refill->end) > 0) {
2619 		ret = MAP_DONE;
2620 		goto out;
2621 	}
2622 
2623 	if (!KEY_SIZE(k)) /* end key */
2624 		goto out;
2625 
2626 	if (refill->pred(buf, k)) {
2627 		struct keybuf_key *w;
2628 
2629 		spin_lock(&buf->lock);
2630 
2631 		w = array_alloc(&buf->freelist);
2632 		if (!w) {
2633 			spin_unlock(&buf->lock);
2634 			return MAP_DONE;
2635 		}
2636 
2637 		w->private = NULL;
2638 		bkey_copy(&w->key, k);
2639 
2640 		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2641 			array_free(&buf->freelist, w);
2642 		else
2643 			refill->nr_found++;
2644 
2645 		if (array_freelist_empty(&buf->freelist))
2646 			ret = MAP_DONE;
2647 
2648 		spin_unlock(&buf->lock);
2649 	}
2650 out:
2651 	buf->last_scanned = *k;
2652 	return ret;
2653 }
2654 
2655 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
2656 		       struct bkey *end, keybuf_pred_fn *pred)
2657 {
2658 	struct bkey start = buf->last_scanned;
2659 	struct refill refill;
2660 
2661 	cond_resched();
2662 
2663 	bch_btree_op_init(&refill.op, -1);
2664 	refill.nr_found	= 0;
2665 	refill.buf	= buf;
2666 	refill.end	= end;
2667 	refill.pred	= pred;
2668 
2669 	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2670 			   refill_keybuf_fn, MAP_END_KEY);
2671 
2672 	trace_bcache_keyscan(refill.nr_found,
2673 			     KEY_INODE(&start), KEY_OFFSET(&start),
2674 			     KEY_INODE(&buf->last_scanned),
2675 			     KEY_OFFSET(&buf->last_scanned));
2676 
2677 	spin_lock(&buf->lock);
2678 
2679 	if (!RB_EMPTY_ROOT(&buf->keys)) {
2680 		struct keybuf_key *w;
2681 
2682 		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2683 		buf->start	= START_KEY(&w->key);
2684 
2685 		w = RB_LAST(&buf->keys, struct keybuf_key, node);
2686 		buf->end	= w->key;
2687 	} else {
2688 		buf->start	= MAX_KEY;
2689 		buf->end	= MAX_KEY;
2690 	}
2691 
2692 	spin_unlock(&buf->lock);
2693 }
2694 
2695 static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2696 {
2697 	rb_erase(&w->node, &buf->keys);
2698 	array_free(&buf->freelist, w);
2699 }
2700 
2701 void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2702 {
2703 	spin_lock(&buf->lock);
2704 	__bch_keybuf_del(buf, w);
2705 	spin_unlock(&buf->lock);
2706 }
2707 
2708 bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2709 				  struct bkey *end)
2710 {
2711 	bool ret = false;
2712 	struct keybuf_key *p, *w, s;
2713 
2714 	s.key = *start;
2715 
2716 	if (bkey_cmp(end, &buf->start) <= 0 ||
2717 	    bkey_cmp(start, &buf->end) >= 0)
2718 		return false;
2719 
2720 	spin_lock(&buf->lock);
2721 	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2722 
2723 	while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2724 		p = w;
2725 		w = RB_NEXT(w, node);
2726 
2727 		if (p->private)
2728 			ret = true;
2729 		else
2730 			__bch_keybuf_del(buf, p);
2731 	}
2732 
2733 	spin_unlock(&buf->lock);
2734 	return ret;
2735 }
2736 
2737 struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2738 {
2739 	struct keybuf_key *w;
2740 
2741 	spin_lock(&buf->lock);
2742 
2743 	w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2744 
2745 	while (w && w->private)
2746 		w = RB_NEXT(w, node);
2747 
2748 	if (w)
2749 		w->private = ERR_PTR(-EINTR);
2750 
2751 	spin_unlock(&buf->lock);
2752 	return w;
2753 }
2754 
2755 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2756 					  struct keybuf *buf,
2757 					  struct bkey *end,
2758 					  keybuf_pred_fn *pred)
2759 {
2760 	struct keybuf_key *ret;
2761 
2762 	while (1) {
2763 		ret = bch_keybuf_next(buf);
2764 		if (ret)
2765 			break;
2766 
2767 		if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2768 			pr_debug("scan finished\n");
2769 			break;
2770 		}
2771 
2772 		bch_refill_keybuf(c, buf, end, pred);
2773 	}
2774 
2775 	return ret;
2776 }
2777 
2778 void bch_keybuf_init(struct keybuf *buf)
2779 {
2780 	buf->last_scanned	= MAX_KEY;
2781 	buf->keys		= RB_ROOT;
2782 
2783 	spin_lock_init(&buf->lock);
2784 	array_allocator_init(&buf->freelist);
2785 }
2786 
2787 void bch_btree_exit(void)
2788 {
2789 	if (btree_io_wq)
2790 		destroy_workqueue(btree_io_wq);
2791 }
2792 
2793 int __init bch_btree_init(void)
2794 {
2795 	btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0);
2796 	if (!btree_io_wq)
2797 		return -ENOMEM;
2798 
2799 	return 0;
2800 }
2801