1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4  *
5  * Uses a block device as cache for other block devices; optimized for SSDs.
6  * All allocation is done in buckets, which should match the erase block size
7  * of the device.
8  *
9  * Buckets containing cached data are kept on a heap sorted by priority;
10  * bucket priority is increased on cache hit, and periodically all the buckets
11  * on the heap have their priority scaled down. This currently is just used as
12  * an LRU but in the future should allow for more intelligent heuristics.
13  *
14  * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
15  * counter. Garbage collection is used to remove stale pointers.
16  *
17  * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
18  * as keys are inserted we only sort the pages that have not yet been written.
19  * When garbage collection is run, we resort the entire node.
20  *
21  * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
22  */
23 
24 #include "bcache.h"
25 #include "btree.h"
26 #include "debug.h"
27 #include "extents.h"
28 
29 #include <linux/slab.h>
30 #include <linux/bitops.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/prefetch.h>
34 #include <linux/random.h>
35 #include <linux/rcupdate.h>
36 #include <linux/sched/clock.h>
37 #include <linux/rculist.h>
38 #include <linux/delay.h>
39 #include <trace/events/bcache.h>
40 
41 /*
42  * Todo:
43  * register_bcache: Return errors out to userspace correctly
44  *
45  * Writeback: don't undirty key until after a cache flush
46  *
47  * Create an iterator for key pointers
48  *
49  * On btree write error, mark bucket such that it won't be freed from the cache
50  *
51  * Journalling:
52  *   Check for bad keys in replay
53  *   Propagate barriers
54  *   Refcount journal entries in journal_replay
55  *
56  * Garbage collection:
57  *   Finish incremental gc
58  *   Gc should free old UUIDs, data for invalid UUIDs
59  *
60  * Provide a way to list backing device UUIDs we have data cached for, and
61  * probably how long it's been since we've seen them, and a way to invalidate
62  * dirty data for devices that will never be attached again
63  *
64  * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
65  * that based on that and how much dirty data we have we can keep writeback
66  * from being starved
67  *
68  * Add a tracepoint or somesuch to watch for writeback starvation
69  *
70  * When btree depth > 1 and splitting an interior node, we have to make sure
71  * alloc_bucket() cannot fail. This should be true but is not completely
72  * obvious.
73  *
74  * Plugging?
75  *
76  * If data write is less than hard sector size of ssd, round up offset in open
77  * bucket to the next whole sector
78  *
79  * Superblock needs to be fleshed out for multiple cache devices
80  *
81  * Add a sysfs tunable for the number of writeback IOs in flight
82  *
83  * Add a sysfs tunable for the number of open data buckets
84  *
85  * IO tracking: Can we track when one process is doing io on behalf of another?
86  * IO tracking: Don't use just an average, weigh more recent stuff higher
87  *
88  * Test module load/unload
89  */
90 
91 #define MAX_NEED_GC		64
92 #define MAX_SAVE_PRIO		72
93 #define MAX_GC_TIMES		100
94 #define MIN_GC_NODES		100
95 #define GC_SLEEP_MS		100
96 
97 #define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))
98 
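/*
 * PTR_HASH() folds a key's first pointer, shifted down by the bucket size,
 * together with that pointer's generation; mca_hash() and mca_find() below
 * use it to index and look up nodes in the in-memory btree node hash table.
 */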
99 #define PTR_HASH(c, k)							\
100 	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
101 
102 #define insert_lock(s, b)	((b)->level <= (s)->lock)
103 
104 /*
105  * These macros are for recursing down the btree - they handle the details of
106  * locking and looking up nodes in the cache for you. They're best treated as
107  * mere syntax when reading code that uses them.
108  *
109  * op->lock determines whether we take a read or a write lock at a given depth.
110  * If you've got a read lock and find that you need a write lock (i.e. you're
111  * going to have to split), set op->lock and return -EINTR; btree_root() will
112  * call you again and you'll have the correct lock.
113  */
114 
115 /**
116  * btree - recurse down the btree on a specified key
117  * @fn:		function to call, which will be passed the child node
118  * @key:	key to recurse on
119  * @b:		parent btree node
120  * @op:		pointer to struct btree_op
121  */
122 #define btree(fn, key, b, op, ...)					\
123 ({									\
124 	int _r, l = (b)->level - 1;					\
125 	bool _w = l <= (op)->lock;					\
126 	struct btree *_child = bch_btree_node_get((b)->c, op, key, l,	\
127 						  _w, b);		\
128 	if (!IS_ERR(_child)) {						\
129 		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\
130 		rw_unlock(_w, _child);					\
131 	} else								\
132 		_r = PTR_ERR(_child);					\
133 	_r;								\
134 })
135 
136 /**
137  * btree_root - call a function on the root of the btree
138  * @fn:		function to call, which will be passed the child node
139  * @c:		cache set
140  * @op:		pointer to struct btree_op
141  */
142 #define btree_root(fn, c, op, ...)					\
143 ({									\
144 	int _r = -EINTR;						\
145 	do {								\
146 		struct btree *_b = (c)->root;				\
147 		bool _w = insert_lock(op, _b);				\
148 		rw_lock(_w, _b, _b->level);				\
149 		if (_b == (c)->root &&					\
150 		    _w == insert_lock(op, _b)) {			\
151 			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
152 		}							\
153 		rw_unlock(_w, _b);					\
154 		bch_cannibalize_unlock(c);				\
155 		if (_r == -EINTR)					\
156 			schedule();					\
157 	} while (_r == -EINTR);						\
158 									\
159 	finish_wait(&(c)->btree_cache_wait, &(op)->wait);		\
160 	_r;								\
161 })
162 
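/*
 * For example, bch_btree_check_recurse() below descends into a child node
 * with
 *
 *	ret = btree(check_recurse, p, b, op);
 *
 * which looks up and locks the child pointed to by @p and calls
 * bch_btree_check_recurse() on it, while bch_btree_check() starts the whole
 * traversal with
 *
 *	return btree_root(check_recurse, c, &op);
 */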
163 static inline struct bset *write_block(struct btree *b)
164 {
165 	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
166 }
167 
168 static void bch_btree_init_next(struct btree *b)
169 {
170 	/* If not a leaf node, always sort */
171 	if (b->level && b->keys.nsets)
172 		bch_btree_sort(&b->keys, &b->c->sort);
173 	else
174 		bch_btree_sort_lazy(&b->keys, &b->c->sort);
175 
176 	if (b->written < btree_blocks(b))
177 		bch_bset_init_next(&b->keys, write_block(b),
178 				   bset_magic(&b->c->sb));
179 
180 }
181 
182 /* Btree key manipulation */
183 
184 void bkey_put(struct cache_set *c, struct bkey *k)
185 {
186 	unsigned int i;
187 
188 	for (i = 0; i < KEY_PTRS(k); i++)
189 		if (ptr_available(c, k, i))
190 			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
191 }
192 
193 /* Btree IO */
194 
195 static uint64_t btree_csum_set(struct btree *b, struct bset *i)
196 {
197 	uint64_t crc = b->key.ptr[0];
198 	void *data = (void *) i + 8, *end = bset_bkey_last(i);
199 
200 	crc = bch_crc64_update(crc, data, end - data);
201 	return crc ^ 0xffffffffffffffffULL;
202 }
203 
204 void bch_btree_node_read_done(struct btree *b)
205 {
206 	const char *err = "bad btree header";
207 	struct bset *i = btree_bset_first(b);
208 	struct btree_iter *iter;
209 
210 	/*
211 	 * c->fill_iter can allocate an iterator with more memory space
212 	 * than static MAX_BSETS.
213 	 * See the comment around cache_set->fill_iter.
214 	 */
215 	iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
216 	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
217 	iter->used = 0;
218 
219 #ifdef CONFIG_BCACHE_DEBUG
220 	iter->b = &b->keys;
221 #endif
222 
223 	if (!i->seq)
224 		goto err;
225 
226 	for (;
227 	     b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
228 	     i = write_block(b)) {
229 		err = "unsupported bset version";
230 		if (i->version > BCACHE_BSET_VERSION)
231 			goto err;
232 
233 		err = "bad btree header";
234 		if (b->written + set_blocks(i, block_bytes(b->c)) >
235 		    btree_blocks(b))
236 			goto err;
237 
238 		err = "bad magic";
239 		if (i->magic != bset_magic(&b->c->sb))
240 			goto err;
241 
242 		err = "bad checksum";
243 		switch (i->version) {
244 		case 0:
245 			if (i->csum != csum_set(i))
246 				goto err;
247 			break;
248 		case BCACHE_BSET_VERSION:
249 			if (i->csum != btree_csum_set(b, i))
250 				goto err;
251 			break;
252 		}
253 
254 		err = "empty set";
255 		if (i != b->keys.set[0].data && !i->keys)
256 			goto err;
257 
258 		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
259 
260 		b->written += set_blocks(i, block_bytes(b->c));
261 	}
262 
263 	err = "corrupted btree";
264 	for (i = write_block(b);
265 	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
266 	     i = ((void *) i) + block_bytes(b->c))
267 		if (i->seq == b->keys.set[0].data->seq)
268 			goto err;
269 
270 	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
271 
272 	i = b->keys.set[0].data;
273 	err = "short btree key";
274 	if (b->keys.set[0].size &&
275 	    bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
276 		goto err;
277 
278 	if (b->written < btree_blocks(b))
279 		bch_bset_init_next(&b->keys, write_block(b),
280 				   bset_magic(&b->c->sb));
281 out:
282 	mempool_free(iter, &b->c->fill_iter);
283 	return;
284 err:
285 	set_btree_node_io_error(b);
286 	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
287 			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
288 			    bset_block_offset(b, i), i->keys);
289 	goto out;
290 }
291 
292 static void btree_node_read_endio(struct bio *bio)
293 {
294 	struct closure *cl = bio->bi_private;
295 
296 	closure_put(cl);
297 }
298 
299 static void bch_btree_node_read(struct btree *b)
300 {
301 	uint64_t start_time = local_clock();
302 	struct closure cl;
303 	struct bio *bio;
304 
305 	trace_bcache_btree_read(b);
306 
307 	closure_init_stack(&cl);
308 
309 	bio = bch_bbio_alloc(b->c);
310 	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
311 	bio->bi_end_io	= btree_node_read_endio;
312 	bio->bi_private	= &cl;
313 	bio->bi_opf = REQ_OP_READ | REQ_META;
314 
315 	bch_bio_map(bio, b->keys.set[0].data);
316 
317 	bch_submit_bbio(bio, b->c, &b->key, 0);
318 	closure_sync(&cl);
319 
320 	if (bio->bi_status)
321 		set_btree_node_io_error(b);
322 
323 	bch_bbio_free(bio, b->c);
324 
325 	if (btree_node_io_error(b))
326 		goto err;
327 
328 	bch_btree_node_read_done(b);
329 	bch_time_stats_update(&b->c->btree_read_time, start_time);
330 
331 	return;
332 err:
333 	bch_cache_set_error(b->c, "io error reading bucket %zu",
334 			    PTR_BUCKET_NR(b->c, &b->key, 0));
335 }
336 
337 static void btree_complete_write(struct btree *b, struct btree_write *w)
338 {
339 	if (w->prio_blocked &&
340 	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
341 		wake_up_allocators(b->c);
342 
343 	if (w->journal) {
344 		atomic_dec_bug(w->journal);
345 		__closure_wake_up(&b->c->journal.wait);
346 	}
347 
348 	w->prio_blocked	= 0;
349 	w->journal	= NULL;
350 }
351 
352 static void btree_node_write_unlock(struct closure *cl)
353 {
354 	struct btree *b = container_of(cl, struct btree, io);
355 
356 	up(&b->io_mutex);
357 }
358 
359 static void __btree_node_write_done(struct closure *cl)
360 {
361 	struct btree *b = container_of(cl, struct btree, io);
362 	struct btree_write *w = btree_prev_write(b);
363 
364 	bch_bbio_free(b->bio, b->c);
365 	b->bio = NULL;
366 	btree_complete_write(b, w);
367 
368 	if (btree_node_dirty(b))
369 		schedule_delayed_work(&b->work, 30 * HZ);
370 
371 	closure_return_with_destructor(cl, btree_node_write_unlock);
372 }
373 
374 static void btree_node_write_done(struct closure *cl)
375 {
376 	struct btree *b = container_of(cl, struct btree, io);
377 
378 	bio_free_pages(b->bio);
379 	__btree_node_write_done(cl);
380 }
381 
382 static void btree_node_write_endio(struct bio *bio)
383 {
384 	struct closure *cl = bio->bi_private;
385 	struct btree *b = container_of(cl, struct btree, io);
386 
387 	if (bio->bi_status)
388 		set_btree_node_io_error(b);
389 
390 	bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
391 	closure_put(cl);
392 }
393 
394 static void do_btree_node_write(struct btree *b)
395 {
396 	struct closure *cl = &b->io;
397 	struct bset *i = btree_bset_last(b);
398 	BKEY_PADDED(key) k;
399 
400 	i->version	= BCACHE_BSET_VERSION;
401 	i->csum		= btree_csum_set(b, i);
402 
403 	BUG_ON(b->bio);
404 	b->bio = bch_bbio_alloc(b->c);
405 
406 	b->bio->bi_end_io	= btree_node_write_endio;
407 	b->bio->bi_private	= cl;
408 	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c));
409 	b->bio->bi_opf		= REQ_OP_WRITE | REQ_META | REQ_FUA;
410 	bch_bio_map(b->bio, i);
411 
412 	/*
413 	 * If we're appending to a leaf node, we don't technically need FUA -
414 	 * this write just needs to be persisted before the next journal write,
415 	 * which will be marked FLUSH|FUA.
416 	 *
417 	 * Similarly if we're writing a new btree root - the pointer is going to
418 	 * be in the next journal entry.
419 	 *
420 	 * But if we're writing a new btree node (that isn't a root) or
421 	 * appending to a non leaf btree node, we need either FUA or a flush
422 	 * when we write the parent with the new pointer. FUA is cheaper than a
423 	 * flush, and writes appending to leaf nodes aren't blocking anything so
424 	 * just make all btree node writes FUA to keep things sane.
425 	 */
426 
427 	bkey_copy(&k.key, &b->key);
428 	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
429 		       bset_sector_offset(&b->keys, i));
430 
431 	if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
432 		struct bio_vec *bv;
433 		void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
434 		struct bvec_iter_all iter_all;
435 
436 		bio_for_each_segment_all(bv, b->bio, iter_all) {
437 			memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
438 			addr += PAGE_SIZE;
439 		}
440 
441 		bch_submit_bbio(b->bio, b->c, &k.key, 0);
442 
443 		continue_at(cl, btree_node_write_done, NULL);
444 	} else {
445 		/*
446 		 * No problem for multipage bvec since the bio is
447 		 * just allocated
448 		 */
449 		b->bio->bi_vcnt = 0;
450 		bch_bio_map(b->bio, i);
451 
452 		bch_submit_bbio(b->bio, b->c, &k.key, 0);
453 
454 		closure_sync(cl);
455 		continue_at_nobarrier(cl, __btree_node_write_done, NULL);
456 	}
457 }
458 
459 void __bch_btree_node_write(struct btree *b, struct closure *parent)
460 {
461 	struct bset *i = btree_bset_last(b);
462 
463 	lockdep_assert_held(&b->write_lock);
464 
465 	trace_bcache_btree_write(b);
466 
467 	BUG_ON(current->bio_list);
468 	BUG_ON(b->written >= btree_blocks(b));
469 	BUG_ON(b->written && !i->keys);
470 	BUG_ON(btree_bset_first(b)->seq != i->seq);
471 	bch_check_keys(&b->keys, "writing");
472 
473 	cancel_delayed_work(&b->work);
474 
475 	/* If caller isn't waiting for write, parent refcount is cache set */
476 	down(&b->io_mutex);
477 	closure_init(&b->io, parent ?: &b->c->cl);
478 
479 	clear_bit(BTREE_NODE_dirty,	 &b->flags);
480 	change_bit(BTREE_NODE_write_idx, &b->flags);
481 
482 	do_btree_node_write(b);
483 
484 	atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
485 			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
486 
487 	b->written += set_blocks(i, block_bytes(b->c));
488 }
489 
490 void bch_btree_node_write(struct btree *b, struct closure *parent)
491 {
492 	unsigned int nsets = b->keys.nsets;
493 
494 	lockdep_assert_held(&b->lock);
495 
496 	__bch_btree_node_write(b, parent);
497 
498 	/*
499 	 * do verify if there was more than one set initially (i.e. we did a
500 	 * sort) and we sorted down to a single set:
501 	 */
502 	if (nsets && !b->keys.nsets)
503 		bch_btree_verify(b);
504 
505 	bch_btree_init_next(b);
506 }
507 
508 static void bch_btree_node_write_sync(struct btree *b)
509 {
510 	struct closure cl;
511 
512 	closure_init_stack(&cl);
513 
514 	mutex_lock(&b->write_lock);
515 	bch_btree_node_write(b, &cl);
516 	mutex_unlock(&b->write_lock);
517 
518 	closure_sync(&cl);
519 }
520 
521 static void btree_node_write_work(struct work_struct *w)
522 {
523 	struct btree *b = container_of(to_delayed_work(w), struct btree, work);
524 
525 	mutex_lock(&b->write_lock);
526 	if (btree_node_dirty(b))
527 		__bch_btree_node_write(b, NULL);
528 	mutex_unlock(&b->write_lock);
529 }
530 
531 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
532 {
533 	struct bset *i = btree_bset_last(b);
534 	struct btree_write *w = btree_current_write(b);
535 
536 	lockdep_assert_held(&b->write_lock);
537 
538 	BUG_ON(!b->written);
539 	BUG_ON(!i->keys);
540 
541 	if (!btree_node_dirty(b))
542 		schedule_delayed_work(&b->work, 30 * HZ);
543 
544 	set_btree_node_dirty(b);
545 
546 	if (journal_ref) {
547 		if (w->journal &&
548 		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
549 			atomic_dec_bug(w->journal);
550 			w->journal = NULL;
551 		}
552 
553 		if (!w->journal) {
554 			w->journal = journal_ref;
555 			atomic_inc(w->journal);
556 		}
557 	}
558 
559 	/* Force write if set is too big */
560 	if (set_bytes(i) > PAGE_SIZE - 48 &&
561 	    !current->bio_list)
562 		bch_btree_node_write(b, NULL);
563 }
564 
565 /*
566  * Btree in memory cache - allocation/freeing
567  * mca -> memory cache
568  */
569 
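/*
 * Minimum number of cached btree nodes to always keep in reserve: 8 per
 * level of the current root plus 16. bch_mca_scan() below only frees nodes
 * beyond this reserve, via mca_can_free().
 */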
570 #define mca_reserve(c)	(((c->root && c->root->level)		\
571 			  ? c->root->level : 1) * 8 + 16)
572 #define mca_can_free(c)						\
573 	max_t(int, 0, c->btree_cache_used - mca_reserve(c))
574 
575 static void mca_data_free(struct btree *b)
576 {
577 	BUG_ON(b->io_mutex.count != 1);
578 
579 	bch_btree_keys_free(&b->keys);
580 
581 	b->c->btree_cache_used--;
582 	list_move(&b->list, &b->c->btree_cache_freed);
583 }
584 
585 static void mca_bucket_free(struct btree *b)
586 {
587 	BUG_ON(btree_node_dirty(b));
588 
589 	b->key.ptr[0] = 0;
590 	hlist_del_init_rcu(&b->hash);
591 	list_move(&b->list, &b->c->btree_cache_freeable);
592 }
593 
594 static unsigned int btree_order(struct bkey *k)
595 {
596 	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
597 }
598 
599 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
600 {
601 	if (!bch_btree_keys_alloc(&b->keys,
602 				  max_t(unsigned int,
603 					ilog2(b->c->btree_pages),
604 					btree_order(k)),
605 				  gfp)) {
606 		b->c->btree_cache_used++;
607 		list_move(&b->list, &b->c->btree_cache);
608 	} else {
609 		list_move(&b->list, &b->c->btree_cache_freed);
610 	}
611 }
612 
613 static struct btree *mca_bucket_alloc(struct cache_set *c,
614 				      struct bkey *k, gfp_t gfp)
615 {
616 	/*
617 	 * kzalloc() is necessary here for initialization,
618 	 * see code comments in bch_btree_keys_init().
619 	 */
620 	struct btree *b = kzalloc(sizeof(struct btree), gfp);
621 
622 	if (!b)
623 		return NULL;
624 
625 	init_rwsem(&b->lock);
626 	lockdep_set_novalidate_class(&b->lock);
627 	mutex_init(&b->write_lock);
628 	lockdep_set_novalidate_class(&b->write_lock);
629 	INIT_LIST_HEAD(&b->list);
630 	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
631 	b->c = c;
632 	sema_init(&b->io_mutex, 1);
633 
634 	mca_data_alloc(b, k, gfp);
635 	return b;
636 }
637 
638 static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
639 {
640 	struct closure cl;
641 
642 	closure_init_stack(&cl);
643 	lockdep_assert_held(&b->c->bucket_lock);
644 
645 	if (!down_write_trylock(&b->lock))
646 		return -ENOMEM;
647 
648 	BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
649 
650 	if (b->keys.page_order < min_order)
651 		goto out_unlock;
652 
653 	if (!flush) {
654 		if (btree_node_dirty(b))
655 			goto out_unlock;
656 
657 		if (down_trylock(&b->io_mutex))
658 			goto out_unlock;
659 		up(&b->io_mutex);
660 	}
661 
662 retry:
663 	/*
664 	 * BTREE_NODE_dirty might be cleared in btree_flush_write() by
665 	 * __bch_btree_node_write(). To avoid an extra flush, acquire
666 	 * b->write_lock before checking BTREE_NODE_dirty bit.
667 	 */
668 	mutex_lock(&b->write_lock);
669 	/*
670 	 * If this btree node is selected in btree_flush_write() by journal
671 	 * code, delay and retry until the node is flushed by journal code
672 	 * and BTREE_NODE_journal_flush bit cleared by btree_flush_write().
673 	 */
674 	if (btree_node_journal_flush(b)) {
675 		pr_debug("bnode %p is flushing by journal, retry", b);
676 		mutex_unlock(&b->write_lock);
677 		udelay(1);
678 		goto retry;
679 	}
680 
681 	if (btree_node_dirty(b))
682 		__bch_btree_node_write(b, &cl);
683 	mutex_unlock(&b->write_lock);
684 
685 	closure_sync(&cl);
686 
687 	/* wait for any in flight btree write */
688 	down(&b->io_mutex);
689 	up(&b->io_mutex);
690 
691 	return 0;
692 out_unlock:
693 	rw_unlock(true, b);
694 	return -ENOMEM;
695 }
696 
697 static unsigned long bch_mca_scan(struct shrinker *shrink,
698 				  struct shrink_control *sc)
699 {
700 	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
701 	struct btree *b, *t;
702 	unsigned long i, nr = sc->nr_to_scan;
703 	unsigned long freed = 0;
704 	unsigned int btree_cache_used;
705 
706 	if (c->shrinker_disabled)
707 		return SHRINK_STOP;
708 
709 	if (c->btree_cache_alloc_lock)
710 		return SHRINK_STOP;
711 
712 	/* Return -1 if we can't do anything right now */
713 	if (sc->gfp_mask & __GFP_IO)
714 		mutex_lock(&c->bucket_lock);
715 	else if (!mutex_trylock(&c->bucket_lock))
716 		return -1;
717 
718 	/*
719 	 * It's _really_ critical that we don't free too many btree nodes - we
720 	 * have to always leave ourselves a reserve. The reserve is how we
721 	 * guarantee that allocating memory for a new btree node can always
722 	 * succeed, so that inserting keys into the btree can always succeed and
723 	 * IO can always make forward progress:
724 	 */
725 	nr /= c->btree_pages;
726 	if (nr == 0)
727 		nr = 1;
728 	nr = min_t(unsigned long, nr, mca_can_free(c));
729 
730 	i = 0;
731 	btree_cache_used = c->btree_cache_used;
732 	list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
733 		if (nr <= 0)
734 			goto out;
735 
736 		if (++i > 3 &&
737 		    !mca_reap(b, 0, false)) {
738 			mca_data_free(b);
739 			rw_unlock(true, b);
740 			freed++;
741 		}
742 		nr--;
743 	}
744 
745 	for (;  (nr--) && i < btree_cache_used; i++) {
746 		if (list_empty(&c->btree_cache))
747 			goto out;
748 
749 		b = list_first_entry(&c->btree_cache, struct btree, list);
750 		list_rotate_left(&c->btree_cache);
751 
752 		if (!b->accessed &&
753 		    !mca_reap(b, 0, false)) {
754 			mca_bucket_free(b);
755 			mca_data_free(b);
756 			rw_unlock(true, b);
757 			freed++;
758 		} else
759 			b->accessed = 0;
760 	}
761 out:
762 	mutex_unlock(&c->bucket_lock);
763 	return freed * c->btree_pages;
764 }
765 
766 static unsigned long bch_mca_count(struct shrinker *shrink,
767 				   struct shrink_control *sc)
768 {
769 	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
770 
771 	if (c->shrinker_disabled)
772 		return 0;
773 
774 	if (c->btree_cache_alloc_lock)
775 		return 0;
776 
777 	return mca_can_free(c) * c->btree_pages;
778 }
779 
780 void bch_btree_cache_free(struct cache_set *c)
781 {
782 	struct btree *b;
783 	struct closure cl;
784 
785 	closure_init_stack(&cl);
786 
787 	if (c->shrink.list.next)
788 		unregister_shrinker(&c->shrink);
789 
790 	mutex_lock(&c->bucket_lock);
791 
792 #ifdef CONFIG_BCACHE_DEBUG
793 	if (c->verify_data)
794 		list_move(&c->verify_data->list, &c->btree_cache);
795 
796 	free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
797 #endif
798 
799 	list_splice(&c->btree_cache_freeable,
800 		    &c->btree_cache);
801 
802 	while (!list_empty(&c->btree_cache)) {
803 		b = list_first_entry(&c->btree_cache, struct btree, list);
804 
805 		/*
806 		 * This function is called by cache_set_free(), no I/O
807 		 * request on cache now, it is unnecessary to acquire
808 		 * b->write_lock before clearing BTREE_NODE_dirty anymore.
809 		 */
810 		if (btree_node_dirty(b)) {
811 			btree_complete_write(b, btree_current_write(b));
812 			clear_bit(BTREE_NODE_dirty, &b->flags);
813 		}
814 		mca_data_free(b);
815 	}
816 
817 	while (!list_empty(&c->btree_cache_freed)) {
818 		b = list_first_entry(&c->btree_cache_freed,
819 				     struct btree, list);
820 		list_del(&b->list);
821 		cancel_delayed_work_sync(&b->work);
822 		kfree(b);
823 	}
824 
825 	mutex_unlock(&c->bucket_lock);
826 }
827 
828 int bch_btree_cache_alloc(struct cache_set *c)
829 {
830 	unsigned int i;
831 
832 	for (i = 0; i < mca_reserve(c); i++)
833 		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
834 			return -ENOMEM;
835 
836 	list_splice_init(&c->btree_cache,
837 			 &c->btree_cache_freeable);
838 
839 #ifdef CONFIG_BCACHE_DEBUG
840 	mutex_init(&c->verify_lock);
841 
842 	c->verify_ondisk = (void *)
843 		__get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
844 
845 	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
846 
847 	if (c->verify_data &&
848 	    c->verify_data->keys.set->data)
849 		list_del_init(&c->verify_data->list);
850 	else
851 		c->verify_data = NULL;
852 #endif
853 
854 	c->shrink.count_objects = bch_mca_count;
855 	c->shrink.scan_objects = bch_mca_scan;
856 	c->shrink.seeks = 4;
857 	c->shrink.batch = c->btree_pages * 2;
858 
859 	if (register_shrinker(&c->shrink))
860 		pr_warn("bcache: %s: could not register shrinker",
861 				__func__);
862 
863 	return 0;
864 }
865 
866 /* Btree in memory cache - hash table */
867 
868 static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
869 {
870 	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
871 }
872 
873 static struct btree *mca_find(struct cache_set *c, struct bkey *k)
874 {
875 	struct btree *b;
876 
877 	rcu_read_lock();
878 	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
879 		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
880 			goto out;
881 	b = NULL;
882 out:
883 	rcu_read_unlock();
884 	return b;
885 }
886 
887 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
888 {
889 	struct task_struct *old;
890 
891 	old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
892 	if (old && old != current) {
893 		if (op)
894 			prepare_to_wait(&c->btree_cache_wait, &op->wait,
895 					TASK_UNINTERRUPTIBLE);
896 		return -EINTR;
897 	}
898 
899 	return 0;
900 }
901 
902 static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
903 				     struct bkey *k)
904 {
905 	struct btree *b;
906 
907 	trace_bcache_btree_cache_cannibalize(c);
908 
909 	if (mca_cannibalize_lock(c, op))
910 		return ERR_PTR(-EINTR);
911 
912 	list_for_each_entry_reverse(b, &c->btree_cache, list)
913 		if (!mca_reap(b, btree_order(k), false))
914 			return b;
915 
916 	list_for_each_entry_reverse(b, &c->btree_cache, list)
917 		if (!mca_reap(b, btree_order(k), true))
918 			return b;
919 
920 	WARN(1, "btree cache cannibalize failed\n");
921 	return ERR_PTR(-ENOMEM);
922 }
923 
924 /*
925  * We can only have one thread cannibalizing other cached btree nodes at a time,
926  * or we'll deadlock. We use an open coded mutex to ensure that, which
927  * mca_cannibalize_lock() will take. This means every time we unlock the root of
928  * the btree, we need to release this lock if we have it held.
929  */
930 static void bch_cannibalize_unlock(struct cache_set *c)
931 {
932 	if (c->btree_cache_alloc_lock == current) {
933 		c->btree_cache_alloc_lock = NULL;
934 		wake_up(&c->btree_cache_wait);
935 	}
936 }
937 
938 static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
939 			       struct bkey *k, int level)
940 {
941 	struct btree *b;
942 
943 	BUG_ON(current->bio_list);
944 
945 	lockdep_assert_held(&c->bucket_lock);
946 
947 	if (mca_find(c, k))
948 		return NULL;
949 
950 	/* btree_free() doesn't free memory; it sticks the node on the end of
951 	 * the list. Check if there's any freed nodes there:
952 	 */
953 	list_for_each_entry(b, &c->btree_cache_freeable, list)
954 		if (!mca_reap(b, btree_order(k), false))
955 			goto out;
956 
957 	/* We never free struct btree itself, just the memory that holds the on
958 	 * disk node. Check the freed list before allocating a new one:
959 	 */
960 	list_for_each_entry(b, &c->btree_cache_freed, list)
961 		if (!mca_reap(b, 0, false)) {
962 			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
963 			if (!b->keys.set[0].data)
964 				goto err;
965 			else
966 				goto out;
967 		}
968 
969 	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
970 	if (!b)
971 		goto err;
972 
973 	BUG_ON(!down_write_trylock(&b->lock));
974 	if (!b->keys.set->data)
975 		goto err;
976 out:
977 	BUG_ON(b->io_mutex.count != 1);
978 
979 	bkey_copy(&b->key, k);
980 	list_move(&b->list, &c->btree_cache);
981 	hlist_del_init_rcu(&b->hash);
982 	hlist_add_head_rcu(&b->hash, mca_hash(c, k));
983 
984 	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
985 	b->parent	= (void *) ~0UL;
986 	b->flags	= 0;
987 	b->written	= 0;
988 	b->level	= level;
989 
990 	if (!b->level)
991 		bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
992 				    &b->c->expensive_debug_checks);
993 	else
994 		bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
995 				    &b->c->expensive_debug_checks);
996 
997 	return b;
998 err:
999 	if (b)
1000 		rw_unlock(true, b);
1001 
1002 	b = mca_cannibalize(c, op, k);
1003 	if (!IS_ERR(b))
1004 		goto out;
1005 
1006 	return b;
1007 }
1008 
1009 /*
1010  * bch_btree_node_get - find a btree node in the cache and lock it, reading it
1011  * in from disk if necessary.
1012  *
1013  * If IO is necessary and running under generic_make_request, returns -EAGAIN.
1014  *
1015  * The btree node will have either a read or a write lock held, depending on
1016  * level and op->lock.
1017  */
1018 struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
1019 				 struct bkey *k, int level, bool write,
1020 				 struct btree *parent)
1021 {
1022 	int i = 0;
1023 	struct btree *b;
1024 
1025 	BUG_ON(level < 0);
1026 retry:
1027 	b = mca_find(c, k);
1028 
1029 	if (!b) {
1030 		if (current->bio_list)
1031 			return ERR_PTR(-EAGAIN);
1032 
1033 		mutex_lock(&c->bucket_lock);
1034 		b = mca_alloc(c, op, k, level);
1035 		mutex_unlock(&c->bucket_lock);
1036 
1037 		if (!b)
1038 			goto retry;
1039 		if (IS_ERR(b))
1040 			return b;
1041 
1042 		bch_btree_node_read(b);
1043 
1044 		if (!write)
1045 			downgrade_write(&b->lock);
1046 	} else {
1047 		rw_lock(write, b, level);
1048 		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
1049 			rw_unlock(write, b);
1050 			goto retry;
1051 		}
1052 		BUG_ON(b->level != level);
1053 	}
1054 
1055 	if (btree_node_io_error(b)) {
1056 		rw_unlock(write, b);
1057 		return ERR_PTR(-EIO);
1058 	}
1059 
1060 	BUG_ON(!b->written);
1061 
1062 	b->parent = parent;
1063 	b->accessed = 1;
1064 
1065 	for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1066 		prefetch(b->keys.set[i].tree);
1067 		prefetch(b->keys.set[i].data);
1068 	}
1069 
1070 	for (; i <= b->keys.nsets; i++)
1071 		prefetch(b->keys.set[i].data);
1072 
1073 	return b;
1074 }
1075 
1076 static void btree_node_prefetch(struct btree *parent, struct bkey *k)
1077 {
1078 	struct btree *b;
1079 
1080 	mutex_lock(&parent->c->bucket_lock);
1081 	b = mca_alloc(parent->c, NULL, k, parent->level - 1);
1082 	mutex_unlock(&parent->c->bucket_lock);
1083 
1084 	if (!IS_ERR_OR_NULL(b)) {
1085 		b->parent = parent;
1086 		bch_btree_node_read(b);
1087 		rw_unlock(true, b);
1088 	}
1089 }
1090 
1091 /* Btree alloc */
1092 
1093 static void btree_node_free(struct btree *b)
1094 {
1095 	trace_bcache_btree_node_free(b);
1096 
1097 	BUG_ON(b == b->c->root);
1098 
1099 retry:
1100 	mutex_lock(&b->write_lock);
1101 	/*
1102 	 * If the btree node is selected and flushing in btree_flush_write(),
1103 	 * delay and retry until the BTREE_NODE_journal_flush bit cleared,
1104 	 * then it is safe to free the btree node here. Otherwise this btree
1105 	 * node will be in race condition.
1106 	 */
1107 	if (btree_node_journal_flush(b)) {
1108 		mutex_unlock(&b->write_lock);
1109 		pr_debug("bnode %p journal_flush set, retry", b);
1110 		udelay(1);
1111 		goto retry;
1112 	}
1113 
1114 	if (btree_node_dirty(b)) {
1115 		btree_complete_write(b, btree_current_write(b));
1116 		clear_bit(BTREE_NODE_dirty, &b->flags);
1117 	}
1118 
1119 	mutex_unlock(&b->write_lock);
1120 
1121 	cancel_delayed_work(&b->work);
1122 
1123 	mutex_lock(&b->c->bucket_lock);
1124 	bch_bucket_free(b->c, &b->key);
1125 	mca_bucket_free(b);
1126 	mutex_unlock(&b->c->bucket_lock);
1127 }
1128 
1129 struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
1130 				     int level, bool wait,
1131 				     struct btree *parent)
1132 {
1133 	BKEY_PADDED(key) k;
1134 	struct btree *b = ERR_PTR(-EAGAIN);
1135 
1136 	mutex_lock(&c->bucket_lock);
1137 retry:
1138 	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
1139 		goto err;
1140 
1141 	bkey_put(c, &k.key);
1142 	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1143 
1144 	b = mca_alloc(c, op, &k.key, level);
1145 	if (IS_ERR(b))
1146 		goto err_free;
1147 
1148 	if (!b) {
1149 		cache_bug(c,
1150 			"Tried to allocate bucket that was in btree cache");
1151 		goto retry;
1152 	}
1153 
1154 	b->accessed = 1;
1155 	b->parent = parent;
1156 	bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
1157 
1158 	mutex_unlock(&c->bucket_lock);
1159 
1160 	trace_bcache_btree_node_alloc(b);
1161 	return b;
1162 err_free:
1163 	bch_bucket_free(c, &k.key);
1164 err:
1165 	mutex_unlock(&c->bucket_lock);
1166 
1167 	trace_bcache_btree_node_alloc_fail(c);
1168 	return b;
1169 }
1170 
1171 static struct btree *bch_btree_node_alloc(struct cache_set *c,
1172 					  struct btree_op *op, int level,
1173 					  struct btree *parent)
1174 {
1175 	return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1176 }
1177 
1178 static struct btree *btree_node_alloc_replacement(struct btree *b,
1179 						  struct btree_op *op)
1180 {
1181 	struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1182 
1183 	if (!IS_ERR_OR_NULL(n)) {
1184 		mutex_lock(&n->write_lock);
1185 		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1186 		bkey_copy_key(&n->key, &b->key);
1187 		mutex_unlock(&n->write_lock);
1188 	}
1189 
1190 	return n;
1191 }
1192 
1193 static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1194 {
1195 	unsigned int i;
1196 
1197 	mutex_lock(&b->c->bucket_lock);
1198 
1199 	atomic_inc(&b->c->prio_blocked);
1200 
1201 	bkey_copy(k, &b->key);
1202 	bkey_copy_key(k, &ZERO_KEY);
1203 
1204 	for (i = 0; i < KEY_PTRS(k); i++)
1205 		SET_PTR_GEN(k, i,
1206 			    bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
1207 					PTR_BUCKET(b->c, &b->key, i)));
1208 
1209 	mutex_unlock(&b->c->bucket_lock);
1210 }
1211 
1212 static int btree_check_reserve(struct btree *b, struct btree_op *op)
1213 {
1214 	struct cache_set *c = b->c;
1215 	struct cache *ca;
1216 	unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;
1217 
1218 	mutex_lock(&c->bucket_lock);
1219 
1220 	for_each_cache(ca, c, i)
1221 		if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1222 			if (op)
1223 				prepare_to_wait(&c->btree_cache_wait, &op->wait,
1224 						TASK_UNINTERRUPTIBLE);
1225 			mutex_unlock(&c->bucket_lock);
1226 			return -EINTR;
1227 		}
1228 
1229 	mutex_unlock(&c->bucket_lock);
1230 
1231 	return mca_cannibalize_lock(b->c, op);
1232 }
1233 
1234 /* Garbage collection */
1235 
1236 static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1237 				    struct bkey *k)
1238 {
1239 	uint8_t stale = 0;
1240 	unsigned int i;
1241 	struct bucket *g;
1242 
1243 	/*
1244 	 * ptr_invalid() can't return true for the keys that mark btree nodes as
1245 	 * freed, but since ptr_bad() returns true we'll never actually use them
1246 	 * for anything and thus we don't want to mark their pointers here
1247 	 */
1248 	if (!bkey_cmp(k, &ZERO_KEY))
1249 		return stale;
1250 
1251 	for (i = 0; i < KEY_PTRS(k); i++) {
1252 		if (!ptr_available(c, k, i))
1253 			continue;
1254 
1255 		g = PTR_BUCKET(c, k, i);
1256 
1257 		if (gen_after(g->last_gc, PTR_GEN(k, i)))
1258 			g->last_gc = PTR_GEN(k, i);
1259 
1260 		if (ptr_stale(c, k, i)) {
1261 			stale = max(stale, ptr_stale(c, k, i));
1262 			continue;
1263 		}
1264 
1265 		cache_bug_on(GC_MARK(g) &&
1266 			     (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1267 			     c, "inconsistent ptrs: mark = %llu, level = %i",
1268 			     GC_MARK(g), level);
1269 
1270 		if (level)
1271 			SET_GC_MARK(g, GC_MARK_METADATA);
1272 		else if (KEY_DIRTY(k))
1273 			SET_GC_MARK(g, GC_MARK_DIRTY);
1274 		else if (!GC_MARK(g))
1275 			SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
1276 
1277 		/* guard against overflow */
1278 		SET_GC_SECTORS_USED(g, min_t(unsigned int,
1279 					     GC_SECTORS_USED(g) + KEY_SIZE(k),
1280 					     MAX_GC_SECTORS_USED));
1281 
1282 		BUG_ON(!GC_SECTORS_USED(g));
1283 	}
1284 
1285 	return stale;
1286 }
1287 
1288 #define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k)
1289 
1290 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1291 {
1292 	unsigned int i;
1293 
1294 	for (i = 0; i < KEY_PTRS(k); i++)
1295 		if (ptr_available(c, k, i) &&
1296 		    !ptr_stale(c, k, i)) {
1297 			struct bucket *b = PTR_BUCKET(c, k, i);
1298 
1299 			b->gen = PTR_GEN(k, i);
1300 
1301 			if (level && bkey_cmp(k, &ZERO_KEY))
1302 				b->prio = BTREE_PRIO;
1303 			else if (!level && b->prio == BTREE_PRIO)
1304 				b->prio = INITIAL_PRIO;
1305 		}
1306 
1307 	__bch_btree_mark_key(c, level, k);
1308 }
1309 
1310 void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
1311 {
1312 	stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
1313 }
1314 
1315 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1316 {
1317 	uint8_t stale = 0;
1318 	unsigned int keys = 0, good_keys = 0;
1319 	struct bkey *k;
1320 	struct btree_iter iter;
1321 	struct bset_tree *t;
1322 
1323 	gc->nodes++;
1324 
1325 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1326 		stale = max(stale, btree_mark_key(b, k));
1327 		keys++;
1328 
1329 		if (bch_ptr_bad(&b->keys, k))
1330 			continue;
1331 
1332 		gc->key_bytes += bkey_u64s(k);
1333 		gc->nkeys++;
1334 		good_keys++;
1335 
1336 		gc->data += KEY_SIZE(k);
1337 	}
1338 
1339 	for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1340 		btree_bug_on(t->size &&
1341 			     bset_written(&b->keys, t) &&
1342 			     bkey_cmp(&b->key, &t->end) < 0,
1343 			     b, "found short btree key in gc");
1344 
1345 	if (b->c->gc_always_rewrite)
1346 		return true;
1347 
1348 	if (stale > 10)
1349 		return true;
1350 
1351 	if ((keys - good_keys) * 2 > keys)
1352 		return true;
1353 
1354 	return false;
1355 }
1356 
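/*
 * btree_gc_recurse() keeps a sliding window of up to GC_MERGE_NODES
 * adjacent child nodes (struct gc_merge_info) so that btree_gc_coalesce()
 * can try to merge them into fewer nodes.
 */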
1357 #define GC_MERGE_NODES	4U
1358 
1359 struct gc_merge_info {
1360 	struct btree	*b;
1361 	unsigned int	keys;
1362 };
1363 
1364 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
1365 				 struct keylist *insert_keys,
1366 				 atomic_t *journal_ref,
1367 				 struct bkey *replace_key);
1368 
1369 static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1370 			     struct gc_stat *gc, struct gc_merge_info *r)
1371 {
1372 	unsigned int i, nodes = 0, keys = 0, blocks;
1373 	struct btree *new_nodes[GC_MERGE_NODES];
1374 	struct keylist keylist;
1375 	struct closure cl;
1376 	struct bkey *k;
1377 
1378 	bch_keylist_init(&keylist);
1379 
1380 	if (btree_check_reserve(b, NULL))
1381 		return 0;
1382 
1383 	memset(new_nodes, 0, sizeof(new_nodes));
1384 	closure_init_stack(&cl);
1385 
1386 	while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
1387 		keys += r[nodes++].keys;
1388 
1389 	blocks = btree_default_blocks(b->c) * 2 / 3;
1390 
1391 	if (nodes < 2 ||
1392 	    __set_blocks(b->keys.set[0].data, keys,
1393 			 block_bytes(b->c)) > blocks * (nodes - 1))
1394 		return 0;
1395 
1396 	for (i = 0; i < nodes; i++) {
1397 		new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1398 		if (IS_ERR_OR_NULL(new_nodes[i]))
1399 			goto out_nocoalesce;
1400 	}
1401 
1402 	/*
1403 	 * We have to check the reserve here, after we've allocated our new
1404 	 * nodes, to make sure the insert below will succeed - we also check
1405 	 * before as an optimization to potentially avoid a bunch of expensive
1406 	 * allocs/sorts
1407 	 */
1408 	if (btree_check_reserve(b, NULL))
1409 		goto out_nocoalesce;
1410 
1411 	for (i = 0; i < nodes; i++)
1412 		mutex_lock(&new_nodes[i]->write_lock);
1413 
1414 	for (i = nodes - 1; i > 0; --i) {
1415 		struct bset *n1 = btree_bset_first(new_nodes[i]);
1416 		struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1417 		struct bkey *k, *last = NULL;
1418 
1419 		keys = 0;
1420 
1421 		if (i > 1) {
1422 			for (k = n2->start;
1423 			     k < bset_bkey_last(n2);
1424 			     k = bkey_next(k)) {
1425 				if (__set_blocks(n1, n1->keys + keys +
1426 						 bkey_u64s(k),
1427 						 block_bytes(b->c)) > blocks)
1428 					break;
1429 
1430 				last = k;
1431 				keys += bkey_u64s(k);
1432 			}
1433 		} else {
1434 			/*
1435 			 * Last node we're not getting rid of - we're getting
1436 			 * rid of the node at r[0]. Have to try and fit all of
1437 			 * the remaining keys into this node; we can't ensure
1438 			 * they will always fit due to rounding and variable
1439 			 * length keys (shouldn't be possible in practice,
1440 			 * though)
1441 			 */
1442 			if (__set_blocks(n1, n1->keys + n2->keys,
1443 					 block_bytes(b->c)) >
1444 			    btree_blocks(new_nodes[i]))
1445 				goto out_nocoalesce;
1446 
1447 			keys = n2->keys;
1448 			/* Take the key of the node we're getting rid of */
1449 			last = &r->b->key;
1450 		}
1451 
1452 		BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
1453 		       btree_blocks(new_nodes[i]));
1454 
1455 		if (last)
1456 			bkey_copy_key(&new_nodes[i]->key, last);
1457 
1458 		memcpy(bset_bkey_last(n1),
1459 		       n2->start,
1460 		       (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1461 
1462 		n1->keys += keys;
1463 		r[i].keys = n1->keys;
1464 
1465 		memmove(n2->start,
1466 			bset_bkey_idx(n2, keys),
1467 			(void *) bset_bkey_last(n2) -
1468 			(void *) bset_bkey_idx(n2, keys));
1469 
1470 		n2->keys -= keys;
1471 
1472 		if (__bch_keylist_realloc(&keylist,
1473 					  bkey_u64s(&new_nodes[i]->key)))
1474 			goto out_nocoalesce;
1475 
1476 		bch_btree_node_write(new_nodes[i], &cl);
1477 		bch_keylist_add(&keylist, &new_nodes[i]->key);
1478 	}
1479 
1480 	for (i = 0; i < nodes; i++)
1481 		mutex_unlock(&new_nodes[i]->write_lock);
1482 
1483 	closure_sync(&cl);
1484 
1485 	/* We emptied out this node */
1486 	BUG_ON(btree_bset_first(new_nodes[0])->keys);
1487 	btree_node_free(new_nodes[0]);
1488 	rw_unlock(true, new_nodes[0]);
1489 	new_nodes[0] = NULL;
1490 
1491 	for (i = 0; i < nodes; i++) {
1492 		if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1493 			goto out_nocoalesce;
1494 
1495 		make_btree_freeing_key(r[i].b, keylist.top);
1496 		bch_keylist_push(&keylist);
1497 	}
1498 
1499 	bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1500 	BUG_ON(!bch_keylist_empty(&keylist));
1501 
1502 	for (i = 0; i < nodes; i++) {
1503 		btree_node_free(r[i].b);
1504 		rw_unlock(true, r[i].b);
1505 
1506 		r[i].b = new_nodes[i];
1507 	}
1508 
1509 	memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1510 	r[nodes - 1].b = ERR_PTR(-EINTR);
1511 
1512 	trace_bcache_btree_gc_coalesce(nodes);
1513 	gc->nodes--;
1514 
1515 	bch_keylist_free(&keylist);
1516 
1517 	/* Invalidated our iterator */
1518 	return -EINTR;
1519 
1520 out_nocoalesce:
1521 	closure_sync(&cl);
1522 
1523 	while ((k = bch_keylist_pop(&keylist)))
1524 		if (!bkey_cmp(k, &ZERO_KEY))
1525 			atomic_dec(&b->c->prio_blocked);
1526 	bch_keylist_free(&keylist);
1527 
1528 	for (i = 0; i < nodes; i++)
1529 		if (!IS_ERR_OR_NULL(new_nodes[i])) {
1530 			btree_node_free(new_nodes[i]);
1531 			rw_unlock(true, new_nodes[i]);
1532 		}
1533 	return 0;
1534 }
1535 
1536 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1537 				 struct btree *replace)
1538 {
1539 	struct keylist keys;
1540 	struct btree *n;
1541 
1542 	if (btree_check_reserve(b, NULL))
1543 		return 0;
1544 
1545 	n = btree_node_alloc_replacement(replace, NULL);
1546 
1547 	/* recheck reserve after allocating replacement node */
1548 	if (btree_check_reserve(b, NULL)) {
1549 		btree_node_free(n);
1550 		rw_unlock(true, n);
1551 		return 0;
1552 	}
1553 
1554 	bch_btree_node_write_sync(n);
1555 
1556 	bch_keylist_init(&keys);
1557 	bch_keylist_add(&keys, &n->key);
1558 
1559 	make_btree_freeing_key(replace, keys.top);
1560 	bch_keylist_push(&keys);
1561 
1562 	bch_btree_insert_node(b, op, &keys, NULL, NULL);
1563 	BUG_ON(!bch_keylist_empty(&keys));
1564 
1565 	btree_node_free(replace);
1566 	rw_unlock(true, n);
1567 
1568 	/* Invalidated our iterator */
1569 	return -EINTR;
1570 }
1571 
1572 static unsigned int btree_gc_count_keys(struct btree *b)
1573 {
1574 	struct bkey *k;
1575 	struct btree_iter iter;
1576 	unsigned int ret = 0;
1577 
1578 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1579 		ret += bkey_u64s(k);
1580 
1581 	return ret;
1582 }
1583 
1584 static size_t btree_gc_min_nodes(struct cache_set *c)
1585 {
1586 	size_t min_nodes;
1587 
1588 	/*
1589 	 * Incremental GC pauses for 100ms whenever front-side
1590 	 * I/O arrives, so if GC only processed a constant
1591 	 * number (100) of nodes per pass it would take a very
1592 	 * long time on a large btree, and the front-side I/Os
1593 	 * could run out of buckets (no new bucket can be
1594 	 * allocated during GC) and be blocked again.
1595 	 * So GC should not process a constant number of nodes,
1596 	 * but scale the work with the total number of btree
1597 	 * nodes by dividing GC into a constant (100) number of
1598 	 * passes: with many btree nodes GC processes more nodes
1599 	 * per pass, otherwise fewer nodes per pass (but never
1600 	 * fewer than MIN_GC_NODES).
1601 	 */
1602 	min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
1603 	if (min_nodes < MIN_GC_NODES)
1604 		min_nodes = MIN_GC_NODES;
1605 
1606 	return min_nodes;
1607 }
1608 
1609 
1610 static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1611 			    struct closure *writes, struct gc_stat *gc)
1612 {
1613 	int ret = 0;
1614 	bool should_rewrite;
1615 	struct bkey *k;
1616 	struct btree_iter iter;
1617 	struct gc_merge_info r[GC_MERGE_NODES];
1618 	struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1619 
1620 	bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1621 
1622 	for (i = r; i < r + ARRAY_SIZE(r); i++)
1623 		i->b = ERR_PTR(-EINTR);
1624 
1625 	while (1) {
1626 		k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1627 		if (k) {
1628 			r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
1629 						  true, b);
1630 			if (IS_ERR(r->b)) {
1631 				ret = PTR_ERR(r->b);
1632 				break;
1633 			}
1634 
1635 			r->keys = btree_gc_count_keys(r->b);
1636 
1637 			ret = btree_gc_coalesce(b, op, gc, r);
1638 			if (ret)
1639 				break;
1640 		}
1641 
1642 		if (!last->b)
1643 			break;
1644 
1645 		if (!IS_ERR(last->b)) {
1646 			should_rewrite = btree_gc_mark_node(last->b, gc);
1647 			if (should_rewrite) {
1648 				ret = btree_gc_rewrite_node(b, op, last->b);
1649 				if (ret)
1650 					break;
1651 			}
1652 
1653 			if (last->b->level) {
1654 				ret = btree_gc_recurse(last->b, op, writes, gc);
1655 				if (ret)
1656 					break;
1657 			}
1658 
1659 			bkey_copy_key(&b->c->gc_done, &last->b->key);
1660 
1661 			/*
1662 			 * Must flush leaf nodes before gc ends, since replace
1663 			 * operations aren't journalled
1664 			 */
1665 			mutex_lock(&last->b->write_lock);
1666 			if (btree_node_dirty(last->b))
1667 				bch_btree_node_write(last->b, writes);
1668 			mutex_unlock(&last->b->write_lock);
1669 			rw_unlock(true, last->b);
1670 		}
1671 
1672 		memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1673 		r->b = NULL;
1674 
1675 		if (atomic_read(&b->c->search_inflight) &&
1676 		    gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
1677 			gc->nodes_pre =  gc->nodes;
1678 			ret = -EAGAIN;
1679 			break;
1680 		}
1681 
1682 		if (need_resched()) {
1683 			ret = -EAGAIN;
1684 			break;
1685 		}
1686 	}
1687 
1688 	for (i = r; i < r + ARRAY_SIZE(r); i++)
1689 		if (!IS_ERR_OR_NULL(i->b)) {
1690 			mutex_lock(&i->b->write_lock);
1691 			if (btree_node_dirty(i->b))
1692 				bch_btree_node_write(i->b, writes);
1693 			mutex_unlock(&i->b->write_lock);
1694 			rw_unlock(true, i->b);
1695 		}
1696 
1697 	return ret;
1698 }
1699 
1700 static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1701 			     struct closure *writes, struct gc_stat *gc)
1702 {
1703 	struct btree *n = NULL;
1704 	int ret = 0;
1705 	bool should_rewrite;
1706 
1707 	should_rewrite = btree_gc_mark_node(b, gc);
1708 	if (should_rewrite) {
1709 		n = btree_node_alloc_replacement(b, NULL);
1710 
1711 		if (!IS_ERR_OR_NULL(n)) {
1712 			bch_btree_node_write_sync(n);
1713 
1714 			bch_btree_set_root(n);
1715 			btree_node_free(b);
1716 			rw_unlock(true, n);
1717 
1718 			return -EINTR;
1719 		}
1720 	}
1721 
1722 	__bch_btree_mark_key(b->c, b->level + 1, &b->key);
1723 
1724 	if (b->level) {
1725 		ret = btree_gc_recurse(b, op, writes, gc);
1726 		if (ret)
1727 			return ret;
1728 	}
1729 
1730 	bkey_copy_key(&b->c->gc_done, &b->key);
1731 
1732 	return ret;
1733 }
1734 
1735 static void btree_gc_start(struct cache_set *c)
1736 {
1737 	struct cache *ca;
1738 	struct bucket *b;
1739 	unsigned int i;
1740 
1741 	if (!c->gc_mark_valid)
1742 		return;
1743 
1744 	mutex_lock(&c->bucket_lock);
1745 
1746 	c->gc_mark_valid = 0;
1747 	c->gc_done = ZERO_KEY;
1748 
1749 	for_each_cache(ca, c, i)
1750 		for_each_bucket(b, ca) {
1751 			b->last_gc = b->gen;
1752 			if (!atomic_read(&b->pin)) {
1753 				SET_GC_MARK(b, 0);
1754 				SET_GC_SECTORS_USED(b, 0);
1755 			}
1756 		}
1757 
1758 	mutex_unlock(&c->bucket_lock);
1759 }
1760 
1761 static void bch_btree_gc_finish(struct cache_set *c)
1762 {
1763 	struct bucket *b;
1764 	struct cache *ca;
1765 	unsigned int i;
1766 
1767 	mutex_lock(&c->bucket_lock);
1768 
1769 	set_gc_sectors(c);
1770 	c->gc_mark_valid = 1;
1771 	c->need_gc	= 0;
1772 
1773 	for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1774 		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1775 			    GC_MARK_METADATA);
1776 
1777 	/* don't reclaim buckets to which writeback keys point */
1778 	rcu_read_lock();
1779 	for (i = 0; i < c->devices_max_used; i++) {
1780 		struct bcache_device *d = c->devices[i];
1781 		struct cached_dev *dc;
1782 		struct keybuf_key *w, *n;
1783 		unsigned int j;
1784 
1785 		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1786 			continue;
1787 		dc = container_of(d, struct cached_dev, disk);
1788 
1789 		spin_lock(&dc->writeback_keys.lock);
1790 		rbtree_postorder_for_each_entry_safe(w, n,
1791 					&dc->writeback_keys.keys, node)
1792 			for (j = 0; j < KEY_PTRS(&w->key); j++)
1793 				SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1794 					    GC_MARK_DIRTY);
1795 		spin_unlock(&dc->writeback_keys.lock);
1796 	}
1797 	rcu_read_unlock();
1798 
1799 	c->avail_nbuckets = 0;
1800 	for_each_cache(ca, c, i) {
1801 		uint64_t *i;
1802 
1803 		ca->invalidate_needs_gc = 0;
1804 
1805 		for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
1806 			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1807 
1808 		for (i = ca->prio_buckets;
1809 		     i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
1810 			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1811 
1812 		for_each_bucket(b, ca) {
1813 			c->need_gc	= max(c->need_gc, bucket_gc_gen(b));
1814 
1815 			if (atomic_read(&b->pin))
1816 				continue;
1817 
1818 			BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1819 
1820 			if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1821 				c->avail_nbuckets++;
1822 		}
1823 	}
1824 
1825 	mutex_unlock(&c->bucket_lock);
1826 }
1827 
1828 static void bch_btree_gc(struct cache_set *c)
1829 {
1830 	int ret;
1831 	struct gc_stat stats;
1832 	struct closure writes;
1833 	struct btree_op op;
1834 	uint64_t start_time = local_clock();
1835 
1836 	trace_bcache_gc_start(c);
1837 
1838 	memset(&stats, 0, sizeof(struct gc_stat));
1839 	closure_init_stack(&writes);
1840 	bch_btree_op_init(&op, SHRT_MAX);
1841 
1842 	btree_gc_start(c);
1843 
1844 	/* if CACHE_SET_IO_DISABLE set, gc thread should stop too */
1845 	do {
1846 		ret = btree_root(gc_root, c, &op, &writes, &stats);
1847 		closure_sync(&writes);
1848 		cond_resched();
1849 
1850 		if (ret == -EAGAIN)
1851 			schedule_timeout_interruptible(msecs_to_jiffies
1852 						       (GC_SLEEP_MS));
1853 		else if (ret)
1854 			pr_warn("gc failed!");
1855 	} while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
1856 
1857 	bch_btree_gc_finish(c);
1858 	wake_up_allocators(c);
1859 
1860 	bch_time_stats_update(&c->btree_gc_time, start_time);
1861 
1862 	stats.key_bytes *= sizeof(uint64_t);
1863 	stats.data	<<= 9;
1864 	bch_update_bucket_in_use(c, &stats);
1865 	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1866 
1867 	trace_bcache_gc_end(c);
1868 
1869 	bch_moving_gc(c);
1870 }
1871 
1872 static bool gc_should_run(struct cache_set *c)
1873 {
1874 	struct cache *ca;
1875 	unsigned int i;
1876 
1877 	for_each_cache(ca, c, i)
1878 		if (ca->invalidate_needs_gc)
1879 			return true;
1880 
1881 	if (atomic_read(&c->sectors_to_gc) < 0)
1882 		return true;
1883 
1884 	return false;
1885 }
1886 
1887 static int bch_gc_thread(void *arg)
1888 {
1889 	struct cache_set *c = arg;
1890 
1891 	while (1) {
1892 		wait_event_interruptible(c->gc_wait,
1893 			   kthread_should_stop() ||
1894 			   test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
1895 			   gc_should_run(c));
1896 
1897 		if (kthread_should_stop() ||
1898 		    test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1899 			break;
1900 
1901 		set_gc_sectors(c);
1902 		bch_btree_gc(c);
1903 	}
1904 
1905 	wait_for_kthread_stop();
1906 	return 0;
1907 }
1908 
1909 int bch_gc_thread_start(struct cache_set *c)
1910 {
1911 	c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
1912 	return PTR_ERR_OR_ZERO(c->gc_thread);
1913 }
1914 
1915 /* Initial partial gc */
1916 
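/*
 * bch_btree_check() walks the whole btree once, at registration time,
 * marking every key it finds (bch_initial_mark_key) so bucket marks and
 * sector counts are valid before normal operation starts; it also counts
 * nodes into gc_stats.nodes for later incremental GC.
 */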
1917 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1918 {
1919 	int ret = 0;
1920 	struct bkey *k, *p = NULL;
1921 	struct btree_iter iter;
1922 
1923 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1924 		bch_initial_mark_key(b->c, b->level, k);
1925 
1926 	bch_initial_mark_key(b->c, b->level + 1, &b->key);
1927 
1928 	if (b->level) {
1929 		bch_btree_iter_init(&b->keys, &iter, NULL);
1930 
1931 		do {
1932 			k = bch_btree_iter_next_filter(&iter, &b->keys,
1933 						       bch_ptr_bad);
1934 			if (k) {
1935 				btree_node_prefetch(b, k);
1936 				/*
1937 				 * initialize c->gc_stats.nodes
1938 				 * for incremental GC
1939 				 */
1940 				b->c->gc_stats.nodes++;
1941 			}
1942 
1943 			if (p)
1944 				ret = btree(check_recurse, p, b, op);
1945 
1946 			p = k;
1947 		} while (p && !ret);
1948 	}
1949 
1950 	return ret;
1951 }
1952 
1953 int bch_btree_check(struct cache_set *c)
1954 {
1955 	struct btree_op op;
1956 
1957 	bch_btree_op_init(&op, SHRT_MAX);
1958 
1959 	return btree_root(check_recurse, c, &op);
1960 }
1961 
1962 void bch_initial_gc_finish(struct cache_set *c)
1963 {
1964 	struct cache *ca;
1965 	struct bucket *b;
1966 	unsigned int i;
1967 
1968 	bch_btree_gc_finish(c);
1969 
1970 	mutex_lock(&c->bucket_lock);
1971 
1972 	/*
1973 	 * We need to put some unused buckets directly on the prio freelist in
1974 	 * order to get the allocator thread started - it needs freed buckets in
1975 	 * order to rewrite the prios and gens, and it needs to rewrite prios
1976 	 * and gens in order to free buckets.
1977 	 *
1978 	 * This is only safe for buckets that have no live data in them, which
1979 	 * there should always be some of.
1980 	 */
1981 	for_each_cache(ca, c, i) {
1982 		for_each_bucket(b, ca) {
1983 			if (fifo_full(&ca->free[RESERVE_PRIO]) &&
1984 			    fifo_full(&ca->free[RESERVE_BTREE]))
1985 				break;
1986 
1987 			if (bch_can_invalidate_bucket(ca, b) &&
1988 			    !GC_MARK(b)) {
1989 				__bch_invalidate_one_bucket(ca, b);
1990 				if (!fifo_push(&ca->free[RESERVE_PRIO],
1991 				   b - ca->buckets))
1992 					fifo_push(&ca->free[RESERVE_BTREE],
1993 						  b - ca->buckets);
1994 			}
1995 		}
1996 	}
1997 
1998 	mutex_unlock(&c->bucket_lock);
1999 }
2000 
2001 /* Btree insertion */
2002 
2003 static bool btree_insert_key(struct btree *b, struct bkey *k,
2004 			     struct bkey *replace_key)
2005 {
2006 	unsigned int status;
2007 
2008 	BUG_ON(bkey_cmp(k, &b->key) > 0);
2009 
2010 	status = bch_btree_insert_key(&b->keys, k, replace_key);
2011 	if (status != BTREE_INSERT_STATUS_NO_INSERT) {
2012 		bch_check_keys(&b->keys, "%u for %s", status,
2013 			       replace_key ? "replace" : "insert");
2014 
2015 		trace_bcache_btree_insert_key(b, k, replace_key != NULL,
2016 					      status);
2017 		return true;
2018 	} else
2019 		return false;
2020 }
2021 
2022 static size_t insert_u64s_remaining(struct btree *b)
2023 {
2024 	long ret = bch_btree_keys_u64s_remaining(&b->keys);
2025 
2026 	/*
2027 	 * Might land in the middle of an existing extent and have to split it
2028 	 */
2029 	if (b->keys.ops->is_extents)
2030 		ret -= KEY_MAX_U64S;
2031 
2032 	return max(ret, 0L);
2033 }
2034 
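/*
 * Insert as many keys from @insert_keys as fit in this node.  A key that
 * lies entirely within the node's range is inserted whole; a key that
 * straddles the node's end key is split, the front half inserted here and
 * the remainder left on the keylist for the next node.  Returns whether
 * anything was inserted; if nothing was, the op is flagged as a collision.
 */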
2035 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
2036 				  struct keylist *insert_keys,
2037 				  struct bkey *replace_key)
2038 {
2039 	bool ret = false;
2040 	int oldsize = bch_count_data(&b->keys);
2041 
2042 	while (!bch_keylist_empty(insert_keys)) {
2043 		struct bkey *k = insert_keys->keys;
2044 
2045 		if (bkey_u64s(k) > insert_u64s_remaining(b))
2046 			break;
2047 
2048 		if (bkey_cmp(k, &b->key) <= 0) {
2049 			if (!b->level)
2050 				bkey_put(b->c, k);
2051 
2052 			ret |= btree_insert_key(b, k, replace_key);
2053 			bch_keylist_pop_front(insert_keys);
2054 		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
2055 			BKEY_PADDED(key) temp;
2056 			bkey_copy(&temp.key, insert_keys->keys);
2057 
2058 			bch_cut_back(&b->key, &temp.key);
2059 			bch_cut_front(&b->key, insert_keys->keys);
2060 
2061 			ret |= btree_insert_key(b, &temp.key, replace_key);
2062 			break;
2063 		} else {
2064 			break;
2065 		}
2066 	}
2067 
2068 	if (!ret)
2069 		op->insert_collision = true;
2070 
2071 	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
2072 
2073 	BUG_ON(bch_count_data(&b->keys) < oldsize);
2074 	return ret;
2075 }
2076 
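/*
 * Rewrite a node that no longer has room for an insert.  The node is
 * copied into a fresh node n1; if the result would still be more than
 * ~4/5 of a node's worth of blocks it is split roughly 3/5 - 2/5 into n1
 * and n2, and if the node being split is the root a new root n3 one level
 * up is allocated to point at the two halves.  The keys pointing at the
 * new node(s) are then inserted into the parent (or the new root), and
 * the old node is freed.
 */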
2077 static int btree_split(struct btree *b, struct btree_op *op,
2078 		       struct keylist *insert_keys,
2079 		       struct bkey *replace_key)
2080 {
2081 	bool split;
2082 	struct btree *n1, *n2 = NULL, *n3 = NULL;
2083 	uint64_t start_time = local_clock();
2084 	struct closure cl;
2085 	struct keylist parent_keys;
2086 
2087 	closure_init_stack(&cl);
2088 	bch_keylist_init(&parent_keys);
2089 
2090 	if (btree_check_reserve(b, op)) {
2091 		if (!b->level)
2092 			return -EINTR;
2093 		else
2094 			WARN(1, "insufficient reserve for split\n");
2095 	}
2096 
2097 	n1 = btree_node_alloc_replacement(b, op);
2098 	if (IS_ERR(n1))
2099 		goto err;
2100 
2101 	split = set_blocks(btree_bset_first(n1),
2102 			   block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
2103 
2104 	if (split) {
2105 		unsigned int keys = 0;
2106 
2107 		trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
2108 
2109 		n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
2110 		if (IS_ERR(n2))
2111 			goto err_free1;
2112 
2113 		if (!b->parent) {
2114 			n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
2115 			if (IS_ERR(n3))
2116 				goto err_free2;
2117 		}
2118 
2119 		mutex_lock(&n1->write_lock);
2120 		mutex_lock(&n2->write_lock);
2121 
2122 		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2123 
2124 		/*
2125 		 * Has to be a linear search because we don't have an auxiliary
2126 		 * search tree yet
2127 		 */
2128 
2129 		while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2130 			keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2131 							keys));
2132 
2133 		bkey_copy_key(&n1->key,
2134 			      bset_bkey_idx(btree_bset_first(n1), keys));
2135 		keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2136 
2137 		btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2138 		btree_bset_first(n1)->keys = keys;
2139 
2140 		memcpy(btree_bset_first(n2)->start,
2141 		       bset_bkey_last(btree_bset_first(n1)),
2142 		       btree_bset_first(n2)->keys * sizeof(uint64_t));
2143 
2144 		bkey_copy_key(&n2->key, &b->key);
2145 
2146 		bch_keylist_add(&parent_keys, &n2->key);
2147 		bch_btree_node_write(n2, &cl);
2148 		mutex_unlock(&n2->write_lock);
2149 		rw_unlock(true, n2);
2150 	} else {
2151 		trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2152 
2153 		mutex_lock(&n1->write_lock);
2154 		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2155 	}
2156 
2157 	bch_keylist_add(&parent_keys, &n1->key);
2158 	bch_btree_node_write(n1, &cl);
2159 	mutex_unlock(&n1->write_lock);
2160 
2161 	if (n3) {
2162 		/* Depth increases, make a new root */
2163 		mutex_lock(&n3->write_lock);
2164 		bkey_copy_key(&n3->key, &MAX_KEY);
2165 		bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2166 		bch_btree_node_write(n3, &cl);
2167 		mutex_unlock(&n3->write_lock);
2168 
2169 		closure_sync(&cl);
2170 		bch_btree_set_root(n3);
2171 		rw_unlock(true, n3);
2172 	} else if (!b->parent) {
2173 		/* Root filled up but didn't need to be split */
2174 		closure_sync(&cl);
2175 		bch_btree_set_root(n1);
2176 	} else {
2177 		/* Split a non root node */
2178 		closure_sync(&cl);
2179 		make_btree_freeing_key(b, parent_keys.top);
2180 		bch_keylist_push(&parent_keys);
2181 
2182 		bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2183 		BUG_ON(!bch_keylist_empty(&parent_keys));
2184 	}
2185 
2186 	btree_node_free(b);
2187 	rw_unlock(true, n1);
2188 
2189 	bch_time_stats_update(&b->c->btree_split_time, start_time);
2190 
2191 	return 0;
2192 err_free2:
2193 	bkey_put(b->c, &n2->key);
2194 	btree_node_free(n2);
2195 	rw_unlock(true, n2);
2196 err_free1:
2197 	bkey_put(b->c, &n1->key);
2198 	btree_node_free(n1);
2199 	rw_unlock(true, n1);
2200 err:
2201 	WARN(1, "bcache: btree split failed (level %u)", b->level);
2202 
2203 	if (n3 == ERR_PTR(-EAGAIN) ||
2204 	    n2 == ERR_PTR(-EAGAIN) ||
2205 	    n1 == ERR_PTR(-EAGAIN))
2206 		return -EAGAIN;
2207 
2208 	return -ENOMEM;
2209 }
2210 
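/*
 * Insert into a single node, splitting it if necessary.  Returns 0 on
 * success, -EAGAIN if we cannot block on a split right now (bios are
 * queued on current->bio_list), or -EINTR if the caller must retry the
 * traversal - either to take write locks higher up the tree or because a
 * split invalidated its iterators.
 */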
2211 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2212 				 struct keylist *insert_keys,
2213 				 atomic_t *journal_ref,
2214 				 struct bkey *replace_key)
2215 {
2216 	struct closure cl;
2217 
2218 	BUG_ON(b->level && replace_key);
2219 
2220 	closure_init_stack(&cl);
2221 
2222 	mutex_lock(&b->write_lock);
2223 
2224 	if (write_block(b) != btree_bset_last(b) &&
2225 	    b->keys.last_set_unwritten)
2226 		bch_btree_init_next(b); /* just wrote a set */
2227 
2228 	if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
2229 		mutex_unlock(&b->write_lock);
2230 		goto split;
2231 	}
2232 
2233 	BUG_ON(write_block(b) != btree_bset_last(b));
2234 
2235 	if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2236 		if (!b->level)
2237 			bch_btree_leaf_dirty(b, journal_ref);
2238 		else
2239 			bch_btree_node_write(b, &cl);
2240 	}
2241 
2242 	mutex_unlock(&b->write_lock);
2243 
2244 	/* wait for btree node write if necessary, after unlock */
2245 	closure_sync(&cl);
2246 
2247 	return 0;
2248 split:
2249 	if (current->bio_list) {
2250 		op->lock = b->c->root->level + 1;
2251 		return -EAGAIN;
2252 	} else if (op->lock <= b->c->root->level) {
2253 		op->lock = b->c->root->level + 1;
2254 		return -EINTR;
2255 	} else {
2256 		/* Invalidated all iterators */
2257 		int ret = btree_split(b, op, insert_keys, replace_key);
2258 
2259 		if (bch_keylist_empty(insert_keys))
2260 			return 0;
2261 		else if (!ret)
2262 			return -EINTR;
2263 		return ret;
2264 	}
2265 }
2266 
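/*
 * Insert a "check key": a key with a single randomized pointer on the
 * special PTR_CHECK_DEV device.  If the caller only held a read lock
 * (op->lock == -1) the node is relocked for writing, and we bail out if
 * the node was modified or replaced in the meantime.
 */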
2267 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2268 			       struct bkey *check_key)
2269 {
2270 	int ret = -EINTR;
2271 	uint64_t btree_ptr = b->key.ptr[0];
2272 	unsigned long seq = b->seq;
2273 	struct keylist insert;
2274 	bool upgrade = op->lock == -1;
2275 
2276 	bch_keylist_init(&insert);
2277 
2278 	if (upgrade) {
2279 		rw_unlock(false, b);
2280 		rw_lock(true, b, b->level);
2281 
2282 		if (b->key.ptr[0] != btree_ptr ||
2283 		    b->seq != seq + 1) {
2284 			op->lock = b->level;
2285 			goto out;
2286 		}
2287 	}
2288 
2289 	SET_KEY_PTRS(check_key, 1);
2290 	get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2291 
2292 	SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2293 
2294 	bch_keylist_add(&insert, check_key);
2295 
2296 	ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2297 
2298 	BUG_ON(!ret && !bch_keylist_empty(&insert));
2299 out:
2300 	if (upgrade)
2301 		downgrade_write(&b->lock);
2302 	return ret;
2303 }
2304 
2305 struct btree_insert_op {
2306 	struct btree_op	op;
2307 	struct keylist	*keys;
2308 	atomic_t	*journal_ref;
2309 	struct bkey	*replace_key;
2310 };
2311 
2312 static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2313 {
2314 	struct btree_insert_op *op = container_of(b_op,
2315 					struct btree_insert_op, op);
2316 
2317 	int ret = bch_btree_insert_node(b, &op->op, op->keys,
2318 					op->journal_ref, op->replace_key);
2319 	if (ret && !bch_keylist_empty(op->keys))
2320 		return ret;
2321 	else
2322 		return MAP_DONE;
2323 }
2324 
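/*
 * Top-level insert: map btree_insert_fn over the leaf nodes covering
 * @keys, retrying until the keylist is drained.  On error the remaining
 * keys are dropped; -ESRCH is returned when an insert collision was
 * detected, i.e. nothing could be inserted (typically a replace_key that
 * no longer matched).
 */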
2325 int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2326 		     atomic_t *journal_ref, struct bkey *replace_key)
2327 {
2328 	struct btree_insert_op op;
2329 	int ret = 0;
2330 
2331 	BUG_ON(current->bio_list);
2332 	BUG_ON(bch_keylist_empty(keys));
2333 
2334 	bch_btree_op_init(&op.op, 0);
2335 	op.keys		= keys;
2336 	op.journal_ref	= journal_ref;
2337 	op.replace_key	= replace_key;
2338 
2339 	while (!ret && !bch_keylist_empty(keys)) {
2340 		op.op.lock = 0;
2341 		ret = bch_btree_map_leaf_nodes(&op.op, c,
2342 					       &START_KEY(keys->keys),
2343 					       btree_insert_fn);
2344 	}
2345 
2346 	if (ret) {
2347 		struct bkey *k;
2348 
2349 		pr_err("error %i", ret);
2350 
2351 		while ((k = bch_keylist_pop(keys)))
2352 			bkey_put(c, k);
2353 	} else if (op.op.insert_collision)
2354 		ret = -ESRCH;
2355 
2356 	return ret;
2357 }
2358 
2359 void bch_btree_set_root(struct btree *b)
2360 {
2361 	unsigned int i;
2362 	struct closure cl;
2363 
2364 	closure_init_stack(&cl);
2365 
2366 	trace_bcache_btree_set_root(b);
2367 
2368 	BUG_ON(!b->written);
2369 
2370 	for (i = 0; i < KEY_PTRS(&b->key); i++)
2371 		BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2372 
2373 	mutex_lock(&b->c->bucket_lock);
2374 	list_del_init(&b->list);
2375 	mutex_unlock(&b->c->bucket_lock);
2376 
2377 	b->c->root = b;
2378 
2379 	bch_journal_meta(b->c, &cl);
2380 	closure_sync(&cl);
2381 }
2382 
2383 /* Map across nodes or keys */
2384 
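/*
 * The map functions walk the btree from @from, calling @fn on each node
 * (or key) encountered.  @fn returns MAP_CONTINUE to keep going; any
 * other return value (MAP_DONE or an error) stops the walk and is
 * propagated back to the caller.
 */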
2385 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2386 				       struct bkey *from,
2387 				       btree_map_nodes_fn *fn, int flags)
2388 {
2389 	int ret = MAP_CONTINUE;
2390 
2391 	if (b->level) {
2392 		struct bkey *k;
2393 		struct btree_iter iter;
2394 
2395 		bch_btree_iter_init(&b->keys, &iter, from);
2396 
2397 		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
2398 						       bch_ptr_bad))) {
2399 			ret = btree(map_nodes_recurse, k, b,
2400 				    op, from, fn, flags);
2401 			from = NULL;
2402 
2403 			if (ret != MAP_CONTINUE)
2404 				return ret;
2405 		}
2406 	}
2407 
2408 	if (!b->level || flags == MAP_ALL_NODES)
2409 		ret = fn(op, b);
2410 
2411 	return ret;
2412 }
2413 
2414 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2415 			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
2416 {
2417 	return btree_root(map_nodes_recurse, c, op, from, fn, flags);
2418 }
2419 
2420 static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2421 				      struct bkey *from, btree_map_keys_fn *fn,
2422 				      int flags)
2423 {
2424 	int ret = MAP_CONTINUE;
2425 	struct bkey *k;
2426 	struct btree_iter iter;
2427 
2428 	bch_btree_iter_init(&b->keys, &iter, from);
2429 
2430 	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
2431 		ret = !b->level
2432 			? fn(op, b, k)
2433 			: btree(map_keys_recurse, k, b, op, from, fn, flags);
2434 		from = NULL;
2435 
2436 		if (ret != MAP_CONTINUE)
2437 			return ret;
2438 	}
2439 
2440 	if (!b->level && (flags & MAP_END_KEY))
2441 		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2442 				     KEY_OFFSET(&b->key), 0));
2443 
2444 	return ret;
2445 }
2446 
2447 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2448 		       struct bkey *from, btree_map_keys_fn *fn, int flags)
2449 {
2450 	return btree_root(map_keys_recurse, c, op, from, fn, flags);
2451 }
2452 
2453 /* Keybuf code */
2454 
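/*
 * A keybuf is a fixed-size buffer of keys - an rbtree backed by a small
 * freelist - that writeback and moving GC fill with bch_refill_keybuf()
 * and drain with bch_keybuf_next().  Keys that overlap compare equal, so
 * a lookup finds any buffered key overlapping the one searched for.
 */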
2455 static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2456 {
2457 	/* Overlapping keys compare equal */
2458 	if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2459 		return -1;
2460 	if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2461 		return 1;
2462 	return 0;
2463 }
2464 
2465 static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2466 					    struct keybuf_key *r)
2467 {
2468 	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2469 }
2470 
2471 struct refill {
2472 	struct btree_op	op;
2473 	unsigned int	nr_found;
2474 	struct keybuf	*buf;
2475 	struct bkey	*end;
2476 	keybuf_pred_fn	*pred;
2477 };
2478 
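/*
 * Map callback for bch_refill_keybuf(): buffer each key matching @pred
 * until either the end key is reached or the keybuf's freelist runs out,
 * recording in last_scanned where to resume on the next refill.
 */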
2479 static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2480 			    struct bkey *k)
2481 {
2482 	struct refill *refill = container_of(op, struct refill, op);
2483 	struct keybuf *buf = refill->buf;
2484 	int ret = MAP_CONTINUE;
2485 
2486 	if (bkey_cmp(k, refill->end) > 0) {
2487 		ret = MAP_DONE;
2488 		goto out;
2489 	}
2490 
2491 	if (!KEY_SIZE(k)) /* end key */
2492 		goto out;
2493 
2494 	if (refill->pred(buf, k)) {
2495 		struct keybuf_key *w;
2496 
2497 		spin_lock(&buf->lock);
2498 
2499 		w = array_alloc(&buf->freelist);
2500 		if (!w) {
2501 			spin_unlock(&buf->lock);
2502 			return MAP_DONE;
2503 		}
2504 
2505 		w->private = NULL;
2506 		bkey_copy(&w->key, k);
2507 
2508 		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2509 			array_free(&buf->freelist, w);
2510 		else
2511 			refill->nr_found++;
2512 
2513 		if (array_freelist_empty(&buf->freelist))
2514 			ret = MAP_DONE;
2515 
2516 		spin_unlock(&buf->lock);
2517 	}
2518 out:
2519 	buf->last_scanned = *k;
2520 	return ret;
2521 }
2522 
2523 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
2524 		       struct bkey *end, keybuf_pred_fn *pred)
2525 {
2526 	struct bkey start = buf->last_scanned;
2527 	struct refill refill;
2528 
2529 	cond_resched();
2530 
2531 	bch_btree_op_init(&refill.op, -1);
2532 	refill.nr_found	= 0;
2533 	refill.buf	= buf;
2534 	refill.end	= end;
2535 	refill.pred	= pred;
2536 
2537 	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2538 			   refill_keybuf_fn, MAP_END_KEY);
2539 
2540 	trace_bcache_keyscan(refill.nr_found,
2541 			     KEY_INODE(&start), KEY_OFFSET(&start),
2542 			     KEY_INODE(&buf->last_scanned),
2543 			     KEY_OFFSET(&buf->last_scanned));
2544 
2545 	spin_lock(&buf->lock);
2546 
2547 	if (!RB_EMPTY_ROOT(&buf->keys)) {
2548 		struct keybuf_key *w;
2549 
2550 		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2551 		buf->start	= START_KEY(&w->key);
2552 
2553 		w = RB_LAST(&buf->keys, struct keybuf_key, node);
2554 		buf->end	= w->key;
2555 	} else {
2556 		buf->start	= MAX_KEY;
2557 		buf->end	= MAX_KEY;
2558 	}
2559 
2560 	spin_unlock(&buf->lock);
2561 }
2562 
2563 static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2564 {
2565 	rb_erase(&w->node, &buf->keys);
2566 	array_free(&buf->freelist, w);
2567 }
2568 
2569 void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2570 {
2571 	spin_lock(&buf->lock);
2572 	__bch_keybuf_del(buf, w);
2573 	spin_unlock(&buf->lock);
2574 }
2575 
2576 bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2577 				  struct bkey *end)
2578 {
2579 	bool ret = false;
2580 	struct keybuf_key *p, *w, s;
2581 
2582 	s.key = *start;
2583 
2584 	if (bkey_cmp(end, &buf->start) <= 0 ||
2585 	    bkey_cmp(start, &buf->end) >= 0)
2586 		return false;
2587 
2588 	spin_lock(&buf->lock);
2589 	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2590 
2591 	while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2592 		p = w;
2593 		w = RB_NEXT(w, node);
2594 
2595 		if (p->private)
2596 			ret = true;
2597 		else
2598 			__bch_keybuf_del(buf, p);
2599 	}
2600 
2601 	spin_unlock(&buf->lock);
2602 	return ret;
2603 }
2604 
2605 struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2606 {
2607 	struct keybuf_key *w;
2608 
2609 	spin_lock(&buf->lock);
2610 
2611 	w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2612 
2613 	while (w && w->private)
2614 		w = RB_NEXT(w, node);
2615 
2616 	if (w)
2617 		w->private = ERR_PTR(-EINTR);
2618 
2619 	spin_unlock(&buf->lock);
2620 	return w;
2621 }
2622 
2623 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2624 					  struct keybuf *buf,
2625 					  struct bkey *end,
2626 					  keybuf_pred_fn *pred)
2627 {
2628 	struct keybuf_key *ret;
2629 
2630 	while (1) {
2631 		ret = bch_keybuf_next(buf);
2632 		if (ret)
2633 			break;
2634 
2635 		if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2636 			pr_debug("scan finished");
2637 			break;
2638 		}
2639 
2640 		bch_refill_keybuf(c, buf, end, pred);
2641 	}
2642 
2643 	return ret;
2644 }
2645 
2646 void bch_keybuf_init(struct keybuf *buf)
2647 {
2648 	buf->last_scanned	= MAX_KEY;
2649 	buf->keys		= RB_ROOT;
2650 
2651 	spin_lock_init(&buf->lock);
2652 	array_allocator_init(&buf->freelist);
2653 }
2654