1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4  *
5  * Uses a block device as cache for other block devices; optimized for SSDs.
6  * All allocation is done in buckets, which should match the erase block size
7  * of the device.
8  *
9  * Buckets containing cached data are kept on a heap sorted by priority;
10  * bucket priority is increased on cache hit, and periodically all the buckets
11  * on the heap have their priority scaled down. This currently is just used as
12  * an LRU but in the future should allow for more intelligent heuristics.
13  *
14  * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
15  * counter. Garbage collection is used to remove stale pointers.
16  *
17  * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
18  * as keys are inserted we only sort the pages that have not yet been written.
19  * When garbage collection is run, we resort the entire node.
20  *
21  * All configuration is done via sysfs; see Documentation/bcache.txt.
22  */
23 
24 #include "bcache.h"
25 #include "btree.h"
26 #include "debug.h"
27 #include "extents.h"
28 
29 #include <linux/slab.h>
30 #include <linux/bitops.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/prefetch.h>
34 #include <linux/random.h>
35 #include <linux/rcupdate.h>
36 #include <linux/sched/clock.h>
37 #include <linux/rculist.h>
38 
39 #include <trace/events/bcache.h>
40 
41 /*
42  * Todo:
43  * register_bcache: Return errors out to userspace correctly
44  *
45  * Writeback: don't undirty key until after a cache flush
46  *
47  * Create an iterator for key pointers
48  *
49  * On btree write error, mark bucket such that it won't be freed from the cache
50  *
51  * Journalling:
52  *   Check for bad keys in replay
53  *   Propagate barriers
54  *   Refcount journal entries in journal_replay
55  *
56  * Garbage collection:
57  *   Finish incremental gc
58  *   Gc should free old UUIDs, data for invalid UUIDs
59  *
60  * Provide a way to list backing device UUIDs we have data cached for, and
61  * probably how long it's been since we've seen them, and a way to invalidate
62  * dirty data for devices that will never be attached again
63  *
64  * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
65  * that based on that and how much dirty data we have we can keep writeback
66  * from being starved
67  *
68  * Add a tracepoint or somesuch to watch for writeback starvation
69  *
70  * When btree depth > 1 and splitting an interior node, we have to make sure
71  * alloc_bucket() cannot fail. This should be true but is not completely
72  * obvious.
73  *
74  * Plugging?
75  *
76  * If data write is less than hard sector size of ssd, round up offset in open
77  * bucket to the next whole sector
78  *
79  * Superblock needs to be fleshed out for multiple cache devices
80  *
81  * Add a sysfs tunable for the number of writeback IOs in flight
82  *
83  * Add a sysfs tunable for the number of open data buckets
84  *
85  * IO tracking: Can we track when one process is doing io on behalf of another?
86  * IO tracking: Don't use just an average, weigh more recent stuff higher
87  *
88  * Test module load/unload
89  */
90 
91 #define MAX_NEED_GC		64
92 #define MAX_SAVE_PRIO		72
93 
94 #define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))
95 
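/*
 * PTR_HASH() folds a key's first pointer (bucket number plus generation)
 * into a single value used to look btree nodes up in the in-memory hash
 * table (see mca_hash()/mca_find() below); insert_lock() is true when the
 * node at @b's level has to be write locked for the insert described by @s.
 */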
96 #define PTR_HASH(c, k)							\
97 	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
98 
99 #define insert_lock(s, b)	((b)->level <= (s)->lock)
100 
101 /*
102  * These macros are for recursing down the btree - they handle the details of
103  * locking and looking up nodes in the cache for you. They're best treated as
104  * mere syntax when reading code that uses them.
105  *
106  * op->lock determines whether we take a read or a write lock at a given depth.
107  * If you've got a read lock and find that you need a write lock (i.e. you're
108  * going to have to split), set op->lock and return -EINTR; btree_root() will
109  * call you again and you'll have the correct lock.
110  */
111 
112 /**
113  * btree - recurse down the btree on a specified key
114  * @fn:		function to call, which will be passed the child node
115  * @key:	key to recurse on
116  * @b:		parent btree node
117  * @op:		pointer to struct btree_op
118  */
119 #define btree(fn, key, b, op, ...)					\
120 ({									\
121 	int _r, l = (b)->level - 1;					\
122 	bool _w = l <= (op)->lock;					\
123 	struct btree *_child = bch_btree_node_get((b)->c, op, key, l,	\
124 						  _w, b);		\
125 	if (!IS_ERR(_child)) {						\
126 		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\
127 		rw_unlock(_w, _child);					\
128 	} else								\
129 		_r = PTR_ERR(_child);					\
130 	_r;								\
131 })
132 
133 /**
134  * btree_root - call a function on the root of the btree
135  * @fn:		function to call, which will be passed the root node
136  * @c:		cache set
137  * @op:		pointer to struct btree_op
138  */
139 #define btree_root(fn, c, op, ...)					\
140 ({									\
141 	int _r = -EINTR;						\
142 	do {								\
143 		struct btree *_b = (c)->root;				\
144 		bool _w = insert_lock(op, _b);				\
145 		rw_lock(_w, _b, _b->level);				\
146 		if (_b == (c)->root &&					\
147 		    _w == insert_lock(op, _b)) {			\
148 			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
149 		}							\
150 		rw_unlock(_w, _b);					\
151 		bch_cannibalize_unlock(c);				\
152 		if (_r == -EINTR)					\
153 			schedule();					\
154 	} while (_r == -EINTR);						\
155 									\
156 	finish_wait(&(c)->btree_cache_wait, &(op)->wait);		\
157 	_r;								\
158 })
159 
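/*
 * Returns the location in the node's buffer where the next bset will be
 * written - i.e. just past the blocks that have already been written out.
 */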
160 static inline struct bset *write_block(struct btree *b)
161 {
162 	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
163 }
164 
165 static void bch_btree_init_next(struct btree *b)
166 {
167 	/* If not a leaf node, always sort */
168 	if (b->level && b->keys.nsets)
169 		bch_btree_sort(&b->keys, &b->c->sort);
170 	else
171 		bch_btree_sort_lazy(&b->keys, &b->c->sort);
172 
173 	if (b->written < btree_blocks(b))
174 		bch_bset_init_next(&b->keys, write_block(b),
175 				   bset_magic(&b->c->sb));
176 
177 }
178 
179 /* Btree key manipulation */
180 
181 void bkey_put(struct cache_set *c, struct bkey *k)
182 {
183 	unsigned i;
184 
185 	for (i = 0; i < KEY_PTRS(k); i++)
186 		if (ptr_available(c, k, i))
187 			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
188 }
189 
190 /* Btree IO */
191 
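/*
 * Checksum used for bsets written with BCACHE_BSET_VERSION: seeded with
 * the node's first pointer and computed over everything in the bset after
 * the 8 byte csum field itself.
 */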
192 static uint64_t btree_csum_set(struct btree *b, struct bset *i)
193 {
194 	uint64_t crc = b->key.ptr[0];
195 	void *data = (void *) i + 8, *end = bset_bkey_last(i);
196 
197 	crc = bch_crc64_update(crc, data, end - data);
198 	return crc ^ 0xffffffffffffffffULL;
199 }
200 
201 void bch_btree_node_read_done(struct btree *b)
202 {
203 	const char *err = "bad btree header";
204 	struct bset *i = btree_bset_first(b);
205 	struct btree_iter *iter;
206 
207 	iter = mempool_alloc(b->c->fill_iter, GFP_NOIO);
208 	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
209 	iter->used = 0;
210 
211 #ifdef CONFIG_BCACHE_DEBUG
212 	iter->b = &b->keys;
213 #endif
214 
215 	if (!i->seq)
216 		goto err;
217 
218 	for (;
219 	     b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
220 	     i = write_block(b)) {
221 		err = "unsupported bset version";
222 		if (i->version > BCACHE_BSET_VERSION)
223 			goto err;
224 
225 		err = "bad btree header";
226 		if (b->written + set_blocks(i, block_bytes(b->c)) >
227 		    btree_blocks(b))
228 			goto err;
229 
230 		err = "bad magic";
231 		if (i->magic != bset_magic(&b->c->sb))
232 			goto err;
233 
234 		err = "bad checksum";
235 		switch (i->version) {
236 		case 0:
237 			if (i->csum != csum_set(i))
238 				goto err;
239 			break;
240 		case BCACHE_BSET_VERSION:
241 			if (i->csum != btree_csum_set(b, i))
242 				goto err;
243 			break;
244 		}
245 
246 		err = "empty set";
247 		if (i != b->keys.set[0].data && !i->keys)
248 			goto err;
249 
250 		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
251 
252 		b->written += set_blocks(i, block_bytes(b->c));
253 	}
254 
255 	err = "corrupted btree";
256 	for (i = write_block(b);
257 	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
258 	     i = ((void *) i) + block_bytes(b->c))
259 		if (i->seq == b->keys.set[0].data->seq)
260 			goto err;
261 
262 	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
263 
264 	i = b->keys.set[0].data;
265 	err = "short btree key";
266 	if (b->keys.set[0].size &&
267 	    bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
268 		goto err;
269 
270 	if (b->written < btree_blocks(b))
271 		bch_bset_init_next(&b->keys, write_block(b),
272 				   bset_magic(&b->c->sb));
273 out:
274 	mempool_free(iter, b->c->fill_iter);
275 	return;
276 err:
277 	set_btree_node_io_error(b);
278 	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
279 			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
280 			    bset_block_offset(b, i), i->keys);
281 	goto out;
282 }
283 
284 static void btree_node_read_endio(struct bio *bio)
285 {
286 	struct closure *cl = bio->bi_private;
287 	closure_put(cl);
288 }
289 
290 static void bch_btree_node_read(struct btree *b)
291 {
292 	uint64_t start_time = local_clock();
293 	struct closure cl;
294 	struct bio *bio;
295 
296 	trace_bcache_btree_read(b);
297 
298 	closure_init_stack(&cl);
299 
300 	bio = bch_bbio_alloc(b->c);
301 	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
302 	bio->bi_end_io	= btree_node_read_endio;
303 	bio->bi_private	= &cl;
304 	bio->bi_opf = REQ_OP_READ | REQ_META;
305 
306 	bch_bio_map(bio, b->keys.set[0].data);
307 
308 	bch_submit_bbio(bio, b->c, &b->key, 0);
309 	closure_sync(&cl);
310 
311 	if (bio->bi_status)
312 		set_btree_node_io_error(b);
313 
314 	bch_bbio_free(bio, b->c);
315 
316 	if (btree_node_io_error(b))
317 		goto err;
318 
319 	bch_btree_node_read_done(b);
320 	bch_time_stats_update(&b->c->btree_read_time, start_time);
321 
322 	return;
323 err:
324 	bch_cache_set_error(b->c, "io error reading bucket %zu",
325 			    PTR_BUCKET_NR(b->c, &b->key, 0));
326 }
327 
328 static void btree_complete_write(struct btree *b, struct btree_write *w)
329 {
330 	if (w->prio_blocked &&
331 	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
332 		wake_up_allocators(b->c);
333 
334 	if (w->journal) {
335 		atomic_dec_bug(w->journal);
336 		__closure_wake_up(&b->c->journal.wait);
337 	}
338 
339 	w->prio_blocked	= 0;
340 	w->journal	= NULL;
341 }
342 
343 static void btree_node_write_unlock(struct closure *cl)
344 {
345 	struct btree *b = container_of(cl, struct btree, io);
346 
347 	up(&b->io_mutex);
348 }
349 
350 static void __btree_node_write_done(struct closure *cl)
351 {
352 	struct btree *b = container_of(cl, struct btree, io);
353 	struct btree_write *w = btree_prev_write(b);
354 
355 	bch_bbio_free(b->bio, b->c);
356 	b->bio = NULL;
357 	btree_complete_write(b, w);
358 
359 	if (btree_node_dirty(b))
360 		schedule_delayed_work(&b->work, 30 * HZ);
361 
362 	closure_return_with_destructor(cl, btree_node_write_unlock);
363 }
364 
365 static void btree_node_write_done(struct closure *cl)
366 {
367 	struct btree *b = container_of(cl, struct btree, io);
368 
369 	bio_free_pages(b->bio);
370 	__btree_node_write_done(cl);
371 }
372 
373 static void btree_node_write_endio(struct bio *bio)
374 {
375 	struct closure *cl = bio->bi_private;
376 	struct btree *b = container_of(cl, struct btree, io);
377 
378 	if (bio->bi_status)
379 		set_btree_node_io_error(b);
380 
381 	bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
382 	closure_put(cl);
383 }
384 
385 static void do_btree_node_write(struct btree *b)
386 {
387 	struct closure *cl = &b->io;
388 	struct bset *i = btree_bset_last(b);
389 	BKEY_PADDED(key) k;
390 
391 	i->version	= BCACHE_BSET_VERSION;
392 	i->csum		= btree_csum_set(b, i);
393 
394 	BUG_ON(b->bio);
395 	b->bio = bch_bbio_alloc(b->c);
396 
397 	b->bio->bi_end_io	= btree_node_write_endio;
398 	b->bio->bi_private	= cl;
399 	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c));
400 	b->bio->bi_opf		= REQ_OP_WRITE | REQ_META | REQ_FUA;
401 	bch_bio_map(b->bio, i);
402 
403 	/*
404 	 * If we're appending to a leaf node, we don't technically need FUA -
405 	 * this write just needs to be persisted before the next journal write,
406 	 * which will be marked FLUSH|FUA.
407 	 *
408 	 * Similarly if we're writing a new btree root - the pointer is going to
409 	 * be in the next journal entry.
410 	 *
411 	 * But if we're writing a new btree node (that isn't a root) or
412 	 * appending to a non leaf btree node, we need either FUA or a flush
413 	 * when we write the parent with the new pointer. FUA is cheaper than a
414 	 * flush, and writes appending to leaf nodes aren't blocking anything so
415 	 * just make all btree node writes FUA to keep things sane.
416 	 */
417 
418 	bkey_copy(&k.key, &b->key);
419 	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
420 		       bset_sector_offset(&b->keys, i));
421 
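	/*
	 * Try to write from a freshly allocated bounce buffer; if the page
	 * allocation fails, map the bset directly and wait for the write to
	 * complete before returning.
	 */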
422 	if (!bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
423 		int j;
424 		struct bio_vec *bv;
425 		void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
426 
427 		bio_for_each_segment_all(bv, b->bio, j)
428 			memcpy(page_address(bv->bv_page),
429 			       base + j * PAGE_SIZE, PAGE_SIZE);
430 
431 		bch_submit_bbio(b->bio, b->c, &k.key, 0);
432 
433 		continue_at(cl, btree_node_write_done, NULL);
434 	} else {
435 		b->bio->bi_vcnt = 0;
436 		bch_bio_map(b->bio, i);
437 
438 		bch_submit_bbio(b->bio, b->c, &k.key, 0);
439 
440 		closure_sync(cl);
441 		continue_at_nobarrier(cl, __btree_node_write_done, NULL);
442 	}
443 }
444 
445 void __bch_btree_node_write(struct btree *b, struct closure *parent)
446 {
447 	struct bset *i = btree_bset_last(b);
448 
449 	lockdep_assert_held(&b->write_lock);
450 
451 	trace_bcache_btree_write(b);
452 
453 	BUG_ON(current->bio_list);
454 	BUG_ON(b->written >= btree_blocks(b));
455 	BUG_ON(b->written && !i->keys);
456 	BUG_ON(btree_bset_first(b)->seq != i->seq);
457 	bch_check_keys(&b->keys, "writing");
458 
459 	cancel_delayed_work(&b->work);
460 
461 	/* If caller isn't waiting for write, parent refcount is cache set */
462 	down(&b->io_mutex);
463 	closure_init(&b->io, parent ?: &b->c->cl);
464 
465 	clear_bit(BTREE_NODE_dirty,	 &b->flags);
466 	change_bit(BTREE_NODE_write_idx, &b->flags);
467 
468 	do_btree_node_write(b);
469 
470 	atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
471 			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
472 
473 	b->written += set_blocks(i, block_bytes(b->c));
474 }
475 
476 void bch_btree_node_write(struct btree *b, struct closure *parent)
477 {
478 	unsigned nsets = b->keys.nsets;
479 
480 	lockdep_assert_held(&b->lock);
481 
482 	__bch_btree_node_write(b, parent);
483 
484 	/*
485 	 * do verify if there was more than one set initially (i.e. we did a
486 	 * sort) and we sorted down to a single set:
487 	 */
488 	if (nsets && !b->keys.nsets)
489 		bch_btree_verify(b);
490 
491 	bch_btree_init_next(b);
492 }
493 
494 static void bch_btree_node_write_sync(struct btree *b)
495 {
496 	struct closure cl;
497 
498 	closure_init_stack(&cl);
499 
500 	mutex_lock(&b->write_lock);
501 	bch_btree_node_write(b, &cl);
502 	mutex_unlock(&b->write_lock);
503 
504 	closure_sync(&cl);
505 }
506 
507 static void btree_node_write_work(struct work_struct *w)
508 {
509 	struct btree *b = container_of(to_delayed_work(w), struct btree, work);
510 
511 	mutex_lock(&b->write_lock);
512 	if (btree_node_dirty(b))
513 		__bch_btree_node_write(b, NULL);
514 	mutex_unlock(&b->write_lock);
515 }
516 
517 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
518 {
519 	struct bset *i = btree_bset_last(b);
520 	struct btree_write *w = btree_current_write(b);
521 
522 	lockdep_assert_held(&b->write_lock);
523 
524 	BUG_ON(!b->written);
525 	BUG_ON(!i->keys);
526 
527 	if (!btree_node_dirty(b))
528 		schedule_delayed_work(&b->work, 30 * HZ);
529 
530 	set_btree_node_dirty(b);
531 
532 	if (journal_ref) {
533 		if (w->journal &&
534 		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
535 			atomic_dec_bug(w->journal);
536 			w->journal = NULL;
537 		}
538 
539 		if (!w->journal) {
540 			w->journal = journal_ref;
541 			atomic_inc(w->journal);
542 		}
543 	}
544 
545 	/* Force write if set is too big */
546 	if (set_bytes(i) > PAGE_SIZE - 48 &&
547 	    !current->bio_list)
548 		bch_btree_node_write(b, NULL);
549 }
550 
551 /*
552  * Btree in memory cache - allocation/freeing
553  * mca -> memory cache
554  */
555 
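/*
 * mca_reserve() is the number of cached btree nodes we never let the
 * shrinker free - 8 per level of btree depth plus 16 - and mca_can_free()
 * is how many nodes above that reserve are currently in the cache.
 */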
556 #define mca_reserve(c)	(((c->root && c->root->level)		\
557 			  ? c->root->level : 1) * 8 + 16)
558 #define mca_can_free(c)						\
559 	max_t(int, 0, c->btree_cache_used - mca_reserve(c))
560 
561 static void mca_data_free(struct btree *b)
562 {
563 	BUG_ON(b->io_mutex.count != 1);
564 
565 	bch_btree_keys_free(&b->keys);
566 
567 	b->c->btree_cache_used--;
568 	list_move(&b->list, &b->c->btree_cache_freed);
569 }
570 
571 static void mca_bucket_free(struct btree *b)
572 {
573 	BUG_ON(btree_node_dirty(b));
574 
575 	b->key.ptr[0] = 0;
576 	hlist_del_init_rcu(&b->hash);
577 	list_move(&b->list, &b->c->btree_cache_freeable);
578 }
579 
580 static unsigned btree_order(struct bkey *k)
581 {
582 	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
583 }
584 
585 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
586 {
587 	if (!bch_btree_keys_alloc(&b->keys,
588 				  max_t(unsigned,
589 					ilog2(b->c->btree_pages),
590 					btree_order(k)),
591 				  gfp)) {
592 		b->c->btree_cache_used++;
593 		list_move(&b->list, &b->c->btree_cache);
594 	} else {
595 		list_move(&b->list, &b->c->btree_cache_freed);
596 	}
597 }
598 
599 static struct btree *mca_bucket_alloc(struct cache_set *c,
600 				      struct bkey *k, gfp_t gfp)
601 {
602 	struct btree *b = kzalloc(sizeof(struct btree), gfp);
603 	if (!b)
604 		return NULL;
605 
606 	init_rwsem(&b->lock);
607 	lockdep_set_novalidate_class(&b->lock);
608 	mutex_init(&b->write_lock);
609 	lockdep_set_novalidate_class(&b->write_lock);
610 	INIT_LIST_HEAD(&b->list);
611 	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
612 	b->c = c;
613 	sema_init(&b->io_mutex, 1);
614 
615 	mca_data_alloc(b, k, gfp);
616 	return b;
617 }
618 
619 static int mca_reap(struct btree *b, unsigned min_order, bool flush)
620 {
621 	struct closure cl;
622 
623 	closure_init_stack(&cl);
624 	lockdep_assert_held(&b->c->bucket_lock);
625 
626 	if (!down_write_trylock(&b->lock))
627 		return -ENOMEM;
628 
629 	BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
630 
631 	if (b->keys.page_order < min_order)
632 		goto out_unlock;
633 
634 	if (!flush) {
635 		if (btree_node_dirty(b))
636 			goto out_unlock;
637 
638 		if (down_trylock(&b->io_mutex))
639 			goto out_unlock;
640 		up(&b->io_mutex);
641 	}
642 
643 	mutex_lock(&b->write_lock);
644 	if (btree_node_dirty(b))
645 		__bch_btree_node_write(b, &cl);
646 	mutex_unlock(&b->write_lock);
647 
648 	closure_sync(&cl);
649 
650 	/* wait for any in flight btree write */
651 	down(&b->io_mutex);
652 	up(&b->io_mutex);
653 
654 	return 0;
655 out_unlock:
656 	rw_unlock(true, b);
657 	return -ENOMEM;
658 }
659 
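/*
 * Shrinker callback: free up to nr_to_scan worth of btree node memory,
 * first from the freeable list and then from the main cache list (skipping
 * nodes that were accessed since the last scan), never dipping below
 * mca_reserve().
 */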
660 static unsigned long bch_mca_scan(struct shrinker *shrink,
661 				  struct shrink_control *sc)
662 {
663 	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
664 	struct btree *b, *t;
665 	unsigned long i, nr = sc->nr_to_scan;
666 	unsigned long freed = 0;
667 
668 	if (c->shrinker_disabled)
669 		return SHRINK_STOP;
670 
671 	if (c->btree_cache_alloc_lock)
672 		return SHRINK_STOP;
673 
674 	/* Return -1 if we can't do anything right now */
675 	if (sc->gfp_mask & __GFP_IO)
676 		mutex_lock(&c->bucket_lock);
677 	else if (!mutex_trylock(&c->bucket_lock))
678 		return -1;
679 
680 	/*
681 	 * It's _really_ critical that we don't free too many btree nodes - we
682 	 * have to always leave ourselves a reserve. The reserve is how we
683 	 * guarantee that allocating memory for a new btree node can always
684 	 * succeed, so that inserting keys into the btree can always succeed and
685 	 * IO can always make forward progress:
686 	 */
687 	nr /= c->btree_pages;
688 	if (nr == 0)
689 		nr = 1;
690 	nr = min_t(unsigned long, nr, mca_can_free(c));
691 
692 	i = 0;
693 	list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
694 		if (freed >= nr)
695 			break;
696 
697 		if (++i > 3 &&
698 		    !mca_reap(b, 0, false)) {
699 			mca_data_free(b);
700 			rw_unlock(true, b);
701 			freed++;
702 		}
703 	}
704 
705 	for (i = 0; (nr--) && i < c->btree_cache_used; i++) {
706 		if (list_empty(&c->btree_cache))
707 			goto out;
708 
709 		b = list_first_entry(&c->btree_cache, struct btree, list);
710 		list_rotate_left(&c->btree_cache);
711 
712 		if (!b->accessed &&
713 		    !mca_reap(b, 0, false)) {
714 			mca_bucket_free(b);
715 			mca_data_free(b);
716 			rw_unlock(true, b);
717 			freed++;
718 		} else
719 			b->accessed = 0;
720 	}
721 out:
722 	mutex_unlock(&c->bucket_lock);
723 	return freed;
724 }
725 
726 static unsigned long bch_mca_count(struct shrinker *shrink,
727 				   struct shrink_control *sc)
728 {
729 	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
730 
731 	if (c->shrinker_disabled)
732 		return 0;
733 
734 	if (c->btree_cache_alloc_lock)
735 		return 0;
736 
737 	return mca_can_free(c) * c->btree_pages;
738 }
739 
740 void bch_btree_cache_free(struct cache_set *c)
741 {
742 	struct btree *b;
743 	struct closure cl;
744 	closure_init_stack(&cl);
745 
746 	if (c->shrink.list.next)
747 		unregister_shrinker(&c->shrink);
748 
749 	mutex_lock(&c->bucket_lock);
750 
751 #ifdef CONFIG_BCACHE_DEBUG
752 	if (c->verify_data)
753 		list_move(&c->verify_data->list, &c->btree_cache);
754 
755 	free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
756 #endif
757 
758 	list_splice(&c->btree_cache_freeable,
759 		    &c->btree_cache);
760 
761 	while (!list_empty(&c->btree_cache)) {
762 		b = list_first_entry(&c->btree_cache, struct btree, list);
763 
764 		if (btree_node_dirty(b))
765 			btree_complete_write(b, btree_current_write(b));
766 		clear_bit(BTREE_NODE_dirty, &b->flags);
767 
768 		mca_data_free(b);
769 	}
770 
771 	while (!list_empty(&c->btree_cache_freed)) {
772 		b = list_first_entry(&c->btree_cache_freed,
773 				     struct btree, list);
774 		list_del(&b->list);
775 		cancel_delayed_work_sync(&b->work);
776 		kfree(b);
777 	}
778 
779 	mutex_unlock(&c->bucket_lock);
780 }
781 
782 int bch_btree_cache_alloc(struct cache_set *c)
783 {
784 	unsigned i;
785 
786 	for (i = 0; i < mca_reserve(c); i++)
787 		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
788 			return -ENOMEM;
789 
790 	list_splice_init(&c->btree_cache,
791 			 &c->btree_cache_freeable);
792 
793 #ifdef CONFIG_BCACHE_DEBUG
794 	mutex_init(&c->verify_lock);
795 
796 	c->verify_ondisk = (void *)
797 		__get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
798 
799 	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
800 
801 	if (c->verify_data &&
802 	    c->verify_data->keys.set->data)
803 		list_del_init(&c->verify_data->list);
804 	else
805 		c->verify_data = NULL;
806 #endif
807 
808 	c->shrink.count_objects = bch_mca_count;
809 	c->shrink.scan_objects = bch_mca_scan;
810 	c->shrink.seeks = 4;
811 	c->shrink.batch = c->btree_pages * 2;
812 
813 	if (register_shrinker(&c->shrink))
814 		pr_warn("bcache: %s: could not register shrinker",
815 				__func__);
816 
817 	return 0;
818 }
819 
820 /* Btree in memory cache - hash table */
821 
822 static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
823 {
824 	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
825 }
826 
827 static struct btree *mca_find(struct cache_set *c, struct bkey *k)
828 {
829 	struct btree *b;
830 
831 	rcu_read_lock();
832 	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
833 		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
834 			goto out;
835 	b = NULL;
836 out:
837 	rcu_read_unlock();
838 	return b;
839 }
840 
841 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
842 {
843 	struct task_struct *old;
844 
845 	old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
846 	if (old && old != current) {
847 		if (op)
848 			prepare_to_wait(&c->btree_cache_wait, &op->wait,
849 					TASK_UNINTERRUPTIBLE);
850 		return -EINTR;
851 	}
852 
853 	return 0;
854 }
855 
856 static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
857 				     struct bkey *k)
858 {
859 	struct btree *b;
860 
861 	trace_bcache_btree_cache_cannibalize(c);
862 
863 	if (mca_cannibalize_lock(c, op))
864 		return ERR_PTR(-EINTR);
865 
866 	list_for_each_entry_reverse(b, &c->btree_cache, list)
867 		if (!mca_reap(b, btree_order(k), false))
868 			return b;
869 
870 	list_for_each_entry_reverse(b, &c->btree_cache, list)
871 		if (!mca_reap(b, btree_order(k), true))
872 			return b;
873 
874 	WARN(1, "btree cache cannibalize failed\n");
875 	return ERR_PTR(-ENOMEM);
876 }
877 
878 /*
879  * We can only have one thread cannibalizing other cached btree nodes at a time,
880  * or we'll deadlock. We use an open coded mutex to ensure that, which
881  * cannibalize_bucket() will take. This means every time we unlock the root of
882  * the btree, we need to release this lock if we have it held.
883  */
884 static void bch_cannibalize_unlock(struct cache_set *c)
885 {
886 	if (c->btree_cache_alloc_lock == current) {
887 		c->btree_cache_alloc_lock = NULL;
888 		wake_up(&c->btree_cache_wait);
889 	}
890 }
891 
892 static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
893 			       struct bkey *k, int level)
894 {
895 	struct btree *b;
896 
897 	BUG_ON(current->bio_list);
898 
899 	lockdep_assert_held(&c->bucket_lock);
900 
901 	if (mca_find(c, k))
902 		return NULL;
903 
904 	/* btree_free() doesn't free memory; it sticks the node on the end of
905 	 * the list. Check if there are any freed nodes there:
906 	 */
907 	list_for_each_entry(b, &c->btree_cache_freeable, list)
908 		if (!mca_reap(b, btree_order(k), false))
909 			goto out;
910 
911 	/* We never free struct btree itself, just the memory that holds the on
912 	 * disk node. Check the freed list before allocating a new one:
913 	 */
914 	list_for_each_entry(b, &c->btree_cache_freed, list)
915 		if (!mca_reap(b, 0, false)) {
916 			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
917 			if (!b->keys.set[0].data)
918 				goto err;
919 			else
920 				goto out;
921 		}
922 
923 	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
924 	if (!b)
925 		goto err;
926 
927 	BUG_ON(!down_write_trylock(&b->lock));
928 	if (!b->keys.set->data)
929 		goto err;
930 out:
931 	BUG_ON(b->io_mutex.count != 1);
932 
933 	bkey_copy(&b->key, k);
934 	list_move(&b->list, &c->btree_cache);
935 	hlist_del_init_rcu(&b->hash);
936 	hlist_add_head_rcu(&b->hash, mca_hash(c, k));
937 
938 	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
939 	b->parent	= (void *) ~0UL;
940 	b->flags	= 0;
941 	b->written	= 0;
942 	b->level	= level;
943 
944 	if (!b->level)
945 		bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
946 				    &b->c->expensive_debug_checks);
947 	else
948 		bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
949 				    &b->c->expensive_debug_checks);
950 
951 	return b;
952 err:
953 	if (b)
954 		rw_unlock(true, b);
955 
956 	b = mca_cannibalize(c, op, k);
957 	if (!IS_ERR(b))
958 		goto out;
959 
960 	return b;
961 }
962 
963 /**
964  * bch_btree_node_get - find a btree node in the cache and lock it, reading it
965  * in from disk if necessary.
966  *
967  * If IO is necessary and running under generic_make_request, returns -EAGAIN.
968  *
969  * The btree node will have either a read or a write lock held, depending on
970  * level and op->lock.
971  */
972 struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
973 				 struct bkey *k, int level, bool write,
974 				 struct btree *parent)
975 {
976 	int i = 0;
977 	struct btree *b;
978 
979 	BUG_ON(level < 0);
980 retry:
981 	b = mca_find(c, k);
982 
983 	if (!b) {
984 		if (current->bio_list)
985 			return ERR_PTR(-EAGAIN);
986 
987 		mutex_lock(&c->bucket_lock);
988 		b = mca_alloc(c, op, k, level);
989 		mutex_unlock(&c->bucket_lock);
990 
991 		if (!b)
992 			goto retry;
993 		if (IS_ERR(b))
994 			return b;
995 
996 		bch_btree_node_read(b);
997 
998 		if (!write)
999 			downgrade_write(&b->lock);
1000 	} else {
1001 		rw_lock(write, b, level);
1002 		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
1003 			rw_unlock(write, b);
1004 			goto retry;
1005 		}
1006 		BUG_ON(b->level != level);
1007 	}
1008 
1009 	b->parent = parent;
1010 	b->accessed = 1;
1011 
1012 	for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1013 		prefetch(b->keys.set[i].tree);
1014 		prefetch(b->keys.set[i].data);
1015 	}
1016 
1017 	for (; i <= b->keys.nsets; i++)
1018 		prefetch(b->keys.set[i].data);
1019 
1020 	if (btree_node_io_error(b)) {
1021 		rw_unlock(write, b);
1022 		return ERR_PTR(-EIO);
1023 	}
1024 
1025 	BUG_ON(!b->written);
1026 
1027 	return b;
1028 }
1029 
1030 static void btree_node_prefetch(struct btree *parent, struct bkey *k)
1031 {
1032 	struct btree *b;
1033 
1034 	mutex_lock(&parent->c->bucket_lock);
1035 	b = mca_alloc(parent->c, NULL, k, parent->level - 1);
1036 	mutex_unlock(&parent->c->bucket_lock);
1037 
1038 	if (!IS_ERR_OR_NULL(b)) {
1039 		b->parent = parent;
1040 		bch_btree_node_read(b);
1041 		rw_unlock(true, b);
1042 	}
1043 }
1044 
1045 /* Btree alloc */
1046 
1047 static void btree_node_free(struct btree *b)
1048 {
1049 	trace_bcache_btree_node_free(b);
1050 
1051 	BUG_ON(b == b->c->root);
1052 
1053 	mutex_lock(&b->write_lock);
1054 
1055 	if (btree_node_dirty(b))
1056 		btree_complete_write(b, btree_current_write(b));
1057 	clear_bit(BTREE_NODE_dirty, &b->flags);
1058 
1059 	mutex_unlock(&b->write_lock);
1060 
1061 	cancel_delayed_work(&b->work);
1062 
1063 	mutex_lock(&b->c->bucket_lock);
1064 	bch_bucket_free(b->c, &b->key);
1065 	mca_bucket_free(b);
1066 	mutex_unlock(&b->c->bucket_lock);
1067 }
1068 
1069 struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
1070 				     int level, bool wait,
1071 				     struct btree *parent)
1072 {
1073 	BKEY_PADDED(key) k;
1074 	struct btree *b = ERR_PTR(-EAGAIN);
1075 
1076 	mutex_lock(&c->bucket_lock);
1077 retry:
1078 	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
1079 		goto err;
1080 
1081 	bkey_put(c, &k.key);
1082 	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1083 
1084 	b = mca_alloc(c, op, &k.key, level);
1085 	if (IS_ERR(b))
1086 		goto err_free;
1087 
1088 	if (!b) {
1089 		cache_bug(c,
1090 			"Tried to allocate bucket that was in btree cache");
1091 		goto retry;
1092 	}
1093 
1094 	b->accessed = 1;
1095 	b->parent = parent;
1096 	bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
1097 
1098 	mutex_unlock(&c->bucket_lock);
1099 
1100 	trace_bcache_btree_node_alloc(b);
1101 	return b;
1102 err_free:
1103 	bch_bucket_free(c, &k.key);
1104 err:
1105 	mutex_unlock(&c->bucket_lock);
1106 
1107 	trace_bcache_btree_node_alloc_fail(c);
1108 	return b;
1109 }
1110 
1111 static struct btree *bch_btree_node_alloc(struct cache_set *c,
1112 					  struct btree_op *op, int level,
1113 					  struct btree *parent)
1114 {
1115 	return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1116 }
1117 
1118 static struct btree *btree_node_alloc_replacement(struct btree *b,
1119 						  struct btree_op *op)
1120 {
1121 	struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1122 	if (!IS_ERR_OR_NULL(n)) {
1123 		mutex_lock(&n->write_lock);
1124 		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1125 		bkey_copy_key(&n->key, &b->key);
1126 		mutex_unlock(&n->write_lock);
1127 	}
1128 
1129 	return n;
1130 }
1131 
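/*
 * Construct the key used to free @b once it has been unlinked from the
 * tree: the bucket generations are bumped so that existing pointers to the
 * node become stale and its buckets can be reclaimed.
 */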
1132 static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1133 {
1134 	unsigned i;
1135 
1136 	mutex_lock(&b->c->bucket_lock);
1137 
1138 	atomic_inc(&b->c->prio_blocked);
1139 
1140 	bkey_copy(k, &b->key);
1141 	bkey_copy_key(k, &ZERO_KEY);
1142 
1143 	for (i = 0; i < KEY_PTRS(k); i++)
1144 		SET_PTR_GEN(k, i,
1145 			    bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
1146 					PTR_BUCKET(b->c, &b->key, i)));
1147 
1148 	mutex_unlock(&b->c->bucket_lock);
1149 }
1150 
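/*
 * Check that each cache has enough buckets in the btree reserve to handle
 * a split all the way up to the root; if not, queue the caller on
 * btree_cache_wait and return -EINTR so the operation is retried from the
 * top. On success this also takes the cannibalize lock.
 */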
1151 static int btree_check_reserve(struct btree *b, struct btree_op *op)
1152 {
1153 	struct cache_set *c = b->c;
1154 	struct cache *ca;
1155 	unsigned i, reserve = (c->root->level - b->level) * 2 + 1;
1156 
1157 	mutex_lock(&c->bucket_lock);
1158 
1159 	for_each_cache(ca, c, i)
1160 		if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1161 			if (op)
1162 				prepare_to_wait(&c->btree_cache_wait, &op->wait,
1163 						TASK_UNINTERRUPTIBLE);
1164 			mutex_unlock(&c->bucket_lock);
1165 			return -EINTR;
1166 		}
1167 
1168 	mutex_unlock(&c->bucket_lock);
1169 
1170 	return mca_cannibalize_lock(b->c, op);
1171 }
1172 
1173 /* Garbage collection */
1174 
1175 static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1176 				    struct bkey *k)
1177 {
1178 	uint8_t stale = 0;
1179 	unsigned i;
1180 	struct bucket *g;
1181 
1182 	/*
1183 	 * ptr_invalid() can't return true for the keys that mark btree nodes as
1184 	 * freed, but since ptr_bad() returns true we'll never actually use them
1185 	 * for anything and thus we don't want to mark their pointers here
1186 	 */
1187 	if (!bkey_cmp(k, &ZERO_KEY))
1188 		return stale;
1189 
1190 	for (i = 0; i < KEY_PTRS(k); i++) {
1191 		if (!ptr_available(c, k, i))
1192 			continue;
1193 
1194 		g = PTR_BUCKET(c, k, i);
1195 
1196 		if (gen_after(g->last_gc, PTR_GEN(k, i)))
1197 			g->last_gc = PTR_GEN(k, i);
1198 
1199 		if (ptr_stale(c, k, i)) {
1200 			stale = max(stale, ptr_stale(c, k, i));
1201 			continue;
1202 		}
1203 
1204 		cache_bug_on(GC_MARK(g) &&
1205 			     (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1206 			     c, "inconsistent ptrs: mark = %llu, level = %i",
1207 			     GC_MARK(g), level);
1208 
1209 		if (level)
1210 			SET_GC_MARK(g, GC_MARK_METADATA);
1211 		else if (KEY_DIRTY(k))
1212 			SET_GC_MARK(g, GC_MARK_DIRTY);
1213 		else if (!GC_MARK(g))
1214 			SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
1215 
1216 		/* guard against overflow */
1217 		SET_GC_SECTORS_USED(g, min_t(unsigned,
1218 					     GC_SECTORS_USED(g) + KEY_SIZE(k),
1219 					     MAX_GC_SECTORS_USED));
1220 
1221 		BUG_ON(!GC_SECTORS_USED(g));
1222 	}
1223 
1224 	return stale;
1225 }
1226 
1227 #define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k)
1228 
1229 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1230 {
1231 	unsigned i;
1232 
1233 	for (i = 0; i < KEY_PTRS(k); i++)
1234 		if (ptr_available(c, k, i) &&
1235 		    !ptr_stale(c, k, i)) {
1236 			struct bucket *b = PTR_BUCKET(c, k, i);
1237 
1238 			b->gen = PTR_GEN(k, i);
1239 
1240 			if (level && bkey_cmp(k, &ZERO_KEY))
1241 				b->prio = BTREE_PRIO;
1242 			else if (!level && b->prio == BTREE_PRIO)
1243 				b->prio = INITIAL_PRIO;
1244 		}
1245 
1246 	__bch_btree_mark_key(c, level, k);
1247 }
1248 
1249 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1250 {
1251 	uint8_t stale = 0;
1252 	unsigned keys = 0, good_keys = 0;
1253 	struct bkey *k;
1254 	struct btree_iter iter;
1255 	struct bset_tree *t;
1256 
1257 	gc->nodes++;
1258 
1259 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1260 		stale = max(stale, btree_mark_key(b, k));
1261 		keys++;
1262 
1263 		if (bch_ptr_bad(&b->keys, k))
1264 			continue;
1265 
1266 		gc->key_bytes += bkey_u64s(k);
1267 		gc->nkeys++;
1268 		good_keys++;
1269 
1270 		gc->data += KEY_SIZE(k);
1271 	}
1272 
1273 	for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1274 		btree_bug_on(t->size &&
1275 			     bset_written(&b->keys, t) &&
1276 			     bkey_cmp(&b->key, &t->end) < 0,
1277 			     b, "found short btree key in gc");
1278 
1279 	if (b->c->gc_always_rewrite)
1280 		return true;
1281 
1282 	if (stale > 10)
1283 		return true;
1284 
1285 	if ((keys - good_keys) * 2 > keys)
1286 		return true;
1287 
1288 	return false;
1289 }
1290 
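/*
 * GC keeps a sliding window of up to GC_MERGE_NODES adjacent nodes and
 * tries to coalesce them into fewer nodes when their keys would fit in
 * less space (see btree_gc_coalesce()).
 */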
1291 #define GC_MERGE_NODES	4U
1292 
1293 struct gc_merge_info {
1294 	struct btree	*b;
1295 	unsigned	keys;
1296 };
1297 
1298 static int bch_btree_insert_node(struct btree *, struct btree_op *,
1299 				 struct keylist *, atomic_t *, struct bkey *);
1300 
1301 static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1302 			     struct gc_stat *gc, struct gc_merge_info *r)
1303 {
1304 	unsigned i, nodes = 0, keys = 0, blocks;
1305 	struct btree *new_nodes[GC_MERGE_NODES];
1306 	struct keylist keylist;
1307 	struct closure cl;
1308 	struct bkey *k;
1309 
1310 	bch_keylist_init(&keylist);
1311 
1312 	if (btree_check_reserve(b, NULL))
1313 		return 0;
1314 
1315 	memset(new_nodes, 0, sizeof(new_nodes));
1316 	closure_init_stack(&cl);
1317 
1318 	while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
1319 		keys += r[nodes++].keys;
1320 
1321 	blocks = btree_default_blocks(b->c) * 2 / 3;
1322 
1323 	if (nodes < 2 ||
1324 	    __set_blocks(b->keys.set[0].data, keys,
1325 			 block_bytes(b->c)) > blocks * (nodes - 1))
1326 		return 0;
1327 
1328 	for (i = 0; i < nodes; i++) {
1329 		new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1330 		if (IS_ERR_OR_NULL(new_nodes[i]))
1331 			goto out_nocoalesce;
1332 	}
1333 
1334 	/*
1335 	 * We have to check the reserve here, after we've allocated our new
1336 	 * nodes, to make sure the insert below will succeed - we also check
1337 	 * before as an optimization to potentially avoid a bunch of expensive
1338 	 * allocs/sorts
1339 	 */
1340 	if (btree_check_reserve(b, NULL))
1341 		goto out_nocoalesce;
1342 
1343 	for (i = 0; i < nodes; i++)
1344 		mutex_lock(&new_nodes[i]->write_lock);
1345 
1346 	for (i = nodes - 1; i > 0; --i) {
1347 		struct bset *n1 = btree_bset_first(new_nodes[i]);
1348 		struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1349 		struct bkey *k, *last = NULL;
1350 
1351 		keys = 0;
1352 
1353 		if (i > 1) {
1354 			for (k = n2->start;
1355 			     k < bset_bkey_last(n2);
1356 			     k = bkey_next(k)) {
1357 				if (__set_blocks(n1, n1->keys + keys +
1358 						 bkey_u64s(k),
1359 						 block_bytes(b->c)) > blocks)
1360 					break;
1361 
1362 				last = k;
1363 				keys += bkey_u64s(k);
1364 			}
1365 		} else {
1366 			/*
1367 			 * Last node we're not getting rid of - we're getting
1368 			 * rid of the node at r[0]. Have to try and fit all of
1369 			 * the remaining keys into this node; we can't ensure
1370 			 * they will always fit due to rounding and variable
1371 			 * length keys (shouldn't be possible in practice,
1372 			 * though)
1373 			 */
1374 			if (__set_blocks(n1, n1->keys + n2->keys,
1375 					 block_bytes(b->c)) >
1376 			    btree_blocks(new_nodes[i]))
1377 				goto out_nocoalesce;
1378 
1379 			keys = n2->keys;
1380 			/* Take the key of the node we're getting rid of */
1381 			last = &r->b->key;
1382 		}
1383 
1384 		BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
1385 		       btree_blocks(new_nodes[i]));
1386 
1387 		if (last)
1388 			bkey_copy_key(&new_nodes[i]->key, last);
1389 
1390 		memcpy(bset_bkey_last(n1),
1391 		       n2->start,
1392 		       (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1393 
1394 		n1->keys += keys;
1395 		r[i].keys = n1->keys;
1396 
1397 		memmove(n2->start,
1398 			bset_bkey_idx(n2, keys),
1399 			(void *) bset_bkey_last(n2) -
1400 			(void *) bset_bkey_idx(n2, keys));
1401 
1402 		n2->keys -= keys;
1403 
1404 		if (__bch_keylist_realloc(&keylist,
1405 					  bkey_u64s(&new_nodes[i]->key)))
1406 			goto out_nocoalesce;
1407 
1408 		bch_btree_node_write(new_nodes[i], &cl);
1409 		bch_keylist_add(&keylist, &new_nodes[i]->key);
1410 	}
1411 
1412 	for (i = 0; i < nodes; i++)
1413 		mutex_unlock(&new_nodes[i]->write_lock);
1414 
1415 	closure_sync(&cl);
1416 
1417 	/* We emptied out this node */
1418 	BUG_ON(btree_bset_first(new_nodes[0])->keys);
1419 	btree_node_free(new_nodes[0]);
1420 	rw_unlock(true, new_nodes[0]);
1421 	new_nodes[0] = NULL;
1422 
1423 	for (i = 0; i < nodes; i++) {
1424 		if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1425 			goto out_nocoalesce;
1426 
1427 		make_btree_freeing_key(r[i].b, keylist.top);
1428 		bch_keylist_push(&keylist);
1429 	}
1430 
1431 	bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1432 	BUG_ON(!bch_keylist_empty(&keylist));
1433 
1434 	for (i = 0; i < nodes; i++) {
1435 		btree_node_free(r[i].b);
1436 		rw_unlock(true, r[i].b);
1437 
1438 		r[i].b = new_nodes[i];
1439 	}
1440 
1441 	memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1442 	r[nodes - 1].b = ERR_PTR(-EINTR);
1443 
1444 	trace_bcache_btree_gc_coalesce(nodes);
1445 	gc->nodes--;
1446 
1447 	bch_keylist_free(&keylist);
1448 
1449 	/* Invalidated our iterator */
1450 	return -EINTR;
1451 
1452 out_nocoalesce:
1453 	closure_sync(&cl);
1454 	bch_keylist_free(&keylist);
1455 
1456 	while ((k = bch_keylist_pop(&keylist)))
1457 		if (!bkey_cmp(k, &ZERO_KEY))
1458 			atomic_dec(&b->c->prio_blocked);
1459 
1460 	for (i = 0; i < nodes; i++)
1461 		if (!IS_ERR_OR_NULL(new_nodes[i])) {
1462 			btree_node_free(new_nodes[i]);
1463 			rw_unlock(true, new_nodes[i]);
1464 		}
1465 	return 0;
1466 }
1467 
1468 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1469 				 struct btree *replace)
1470 {
1471 	struct keylist keys;
1472 	struct btree *n;
1473 
1474 	if (btree_check_reserve(b, NULL))
1475 		return 0;
1476 
1477 	n = btree_node_alloc_replacement(replace, NULL);
1478 
1479 	/* recheck reserve after allocating replacement node */
1480 	if (btree_check_reserve(b, NULL)) {
1481 		btree_node_free(n);
1482 		rw_unlock(true, n);
1483 		return 0;
1484 	}
1485 
1486 	bch_btree_node_write_sync(n);
1487 
1488 	bch_keylist_init(&keys);
1489 	bch_keylist_add(&keys, &n->key);
1490 
1491 	make_btree_freeing_key(replace, keys.top);
1492 	bch_keylist_push(&keys);
1493 
1494 	bch_btree_insert_node(b, op, &keys, NULL, NULL);
1495 	BUG_ON(!bch_keylist_empty(&keys));
1496 
1497 	btree_node_free(replace);
1498 	rw_unlock(true, n);
1499 
1500 	/* Invalidated our iterator */
1501 	return -EINTR;
1502 }
1503 
1504 static unsigned btree_gc_count_keys(struct btree *b)
1505 {
1506 	struct bkey *k;
1507 	struct btree_iter iter;
1508 	unsigned ret = 0;
1509 
1510 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1511 		ret += bkey_u64s(k);
1512 
1513 	return ret;
1514 }
1515 
1516 static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1517 			    struct closure *writes, struct gc_stat *gc)
1518 {
1519 	int ret = 0;
1520 	bool should_rewrite;
1521 	struct bkey *k;
1522 	struct btree_iter iter;
1523 	struct gc_merge_info r[GC_MERGE_NODES];
1524 	struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1525 
1526 	bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1527 
1528 	for (i = r; i < r + ARRAY_SIZE(r); i++)
1529 		i->b = ERR_PTR(-EINTR);
1530 
1531 	while (1) {
1532 		k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1533 		if (k) {
1534 			r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
1535 						  true, b);
1536 			if (IS_ERR(r->b)) {
1537 				ret = PTR_ERR(r->b);
1538 				break;
1539 			}
1540 
1541 			r->keys = btree_gc_count_keys(r->b);
1542 
1543 			ret = btree_gc_coalesce(b, op, gc, r);
1544 			if (ret)
1545 				break;
1546 		}
1547 
1548 		if (!last->b)
1549 			break;
1550 
1551 		if (!IS_ERR(last->b)) {
1552 			should_rewrite = btree_gc_mark_node(last->b, gc);
1553 			if (should_rewrite) {
1554 				ret = btree_gc_rewrite_node(b, op, last->b);
1555 				if (ret)
1556 					break;
1557 			}
1558 
1559 			if (last->b->level) {
1560 				ret = btree_gc_recurse(last->b, op, writes, gc);
1561 				if (ret)
1562 					break;
1563 			}
1564 
1565 			bkey_copy_key(&b->c->gc_done, &last->b->key);
1566 
1567 			/*
1568 			 * Must flush leaf nodes before gc ends, since replace
1569 			 * operations aren't journalled
1570 			 */
1571 			mutex_lock(&last->b->write_lock);
1572 			if (btree_node_dirty(last->b))
1573 				bch_btree_node_write(last->b, writes);
1574 			mutex_unlock(&last->b->write_lock);
1575 			rw_unlock(true, last->b);
1576 		}
1577 
1578 		memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1579 		r->b = NULL;
1580 
1581 		if (need_resched()) {
1582 			ret = -EAGAIN;
1583 			break;
1584 		}
1585 	}
1586 
1587 	for (i = r; i < r + ARRAY_SIZE(r); i++)
1588 		if (!IS_ERR_OR_NULL(i->b)) {
1589 			mutex_lock(&i->b->write_lock);
1590 			if (btree_node_dirty(i->b))
1591 				bch_btree_node_write(i->b, writes);
1592 			mutex_unlock(&i->b->write_lock);
1593 			rw_unlock(true, i->b);
1594 		}
1595 
1596 	return ret;
1597 }
1598 
1599 static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1600 			     struct closure *writes, struct gc_stat *gc)
1601 {
1602 	struct btree *n = NULL;
1603 	int ret = 0;
1604 	bool should_rewrite;
1605 
1606 	should_rewrite = btree_gc_mark_node(b, gc);
1607 	if (should_rewrite) {
1608 		n = btree_node_alloc_replacement(b, NULL);
1609 
1610 		if (!IS_ERR_OR_NULL(n)) {
1611 			bch_btree_node_write_sync(n);
1612 
1613 			bch_btree_set_root(n);
1614 			btree_node_free(b);
1615 			rw_unlock(true, n);
1616 
1617 			return -EINTR;
1618 		}
1619 	}
1620 
1621 	__bch_btree_mark_key(b->c, b->level + 1, &b->key);
1622 
1623 	if (b->level) {
1624 		ret = btree_gc_recurse(b, op, writes, gc);
1625 		if (ret)
1626 			return ret;
1627 	}
1628 
1629 	bkey_copy_key(&b->c->gc_done, &b->key);
1630 
1631 	return ret;
1632 }
1633 
1634 static void btree_gc_start(struct cache_set *c)
1635 {
1636 	struct cache *ca;
1637 	struct bucket *b;
1638 	unsigned i;
1639 
1640 	if (!c->gc_mark_valid)
1641 		return;
1642 
1643 	mutex_lock(&c->bucket_lock);
1644 
1645 	c->gc_mark_valid = 0;
1646 	c->gc_done = ZERO_KEY;
1647 
1648 	for_each_cache(ca, c, i)
1649 		for_each_bucket(b, ca) {
1650 			b->last_gc = b->gen;
1651 			if (!atomic_read(&b->pin)) {
1652 				SET_GC_MARK(b, 0);
1653 				SET_GC_SECTORS_USED(b, 0);
1654 			}
1655 		}
1656 
1657 	mutex_unlock(&c->bucket_lock);
1658 }
1659 
1660 static size_t bch_btree_gc_finish(struct cache_set *c)
1661 {
1662 	size_t available = 0;
1663 	struct bucket *b;
1664 	struct cache *ca;
1665 	unsigned i;
1666 
1667 	mutex_lock(&c->bucket_lock);
1668 
1669 	set_gc_sectors(c);
1670 	c->gc_mark_valid = 1;
1671 	c->need_gc	= 0;
1672 
1673 	for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1674 		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1675 			    GC_MARK_METADATA);
1676 
1677 	/* don't reclaim buckets to which writeback keys point */
1678 	rcu_read_lock();
1679 	for (i = 0; i < c->nr_uuids; i++) {
1680 		struct bcache_device *d = c->devices[i];
1681 		struct cached_dev *dc;
1682 		struct keybuf_key *w, *n;
1683 		unsigned j;
1684 
1685 		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1686 			continue;
1687 		dc = container_of(d, struct cached_dev, disk);
1688 
1689 		spin_lock(&dc->writeback_keys.lock);
1690 		rbtree_postorder_for_each_entry_safe(w, n,
1691 					&dc->writeback_keys.keys, node)
1692 			for (j = 0; j < KEY_PTRS(&w->key); j++)
1693 				SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1694 					    GC_MARK_DIRTY);
1695 		spin_unlock(&dc->writeback_keys.lock);
1696 	}
1697 	rcu_read_unlock();
1698 
1699 	for_each_cache(ca, c, i) {
1700 		uint64_t *i;
1701 
1702 		ca->invalidate_needs_gc = 0;
1703 
1704 		for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
1705 			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1706 
1707 		for (i = ca->prio_buckets;
1708 		     i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
1709 			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1710 
1711 		for_each_bucket(b, ca) {
1712 			c->need_gc	= max(c->need_gc, bucket_gc_gen(b));
1713 
1714 			if (atomic_read(&b->pin))
1715 				continue;
1716 
1717 			BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1718 
1719 			if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1720 				available++;
1721 		}
1722 	}
1723 
1724 	mutex_unlock(&c->bucket_lock);
1725 	return available;
1726 }
1727 
1728 static void bch_btree_gc(struct cache_set *c)
1729 {
1730 	int ret;
1731 	unsigned long available;
1732 	struct gc_stat stats;
1733 	struct closure writes;
1734 	struct btree_op op;
1735 	uint64_t start_time = local_clock();
1736 
1737 	trace_bcache_gc_start(c);
1738 
1739 	memset(&stats, 0, sizeof(struct gc_stat));
1740 	closure_init_stack(&writes);
1741 	bch_btree_op_init(&op, SHRT_MAX);
1742 
1743 	btree_gc_start(c);
1744 
1745 	do {
1746 		ret = btree_root(gc_root, c, &op, &writes, &stats);
1747 		closure_sync(&writes);
1748 		cond_resched();
1749 
1750 		if (ret && ret != -EAGAIN)
1751 			pr_warn("gc failed!");
1752 	} while (ret);
1753 
1754 	available = bch_btree_gc_finish(c);
1755 	wake_up_allocators(c);
1756 
1757 	bch_time_stats_update(&c->btree_gc_time, start_time);
1758 
1759 	stats.key_bytes *= sizeof(uint64_t);
1760 	stats.data	<<= 9;
1761 	stats.in_use	= (c->nbuckets - available) * 100 / c->nbuckets;
1762 	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1763 
1764 	trace_bcache_gc_end(c);
1765 
1766 	bch_moving_gc(c);
1767 }
1768 
1769 static bool gc_should_run(struct cache_set *c)
1770 {
1771 	struct cache *ca;
1772 	unsigned i;
1773 
1774 	for_each_cache(ca, c, i)
1775 		if (ca->invalidate_needs_gc)
1776 			return true;
1777 
1778 	if (atomic_read(&c->sectors_to_gc) < 0)
1779 		return true;
1780 
1781 	return false;
1782 }
1783 
1784 static int bch_gc_thread(void *arg)
1785 {
1786 	struct cache_set *c = arg;
1787 
1788 	while (1) {
1789 		wait_event_interruptible(c->gc_wait,
1790 			   kthread_should_stop() || gc_should_run(c));
1791 
1792 		if (kthread_should_stop())
1793 			break;
1794 
1795 		set_gc_sectors(c);
1796 		bch_btree_gc(c);
1797 	}
1798 
1799 	return 0;
1800 }
1801 
1802 int bch_gc_thread_start(struct cache_set *c)
1803 {
1804 	c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
1805 	if (IS_ERR(c->gc_thread))
1806 		return PTR_ERR(c->gc_thread);
1807 
1808 	return 0;
1809 }
1810 
1811 /* Initial partial gc */
1812 
1813 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1814 {
1815 	int ret = 0;
1816 	struct bkey *k, *p = NULL;
1817 	struct btree_iter iter;
1818 
1819 	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1820 		bch_initial_mark_key(b->c, b->level, k);
1821 
1822 	bch_initial_mark_key(b->c, b->level + 1, &b->key);
1823 
1824 	if (b->level) {
1825 		bch_btree_iter_init(&b->keys, &iter, NULL);
1826 
1827 		do {
1828 			k = bch_btree_iter_next_filter(&iter, &b->keys,
1829 						       bch_ptr_bad);
1830 			if (k)
1831 				btree_node_prefetch(b, k);
1832 
1833 			if (p)
1834 				ret = btree(check_recurse, p, b, op);
1835 
1836 			p = k;
1837 		} while (p && !ret);
1838 	}
1839 
1840 	return ret;
1841 }
1842 
1843 int bch_btree_check(struct cache_set *c)
1844 {
1845 	struct btree_op op;
1846 
1847 	bch_btree_op_init(&op, SHRT_MAX);
1848 
1849 	return btree_root(check_recurse, c, &op);
1850 }
1851 
1852 void bch_initial_gc_finish(struct cache_set *c)
1853 {
1854 	struct cache *ca;
1855 	struct bucket *b;
1856 	unsigned i;
1857 
1858 	bch_btree_gc_finish(c);
1859 
1860 	mutex_lock(&c->bucket_lock);
1861 
1862 	/*
1863 	 * We need to put some unused buckets directly on the prio freelist in
1864 	 * order to get the allocator thread started - it needs freed buckets in
1865 	 * order to rewrite the prios and gens, and it needs to rewrite prios
1866 	 * and gens in order to free buckets.
1867 	 *
1868 	 * This is only safe for buckets that have no live data in them, which
1869 	 * there should always be some of.
1870 	 */
1871 	for_each_cache(ca, c, i) {
1872 		for_each_bucket(b, ca) {
1873 			if (fifo_full(&ca->free[RESERVE_PRIO]) &&
1874 			    fifo_full(&ca->free[RESERVE_BTREE]))
1875 				break;
1876 
1877 			if (bch_can_invalidate_bucket(ca, b) &&
1878 			    !GC_MARK(b)) {
1879 				__bch_invalidate_one_bucket(ca, b);
1880 				if (!fifo_push(&ca->free[RESERVE_PRIO],
1881 				   b - ca->buckets))
1882 					fifo_push(&ca->free[RESERVE_BTREE],
1883 						  b - ca->buckets);
1884 			}
1885 		}
1886 	}
1887 
1888 	mutex_unlock(&c->bucket_lock);
1889 }
1890 
1891 /* Btree insertion */
1892 
1893 static bool btree_insert_key(struct btree *b, struct bkey *k,
1894 			     struct bkey *replace_key)
1895 {
1896 	unsigned status;
1897 
1898 	BUG_ON(bkey_cmp(k, &b->key) > 0);
1899 
1900 	status = bch_btree_insert_key(&b->keys, k, replace_key);
1901 	if (status != BTREE_INSERT_STATUS_NO_INSERT) {
1902 		bch_check_keys(&b->keys, "%u for %s", status,
1903 			       replace_key ? "replace" : "insert");
1904 
1905 		trace_bcache_btree_insert_key(b, k, replace_key != NULL,
1906 					      status);
1907 		return true;
1908 	} else
1909 		return false;
1910 }
1911 
1912 static size_t insert_u64s_remaining(struct btree *b)
1913 {
1914 	long ret = bch_btree_keys_u64s_remaining(&b->keys);
1915 
1916 	/*
1917 	 * Might land in the middle of an existing extent and have to split it
1918 	 */
1919 	if (b->keys.ops->is_extents)
1920 		ret -= KEY_MAX_U64S;
1921 
1922 	return max(ret, 0L);
1923 }
1924 
1925 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
1926 				  struct keylist *insert_keys,
1927 				  struct bkey *replace_key)
1928 {
1929 	bool ret = false;
1930 	int oldsize = bch_count_data(&b->keys);
1931 
1932 	while (!bch_keylist_empty(insert_keys)) {
1933 		struct bkey *k = insert_keys->keys;
1934 
1935 		if (bkey_u64s(k) > insert_u64s_remaining(b))
1936 			break;
1937 
1938 		if (bkey_cmp(k, &b->key) <= 0) {
1939 			if (!b->level)
1940 				bkey_put(b->c, k);
1941 
1942 			ret |= btree_insert_key(b, k, replace_key);
1943 			bch_keylist_pop_front(insert_keys);
1944 		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
1945 			BKEY_PADDED(key) temp;
1946 			bkey_copy(&temp.key, insert_keys->keys);
1947 
1948 			bch_cut_back(&b->key, &temp.key);
1949 			bch_cut_front(&b->key, insert_keys->keys);
1950 
1951 			ret |= btree_insert_key(b, &temp.key, replace_key);
1952 			break;
1953 		} else {
1954 			break;
1955 		}
1956 	}
1957 
1958 	if (!ret)
1959 		op->insert_collision = true;
1960 
1961 	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
1962 
1963 	BUG_ON(bch_count_data(&b->keys) < oldsize);
1964 	return ret;
1965 }
1966 
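/*
 * Rewrite node @b, splitting it in two if it's already more than about 80%
 * full. On a split, roughly the first 3/5 of the keys stay in n1 and the rest
 * go to n2; if the root itself is being split, a new root n3 one level up is
 * allocated to point at both halves. The keys for the new nodes are then
 * inserted into the parent (or become the new root) and the old node is freed.
 */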
static int btree_split(struct btree *b, struct btree_op *op,
		       struct keylist *insert_keys,
		       struct bkey *replace_key)
{
	bool split;
	struct btree *n1, *n2 = NULL, *n3 = NULL;
	uint64_t start_time = local_clock();
	struct closure cl;
	struct keylist parent_keys;

	closure_init_stack(&cl);
	bch_keylist_init(&parent_keys);

	if (btree_check_reserve(b, op)) {
		if (!b->level)
			return -EINTR;
		else
			WARN(1, "insufficient reserve for split\n");
	}

	n1 = btree_node_alloc_replacement(b, op);
	if (IS_ERR(n1))
		goto err;

	split = set_blocks(btree_bset_first(n1),
			   block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;

	if (split) {
		unsigned keys = 0;

		trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);

		n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
		if (IS_ERR(n2))
			goto err_free1;

		if (!b->parent) {
			n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
			if (IS_ERR(n3))
				goto err_free2;
		}

		mutex_lock(&n1->write_lock);
		mutex_lock(&n2->write_lock);

		bch_btree_insert_keys(n1, op, insert_keys, replace_key);

		/*
		 * Has to be a linear search because we don't have an auxiliary
		 * search tree yet
		 */

		while (keys < (btree_bset_first(n1)->keys * 3) / 5)
			keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
							keys));

		bkey_copy_key(&n1->key,
			      bset_bkey_idx(btree_bset_first(n1), keys));
		keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));

		btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
		btree_bset_first(n1)->keys = keys;

		memcpy(btree_bset_first(n2)->start,
		       bset_bkey_last(btree_bset_first(n1)),
		       btree_bset_first(n2)->keys * sizeof(uint64_t));

		bkey_copy_key(&n2->key, &b->key);

		bch_keylist_add(&parent_keys, &n2->key);
		bch_btree_node_write(n2, &cl);
		mutex_unlock(&n2->write_lock);
		rw_unlock(true, n2);
	} else {
		trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);

		mutex_lock(&n1->write_lock);
		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
	}

	bch_keylist_add(&parent_keys, &n1->key);
	bch_btree_node_write(n1, &cl);
	mutex_unlock(&n1->write_lock);

	if (n3) {
		/* Depth increases, make a new root */
		mutex_lock(&n3->write_lock);
		bkey_copy_key(&n3->key, &MAX_KEY);
		bch_btree_insert_keys(n3, op, &parent_keys, NULL);
		bch_btree_node_write(n3, &cl);
		mutex_unlock(&n3->write_lock);

		closure_sync(&cl);
		bch_btree_set_root(n3);
		rw_unlock(true, n3);
	} else if (!b->parent) {
		/* Root filled up but didn't need to be split */
		closure_sync(&cl);
		bch_btree_set_root(n1);
	} else {
		/* Split a non root node */
		closure_sync(&cl);
		make_btree_freeing_key(b, parent_keys.top);
		bch_keylist_push(&parent_keys);

		bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
		BUG_ON(!bch_keylist_empty(&parent_keys));
	}

	btree_node_free(b);
	rw_unlock(true, n1);

	bch_time_stats_update(&b->c->btree_split_time, start_time);

	return 0;
err_free2:
	bkey_put(b->c, &n2->key);
	btree_node_free(n2);
	rw_unlock(true, n2);
err_free1:
	bkey_put(b->c, &n1->key);
	btree_node_free(n1);
	rw_unlock(true, n1);
err:
	WARN(1, "bcache: btree split failed (level %u)", b->level);

	if (n3 == ERR_PTR(-EAGAIN) ||
	    n2 == ERR_PTR(-EAGAIN) ||
	    n1 == ERR_PTR(-EAGAIN))
		return -EAGAIN;

	return -ENOMEM;
}

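/*
 * Insert keys into a single node, taking only its write lock. If they don't
 * fit we have to split: -EAGAIN is returned when we're running under
 * generic_make_request() (current->bio_list is set) and must not block here,
 * -EINTR when the caller has to retraverse with the tree locked for write
 * higher up; otherwise btree_split() is called directly.
 */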
static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
				 struct keylist *insert_keys,
				 atomic_t *journal_ref,
				 struct bkey *replace_key)
{
	struct closure cl;

	BUG_ON(b->level && replace_key);

	closure_init_stack(&cl);

	mutex_lock(&b->write_lock);

	if (write_block(b) != btree_bset_last(b) &&
	    b->keys.last_set_unwritten)
		bch_btree_init_next(b); /* just wrote a set */

	if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
		mutex_unlock(&b->write_lock);
		goto split;
	}

	BUG_ON(write_block(b) != btree_bset_last(b));

	if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
		if (!b->level)
			bch_btree_leaf_dirty(b, journal_ref);
		else
			bch_btree_node_write(b, &cl);
	}

	mutex_unlock(&b->write_lock);

	/* wait for btree node write if necessary, after unlock */
	closure_sync(&cl);

	return 0;
split:
	if (current->bio_list) {
		op->lock = b->c->root->level + 1;
		return -EAGAIN;
	} else if (op->lock <= b->c->root->level) {
		op->lock = b->c->root->level + 1;
		return -EINTR;
	} else {
		/* Invalidated all iterators */
		int ret = btree_split(b, op, insert_keys, replace_key);

		if (bch_keylist_empty(insert_keys))
			return 0;
		else if (!ret)
			return -EINTR;
		return ret;
	}
}

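/*
 * Insert a "check" key covering @check_key's range: it gets a random pointer
 * on the special PTR_CHECK_DEV device, so a later replace against it only
 * succeeds if nothing else was inserted for that range in the meantime. If
 * the caller held only a read lock (op->lock == -1) the node is relocked for
 * write first, and we bail out if the node was rewritten under us.
 */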
int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
			       struct bkey *check_key)
{
	int ret = -EINTR;
	uint64_t btree_ptr = b->key.ptr[0];
	unsigned long seq = b->seq;
	struct keylist insert;
	bool upgrade = op->lock == -1;

	bch_keylist_init(&insert);

	if (upgrade) {
		rw_unlock(false, b);
		rw_lock(true, b, b->level);

		if (b->key.ptr[0] != btree_ptr ||
		    b->seq != seq + 1) {
			op->lock = b->level;
			goto out;
		}
	}

	SET_KEY_PTRS(check_key, 1);
	get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));

	SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);

	bch_keylist_add(&insert, check_key);

	ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);

	BUG_ON(!ret && !bch_keylist_empty(&insert));
out:
	if (upgrade)
		downgrade_write(&b->lock);
	return ret;
}

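/*
 * bch_btree_insert() below drives insertion from the top of the tree: the
 * keylist is wrapped in a btree_insert_op and the map_leaf_nodes machinery
 * calls btree_insert_fn() on each leaf the remaining keys belong in.
 */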
struct btree_insert_op {
	struct btree_op	op;
	struct keylist	*keys;
	atomic_t	*journal_ref;
	struct bkey	*replace_key;
};

static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
{
	struct btree_insert_op *op = container_of(b_op,
					struct btree_insert_op, op);

	int ret = bch_btree_insert_node(b, &op->op, op->keys,
					op->journal_ref, op->replace_key);
	if (ret && !bch_keylist_empty(op->keys))
		return ret;
	else
		return MAP_DONE;
}

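/*
 * Minimal usage sketch (the real caller is bch_data_insert_keys() in
 * request.c; the names below are illustrative only):
 *
 *	struct keylist keys;
 *	int ret;
 *
 *	bch_keylist_init(&keys);
 *	bch_keylist_add(&keys, k);	/- k: fully initialized bkey -/
 *	ret = bch_btree_insert(c, &keys, journal_ref, NULL);
 *	if (ret == -ESRCH)
 *		/- insert collision: with replace_key, the exchange
 *		   didn't happen -/
 *
 * On error the remaining keys are dropped and their pointers put, so the
 * keylist is always empty when this returns.
 */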
int bch_btree_insert(struct cache_set *c, struct keylist *keys,
		     atomic_t *journal_ref, struct bkey *replace_key)
{
	struct btree_insert_op op;
	int ret = 0;

	BUG_ON(current->bio_list);
	BUG_ON(bch_keylist_empty(keys));

	bch_btree_op_init(&op.op, 0);
	op.keys		= keys;
	op.journal_ref	= journal_ref;
	op.replace_key	= replace_key;

	while (!ret && !bch_keylist_empty(keys)) {
		op.op.lock = 0;
		ret = bch_btree_map_leaf_nodes(&op.op, c,
					       &START_KEY(keys->keys),
					       btree_insert_fn);
	}

	if (ret) {
		struct bkey *k;

		pr_err("error %i", ret);

		while ((k = bch_keylist_pop(keys)))
			bkey_put(c, k);
	} else if (op.op.insert_collision)
		ret = -ESRCH;

	return ret;
}

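/*
 * Make @b the new root. The node must already be written out and its buckets
 * must have btree priority; a meta journal write is then issued so the new
 * root is recorded on disk before we return.
 */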
void bch_btree_set_root(struct btree *b)
{
	unsigned i;
	struct closure cl;

	closure_init_stack(&cl);

	trace_bcache_btree_set_root(b);

	BUG_ON(!b->written);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);

	mutex_lock(&b->c->bucket_lock);
	list_del_init(&b->list);
	mutex_unlock(&b->c->bucket_lock);

	b->c->root = b;

	bch_journal_meta(b->c, &cl);
	closure_sync(&cl);
}

/* Map across nodes or keys */

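/*
 * The map functions walk the btree from @from onwards and call @fn on each
 * node (or key). @fn returns MAP_CONTINUE to keep going or MAP_DONE to stop;
 * anything other than MAP_CONTINUE is propagated straight back to the caller.
 */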
static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
				       struct bkey *from,
				       btree_map_nodes_fn *fn, int flags)
{
	int ret = MAP_CONTINUE;

	if (b->level) {
		struct bkey *k;
		struct btree_iter iter;

		bch_btree_iter_init(&b->keys, &iter, from);

		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
						       bch_ptr_bad))) {
			ret = btree(map_nodes_recurse, k, b,
				    op, from, fn, flags);
			from = NULL;

			if (ret != MAP_CONTINUE)
				return ret;
		}
	}

	if (!b->level || flags == MAP_ALL_NODES)
		ret = fn(op, b);

	return ret;
}

int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
{
	return btree_root(map_nodes_recurse, c, op, from, fn, flags);
}

static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
				      struct bkey *from, btree_map_keys_fn *fn,
				      int flags)
{
	int ret = MAP_CONTINUE;
	struct bkey *k;
	struct btree_iter iter;

	bch_btree_iter_init(&b->keys, &iter, from);

	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
		ret = !b->level
			? fn(op, b, k)
			: btree(map_keys_recurse, k, b, op, from, fn, flags);
		from = NULL;

		if (ret != MAP_CONTINUE)
			return ret;
	}

	if (!b->level && (flags & MAP_END_KEY))
		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
				     KEY_OFFSET(&b->key), 0));

	return ret;
}

int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
		       struct bkey *from, btree_map_keys_fn *fn, int flags)
{
	return btree_root(map_keys_recurse, c, op, from, fn, flags);
}

/* Keybuf code */

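/*
 * A keybuf holds a window of keys matching a predicate, kept in an rbtree.
 * Writeback and moving GC use it to scan the btree in order and to remember
 * which extents they currently have in flight (w->private points at the
 * in-flight operation, if any).
 */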
static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
{
	/* Overlapping keys compare equal */
	if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
		return -1;
	if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
		return 1;
	return 0;
}

static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
					    struct keybuf_key *r)
{
	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
}

struct refill {
	struct btree_op	op;
	unsigned	nr_found;
	struct keybuf	*buf;
	struct bkey	*end;
	keybuf_pred_fn	*pred;
};

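/*
 * Called for every key during a refill scan: keys past @end terminate the
 * scan, matching keys are copied into the keybuf until either the freelist
 * runs dry or the end of the range is reached.
 */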
static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
			    struct bkey *k)
{
	struct refill *refill = container_of(op, struct refill, op);
	struct keybuf *buf = refill->buf;
	int ret = MAP_CONTINUE;

	if (bkey_cmp(k, refill->end) > 0) {
		ret = MAP_DONE;
		goto out;
	}

	if (!KEY_SIZE(k)) /* end key */
		goto out;

	if (refill->pred(buf, k)) {
		struct keybuf_key *w;

		spin_lock(&buf->lock);

		w = array_alloc(&buf->freelist);
		if (!w) {
			spin_unlock(&buf->lock);
			return MAP_DONE;
		}

		w->private = NULL;
		bkey_copy(&w->key, k);

		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
			array_free(&buf->freelist, w);
		else
			refill->nr_found++;

		if (array_freelist_empty(&buf->freelist))
			ret = MAP_DONE;

		spin_unlock(&buf->lock);
	}
out:
	buf->last_scanned = *k;
	return ret;
}

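/*
 * Scan from buf->last_scanned towards @end, filling the keybuf. Afterwards
 * buf->start and buf->end are updated to cover exactly the keys currently
 * buffered, which is what bch_keybuf_check_overlapping() tests against.
 */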
void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
		       struct bkey *end, keybuf_pred_fn *pred)
{
	struct bkey start = buf->last_scanned;
	struct refill refill;

	cond_resched();

	bch_btree_op_init(&refill.op, -1);
	refill.nr_found	= 0;
	refill.buf	= buf;
	refill.end	= end;
	refill.pred	= pred;

	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
			   refill_keybuf_fn, MAP_END_KEY);

	trace_bcache_keyscan(refill.nr_found,
			     KEY_INODE(&start), KEY_OFFSET(&start),
			     KEY_INODE(&buf->last_scanned),
			     KEY_OFFSET(&buf->last_scanned));

	spin_lock(&buf->lock);

	if (!RB_EMPTY_ROOT(&buf->keys)) {
		struct keybuf_key *w;
		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
		buf->start	= START_KEY(&w->key);

		w = RB_LAST(&buf->keys, struct keybuf_key, node);
		buf->end	= w->key;
	} else {
		buf->start	= MAX_KEY;
		buf->end	= MAX_KEY;
	}

	spin_unlock(&buf->lock);
}

static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	rb_erase(&w->node, &buf->keys);
	array_free(&buf->freelist, w);
}

void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	spin_lock(&buf->lock);
	__bch_keybuf_del(buf, w);
	spin_unlock(&buf->lock);
}

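/*
 * Called when keys in [start, end) are being invalidated: buffered keys that
 * overlap and haven't been claimed are dropped. Returns true if any
 * overlapping key was already claimed (w->private set).
 */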
bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
				  struct bkey *end)
{
	bool ret = false;
	struct keybuf_key *p, *w, s;
	s.key = *start;

	if (bkey_cmp(end, &buf->start) <= 0 ||
	    bkey_cmp(start, &buf->end) >= 0)
		return false;

	spin_lock(&buf->lock);
	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);

	while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
		p = w;
		w = RB_NEXT(w, node);

		if (p->private)
			ret = true;
		else
			__bch_keybuf_del(buf, p);
	}

	spin_unlock(&buf->lock);
	return ret;
}

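/*
 * Return the first buffered key that hasn't been claimed yet, marking it as
 * claimed via w->private so concurrent callers won't pick it up again.
 */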
struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
{
	struct keybuf_key *w;
	spin_lock(&buf->lock);

	w = RB_FIRST(&buf->keys, struct keybuf_key, node);

	while (w && w->private)
		w = RB_NEXT(w, node);

	if (w)
		w->private = ERR_PTR(-EINTR);

	spin_unlock(&buf->lock);
	return w;
}

struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
					  struct keybuf *buf,
					  struct bkey *end,
					  keybuf_pred_fn *pred)
{
	struct keybuf_key *ret;

	while (1) {
		ret = bch_keybuf_next(buf);
		if (ret)
			break;

		if (bkey_cmp(&buf->last_scanned, end) >= 0) {
			pr_debug("scan finished");
			break;
		}

		bch_refill_keybuf(c, buf, end, pred);
	}

	return ret;
}

void bch_keybuf_init(struct keybuf *buf)
{
	buf->last_scanned	= MAX_KEY;
	buf->keys		= RB_ROOT;

	spin_lock_init(&buf->lock);
	array_allocator_init(&buf->freelist);
}
2541