/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>

#include <trace/events/bcache.h>

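/*
 * Cache occupancy cutoffs, in percent of buckets in use: past
 * CUTOFF_CACHE_ADD we stop adding new data to the cache, and past
 * CUTOFF_CACHE_READA we stop caching readahead (see check_should_bypass()
 * and cached_dev_cache_miss() below).
 */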
#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *);

static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc, struct bio *bio)
{
	return dc->verify;
}

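/*
 * Compute a 64 bit checksum over the bio's data and stash it in the key, in
 * the u64 slot immediately after the last pointer (keys carrying a checksum
 * are one u64 larger - see the keylist realloc in bch_data_insert_start());
 * the high bit is cleared by the mask.
 */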
static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	uint64_t csum = 0;

	bio_for_each_segment(bv, bio, iter) {
		void *d = kmap(bv.bv_page) + bv.bv_offset;
		csum = bch_crc64_update(csum, d, bv.bv_len);
		kunmap(bv.bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}

/* Insert data into cache */

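/*
 * The insert path is a closure-based state machine: bch_data_insert() kicks
 * off bch_data_insert_start(), which writes the data and builds up keys, and
 * bch_data_insert_keys(), which journals the keys and inserts them into the
 * btree; the two hand off to each other until the whole bio is consumed.
 */
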
static void bch_data_insert_keys(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	/*
	 * If we're looping, we might already be waiting on another journal
	 * write - we can't wait on more than one journal write at a time
	 *
	 * XXX: this looks wrong
	 */
#if 0
	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
		closure_sync(&s->cl);
#endif

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->error		= -ENOMEM;
		op->insert_data_done	= true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done)
		continue_at(cl, bch_data_insert_start, op->wq);

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}

static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
			       struct cache_set *c)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;

	/*
	 * The journalling code doesn't handle the case where the keys to
	 * insert are bigger than an empty write: If we just return -ENOMEM
	 * here, bio_insert() and bio_invalidate() will insert the keys
	 * created so far and finish the rest when the keylist is empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
		return -ENOMEM;

	return __bch_keylist_realloc(l, u64s);
}

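/*
 * A bypassed write still has to invalidate any stale data the cache holds
 * for the region it covers: we do that by inserting plain KEY()s with no
 * pointers attached, which overwrite whatever the btree had for that range.
 */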
static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

	while (bio_sectors(bio)) {
		unsigned sectors = min(bio_sectors(bio),
				       1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
			goto out;

		bio->bi_iter.bi_sector	+= sectors;
		bio->bi_iter.bi_size	-= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, op->wq);
}

static void bch_data_insert_error(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}

static void bch_data_insert_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (error) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->error = error;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, op->wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, error, "writing data to cache");
}

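/*
 * Allocate space in the cache and write the data out, splitting the bio at
 * allocation boundaries; one key is created per fragment written. Keys are
 * batched up in op->insert_keys and flushed to the btree whenever the
 * keylist fills, via bch_data_insert_keys().
 */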
static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
		set_gc_sectors(op->c);
		wake_up_gc(op->c);
	}

	if (op->bypass)
		return bch_data_invalidate(cl);

	/*
	 * Journal writes are marked REQ_FLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = op->c->bio_split;

		/* 1 for the device pointer and 1 for the checksum */
		if (bch_keylist_realloc(&op->insert_keys,
					3 + (op->csum ? 1 : 0),
					op->c))
			continue_at(cl, bch_data_insert_keys, op->wq);

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io	= bch_data_insert_endio;
		n->bi_private	= cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		n->bi_rw |= REQ_WRITE;
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, op->wq);
err:
	/* bch_alloc_sectors() blocks if s->writeback = true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take a while
	 * and we might be starving btree writes for gc or something.
	 */

	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, op->wq);
		else
			closure_return(cl);
	}
}

/**
 * bch_data_insert - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash-only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data had to be fragmented there will be multiple keys); after the
 * data is written it calls bch_journal, and after the keys have been added to
 * the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->c, op->inode, op->bio,
			   op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}

/* Congested? */

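/*
 * Rough congestion heuristic: returns 0 if the cache has been keeping up
 * with its configured read/write latency thresholds, otherwise a sector
 * count that shrinks the further behind the cache is; I/O streams at least
 * that large get bypassed (see check_should_bypass()).
 */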
unsigned bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}

static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

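/*
 * Decide whether this I/O should skip the cache entirely. We bypass when the
 * device is detaching, the cache is nearly full (CUTOFF_CACHE_ADD), the
 * request is a discard, the write is in writearound mode, the I/O is
 * unaligned, the stream has been sequential past dc->sequential_cutoff, or
 * the cache is congested. Sequentiality is tracked per stream in a small
 * hash table of recent I/Os, keyed by the last sector seen.
 */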
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned mode = cache_mode(dc, bio);
	unsigned sectors, congested = bch_get_congested(c);
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio->bi_rw & REQ_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     (bio->bi_rw & REQ_WRITE)))
		goto skip;

	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (bypass_torture_test(dc)) {
		if ((get_random_int() & 3) == 3)
			goto skip;
		else
			goto rescale;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	if (!congested &&
	    mode == CACHE_MODE_WRITEBACK &&
	    (bio->bi_rw & REQ_WRITE) &&
	    (bio->bi_rw & REQ_SYNC))
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
		if (i->last == bio->bi_iter.bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
		i->sequential	+= bio->bi_iter.bi_size;

	i->last			 = bio_end_sector(bio);
	i->jiffies		 = jiffies + msecs_to_jiffies(5000);
	task->sequential_io	 = i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}

/* Cache lookup */

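/*
 * Per-request state, allocated from a mempool in search_alloc() and freed by
 * search_free() when the closure's refcount hits zero. Embeds both the btree
 * lookup state (op) and the cache insert state (iop) for the request.
 */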
struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;
	struct bcache_device	*d;

	unsigned		insert_bio_sectors;
	unsigned		recoverable:1;
	unsigned		write:1;
	unsigned		read_dirty_data:1;
	unsigned		cache_missed:1;

	unsigned long		start_time;

	struct btree_op		op;
	struct data_insert_op	iop;
};

static void bch_cache_read_endio(struct bio *bio, int error)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->iop.error but not error so it doesn't
	 * get counted against the cache device, but we'll still reread the
	 * data from the backing device.
	 */

	if (error)
		s->iop.error = error;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.error = -EINTR;
	}

	bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
}

/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_iter.bi_sector) {
		unsigned bio_sectors = bio_sectors(bio);
		unsigned sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_iter.bi_sector)
			: INT_MAX;

		int ret = s->d->cache_miss(b, s, bio, sectors);
		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	if (KEY_DIRTY(k))
		s->read_dirty_data = true;

	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
			   GFP_NOIO, s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io	= bch_cache_read_endio;
	n->bi_private	= &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong
	 * data.
	 *
	 * We guard against this by checking (in bch_cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}

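/*
 * Walk the btree over the range this bio covers, calling cache_lookup_fn()
 * for each key found; -EAGAIN means the btree walk would have had to block,
 * so we reschedule ourselves and retry from the top.
 */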
static void cache_lookup(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;
	int ret;

	bch_btree_op_init(&s->op, -1);

	ret = bch_btree_map_keys(&s->op, s->iop.c,
				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
				 cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN)
		continue_at(cl, cache_lookup, bcache_wq);

	closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;

	if (error) {
		struct search *s = container_of(cl, struct search, cl);
		s->iop.error = error;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		int cpu, rw = bio_data_dir(s->orig_bio);
		unsigned long duration = jiffies - s->start_time;

		cpu = part_stat_lock();
		part_round_stats(cpu, &s->d->disk->part0);
		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
		part_stat_unlock();

		trace_bcache_request_end(s->d, s->orig_bio);
		bio_endio(s->orig_bio, s->iop.error);
		s->orig_bio = NULL;
	}
}

static void do_bio_hook(struct search *s, struct bio *orig_bio)
{
	struct bio *bio = &s->bio.bio;

	bio_init(bio);
	__bio_clone_fast(bio, orig_bio);
	bio->bi_end_io		= request_endio;
	bio->bi_private		= &s->cl;

	atomic_set(&bio->bi_cnt, 3);
}

static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	bio_complete(s);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}

static inline struct search *search_alloc(struct bio *bio,
					  struct bcache_device *d)
{
	struct search *s;

	s = mempool_alloc(d->c->search, GFP_NOIO);

	closure_init(&s->cl, NULL);
	do_bio_hook(s, bio);

	s->orig_bio		= bio;
	s->cache_miss		= NULL;
	s->cache_missed		= 0;
	s->d			= d;
	s->recoverable		= 1;
	s->write		= (bio->bi_rw & REQ_WRITE) != 0;
	s->read_dirty_data	= 0;
	s->start_time		= jiffies;

	s->iop.c		= d->c;
	s->iop.bio		= NULL;
	s->iop.inode		= d->id;
	s->iop.write_point	= hash_long((unsigned long) current, 16);
	s->iop.write_prio	= 0;
	s->iop.error		= 0;
	s->iop.flags		= 0;
	s->iop.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
	s->iop.wq		= bcache_wq;

	return s;
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}

/* Process reads */

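/*
 * The read path: cached_dev_read() kicks off a cache lookup, and once the
 * lookup (and any reads sent to the cache or backing device) completes we
 * land in cached_dev_read_done_bh(). From there, errors are retried via
 * cached_dev_read_error(), cache misses copy and insert their bounce buffer
 * via cached_dev_read_done(), and clean hits complete directly via
 * cached_dev_bio_complete().
 */
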
static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio) {
		int i;
		struct bio_vec *bv;

		bio_for_each_segment_all(bv, s->iop.bio, i)
			__free_page(bv->bv_page);
	}

	cached_dev_bio_complete(cl);
}

static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	/*
	 * If the read request hit dirty data (s->read_dirty_data is true),
	 * then recovering the failed read from the backing device could
	 * return stale data. So read failure recovery is only permitted
	 * when the read request hit clean data in the cache device, or
	 * when a cache read race happened.
	 */
	if (s->recoverable && !s->read_dirty_data) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.error = 0;
		do_bio_hook(s, s->orig_bio);

		/* XXX: invalidate cache */

		closure_bio_submit(bio, cl, s->d);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be inserted
	 * into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
		s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_missed, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

	if (s->iop.error)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc, &s->bio.bio))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}

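/*
 * Handle the part of a read the cache couldn't satisfy: read it from the
 * backing device, optionally extended by readahead, into a bounce bio
 * (cache_bio) that will later be inserted into the cache. A check key
 * (replace_key) is inserted first so the insert can be aborted if someone
 * else writes that range while our backing read is in flight (a replace
 * collision).
 */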
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = MAP_CONTINUE;
	unsigned reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	s->cache_missed = 1;

	if (s->cache_miss || s->iop.bypass) {
		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_rw & REQ_RAHEAD) &&
	    !(bio->bi_rw & REQ_META) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector	= miss->bi_iter.bi_sector;
	cache_bio->bi_bdev		= miss->bi_bdev;
	cache_bio->bi_iter.bi_size	= s->insert_bio_sectors << 9;

	cache_bio->bi_end_io	= request_endio;
	cache_bio->bi_private	= &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss	= miss;
	s->iop.bio	= cache_bio;
	bio_get(cache_bio);
	closure_bio_submit(cache_bio, &s->cl, s->d);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io		= request_endio;
	miss->bi_private	= &s->cl;
	closure_bio_submit(miss, &s->cl, s->d);
	return ret;
}

static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

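/*
 * The write path: cached_dev_write() decides between three policies and sets
 * iop.bypass/iop.writeback accordingly - bypass (the write goes straight to
 * the backing device and the cached range is invalidated), writeback (the
 * write goes only to the cache, marked dirty for later writeback), and
 * writethrough (the write goes to the backing device while a clone is
 * inserted into the cache).
 */
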
static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}

static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio->bi_rw & REQ_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc, bio),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if (!(bio->bi_rw & REQ_DISCARD) ||
		    blk_queue_discard(bdev_get_queue(dc->bdev)))
			closure_bio_submit(bio, cl, s->d);
	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_rw & REQ_FLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
							     dc->disk.bio_split);

			flush->bi_rw	= WRITE_FLUSH;
			flush->bi_bdev	= bio->bi_bdev;
			flush->bi_end_io = request_endio;
			flush->bi_private = cl;

			closure_bio_submit(flush, cl, s->d);
		}
	} else {
		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);

		closure_bio_submit(bio, cl, s->d);
	}

	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}

static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	closure_bio_submit(bio, cl, s->d);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

/* Cached devices - read & write stuff */

static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	bio->bi_bdev = dc->bdev;
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else {
		if ((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio, 0);
		else
			bch_generic_make_request(bio, &d->bio_split_hook);
	}
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(&q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn		= cached_dev_make_request;
	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
	dc->disk.cache_miss			= cached_dev_cache_miss;
	dc->disk.ioctl				= cached_dev_ioctl;
}

/* Flash backed devices */

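/*
 * Flash-only volumes have no backing device; their data lives entirely in
 * the cache set. Reads zero-fill any holes in the keyspace (there is no
 * backing device to fall through to), and writes always take the insert
 * path with writeback set.
 */
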
static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	unsigned bytes = min(sectors, bio_sectors(bio)) << 9;

	swap(bio->bi_iter.bi_size, bytes);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, bytes);

	bio_advance(bio, bytes);

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}

static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}

static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
	} else if (rw) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_iter.bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass		= (bio->bi_rw & REQ_DISCARD) != 0;
		s->iop.writeback	= true;
		s->iop.bio		= bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn		= flash_dev_make_request;
	g->queue->backing_dev_info.congested_fn = flash_dev_congested;
	d->cache_miss				= flash_dev_cache_miss;
	d->ioctl				= flash_dev_ioctl;
}

void bch_request_exit(void)
{
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

	return 0;
}