// SPDX-License-Identifier: GPL-2.0
/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 */

#include <linux/backing-dev.h>
/* ... */

static unsigned int cache_mode(struct cached_dev *dc)
{
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc)
{
	return dc->verify;
}

/* from bio_csum(): store the checksum in the slot after the key's pointers */
	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
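
/*
 * [Editor's example - not part of the source file] The mask above keeps only
 * the low 63 bits of the checksum, so the key's top bit stays available for
 * other use. A minimal userspace model of that fold:
 */
#include <stdint.h>

static inline uint64_t fold_csum_63(uint64_t csum)
{
	return csum & (~0ULL >> 1);	/* clear bit 63, keep bits 0..62 */
}
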
/* from bch_data_insert_keys(): */
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->status		= BLK_STS_RESOURCE;
		op->insert_data_done	= true;
	}
	/* ... */
	if (!op->insert_data_done) {
		continue_at(cl, bch_data_insert_start, op->wq);
		return;
	}

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
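
/*
 * [Editor's example] bch_data_insert_keys() and bch_data_insert_start()
 * bounce control between each other via continue_at() until all the data is
 * inserted. A compilable userspace model of that two-stage state machine,
 * with a plain function pointer standing in for the kernel closure API:
 */
#include <stdbool.h>
#include <stddef.h>

struct insert_op {
	bool insert_data_done;
	unsigned int sectors_left;
	void (*next_stage)(struct insert_op *op);	/* continue_at() stand-in */
};

static void insert_keys_stage(struct insert_op *op);

static void insert_start_stage(struct insert_op *op)
{
	/* write one chunk of data, then hand the new keys to the key stage */
	if (op->sectors_left)
		op->sectors_left--;
	op->insert_data_done = (op->sectors_left == 0);
	op->next_stage = insert_keys_stage;
}

static void insert_keys_stage(struct insert_op *op)
{
	/* keys inserted; if data remains, re-arm the start stage */
	op->next_stage = op->insert_data_done ? NULL : insert_start_stage;
}
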
/* from bch_keylist_realloc(): */
	/*
	 * The journalling code doesn't handle the case where the keys to insert
	 * are bigger than an empty write: if we just return -ENOMEM here,
	 * bch_data_insert_keys() will insert the keys created so far and finish
	 * the rest when the keylist is empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c->cache) - sizeof(struct jset))
		return -ENOMEM;
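
/*
 * [Editor's example] The check above refuses to grow the key list past what
 * one journal block can hold (block size minus the jset header). A userspace
 * model; BLOCK_BYTES and JSET_HDR_BYTES are illustrative stand-ins:
 */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#define BLOCK_BYTES	4096
#define JSET_HDR_BYTES	64	/* stand-in for sizeof(struct jset) */

static int keylist_grow_ok(size_t new_u64s)
{
	if (new_u64s * sizeof(uint64_t) > BLOCK_BYTES - JSET_HDR_BYTES)
		return -ENOMEM;
	return 0;
}
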
/* from bch_data_invalidate(): */
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu\n",
		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

	while (bio_sectors(bio)) {
		unsigned int sectors = min(bio_sectors(bio),
					   1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
			goto out;

		bio->bi_iter.bi_sector	+= sectors;
		bio->bi_iter.bi_size	-= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode,
				     bio->bi_iter.bi_sector,
				     sectors));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, op->wq);
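
/*
 * [Editor's example] bch_data_invalidate() consumes the bio in chunks no
 * larger than the maximum key size, emitting one pointer-less key per chunk
 * whose offset is the chunk's *end* sector. A simplified model of that loop
 * (emit_key() is a hypothetical callback):
 */
#include <stdint.h>

static void invalidate_range(uint64_t sector, unsigned int nr_sectors,
			     unsigned int max_key_sectors,
			     void (*emit_key)(uint64_t end, unsigned int len))
{
	while (nr_sectors) {
		unsigned int n = nr_sectors < max_key_sectors
				 ? nr_sectors : max_key_sectors;

		sector += n;
		nr_sectors -= n;
		emit_key(sector, n);	/* KEY(inode, end_sector, n), no ptrs */
	}
}
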
/*
 * from bch_data_insert_error(): the data write errored, but we still have to
 * invalidate that region of the cache - so, if we just strip off all the
 * pointers from the keys, we'll accomplish exactly that.
 */
	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		/* ... strip the pointers from src, copy it to dst ... */
	}

	op->insert_keys.top = dst;
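
/*
 * [Editor's example] The loop above rewrites the key list in place: keys are
 * variable-sized, so once their data pointers are stripped they shrink and
 * dst trails src. A compilable model using a flat u64 buffer, where byte 0
 * of each record holds its total size in u64s and byte 1 the trailing
 * pointer count (an illustrative encoding, not bcache's actual key format):
 */
#include <stdint.h>
#include <string.h>

static uint64_t *strip_ptrs_in_place(uint64_t *keys, uint64_t *top)
{
	uint64_t *src = keys, *dst = keys;

	while (src != top) {
		unsigned int sz   = src[0] & 0xff;		/* u64s incl. header */
		unsigned int ptrs = (src[0] >> 8) & 0xff;	/* trailing ptr u64s */

		memmove(dst, src, (sz - ptrs) * sizeof(uint64_t));
		dst[0] = sz - ptrs;				/* new size, 0 ptrs */

		dst += sz - ptrs;
		src += sz;
	}
	return dst;		/* the new top of the list */
}
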
/* from bch_data_insert_endio(): */
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (bio->bi_status) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->status = bio->bi_status;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, op->wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
/* from bch_data_insert_start(): */
	struct bio *bio = op->bio, *n;

	if (op->bypass)
		return bch_data_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
		wake_up_gc(op->c);

	/* Journal writes are marked REQ_PREFLUSH; if the original write was a
	 * flush, it'll wait on the journal write. */
	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);

	do {
		unsigned int i;
		struct bkey *k;
		struct bio_set *split = &op->c->bio_split;

		if (bch_keylist_realloc(&op->insert_keys,
					3 + (op->csum ? 1 : 0),
					op->c)) {
			continue_at(cl, bch_data_insert_keys, op->wq);
			return;
		}

		k = op->insert_keys.top;
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
		n->bi_end_io	= bch_data_insert_endio;
		n->bi_private	= cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		/* ... */
		bch_keylist_push(&op->insert_keys);
		/* ... */
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, op->wq);
	return;
err:
	/* bch_alloc_sectors() blocks if s->writeback = true */
	BUG_ON(op->writeback);

	/* But if it's not a writeback write, we'd rather just bail out if there
	 * aren't any buckets ready to write to - it might take a while and we
	 * might be starving btree writes for gc or something. */
	if (!op->replace) {
		/* Writethrough write: just invalidate the rest of the write
		 * rather than stalling on free buckets. */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, op->wq);
		else
			closure_return(cl);
	}
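
/*
 * [Editor's example] The core of bch_data_insert_start() is a split-and-
 * submit loop: ask the allocator for space, peel off however many sectors
 * were granted, submit that piece, and repeat until the bio is consumed. A
 * writeback write blocks inside the allocator instead of failing, so only
 * non-writeback writes reach the failure path. A userspace skeleton:
 */
#include <stdbool.h>

/* trivial stand-ins so the skeleton compiles; the real allocator can fail */
static unsigned int alloc_sectors(unsigned int want) { return want; }
static void submit_chunk(unsigned int sectors) { (void)sectors; }

static bool insert_all(unsigned int total_sectors)
{
	do {
		unsigned int got = alloc_sectors(total_sectors);

		if (!got)
			return false;	/* caller degrades the write to bypass */

		submit_chunk(got);
		total_sectors -= got;
	} while (total_sectors);

	return true;
}
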
/**
 * bch_data_insert - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, a writeback write, or a write to a flash-only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly full buckets.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->c, op->inode, op->bio,
			   op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}
/* from bch_get_congested(): */
	int i;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	/* ... */
	i += atomic_read(&c->congested);
	/* ... */
	i -= hweight32(get_random_u32());
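
/*
 * [Editor's example] Subtracting hweight32(get_random_u32()) - the popcount
 * of a random 32-bit word, binomially distributed around 16 - turns the
 * congestion threshold into a soft, probabilistic one, so bypass decisions
 * near the limit don't flip all at once. A userspace model:
 */
#include <stdlib.h>

static int soften_congestion(int congested)
{
	/* __builtin_popcount(rand()) stands in for hweight32(get_random_u32()) */
	return congested - __builtin_popcount((unsigned int)rand());
}
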
/* from add_sequential(): */
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
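
/*
 * [Editor's example] ewma_add(avg, val, 8, 0) maintains an exponentially
 * weighted moving average with weight 1/8, i.e. avg = (7*avg + val) / 8.
 * An equivalent standalone helper:
 */
static inline unsigned long ewma8(unsigned long avg, unsigned long val)
{
	return (avg * 7 + val) / 8;
}
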
static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned int mode = cache_mode(dc);
	unsigned int sectors, congested;
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio_op(bio) == REQ_OP_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     op_is_write(bio_op(bio))))
		goto skip;

	/*
	 * Whether a bio for read-ahead or background IO is bypassed depends on
	 * the following:
	 * - if the IO is for metadata, always cache it, no bypass;
	 * - if the IO is not metadata, check dc->cache_readahead_policy:
	 *      BCH_CACHE_READA_ALL: cache it, no bypass
	 *      BCH_CACHE_READA_META_ONLY: don't cache it, bypass
	 * That is, read-ahead requests for metadata always get cached
	 * (eg, for gfs2 or xfs).
	 */
	if ((bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND))) {
		if (!(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
		    (dc->cache_readahead_policy != BCH_CACHE_READA_ALL))
			goto skip;
	}

	if (bio->bi_iter.bi_sector & (c->cache->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->cache->sb.block_size - 1))
		goto skip;

	if (bypass_torture_test(dc)) {
		/* ... randomly bypass roughly 1 in 4 requests ... */
	}

	congested = bch_get_congested(c);
	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
		if (i->last == bio->bi_iter.bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
		i->sequential	+= bio->bi_iter.bi_size;

	i->last			 = bio_end_sector(bio);
	i->jiffies		 = jiffies + msecs_to_jiffies(5000);
	task->sequential_io	 = i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9)
		goto skip;

	if (congested && sectors >= congested)
		goto skip;

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}
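
/*
 * [Editor's example] A compact model of the sequential-I/O detector above:
 * recent I/Os are looked up by start sector; an I/O that begins exactly
 * where a tracked stream ended extends that stream's running byte count,
 * and a stream crossing the cutoff gets bypassed to the backing device.
 * Chained hashing, LRU eviction and the 5 s expiry are simplified away:
 */
#include <stdbool.h>
#include <stdint.h>

#define NSTREAMS 64

static struct stream {
	uint64_t next_sector;	/* where a continuation would start */
	uint64_t bytes;		/* running size of the stream */
} streams[NSTREAMS];

static bool sequential_bypass(uint64_t sector, uint32_t bytes,
			      uint64_t cutoff_bytes)
{
	/* top 6 bits of a Fibonacci hash pick one of 64 buckets */
	struct stream *s = &streams[(sector * 0x9e3779b97f4a7c15ULL) >> 58];

	if (s->next_sector != sector)	/* not a continuation: new stream */
		s->bytes = 0;

	s->bytes += bytes;
	s->next_sector = sector + bytes / 512;

	return cutoff_bytes && s->bytes >= cutoff_bytes;
}
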
/* from bch_cache_read_endio(): */
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->iop.status, but don't count the error
	 * against the cache device; we'll still reread the data from the
	 * backing device.
	 */
	if (bio->bi_status)
		s->iop.status = bio->bi_status;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.status = BLK_STS_IOERR;
	}

	bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
/* from cache_lookup_fn(): */
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned int ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_iter.bi_sector) {
		unsigned int sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_iter.bi_sector)
			: INT_MAX;
		int ret = s->d->cache_miss(b, s, bio, sectors);

		if (ret != MAP_CONTINUE)
			return ret;
	}
	/* ... */
	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	if (KEY_DIRTY(k))
		s->read_dirty_data = true;

	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
			   GFP_NOIO, &s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io	= bch_cache_read_endio;
	n->bi_private	= &s->cl;
	/* ... */
	__bch_submit_bbio(n, b->c);
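
/*
 * [Editor's example] The miss-sizing logic above as one helper: when the
 * next btree key starts past the bio's current sector, everything up to
 * KEY_START(k) is a cache miss. This simplified variant caps the result at
 * the bio's remaining sectors (the original caps at INT_MAX and lets the
 * cache_miss callback split the bio):
 */
#include <stdbool.h>
#include <stdint.h>

static unsigned int miss_sectors(uint64_t key_start, uint64_t bio_sector,
				 unsigned int bio_sectors, bool same_inode)
{
	uint64_t gap;

	if (!same_inode)
		return bio_sectors;	/* key is another device's: all miss */

	gap = key_start - bio_sector;
	return gap < bio_sectors ? (unsigned int)gap : bio_sectors;
}
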
/* from cache_lookup(): */
	struct bio *bio = &s->bio.bio;
	struct cached_dev *dc;
	int ret;

	bch_btree_op_init(&s->op, -1);

	ret = bch_btree_map_keys(&s->op, s->iop.c,
				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
				 cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN) {
		continue_at(cl, cache_lookup, bcache_wq);
		return;
	}

	/*
	 * Hitting an error while searching the btree yields a negative ret. In
	 * that case we must not recover data from the backing device while the
	 * cache device holds dirty data, because we don't know whether all the
	 * bkeys the read request covers are clean. Note that s->iop.status is
	 * still its initial value here, since s->bio.bio was never submitted.
	 */
	if (ret < 0) {
		BUG_ON(ret == -EINTR);
		if (s->d && s->d->c &&
		    !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
			dc = container_of(s->d, struct cached_dev, disk);
			if (dc && atomic_read(&dc->has_dirty))
				s->recoverable = false;
		}
		if (!s->iop.status)
			s->iop.status = BLK_STS_IOERR;
	}

	closure_return(cl);
/* from request_endio(): */
	struct closure *cl = bio->bi_private;

	if (bio->bi_status) {
		struct search *s = container_of(cl, struct search, cl);

		s->iop.status = bio->bi_status;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}
/* from backing_request_endio(): */
	struct closure *cl = bio->bi_private;

	if (bio->bi_status) {
		struct search *s = container_of(cl, struct search, cl);
		struct cached_dev *dc = container_of(s->d,
						     struct cached_dev, disk);
		/*
		 * If a bio has REQ_PREFLUSH for writeback mode, it is
		 * specially assembled in cached_dev_write() for a non-zero
		 * write request with REQ_PREFLUSH. We don't set
		 * s->iop.status on this failure; the status will be decided
		 * by the result of the bch_data_insert() operation.
		 */
		if (unlikely(s->iop.writeback &&
			     bio->bi_opf & REQ_PREFLUSH)) {
			pr_err("Can't flush %s: returned bi_status %i\n",
			       dc->backing_dev_name, bio->bi_status);
		} else {
			/* set to orig_bio->bi_status in bio_complete() */
			s->iop.status = bio->bi_status;
		}
		s->recoverable = false;
		bch_count_backing_io_errors(dc, bio);
	}
/* from bio_complete(): */
	if (s->orig_bio) {
		/* Count on the bcache device */
		part_end_io_acct(s->part, s->orig_bio, s->start_time);

		trace_bcache_request_end(s->d, s->orig_bio);
		s->orig_bio->bi_status = s->iop.status;
		bio_endio(s->orig_bio);
		s->orig_bio = NULL;
	}
/* from do_bio_hook(): */
	struct bio *bio = &s->bio.bio;
	/* ... */
	/*
	 * bi_end_io can be set separately somewhere else, e.g.:
	 * - cache_bio->bi_end_io from cached_dev_cache_miss()
	 * - n->bi_end_io from cache_lookup_fn()
	 */
	bio->bi_end_io		= end_io_fn;
	bio->bi_private		= &s->cl;
/* from search_free(): */
	atomic_dec(&s->iop.c->search_inflight);

	if (s->iop.bio)
		bio_put(s->iop.bio);
	/* ... */
	mempool_free(s, &s->iop.c->search);
/* from search_alloc(): */
	s = mempool_alloc(&d->c->search, GFP_NOIO);

	closure_init(&s->cl, NULL);
	do_bio_hook(s, bio, request_endio);
	atomic_inc(&d->c->search_inflight);

	s->orig_bio		= bio;
	s->cache_miss		= NULL;
	s->cache_missed		= 0;
	s->d			= d;
	s->recoverable		= 1;
	s->write		= op_is_write(bio_op(bio));
	s->read_dirty_data	= 0;

	/* Count on the bcache device */
	s->start_time		= part_start_io_acct(d->disk, &s->part, bio);
	s->iop.c		= d->c;
	s->iop.bio		= NULL;
	s->iop.inode		= d->id;
	s->iop.write_point	= hash_long((unsigned long) current, 16);
	s->iop.write_prio	= 0;
	s->iop.status		= 0;
	s->iop.flags		= 0;
	s->iop.flush_journal	= op_is_flush(bio->bi_opf);
	s->iop.wq		= bcache_wq;
/* from cached_dev_bio_complete(): */
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	cached_dev_put(dc);
/* from cached_dev_read_error_done(): */
	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio)
		bio_free_pages(s->iop.bio);
/* from cached_dev_read_error(): */
	struct bio *bio = &s->bio.bio;

	/*
	 * If the read request hit dirty data (s->read_dirty_data is true),
	 * retrying a failed read from the backing device could return stale
	 * data, so recovery is only attempted for clean cache reads.
	 */
	if (s->recoverable && !s->read_dirty_data) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.status = 0;
		do_bio_hook(s, s->orig_bio, backing_request_endio);
		/* ... */
		closure_bio_submit(s->iop.c, bio, cl);
	}
/* from cached_dev_cache_miss_done(): */
	struct bcache_device *d = s->d;

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio)
		bio_free_pages(s->iop.bio);
	/* ... */
	closure_put(&d->cl);
/* from cached_dev_read_done(): */
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains the data we read from
	 * the backing device, ready to be inserted into the cache. First copy
	 * it from cache_bio's bounce buffers to the buffers the original bio
	 * pointed to:
	 */
	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_iter.bi_sector =
			s->cache_miss->bi_iter.bi_sector;
		bio_copy_dev(s->iop.bio, s->cache_miss);
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	closure_get(&dc->disk.cl);
	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
/* from cached_dev_read_done_bh(): */
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_missed, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass);

	if (s->iop.status)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
/* from cached_dev_cache_miss(): */
	int ret = MAP_CONTINUE;
	unsigned int reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	s->cache_missed = 1;

	if (s->cache_miss || s->iop.bypass) {
		miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_opf & REQ_RAHEAD) &&
	    !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      get_capacity(bio->bi_disk) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			&dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector	= miss->bi_iter.bi_sector;
	bio_copy_dev(cache_bio, miss);
	cache_bio->bi_iter.bi_size	= s->insert_bio_sectors << 9;

	cache_bio->bi_end_io	= backing_request_endio;
	cache_bio->bi_private	= &s->cl;
	/* ... */
	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss	= miss;
	s->iop.bio	= cache_bio;
	bio_get(cache_bio);
	/* I/O request sent to backing device */
	closure_bio_submit(s->iop.c, cache_bio, &s->cl);

	return ret;
out_submit:
	miss->bi_end_io		= backing_request_endio;
	miss->bi_private	= &s->cl;
	/* I/O request sent to backing device */
	closure_bio_submit(s->iop.c, miss, &s->cl);
	return ret;
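
/*
 * [Editor's example] The read-ahead sizing above, as a standalone helper:
 * extend the miss by up to dc->readahead sectors, but never past the end of
 * the backing device:
 */
#include <stdint.h>

static uint64_t clamp_readahead(uint64_t reada_sectors, uint64_t capacity,
				uint64_t bio_end_sector)
{
	uint64_t room = capacity > bio_end_sector ? capacity - bio_end_sector : 0;

	return reada_sectors < room ? reada_sectors : room;
}
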
static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}
/* from cached_dev_write_complete(): */
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (bio_op(bio) == REQ_OP_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			goto insert_data;

		/* I/O request sent to backing device */
		bio->bi_end_io = backing_request_endio;
		closure_bio_submit(s->iop.c, bio, cl);

	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_opf & REQ_PREFLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush;

			flush = bio_alloc_bioset(GFP_NOIO, 0,
						 &dc->disk.bio_split);
			if (!flush) {
				s->iop.status = BLK_STS_RESOURCE;
				goto insert_data;
			}
			bio_copy_dev(flush, bio);
			flush->bi_end_io = backing_request_endio;
			flush->bi_private = cl;
			flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
			/* I/O request sent to backing device */
			closure_bio_submit(s->iop.c, flush, cl);
		}
	} else {
		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
		/* I/O request sent to backing device */
		bio->bi_end_io = backing_request_endio;
		closure_bio_submit(s->iop.c, bio, cl);
	}

insert_data:
	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}
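
/*
 * [Editor's condensation] The write path above picks one of three policies.
 * Overlap with keys undergoing background writeback forces writeback;
 * should_writeback() may also upgrade the request, clearing any bypass;
 * otherwise a bypass goes straight to the backing device, and everything
 * else is written through. A sketch of that decision order:
 */
#include <stdbool.h>

enum write_policy { WB_WRITEBACK, WB_BYPASS, WB_WRITETHROUGH };

static enum write_policy pick_write_policy(bool overlaps_writeback,
					   bool bypass, bool upgrade)
{
	if (overlaps_writeback || upgrade)
		return WB_WRITEBACK;
	if (bypass)
		return WB_BYPASS;
	return WB_WRITETHROUGH;
}
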
/* from cached_dev_nodata(): */
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	bio->bi_end_io = backing_request_endio;
	closure_bio_submit(s->iop.c, bio, cl);

	continue_at(cl, cached_dev_bio_complete, NULL);
/* from detached_dev_end_io(): */
	ddip = bio->bi_private;
	bio->bi_end_io = ddip->bi_end_io;
	bio->bi_private = ddip->bi_private;

	/* Count on the bcache device */
	part_end_io_acct(ddip->part, bio, ddip->start_time);

	if (bio->bi_status) {
		struct cached_dev *dc = container_of(ddip->d,
						     struct cached_dev, disk);
		/* should count I/O error for backing device here */
		bch_count_backing_io_errors(dc, bio);
	}

	kfree(ddip);
	bio->bi_end_io(bio);
/* from detached_dev_do_request(): */
	struct detached_dev_io_private *ddip;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);

	/*
	 * No need to call closure_get(&dc->disk.cl) here, because the upper
	 * layer has already opened the bcache device, which called
	 * closure_get(&dc->disk.cl).
	 */
	ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
	if (!ddip) {
		bio->bi_status = BLK_STS_RESOURCE;
		bio->bi_end_io(bio);
		return;
	}

	ddip->d = d;
	/* Count on the bcache device */
	ddip->start_time = part_start_io_acct(d->disk, &ddip->part, bio);
	ddip->bi_end_io = bio->bi_end_io;
	ddip->bi_private = bio->bi_private;
	bio->bi_end_io = detached_dev_end_io;
	bio->bi_private = ddip;

	if ((bio_op(bio) == REQ_OP_DISCARD) &&
	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
		bio->bi_end_io(bio);
	else
		submit_bio_noacct(bio);
/* from quit_max_writeback_rate(): */
	int i;
	struct bcache_device *d;
	struct cached_dev *dc;

	/*
	 * Waiting on bch_register_lock could add seconds of latency, so if
	 * mutex_trylock() fails, only the current cached device's writeback
	 * rate is set to 1; __update_writeback_rate() will decide the rate of
	 * the other cached devices (remember, c->idle_counter is 0 already).
	 */
	if (mutex_trylock(&bch_register_lock)) {
		for (i = 0; i < c->devices_max_used; i++) {
			if (!c->devices[i])
				continue;

			if (UUID_FLASH_ONLY(&c->uuids[i]))
				continue;

			d = c->devices[i];
			dc = container_of(d, struct cached_dev, disk);
			/* minimum rate; update_writeback_rate() takes over */
			atomic_long_set(&dc->writeback_rate.rate, 1);
		}
		mutex_unlock(&bch_register_lock);
	} else
		atomic_long_set(&this_dc->writeback_rate.rate, 1);
/* Cached devices - read & write stuff */
/* from cached_dev_submit_bio(): */
	struct search *s;
	struct bcache_device *d = bio->bi_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int rw = bio_data_dir(bio);

	if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
		     dc->io_disable)) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	if (likely(d->c)) {
		if (atomic_read(&d->c->idle_counter))
			atomic_set(&d->c->idle_counter, 0);
		/*
		 * If the cache set is at its max writeback rate and new I/O
		 * comes in, quit the max writeback rate of all cached devices
		 * attached to this cache set.
		 */
		if (unlikely(atomic_read(&d->c->at_max_writeback_rate) == 1)) {
			atomic_set(&d->c->at_max_writeback_rate, 0);
			quit_max_writeback_rate(d->c, dc);
		}
	}

	bio_set_dev(bio, dc->bdev);
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * submit_bio_noacct
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else
		/* I/O request sent to backing device */
		detached_dev_do_request(d, bio);
static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);

	if (dc->io_disable)
		return -EIO;

	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}
void bch_cached_dev_request_init(struct cached_dev *dc)
{
	dc->disk.cache_miss = cached_dev_cache_miss;
	dc->disk.ioctl = cached_dev_ioctl;
}
/* from flash_dev_cache_miss(): zero-fill whatever part of the bio missed */
	unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;

	swap(bio->bi_iter.bi_size, bytes);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, bytes);

	bio_advance(bio, bytes);

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
/* from flash_dev_nodata(): */
	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
/* from flash_dev_submit_bio(): */
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_disk->private_data;

	if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under submit_bio_noacct
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
		return BLK_QC_T_NONE;
	} else if (bio_data_dir(bio)) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_iter.bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass	 = (bio_op(bio) == REQ_OP_DISCARD) != 0;
		s->iop.writeback = true;
		s->iop.bio	 = bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
void bch_flash_dev_request_init(struct bcache_device *d)
{
	d->cache_miss = flash_dev_cache_miss;
	d->ioctl = flash_dev_ioctl;
}
/* from bch_request_init(): */
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

	return 0;