Lines matching "cache-time-ms" — bcache I/O helpers (drivers/md/bcache/io.c)
// SPDX-License-Identifier: GPL-2.0
	/* In bch_bbio_free(): return the bbio to the cache set's metadata mempool. */
	mempool_free(b, &c->bio_meta);
	/* In bch_bbio_alloc(): allocate a bbio from the mempool and initialize its
	 * embedded bio with enough inline vecs for one metadata bucket. */
	struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO);
	struct bio *bio = &b->bio;

	bio_init(bio, NULL, bio->bi_inline_vecs,
		 meta_bucket_pages(&c->cache->sb), 0);
	/* In __bch_submit_bbio(): point the bio at the bucket the key refers to,
	 * stamp the submission time for congestion tracking, then submit. */
	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
	bio_set_dev(bio, c->cache->bdev);

	b->submit_time_us = local_clock_us();
	closure_bio_submit(c, bio, bio->bi_private);
	/* In bch_submit_bbio(): copy the chosen pointer into the bbio's key first. */
	bch_bkey_copy_single_ptr(&b->key, k, ptr);
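/*
 * Sketch (not kernel code): bch_bbio_free() and the submit paths recover the
 * enclosing struct bbio from a plain struct bio pointer with container_of();
 * the bio is embedded last so the per-I/O state (the key, the submit
 * timestamp) travels with it through the block layer. The struct layout and
 * names below are illustrative stand-ins, not the kernel's definitions.
 */
#include <stddef.h>
#include <stdio.h>

struct bio  { unsigned long bi_sector; };	/* stand-in */
struct bkey { unsigned long ptr[3]; };		/* stand-in */

struct bbio_sketch {
	unsigned int	submit_time_us;
	struct bkey	key;
	struct bio	bio;			/* embedded last, like struct bbio */
};

#define container_of(p, type, member) \
	((type *)((char *)(p) - offsetof(type, member)))

int main(void)
{
	struct bbio_sketch b = { .submit_time_us = 42 };
	struct bio *bio = &b.bio;		/* what the block layer hands back */

	/* what the free/endio paths do to get their per-I/O context back */
	struct bbio_sketch *back = container_of(bio, struct bbio_sketch, bio);

	printf("submit_time_us = %u\n", back->submit_time_us);
	return 0;
}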
	/*
	 * In bch_count_backing_io_errors(): read-ahead requests on a degrading
	 * and recovering md raid device may be failed by the raid layer itself
	 * rather than by a real media error, so a failed REQ_RAHEAD bio is not
	 * counted towards dc->io_errors.
	 */
	if (bio->bi_opf & REQ_RAHEAD) {
		pr_warn_ratelimited("%pg: Read-ahead I/O failed on backing device, ignore\n",
				    dc->bdev);
		return;
	}

	errors = atomic_add_return(1, &dc->io_errors);
	if (errors < dc->error_limit)
		pr_err("%pg: IO error on backing device, unrecoverable\n",
		       dc->bdev);
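/*
 * Sketch (not kernel code): the policy above is "ignore failed read-ahead,
 * count everything else against a limit". Read-ahead is advisory, so a failure
 * on a rebuilding RAID backing device should not poison dc->io_errors. The
 * names, the flag value and the use of C11 atomics below are illustrative;
 * only the counting policy is the point.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define REQ_RAHEAD_SKETCH	(1u << 0)	/* made-up flag value */

struct backing_dev {
	atomic_uint	io_errors;
	unsigned int	error_limit;
};

/* Returns true once the device has accumulated too many real errors. */
static bool count_backing_io_error(struct backing_dev *dc, unsigned int opf)
{
	if (opf & REQ_RAHEAD_SKETCH) {
		fprintf(stderr, "read-ahead I/O failed, ignored\n");
		return false;
	}

	unsigned int errors = atomic_fetch_add(&dc->io_errors, 1) + 1;
	if (errors < dc->error_limit) {
		fprintf(stderr, "I/O error %u/%u on backing device\n",
			errors, dc->error_limit);
		return false;
	}
	return true;
}

int main(void)
{
	struct backing_dev dc = { .error_limit = 3 };

	count_backing_io_error(&dc, REQ_RAHEAD_SKETCH);	/* not counted */
	for (int i = 0; i < 3; i++)
		if (count_backing_io_error(&dc, 0))
			puts("backing device marked failed");
	return 0;
}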
void bch_count_io_errors(struct cache *ca, blk_status_t error,
			 int is_read, const char *m)
{
	/* Old errors decay; the halflife is roughly 88 * error_decay I/Os. */
	if (ca->set->error_decay) {
		unsigned int count = atomic_inc_return(&ca->io_count);

		while (count > ca->set->error_decay) {
			unsigned int errors;
			unsigned int old = count;
			unsigned int new = count - ca->set->error_decay;

			/*
			 * First we subtract refresh from count; each time we
			 * successfully do so, we rescale the errors once:
			 */
			count = atomic_cmpxchg(&ca->io_count, old, new);
			if (count == old) {
				count = new;
				errors = atomic_read(&ca->io_errors);
				do {
					old = errors;
					new = ((uint64_t) errors * 127) / 128;
					errors = atomic_cmpxchg(&ca->io_errors,
								old, new);
				} while (old != errors);
			}
		}
	}

	if (error) {
		unsigned int errors = atomic_add_return(1 << IO_ERROR_SHIFT,
							&ca->io_errors);
		errors >>= IO_ERROR_SHIFT;

		if (errors < ca->set->error_limit)
			pr_err("%pg: IO error on %s%s\n",
			       ca->bdev, m,
			       is_read ? ", recovering." : ".");
		else
			bch_cache_set_error(ca->set,
					    "%pg: too many IO errors %s\n",
					    ca->bdev, m);
	}
}
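/*
 * Sketch (not kernel code): the decay above rescales the error count by
 * 127/128 once per ca->set->error_decay I/Os, so an error's weight halves
 * after about log(1/2) / log(127/128) ~= 88 refresh periods. The loop and the
 * closed form below just check that arithmetic; the starting count of 1000 is
 * arbitrary. (Link with -lm for log().)
 */
#include <math.h>
#include <stdio.h>

int main(void)
{
	unsigned long long errors = 1000;
	int refreshes = 0;

	while (errors > 500) {			/* wait for the count to halve */
		errors = errors * 127 / 128;	/* one decay step */
		refreshes++;
	}

	printf("halved after %d refresh periods\n", refreshes);
	printf("closed form: %.1f\n", log(0.5) / log(127.0 / 128.0));
	return 0;
}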
	/* In bch_bbio_count_io_errors(): compare this I/O's latency against the
	 * read or write congestion threshold and adjust c->congested. */
	struct cache *ca = c->cache;

	unsigned int threshold = op_is_write(bio_op(bio))
		? c->congested_write_threshold_us
		: c->congested_read_threshold_us;

	if (threshold) {
		unsigned int t = local_clock_us();
		int us = t - b->submit_time_us;
		int congested = atomic_read(&c->congested);

		if (us > (int) threshold) {
			int ms = us / 1024;	/* cheap us -> ms conversion */

			c->congested_last_us = t;
			ms = min(ms, CONGESTED_MAX + congested);
			atomic_sub(ms, &c->congested);
		} else if (congested < 0)
			atomic_inc(&c->congested);
	}
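/*
 * Sketch (not kernel code): the block above is the congestion feedback. An
 * I/O slower than the configured threshold pulls c->congested down by its
 * latency in milliseconds (us / 1024 as a cheap conversion), clamped so the
 * counter never drops below -CONGESTED_MAX; a fast I/O while the counter is
 * negative nudges it back towards zero. The threshold values and the
 * non-atomic counter below are illustrative.
 */
#include <stdio.h>

#define CONGESTED_MAX_SKETCH	1024

static int congested;	/* negative while the device looks congested */

static void account_io(unsigned int latency_us, unsigned int threshold_us)
{
	if (!threshold_us)
		return;				/* tracking disabled */

	if (latency_us > threshold_us) {
		int ms = latency_us / 1024;	/* ~latency in milliseconds */

		if (ms > CONGESTED_MAX_SKETCH + congested)
			ms = CONGESTED_MAX_SKETCH + congested;
		congested -= ms;
	} else if (congested < 0) {
		congested++;
	}
}

int main(void)
{
	account_io(50000, 2000);	/* one 50 ms I/O, 2 ms threshold */
	printf("after a slow I/O: congested = %d\n", congested);

	for (int i = 0; i < 10; i++)
		account_io(500, 2000);	/* ten fast I/Os */
	printf("after fast I/Os:  congested = %d\n", congested);
	return 0;
}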
	/* In bch_bbio_endio(): the submitter's closure rides in bio->bi_private. */
	struct closure *cl = bio->bi_private;
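/*
 * Sketch (not kernel code): bch_bbio_endio() is the common completion path.
 * It runs the error/congestion accounting above, drops the bio, and puts the
 * closure stashed in bio->bi_private so the submitter, which took a reference
 * per in-flight I/O, can make progress. The plain counter below stands in for
 * struct closure; the names are illustrative.
 */
#include <stdio.h>

struct closure_sketch {
	int remaining;			/* outstanding I/Os */
};

static void closure_put_sketch(struct closure_sketch *cl)
{
	if (--cl->remaining == 0)
		puts("last I/O done, waiter can continue");
}

/* what an endio-style callback does once per completed bio */
static void endio_sketch(struct closure_sketch *cl, int error)
{
	if (error)
		printf("I/O failed (%d), counted against the device\n", error);
	closure_put_sketch(cl);
}

int main(void)
{
	struct closure_sketch cl = { .remaining = 2 };	/* two I/Os in flight */

	endio_sketch(&cl, 0);
	endio_sketch(&cl, -5);		/* illustrative error code */
	return 0;
}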