
Lines Matching +full:cache +full:- +full:time +full:- +full:ms

1 // SPDX-License-Identifier: GPL-2.0
21 mempool_free(b, &c->bio_meta); in bch_bbio_free()
26 struct bbio *b = mempool_alloc(&c->bio_meta, GFP_NOIO); in bch_bbio_alloc()
27 struct bio *bio = &b->bio; in bch_bbio_alloc()
29 bio_init(bio, bio->bi_inline_vecs, meta_bucket_pages(&c->cache->sb)); in bch_bbio_alloc()
38 bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); in __bch_submit_bbio()
39 bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev); in __bch_submit_bbio()
41 b->submit_time_us = local_clock_us(); in __bch_submit_bbio()
42 closure_bio_submit(c, bio, bio->bi_private); in __bch_submit_bbio()
50 bch_bkey_copy_single_ptr(&b->key, k, ptr); in bch_submit_bbio()
62 * Read-ahead requests on a degrading and recovering md raid in bch_count_backing_io_errors()
65 * we shouldn't count failed REQ_RAHEAD bio to dc->io_errors. in bch_count_backing_io_errors()
67 if (bio->bi_opf & REQ_RAHEAD) { in bch_count_backing_io_errors()
68 pr_warn_ratelimited("%s: Read-ahead I/O failed on backing device, ignore\n", in bch_count_backing_io_errors()
69 dc->backing_dev_name); in bch_count_backing_io_errors()
73 errors = atomic_add_return(1, &dc->io_errors); in bch_count_backing_io_errors()
74 if (errors < dc->error_limit) in bch_count_backing_io_errors()
76 dc->backing_dev_name); in bch_count_backing_io_errors()
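The bch_count_backing_io_errors() matches above show the backing-device policy: a failed read-ahead bio (REQ_RAHEAD), e.g. from a degraded or recovering md raid device, is only warned about and never counted, while any other failure bumps dc->io_errors and is checked against dc->error_limit. A rough standalone sketch of that policy, assuming C11 atomics; backing_io_error(), struct backing_dev and MY_REQ_RAHEAD are illustrative stand-ins, not the bcache definitions:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MY_REQ_RAHEAD	(1u << 0)	/* stand-in for the kernel's REQ_RAHEAD flag */

struct backing_dev {
	const char *name;
	atomic_uint io_errors;
	unsigned int error_limit;
};

/* Returns true once the device has crossed its error limit. */
static bool backing_io_error(struct backing_dev *dc, unsigned int op_flags)
{
	if (op_flags & MY_REQ_RAHEAD) {
		/* Read-ahead failures (e.g. from a recovering md raid) are
		 * not real media errors, so warn but don't count them. */
		fprintf(stderr, "%s: read-ahead I/O failed on backing device, ignored\n",
			dc->name);
		return false;
	}

	unsigned int errors = atomic_fetch_add(&dc->io_errors, 1) + 1;

	if (errors < dc->error_limit) {
		fprintf(stderr, "%s: I/O error on backing device\n", dc->name);
		return false;
	}
	return true;	/* caller should stop using this backing device */
}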
81 void bch_count_io_errors(struct cache *ca, in bch_count_io_errors()
91 if (ca->set->error_decay) { in bch_count_io_errors()
92 unsigned int count = atomic_inc_return(&ca->io_count); in bch_count_io_errors()
94 while (count > ca->set->error_decay) { in bch_count_io_errors()
97 unsigned int new = count - ca->set->error_decay; in bch_count_io_errors()
100 * First we subtract refresh from count; each time we in bch_count_io_errors()
104 count = atomic_cmpxchg(&ca->io_count, old, new); in bch_count_io_errors()
109 errors = atomic_read(&ca->io_errors); in bch_count_io_errors()
113 errors = atomic_cmpxchg(&ca->io_errors, in bch_count_io_errors()
122 &ca->io_errors); in bch_count_io_errors()
125 if (errors < ca->set->error_limit) in bch_count_io_errors()
127 ca->cache_dev_name, m, in bch_count_io_errors()
130 bch_cache_set_error(ca->set, in bch_count_io_errors()
132 ca->cache_dev_name, m); in bch_count_io_errors()
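The bch_count_io_errors() matches hint at a decaying error counter: every error_decay I/Os the counter is rescaled (by 127/128 in bcache) through an atomic_cmpxchg() loop so that old errors fade out over time, and each new error is added and compared against error_limit. A rough userspace sketch of that pattern using C11 atomics; struct dev_stats and note_io() are illustrative names, not the kernel structures:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct dev_stats {
	atomic_uint io_count;      /* I/Os seen since the last decay step */
	atomic_uint io_errors;     /* decaying error counter */
	unsigned int error_decay;  /* I/Os per decay step; 0 disables decay */
	unsigned int error_limit;  /* give up once errors reach this */
};

static void note_io(struct dev_stats *s, int failed)
{
	unsigned int count = atomic_fetch_add(&s->io_count, 1) + 1;

	/* Every error_decay I/Os, consume one step with a compare-exchange
	 * and scale the error counter by 127/128 so old errors fade away. */
	while (s->error_decay && count > s->error_decay) {
		unsigned int old = count;
		unsigned int next = count - s->error_decay;

		if (atomic_compare_exchange_weak(&s->io_count, &old, next)) {
			unsigned int e = atomic_load(&s->io_errors);
			unsigned int scaled;

			do {
				scaled = (unsigned int)(((uint64_t)e * 127) / 128);
			} while (!atomic_compare_exchange_weak(&s->io_errors, &e, scaled));
			count = next;
		} else {
			count = old;   /* lost the race; retry with the fresh value */
		}
	}

	if (failed) {
		unsigned int errors = atomic_fetch_add(&s->io_errors, 1) + 1;

		if (errors >= s->error_limit)
			fprintf(stderr, "too many I/O errors (%u), disabling device\n",
				errors);
	}
}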
140 struct cache *ca = PTR_CACHE(c, &b->key, 0); in bch_bbio_count_io_errors()
144 ? c->congested_write_threshold_us in bch_bbio_count_io_errors()
145 : c->congested_read_threshold_us; in bch_bbio_count_io_errors()
149 int us = t - b->submit_time_us; in bch_bbio_count_io_errors()
150 int congested = atomic_read(&c->congested); in bch_bbio_count_io_errors()
153 int ms = us / 1024; in bch_bbio_count_io_errors() local
155 c->congested_last_us = t; in bch_bbio_count_io_errors()
157 ms = min(ms, CONGESTED_MAX + congested); in bch_bbio_count_io_errors()
158 atomic_sub(ms, &c->congested); in bch_bbio_count_io_errors()
160 atomic_inc(&c->congested); in bch_bbio_count_io_errors()
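The bch_bbio_count_io_errors() matches compare each I/O's completion latency (measured from the submit_time_us recorded at submission) against a per-direction congestion threshold and adjust an atomic congestion score: slow completions subtract the latency in rough millisecond units (us / 1024) down to a floor of -CONGESTED_MAX, while fast completions increment the score back toward zero. A rough standalone sketch of that pattern; io_completed() and the static counter are illustrative stand-ins, not the kernel's API:

#include <stdatomic.h>

#define CONGESTED_MAX	1024

static atomic_int congested;   /* <= 0; more negative means more congested */

static void io_completed(unsigned int submit_time_us, unsigned int now_us,
			 unsigned int threshold_us)
{
	int us = (int)(now_us - submit_time_us);
	int cur = atomic_load(&congested);

	if (!threshold_us)
		return;        /* congestion tracking disabled */

	if (us > (int)threshold_us) {
		/* Slow completion: push the score down by the latency in
		 * ~ms units, bounded so it never drops below -CONGESTED_MAX. */
		int ms = us / 1024;

		if (ms > CONGESTED_MAX + cur)
			ms = CONGESTED_MAX + cur;
		atomic_fetch_sub(&congested, ms);
	} else if (cur < 0) {
		/* Fast completion: let the score drift back toward zero. */
		atomic_fetch_add(&congested, 1);
	}
}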
169 struct closure *cl = bio->bi_private; in bch_bbio_endio()