/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

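/*
 * Writeback cutoffs, in percent of cache in use (gc_stats.in_use): past
 * CUTOFF_WRITEBACK, should_writeback() only accepts sync writes (and
 * partial-stripe writes when those are expensive); past
 * CUTOFF_WRITEBACK_SYNC, writeback caching is bypassed entirely.
 */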
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70

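/*
 * Total dirty sectors on a bcache device, computed by summing the
 * per-stripe dirty counters.
 */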
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}

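/*
 * Total dirty sectors across every flash-only volume in the cache set.
 * bch_register_lock keeps c->devices[] stable against concurrent
 * registration/unregistration while we walk it.
 */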
static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
{
	uint64_t i, ret = 0;

	mutex_lock(&bch_register_lock);

	for (i = 0; i < c->nr_uuids; i++) {
		struct bcache_device *d = c->devices[i];

		if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
		ret += bcache_dev_sectors_dirty(d);
	}

	mutex_unlock(&bch_register_lock);

	return ret;
}

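/*
 * Map a sector offset on the device to the index of the stripe that
 * contains it; e.g. with a stripe_size of 2048 sectors, offset 5000
 * lands in stripe 2.
 */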
static inline unsigned offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	do_div(offset, d->stripe_size);
	return offset;
}

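/*
 * Return true if any stripe overlapping [offset, offset + nr_sectors)
 * has dirty sectors. The loop advances one stripe at a time until the
 * remaining length fits within a single stripe.
 */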
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned nr_sectors)
{
	unsigned stripe = offset_to_stripe(&dc->disk, offset);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

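/*
 * Decide whether a write should be cached in writeback mode. Bail out
 * when not in writeback mode, when the device is detaching, when the
 * cache is more than CUTOFF_WRITEBACK_SYNC percent full, or for
 * discards. Writes touching an already-dirty stripe are taken when
 * partial stripe writes are expensive; writes the caller would bypass
 * anyway (would_skip) are not. Otherwise accept sync writes, or any
 * write while usage is at or below CUTOFF_WRITEBACK percent.
 */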
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned cache_mode, bool would_skip)
{
	unsigned in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > CUTOFF_WRITEBACK_SYNC)
		return false;

	if (bio_op(bio) == REQ_OP_DISCARD)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return op_is_sync(bio->bi_opf) || in_use <= CUTOFF_WRITEBACK;
}

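/* Wake up the writeback thread, if one has been started. */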
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}

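/*
 * Note that the device now holds dirty data: on the 0 -> 1 transition
 * of has_dirty, take a reference on the cached_dev, persist
 * BDEV_STATE_DIRTY in the backing device's superblock, and kick the
 * writeback thread.
 */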
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		atomic_inc(&dc->count);

		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

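/* Implemented in writeback.c: */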
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);

void bch_sectors_dirty_init(struct bcache_device *);
void bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);

#endif