/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H
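
/*
 * Cutoffs for caching writes as dirty data, in percent of cache in use:
 * CUTOFF_WRITEBACK and CUTOFF_WRITEBACK_SYNC are the defaults for the
 * bch_cutoff_writeback and bch_cutoff_writeback_sync tunables consulted
 * by should_writeback(); the *_MAX values bound how high those tunables
 * may be set.
 */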
#define CUTOFF_WRITEBACK		40
#define CUTOFF_WRITEBACK_SYNC		70

#define CUTOFF_WRITEBACK_MAX		70
#define CUTOFF_WRITEBACK_SYNC_MAX	90

#define MAX_WRITEBACKS_IN_PASS		5
#define MAX_WRITESIZE_IN_PASS		5000	/* *512b */

#define WRITEBACK_RATE_UPDATE_SECS_MAX		60
#define WRITEBACK_RATE_UPDATE_SECS_DEFAULT	5

#define BCH_AUTO_GC_DIRTY_THRESHOLD	50

/*
 * 14 (16384ths) is chosen so that each backing device's share can be
 * expressed as a reasonable fraction, and the arithmetic does not blow
 * up until individual backing devices reach a petabyte.
 */
#define WRITEBACK_SHARE_SHIFT	14
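
/*
 * Illustrative sketch only (the real consumer is __update_writeback_rate()
 * in writeback.c, whose details may differ): a backing device's share of
 * the cache is computed in 16384ths, roughly as
 *
 *	bdev_share = div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
 *			       c->cached_dev_sectors);
 *
 * so a device holding half of all cached-device sectors gets a share of
 * 8192.
 */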

static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}
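
/*
 * Convert a device offset (in sectors) to its stripe index, or -EINVAL
 * if the resulting stripe would fall outside [0, d->nr_stripes).
 */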
static inline int offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	do_div(offset, d->stripe_size);

	/* d->nr_stripes is in range [1, INT_MAX] */
	if (unlikely(offset >= d->nr_stripes)) {
		pr_err("Invalid stripe %llu (>= nr_stripes %d).\n",
			offset, d->nr_stripes);
		return -EINVAL;
	}

	/*
	 * Here offset is definitely smaller than INT_MAX:
	 * returning it as an int will never overflow.
	 */
	return offset;
}
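
/*
 * Check whether any stripe touched by the sector range
 * [offset, offset + nr_sectors) has dirty data.
 */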
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned int nr_sectors)
{
	int stripe = offset_to_stripe(&dc->disk, offset);

	if (stripe < 0)
		return false;

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

extern unsigned int bch_cutoff_writeback;
extern unsigned int bch_cutoff_writeback_sync;
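
/*
 * Decide whether a foreground write should be cached dirty (and written
 * back later) rather than sent around the cache:
 *
 *  - never when not in writeback mode, when the device is detaching, or
 *    when more than bch_cutoff_writeback_sync percent of the cache is
 *    in use;
 *  - never for discards;
 *  - always when partial stripe writes are expensive and the stripes
 *    this bio touches are already dirty;
 *  - if the request would be skipped anyway (would_skip), don't cache it;
 *  - otherwise cache sync, metadata and priority writes, and any write
 *    while no more than bch_cutoff_writeback percent of the cache is in
 *    use.
 */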
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned int cache_mode, bool would_skip)
{
	unsigned int in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > bch_cutoff_writeback_sync)
		return false;

	if (bio_op(bio) == REQ_OP_DISCARD)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return (op_is_sync(bio->bi_opf) ||
		bio->bi_opf & (REQ_META|REQ_PRIO) ||
		in_use <= bch_cutoff_writeback);
}

static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}
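
/*
 * Note that the backing device now holds dirty data: the caller that
 * first sets dc->has_dirty also flips the superblock to BDEV_STATE_DIRTY
 * and wakes the writeback thread.
 */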
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
				  uint64_t offset, int nr_sectors);

void bch_sectors_dirty_init(struct bcache_device *d);
void bch_cached_dev_writeback_init(struct cached_dev *dc);
int bch_cached_dev_writeback_start(struct cached_dev *dc);

#endif