#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

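/*
 * Cache in-use thresholds (compared against gc_stats.in_use below): past
 * CUTOFF_WRITEBACK, only REQ_SYNC writes or writes to already-dirty stripes
 * still go through writeback; past CUTOFF_WRITEBACK_SYNC, nothing does.
 * See should_writeback().
 */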
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70

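/* Sum the per-stripe dirty sector counters for a bcache device. */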
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}

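/*
 * Sum dirty sectors across all flash-only volumes in the cache set,
 * holding bch_register_lock so the devices array stays stable.
 */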
static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
{
	uint64_t i, ret = 0;

	mutex_lock(&bch_register_lock);

	for (i = 0; i < c->nr_uuids; i++) {
		struct bcache_device *d = c->devices[i];

		if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
		ret += bcache_dev_sectors_dirty(d);
	}

	mutex_unlock(&bch_register_lock);

	return ret;
}

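/* Convert a sector offset into the index of the stripe containing it. */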
static inline unsigned offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	do_div(offset, d->stripe_size);
	return offset;
}

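/*
 * Return true if any stripe touched by the nr_sectors starting at offset
 * already has dirty data.
 */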
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned nr_sectors)
{
	unsigned stripe = offset_to_stripe(&dc->disk, offset);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

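/*
 * Decide whether a write bio should be cached in writeback mode: never when
 * not in writeback mode, while detaching, or once the cache is past
 * CUTOFF_WRITEBACK_SYNC; always when the bio overlaps already-dirty stripes
 * and partial stripe writes are expensive; otherwise only for REQ_SYNC
 * writes or while in_use is at most CUTOFF_WRITEBACK.
 */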
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned cache_mode, bool would_skip)
{
	unsigned in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > CUTOFF_WRITEBACK_SYNC)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return bio->bi_rw & REQ_SYNC ||
		in_use <= CUTOFF_WRITEBACK;
}

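/* Wake the per-device writeback thread, if it was started successfully. */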
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}

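/*
 * Note that the device now has dirty data: on the first clean-to-dirty
 * transition, take a refcount, mark the backing superblock dirty, and
 * kick the writeback thread.
 */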
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		atomic_inc(&dc->count);

		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

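/* Non-inline writeback interface, implemented in writeback.c: */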
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);

void bch_sectors_dirty_init(struct bcache_device *);
void bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);

#endif