1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * background writeback - scan btree for dirty data and write it to the backing
4 * device
5 *
6 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
7 * Copyright 2012 Google, Inc.
8 */
9
10 #include "bcache.h"
11 #include "btree.h"
12 #include "debug.h"
13 #include "writeback.h"
14
15 #include <linux/delay.h>
16 #include <linux/kthread.h>
17 #include <linux/sched/clock.h>
18 #include <trace/events/bcache.h>
19
20 static void update_gc_after_writeback(struct cache_set *c)
21 {
22 if (c->gc_after_writeback != (BCH_ENABLE_AUTO_GC) ||
23 c->gc_stats.in_use < BCH_AUTO_GC_DIRTY_THRESHOLD)
24 return;
25
26 c->gc_after_writeback |= BCH_DO_AUTO_GC;
27 }
28
29 /* Rate limiting */
30 static uint64_t __calc_target_rate(struct cached_dev *dc)
31 {
32 struct cache_set *c = dc->disk.c;
33
34 /*
35 * This is the size of the cache, minus the amount used for
36 * flash-only devices
37 */
38 uint64_t cache_sectors = c->nbuckets * c->cache->sb.bucket_size -
39 atomic_long_read(&c->flash_dev_dirty_sectors);
40
41 /*
42 * Unfortunately there is no control of global dirty data. If the
43 * user states that they want 10% dirty data in the cache, and has,
44 * e.g., 5 backing volumes of equal size, we try and ensure each
45 * backing volume uses about 2% of the cache for dirty data.
46 */
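/*
 * Illustrative example (made-up numbers, not a real configuration):
 * with 100 GiB worth of cache_sectors, writeback_percent of 10 and
 * five equally sized backing volumes, cache_dirty_target below is
 * 10 GiB worth of sectors and each bdev_share is roughly
 * (1 << WRITEBACK_SHARE_SHIFT) / 5, so the value returned is about
 * 2 GiB worth of sectors per backing volume.
 */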
47 uint32_t bdev_share =
48 div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
49 c->cached_dev_sectors);
50
51 uint64_t cache_dirty_target =
52 div_u64(cache_sectors * dc->writeback_percent, 100);
53
54 /* Ensure each backing dev gets at least one dirty share */
55 if (bdev_share < 1)
56 bdev_share = 1;
57
58 return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
59 }
60
61 static void __update_writeback_rate(struct cached_dev *dc)
62 {
63 /*
64 * PI controller:
65 * Figures out the amount that should be written per second.
66 *
67 * First, the error (number of sectors that are dirty beyond our
68 * target) is calculated. The error is accumulated (numerically
69 * integrated).
70 *
71 * Then, the proportional value and integral value are scaled
72 * based on configured values. These are stored as inverses to
73 * avoid fixed point math and to make configuration easy-- e.g.
74 * the default value of 40 for writeback_rate_p_term_inverse
75 * attempts to write at a rate that would retire all the dirty
76 * blocks in 40 seconds.
77 *
78 * The writeback_rate_i_inverse value of 10000 means that 1/10000th
79 * of the error is accumulated in the integral term per second.
80 * This acts as a slow, long-term average that is not subject to
81 * variations in usage like the p term.
82 */
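/*
 * Worked example with the default tuning (illustrative numbers only):
 * if a device is 40000 sectors over target, proportional_scaled is
 * 40000 / 40 = 1000 sectors/s, i.e. a rate that would retire the
 * excess in writeback_rate_p_term_inverse seconds. Each update also
 * adds error * writeback_rate_update_seconds to the integral
 * (equivalent to accumulating the error once per second), and
 * 1/10000th of that integral is applied through integral_scaled.
 */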
83 int64_t target = __calc_target_rate(dc);
84 int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
85 int64_t error = dirty - target;
86 int64_t proportional_scaled =
87 div_s64(error, dc->writeback_rate_p_term_inverse);
88 int64_t integral_scaled;
89 uint32_t new_rate;
90
91 /*
92 * We need to consider the number of dirty buckets as well
93 * when calculating proportional_scaled. Otherwise we might
94 * end up with an unreasonably small writeback rate when the
95 * cache is highly fragmented, i.e. when very few dirty sectors
96 * consume a lot of dirty buckets. In the worst case the dirty
97 * buckets reach cutoff_writeback_sync while the dirty data has
98 * not even reached the writeback percent, so the rate stays at
99 * the minimum value and writes get stuck in non-writeback mode.
100 */
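/*
 * Rough sketch of the fragmentation boost below: fragment is the
 * ratio of bucket space consumed (dirty_buckets * bucket_size) to
 * actual dirty sectors, so larger means more fragmented. fps scales
 * dirty / dirty_buckets (average dirty sectors per bucket) by an
 * fp_term that grows as gc_stats.in_use crosses the LOW/MID/HIGH
 * thresholds from writeback.h. The boost only replaces the normal
 * p term when fragment > 3 and fps is the larger of the two.
 */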
101 struct cache_set *c = dc->disk.c;
102
103 int64_t dirty_buckets = c->nbuckets - c->avail_nbuckets;
104
105 if (dc->writeback_consider_fragment &&
106 c->gc_stats.in_use > BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW && dirty > 0) {
107 int64_t fragment =
108 div_s64((dirty_buckets * c->cache->sb.bucket_size), dirty);
109 int64_t fp_term;
110 int64_t fps;
111
112 if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID) {
113 fp_term = (int64_t)dc->writeback_rate_fp_term_low *
114 (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW);
115 } else if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH) {
116 fp_term = (int64_t)dc->writeback_rate_fp_term_mid *
117 (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID);
118 } else {
119 fp_term = (int64_t)dc->writeback_rate_fp_term_high *
120 (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH);
121 }
122 fps = div_s64(dirty, dirty_buckets) * fp_term;
123 if (fragment > 3 && fps > proportional_scaled) {
124 /* Only overwrite the p term when fragment > 3 */
125 proportional_scaled = fps;
126 }
127 }
128
129 if ((error < 0 && dc->writeback_rate_integral > 0) ||
130 (error > 0 && time_before64(local_clock(),
131 dc->writeback_rate.next + NSEC_PER_MSEC))) {
132 /*
133 * Only decrease the integral term if it's more than
134 * zero. Only increase the integral term if the device
135 * is keeping up. (Don't wind up the integral
136 * ineffectively in either case).
137 *
138 * It's necessary to scale this by
139 * writeback_rate_update_seconds to keep the integral
140 * term dimensioned properly.
141 */
142 dc->writeback_rate_integral += error *
143 dc->writeback_rate_update_seconds;
144 }
145
146 integral_scaled = div_s64(dc->writeback_rate_integral,
147 dc->writeback_rate_i_term_inverse);
148
149 new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
150 dc->writeback_rate_minimum, NSEC_PER_SEC);
151
152 dc->writeback_rate_proportional = proportional_scaled;
153 dc->writeback_rate_integral_scaled = integral_scaled;
154 dc->writeback_rate_change = new_rate -
155 atomic_long_read(&dc->writeback_rate.rate);
156 atomic_long_set(&dc->writeback_rate.rate, new_rate);
157 dc->writeback_rate_target = target;
158 }
159
160 static bool idle_counter_exceeded(struct cache_set *c)
161 {
162 int counter, dev_nr;
163
164 /*
165 * If c->idle_counter overflows (idle for a really long time),
166 * reset it to 0 and don't set the maximum rate this time, for
167 * code simplicity.
168 */
169 counter = atomic_inc_return(&c->idle_counter);
170 if (counter <= 0) {
171 atomic_set(&c->idle_counter, 0);
172 return false;
173 }
174
175 dev_nr = atomic_read(&c->attached_dev_nr);
176 if (dev_nr == 0)
177 return false;
178
179 /*
180 * c->idle_counter is increased by the writeback threads of all
181 * attached backing devices. In order to represent a rough
182 * time period, the counter should be divided by dev_nr;
183 * otherwise the idle time could not grow as more backing
184 * devices are attached.
185 * The following calculation is equivalent to checking
186 * (counter / dev_nr) < (dev_nr * 6)
187 */
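/*
 * Worked example: with two attached backing devices the shared
 * counter must reach 2 * 2 * 6 = 24 increments before the cache set
 * is treated as idle, i.e. about 12 update_writeback_rate() calls
 * per device when both are incrementing it.
 */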
188 if (counter < (dev_nr * dev_nr * 6))
189 return false;
190
191 return true;
192 }
193
194 /*
195 * Idle_counter is increased every time update_writeback_rate() is
196 * called. If all backing devices attached to the same cache set have
197 * identical dc->writeback_rate_update_seconds values, it is about 6
198 * rounds of update_writeback_rate() on each backing device before
199 * c->at_max_writeback_rate is set to 1, and then the maximum writeback
200 * rate is set for each dc->writeback_rate.rate.
201 * In order to avoid the extra locking cost of counting the exact number
202 * of dirty cached devices, c->attached_dev_nr is used to calculate the
203 * idle threshold. It might be bigger if not all cached devices are in
204 * writeback mode, but it still works well with limited extra rounds of
205 * update_writeback_rate().
206 */
207 static bool set_at_max_writeback_rate(struct cache_set *c,
208 struct cached_dev *dc)
209 {
210 /* Don't set the max writeback rate if it is disabled */
211 if (!c->idle_max_writeback_rate_enabled)
212 return false;
213
214 /* Don't set max writeback rate if gc is running */
215 if (!c->gc_mark_valid)
216 return false;
217
218 if (!idle_counter_exceeded(c))
219 return false;
220
221 if (atomic_read(&c->at_max_writeback_rate) != 1)
222 atomic_set(&c->at_max_writeback_rate, 1);
223
224 atomic_long_set(&dc->writeback_rate.rate, INT_MAX);
225
226 /* keep writeback_rate_target as existing value */
227 dc->writeback_rate_proportional = 0;
228 dc->writeback_rate_integral_scaled = 0;
229 dc->writeback_rate_change = 0;
230
231 /*
232 * In case new I/O arrives before
233 * set_at_max_writeback_rate() returns.
234 */
235 if (!idle_counter_exceeded(c) ||
236 !atomic_read(&c->at_max_writeback_rate))
237 return false;
238
239 return true;
240 }
241
242 static void update_writeback_rate(struct work_struct *work)
243 {
244 struct cached_dev *dc = container_of(to_delayed_work(work),
245 struct cached_dev,
246 writeback_rate_update);
247 struct cache_set *c = dc->disk.c;
248
249 /*
250 * The caller should check BCACHE_DEV_RATE_DW_RUNNING before
251 * calling cancel_delayed_work_sync().
252 */
253 set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
254 /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
255 smp_mb__after_atomic();
256
257 /*
258 * CACHE_SET_IO_DISABLE might be set via sysfs interface,
259 * check it here too.
260 */
261 if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) ||
262 test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
263 clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
264 /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
265 smp_mb__after_atomic();
266 return;
267 }
268
269 if (atomic_read(&dc->has_dirty) && dc->writeback_percent) {
270 /*
271 * If the whole cache set is idle, set_at_max_writeback_rate()
272 * will set the writeback rate to the maximum. Then it is
273 * unnecessary to update the writeback rate again for an idle
274 * cache set that is already at the maximum writeback rate.
275 */
276 if (!set_at_max_writeback_rate(c, dc)) {
277 down_read(&dc->writeback_lock);
278 __update_writeback_rate(dc);
279 update_gc_after_writeback(c);
280 up_read(&dc->writeback_lock);
281 }
282 }
283
284
285 /*
286 * CACHE_SET_IO_DISABLE might be set via sysfs interface,
287 * check it here too.
288 */
289 if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) &&
290 !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
291 schedule_delayed_work(&dc->writeback_rate_update,
292 dc->writeback_rate_update_seconds * HZ);
293 }
294
295 /*
296 * The caller should check BCACHE_DEV_RATE_DW_RUNNING before
297 * calling cancel_delayed_work_sync().
298 */
299 clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
300 /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
301 smp_mb__after_atomic();
302 }
303
304 static unsigned int writeback_delay(struct cached_dev *dc,
305 unsigned int sectors)
306 {
307 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
308 !dc->writeback_percent)
309 return 0;
310
311 return bch_next_delay(&dc->writeback_rate, sectors);
312 }
313
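/*
 * One in-flight writeback unit: cl tracks the read-then-write
 * lifetime, dc points back to the owning cached device, sequence is
 * used by write_dirty() to keep backing-device writes in the order
 * the keys were gathered, and bio (with inline vecs sized in
 * read_dirty()) carries the data for both the cache read and the
 * backing-device write.
 */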
314 struct dirty_io {
315 struct closure cl;
316 struct cached_dev *dc;
317 uint16_t sequence;
318 struct bio bio;
319 };
320
321 static void dirty_init(struct keybuf_key *w)
322 {
323 struct dirty_io *io = w->private;
324 struct bio *bio = &io->bio;
325
326 bio_init(bio, bio->bi_inline_vecs,
327 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
328 if (!io->dc->writeback_percent)
329 bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
330
331 bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
332 bio->bi_private = w;
333 bch_bio_map(bio, NULL);
334 }
335
336 static void dirty_io_destructor(struct closure *cl)
337 {
338 struct dirty_io *io = container_of(cl, struct dirty_io, cl);
339
340 kfree(io);
341 }
342
343 static void write_dirty_finish(struct closure *cl)
344 {
345 struct dirty_io *io = container_of(cl, struct dirty_io, cl);
346 struct keybuf_key *w = io->bio.bi_private;
347 struct cached_dev *dc = io->dc;
348
349 bio_free_pages(&io->bio);
350
351 /* This is kind of a dumb way of signalling errors. */
352 if (KEY_DIRTY(&w->key)) {
353 int ret;
354 unsigned int i;
355 struct keylist keys;
356
357 bch_keylist_init(&keys);
358
359 bkey_copy(keys.top, &w->key);
360 SET_KEY_DIRTY(keys.top, false);
361 bch_keylist_push(&keys);
362
363 for (i = 0; i < KEY_PTRS(&w->key); i++)
364 atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
365
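/*
 * The original key is passed as the replace key: the insert should
 * only clear the dirty bit if the key in the btree still matches
 * what was written back; otherwise it fails and is counted as a
 * writeback collision below.
 */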
366 ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);
367
368 if (ret)
369 trace_bcache_writeback_collision(&w->key);
370
371 atomic_long_inc(ret
372 ? &dc->disk.c->writeback_keys_failed
373 : &dc->disk.c->writeback_keys_done);
374 }
375
376 bch_keybuf_del(&dc->writeback_keys, w);
377 up(&dc->in_flight);
378
379 closure_return_with_destructor(cl, dirty_io_destructor);
380 }
381
382 static void dirty_endio(struct bio *bio)
383 {
384 struct keybuf_key *w = bio->bi_private;
385 struct dirty_io *io = w->private;
386
387 if (bio->bi_status) {
388 SET_KEY_DIRTY(&w->key, false);
389 bch_count_backing_io_errors(io->dc, bio);
390 }
391
392 closure_put(&io->cl);
393 }
394
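/*
 * Backing-device writes are issued in the order the dirty keys were
 * collected: each dirty_io carries a sequence number, and
 * write_dirty() only proceeds once dc->writeback_sequence_next
 * matches its sequence, parking on dc->writeback_ordering_wait
 * otherwise. This keeps backing-device writes mostly sequential even
 * though the cache reads complete in arbitrary order.
 */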
395 static void write_dirty(struct closure *cl)
396 {
397 struct dirty_io *io = container_of(cl, struct dirty_io, cl);
398 struct keybuf_key *w = io->bio.bi_private;
399 struct cached_dev *dc = io->dc;
400
401 uint16_t next_sequence;
402
403 if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
404 /* Not our turn to write; wait for a write to complete */
405 closure_wait(&dc->writeback_ordering_wait, cl);
406
407 if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
408 /*
409 * Edge case -- it happened in an indeterminate order
410 * relative to when we were added to the wait list.
411 */
412 closure_wake_up(&dc->writeback_ordering_wait);
413 }
414
415 continue_at(cl, write_dirty, io->dc->writeback_write_wq);
416 return;
417 }
418
419 next_sequence = io->sequence + 1;
420
421 /*
422 * IO errors are signalled using the dirty bit on the key.
423 * If we failed to read, we should not attempt to write to the
424 * backing device. Instead, immediately go to write_dirty_finish
425 * to clean up.
426 */
427 if (KEY_DIRTY(&w->key)) {
428 dirty_init(w);
429 bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
430 io->bio.bi_iter.bi_sector = KEY_START(&w->key);
431 bio_set_dev(&io->bio, io->dc->bdev);
432 io->bio.bi_end_io = dirty_endio;
433
434 /* I/O request sent to backing device */
435 closure_bio_submit(io->dc->disk.c, &io->bio, cl);
436 }
437
438 atomic_set(&dc->writeback_sequence_next, next_sequence);
439 closure_wake_up(&dc->writeback_ordering_wait);
440
441 continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
442 }
443
444 static void read_dirty_endio(struct bio *bio)
445 {
446 struct keybuf_key *w = bio->bi_private;
447 struct dirty_io *io = w->private;
448
449 /* is_read = 1 */
450 bch_count_io_errors(io->dc->disk.c->cache,
451 bio->bi_status, 1,
452 "reading dirty data from cache");
453
454 dirty_endio(bio);
455 }
456
457 static void read_dirty_submit(struct closure *cl)
458 {
459 struct dirty_io *io = container_of(cl, struct dirty_io, cl);
460
461 closure_bio_submit(io->dc->disk.c, &io->bio, cl);
462
463 continue_at(cl, write_dirty, io->dc->writeback_write_wq);
464 }
465
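/*
 * read_dirty() drives the writeback pipeline: gather up to
 * MAX_WRITEBACKS_IN_PASS contiguous dirty keys, read their data from
 * the cache device (read_dirty_submit), write it to the backing
 * device (write_dirty), then clear the dirty bits in the btree
 * (write_dirty_finish). The dc->in_flight semaphore bounds how many
 * of these units may be outstanding at once, and writeback_delay()
 * paces the loop according to the computed writeback rate.
 */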
466 static void read_dirty(struct cached_dev *dc)
467 {
468 unsigned int delay = 0;
469 struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
470 size_t size;
471 int nk, i;
472 struct dirty_io *io;
473 struct closure cl;
474 uint16_t sequence = 0;
475
476 BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
477 atomic_set(&dc->writeback_sequence_next, sequence);
478 closure_init_stack(&cl);
479
480 /*
481 * XXX: if we error, background writeback just spins. Should use some
482 * mempools.
483 */
484
485 next = bch_keybuf_next(&dc->writeback_keys);
486
487 while (!kthread_should_stop() &&
488 !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
489 next) {
490 size = 0;
491 nk = 0;
492
493 do {
494 BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));
495
496 /*
497 * Don't combine too many operations, even if they
498 * are all small.
499 */
500 if (nk >= MAX_WRITEBACKS_IN_PASS)
501 break;
502
503 /*
504 * If the current operation is very large, don't
505 * further combine operations.
506 */
507 if (size >= MAX_WRITESIZE_IN_PASS)
508 break;
509
510 /*
511 * Operations are only eligible to be combined
512 * if they are contiguous.
513 *
514 * TODO: add a heuristic willing to fire a
515 * certain amount of non-contiguous IO per pass,
516 * so that we can benefit from backing device
517 * command queueing.
518 */
519 if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
520 &START_KEY(&next->key)))
521 break;
522
523 size += KEY_SIZE(&next->key);
524 keys[nk++] = next;
525 } while ((next = bch_keybuf_next(&dc->writeback_keys)));
526
527 /* Now we have gathered a set of 1..5 keys to write back. */
528 for (i = 0; i < nk; i++) {
529 w = keys[i];
530
531 io = kzalloc(struct_size(io, bio.bi_inline_vecs,
532 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS)),
533 GFP_KERNEL);
534 if (!io)
535 goto err;
536
537 w->private = io;
538 io->dc = dc;
539 io->sequence = sequence++;
540
541 dirty_init(w);
542 bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
543 io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
544 bio_set_dev(&io->bio, dc->disk.c->cache->bdev);
545 io->bio.bi_end_io = read_dirty_endio;
546
547 if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
548 goto err_free;
549
550 trace_bcache_writeback(&w->key);
551
552 down(&dc->in_flight);
553
554 /*
555 * We've acquired a semaphore for the maximum
556 * simultaneous number of writebacks; from here
557 * everything happens asynchronously.
558 */
559 closure_call(&io->cl, read_dirty_submit, NULL, &cl);
560 }
561
562 delay = writeback_delay(dc, size);
563
564 while (!kthread_should_stop() &&
565 !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
566 delay) {
567 schedule_timeout_interruptible(delay);
568 delay = writeback_delay(dc, 0);
569 }
570 }
571
572 if (0) {
573 err_free:
574 kfree(w->private);
575 err:
576 bch_keybuf_del(&dc->writeback_keys, w);
577 }
578
579 /*
580 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
581 * freed) before refilling again
582 */
583 closure_sync(&cl);
584 }
585
586 /* Scan for dirty data */
587
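/*
 * Dirty sectors are accounted per backing-device stripe: each stripe
 * has an atomic counter in d->stripe_sectors_dirty, and a bit in
 * d->full_dirty_stripes is set once a stripe becomes entirely dirty.
 * refill_full_stripes() uses that bitmap to prefer writing back whole
 * stripes on devices where partial stripe writes are expensive. Note
 * the stripe_offset mask below is only exact if d->stripe_size is a
 * power of two.
 */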
588 void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
589 uint64_t offset, int nr_sectors)
590 {
591 struct bcache_device *d = c->devices[inode];
592 unsigned int stripe_offset, sectors_dirty;
593 int stripe;
594
595 if (!d)
596 return;
597
598 stripe = offset_to_stripe(d, offset);
599 if (stripe < 0)
600 return;
601
602 if (UUID_FLASH_ONLY(&c->uuids[inode]))
603 atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);
604
605 stripe_offset = offset & (d->stripe_size - 1);
606
607 while (nr_sectors) {
608 int s = min_t(unsigned int, abs(nr_sectors),
609 d->stripe_size - stripe_offset);
610
611 if (nr_sectors < 0)
612 s = -s;
613
614 if (stripe >= d->nr_stripes)
615 return;
616
617 sectors_dirty = atomic_add_return(s,
618 d->stripe_sectors_dirty + stripe);
619 if (sectors_dirty == d->stripe_size)
620 set_bit(stripe, d->full_dirty_stripes);
621 else
622 clear_bit(stripe, d->full_dirty_stripes);
623
624 nr_sectors -= s;
625 stripe_offset = 0;
626 stripe++;
627 }
628 }
629
630 static bool dirty_pred(struct keybuf *buf, struct bkey *k)
631 {
632 struct cached_dev *dc = container_of(buf,
633 struct cached_dev,
634 writeback_keys);
635
636 BUG_ON(KEY_INODE(k) != dc->disk.id);
637
638 return KEY_DIRTY(k);
639 }
640
641 static void refill_full_stripes(struct cached_dev *dc)
642 {
643 struct keybuf *buf = &dc->writeback_keys;
644 unsigned int start_stripe, next_stripe;
645 int stripe;
646 bool wrapped = false;
647
648 stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
649 if (stripe < 0)
650 stripe = 0;
651
652 start_stripe = stripe;
653
654 while (1) {
655 stripe = find_next_bit(dc->disk.full_dirty_stripes,
656 dc->disk.nr_stripes, stripe);
657
658 if (stripe == dc->disk.nr_stripes)
659 goto next;
660
661 next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
662 dc->disk.nr_stripes, stripe);
663
664 buf->last_scanned = KEY(dc->disk.id,
665 stripe * dc->disk.stripe_size, 0);
666
667 bch_refill_keybuf(dc->disk.c, buf,
668 &KEY(dc->disk.id,
669 next_stripe * dc->disk.stripe_size, 0),
670 dirty_pred);
671
672 if (array_freelist_empty(&buf->freelist))
673 return;
674
675 stripe = next_stripe;
676 next:
677 if (wrapped && stripe > start_stripe)
678 return;
679
680 if (stripe == dc->disk.nr_stripes) {
681 stripe = 0;
682 wrapped = true;
683 }
684 }
685 }
686
687 /*
688 * Returns true if we scanned the entire disk
689 */
690 static bool refill_dirty(struct cached_dev *dc)
691 {
692 struct keybuf *buf = &dc->writeback_keys;
693 struct bkey start = KEY(dc->disk.id, 0, 0);
694 struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
695 struct bkey start_pos;
696
697 /*
698 * Make sure the keybuf position is inside the range for this
699 * disk; at bringup we might not be attached yet, so this disk's
700 * inode number isn't initialized at that point.
701 */
702 if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
703 bkey_cmp(&buf->last_scanned, &end) > 0)
704 buf->last_scanned = start;
705
706 if (dc->partial_stripes_expensive) {
707 refill_full_stripes(dc);
708 if (array_freelist_empty(&buf->freelist))
709 return false;
710 }
711
712 start_pos = buf->last_scanned;
713 bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
714
715 if (bkey_cmp(&buf->last_scanned, &end) < 0)
716 return false;
717
718 /*
719 * If we get to the end start scanning again from the beginning, and
720 * only scan up to where we initially started scanning from:
721 */
722 buf->last_scanned = start;
723 bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);
724
725 return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
726 }
727
728 static int bch_writeback_thread(void *arg)
729 {
730 struct cached_dev *dc = arg;
731 struct cache_set *c = dc->disk.c;
732 bool searched_full_index;
733
734 bch_ratelimit_reset(&dc->writeback_rate);
735
736 while (!kthread_should_stop() &&
737 !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
738 down_write(&dc->writeback_lock);
739 set_current_state(TASK_INTERRUPTIBLE);
740 /*
741 * If the bcache device is detaching, skip here and continue
742 * to perform writeback. Otherwise, if there is no dirty data
743 * in the cache, or there is dirty data but writeback is
744 * disabled, the writeback thread should sleep here and wait
745 * for others to wake it up.
746 */
747 if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
748 (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
749 up_write(&dc->writeback_lock);
750
751 if (kthread_should_stop() ||
752 test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
753 set_current_state(TASK_RUNNING);
754 break;
755 }
756
757 schedule();
758 continue;
759 }
760 set_current_state(TASK_RUNNING);
761
762 searched_full_index = refill_dirty(dc);
763
764 if (searched_full_index &&
765 RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
766 atomic_set(&dc->has_dirty, 0);
767 SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
768 bch_write_bdev_super(dc, NULL);
769 /*
770 * If bcache device is detaching via sysfs interface,
771 * writeback thread should stop after there is no dirty
772 * data on cache. BCACHE_DEV_DETACHING flag is set in
773 * bch_cached_dev_detach().
774 */
775 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
776 struct closure cl;
777
778 closure_init_stack(&cl);
779 memset(&dc->sb.set_uuid, 0, 16);
780 SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
781
782 bch_write_bdev_super(dc, &cl);
783 closure_sync(&cl);
784
785 up_write(&dc->writeback_lock);
786 break;
787 }
788
789 /*
790 * When the dirty data ratio is high (e.g. 50%+), there might
791 * be heavy bucket fragmentation after writeback
792 * finishes, which hurts subsequent write performance.
793 * If users really care about write performance they
794 * may set BCH_ENABLE_AUTO_GC via sysfs; then, when
795 * BCH_DO_AUTO_GC is set, the garbage collection thread
796 * will be woken up here. After the moving gc, the shrunk
797 * btree and the discarded free bucket space on the SSD
798 * may be helpful for subsequent write requests.
799 */
800 if (c->gc_after_writeback ==
801 (BCH_ENABLE_AUTO_GC|BCH_DO_AUTO_GC)) {
802 c->gc_after_writeback &= ~BCH_DO_AUTO_GC;
803 force_wake_up_gc(c);
804 }
805 }
806
807 up_write(&dc->writeback_lock);
808
809 read_dirty(dc);
810
811 if (searched_full_index) {
812 unsigned int delay = dc->writeback_delay * HZ;
813
814 while (delay &&
815 !kthread_should_stop() &&
816 !test_bit(CACHE_SET_IO_DISABLE, &c->flags) &&
817 !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
818 delay = schedule_timeout_interruptible(delay);
819
820 bch_ratelimit_reset(&dc->writeback_rate);
821 }
822 }
823
824 if (dc->writeback_write_wq) {
825 flush_workqueue(dc->writeback_write_wq);
826 destroy_workqueue(dc->writeback_write_wq);
827 }
828 cached_dev_put(dc);
829 wait_for_kthread_stop();
830
831 return 0;
832 }
833
834 /* Init */
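/*
 * Dirty-sector counters are rebuilt from the btree when a device is
 * brought up. The walk calls cond_resched() every INIT_KEYS_EACH_TIME
 * keys so that a large btree does not monopolise the CPU.
 */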
835 #define INIT_KEYS_EACH_TIME 500000
836
837 struct sectors_dirty_init {
838 struct btree_op op;
839 unsigned int inode;
840 size_t count;
841 };
842
843 static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
844 struct bkey *k)
845 {
846 struct sectors_dirty_init *op = container_of(_op,
847 struct sectors_dirty_init, op);
848 if (KEY_INODE(k) > op->inode)
849 return MAP_DONE;
850
851 if (KEY_DIRTY(k))
852 bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
853 KEY_START(k), KEY_SIZE(k));
854
855 op->count++;
856 if (!(op->count % INIT_KEYS_EACH_TIME))
857 cond_resched();
858
859 return MAP_CONTINUE;
860 }
861
862 static int bch_root_node_dirty_init(struct cache_set *c,
863 struct bcache_device *d,
864 struct bkey *k)
865 {
866 struct sectors_dirty_init op;
867 int ret;
868
869 bch_btree_op_init(&op.op, -1);
870 op.inode = d->id;
871 op.count = 0;
872
873 ret = bcache_btree(map_keys_recurse,
874 k,
875 c->root,
876 &op.op,
877 &KEY(op.inode, 0, 0),
878 sectors_dirty_init_fn,
879 0);
880 if (ret < 0)
881 pr_warn("sectors dirty init failed, ret=%d!\n", ret);
882
883 /*
884 * The op may have been added to the cache_set's btree_cache_wait
885 * in mca_cannibalize(); we must ensure it is removed from the
886 * list and that btree_cache_alloc_lock is released before the
887 * op memory is freed.
888 * Otherwise, the btree_cache_wait list will be corrupted.
889 */
890 bch_cannibalize_unlock(c);
891 finish_wait(&c->btree_cache_wait, &(&op.op)->wait);
892
893 return ret;
894 }
895
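/*
 * Each dirty-init thread claims the next unclaimed root-node key
 * index from state->key_idx (under idx_lock), skips its private
 * iterator forward to that key, and recounts the dirty sectors in
 * the subtree below it via bch_root_node_dirty_init(). state->enough
 * is set once the iterator runs out of keys, which also stops
 * bch_sectors_dirty_init() from spawning further threads.
 */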
896 static int bch_dirty_init_thread(void *arg)
897 {
898 struct dirty_init_thrd_info *info = arg;
899 struct bch_dirty_init_state *state = info->state;
900 struct cache_set *c = state->c;
901 struct btree_iter iter;
902 struct bkey *k, *p;
903 int cur_idx, prev_idx, skip_nr;
904
905 k = p = NULL;
906 prev_idx = 0;
907
908 bch_btree_iter_init(&c->root->keys, &iter, NULL);
909 k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
910 BUG_ON(!k);
911
912 p = k;
913
914 while (k) {
915 spin_lock(&state->idx_lock);
916 cur_idx = state->key_idx;
917 state->key_idx++;
918 spin_unlock(&state->idx_lock);
919
920 skip_nr = cur_idx - prev_idx;
921
922 while (skip_nr) {
923 k = bch_btree_iter_next_filter(&iter,
924 &c->root->keys,
925 bch_ptr_bad);
926 if (k)
927 p = k;
928 else {
929 atomic_set(&state->enough, 1);
930 /* Update state->enough earlier */
931 smp_mb__after_atomic();
932 goto out;
933 }
934 skip_nr--;
935 }
936
937 if (p) {
938 if (bch_root_node_dirty_init(c, state->d, p) < 0)
939 goto out;
940 }
941
942 p = NULL;
943 prev_idx = cur_idx;
944 }
945
946 out:
947 /* In order to wake up state->wait in time */
948 smp_mb__before_atomic();
949 if (atomic_dec_and_test(&state->started))
950 wake_up(&state->wait);
951
952 return 0;
953 }
954
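/*
 * Use half of the online CPUs for the parallel dirty-init walk,
 * clamped to at least one thread and at most BCH_DIRTY_INIT_THRD_MAX;
 * for example, a machine with 8 online CPUs gets 4 threads.
 */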
955 static int bch_btre_dirty_init_thread_nr(void)
956 {
957 int n = num_online_cpus()/2;
958
959 if (n == 0)
960 n = 1;
961 else if (n > BCH_DIRTY_INIT_THRD_MAX)
962 n = BCH_DIRTY_INIT_THRD_MAX;
963
964 return n;
965 }
966
967 void bch_sectors_dirty_init(struct bcache_device *d)
968 {
969 int i;
970 struct btree *b = NULL;
971 struct bkey *k = NULL;
972 struct btree_iter iter;
973 struct sectors_dirty_init op;
974 struct cache_set *c = d->c;
975 struct bch_dirty_init_state state;
976
977 retry_lock:
978 b = c->root;
979 rw_lock(0, b, b->level);
980 if (b != c->root) {
981 rw_unlock(0, b);
982 goto retry_lock;
983 }
984
985 /* Just count root keys if no leaf node */
986 if (c->root->level == 0) {
987 bch_btree_op_init(&op.op, -1);
988 op.inode = d->id;
989 op.count = 0;
990
991 for_each_key_filter(&c->root->keys,
992 k, &iter, bch_ptr_invalid) {
993 if (KEY_INODE(k) != op.inode)
994 continue;
995 sectors_dirty_init_fn(&op.op, c->root, k);
996 }
997
998 rw_unlock(0, b);
999 return;
1000 }
1001
1002 memset(&state, 0, sizeof(struct bch_dirty_init_state));
1003 state.c = c;
1004 state.d = d;
1005 state.total_threads = bch_btre_dirty_init_thread_nr();
1006 state.key_idx = 0;
1007 spin_lock_init(&state.idx_lock);
1008 atomic_set(&state.started, 0);
1009 atomic_set(&state.enough, 0);
1010 init_waitqueue_head(&state.wait);
1011
1012 for (i = 0; i < state.total_threads; i++) {
1013 /* Fetch latest state.enough earlier */
1014 smp_mb__before_atomic();
1015 if (atomic_read(&state.enough))
1016 break;
1017
1018 atomic_inc(&state.started);
1019 state.infos[i].state = &state;
1020 state.infos[i].thread =
1021 kthread_run(bch_dirty_init_thread, &state.infos[i],
1022 "bch_dirtcnt[%d]", i);
1023 if (IS_ERR(state.infos[i].thread)) {
1024 pr_err("fails to run thread bch_dirty_init[%d]\n", i);
1025 atomic_dec(&state.started);
1026 for (--i; i >= 0; i--)
1027 kthread_stop(state.infos[i].thread);
1028 goto out;
1029 }
1030 }
1031
1032 out:
1033 /* Must wait for all threads to stop. */
1034 wait_event(state.wait, atomic_read(&state.started) == 0);
1035 rw_unlock(0, b);
1036 }
1037
1038 void bch_cached_dev_writeback_init(struct cached_dev *dc)
1039 {
1040 sema_init(&dc->in_flight, 64);
1041 init_rwsem(&dc->writeback_lock);
1042 bch_keybuf_init(&dc->writeback_keys);
1043
1044 dc->writeback_metadata = true;
1045 dc->writeback_running = false;
1046 dc->writeback_consider_fragment = true;
1047 dc->writeback_percent = 10;
1048 dc->writeback_delay = 30;
1049 atomic_long_set(&dc->writeback_rate.rate, 1024);
1050 dc->writeback_rate_minimum = 8;
1051
1052 dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
1053 dc->writeback_rate_p_term_inverse = 40;
1054 dc->writeback_rate_fp_term_low = 1;
1055 dc->writeback_rate_fp_term_mid = 10;
1056 dc->writeback_rate_fp_term_high = 1000;
1057 dc->writeback_rate_i_term_inverse = 10000;
1058
1059 WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
1060 INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
1061 }
1062
1063 int bch_cached_dev_writeback_start(struct cached_dev *dc)
1064 {
1065 dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
1066 WQ_MEM_RECLAIM, 0);
1067 if (!dc->writeback_write_wq)
1068 return -ENOMEM;
1069
1070 cached_dev_get(dc);
1071 dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
1072 "bcache_writeback");
1073 if (IS_ERR(dc->writeback_thread)) {
1074 cached_dev_put(dc);
1075 destroy_workqueue(dc->writeback_write_wq);
1076 return PTR_ERR(dc->writeback_thread);
1077 }
1078 dc->writeback_running = true;
1079
1080 WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
1081 schedule_delayed_work(&dc->writeback_rate_update,
1082 dc->writeback_rate_update_seconds * HZ);
1083
1084 bch_writeback_queue(dc);
1085
1086 return 0;
1087 }
1088