// SPDX-License-Identifier: GPL-2.0
/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/sched/clock.h>
#include <trace/events/bcache.h>

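/*
 * If the user has enabled BCH_ENABLE_AUTO_GC and the in-use bucket ratio
 * has reached BCH_AUTO_GC_DIRTY_THRESHOLD, request an automatic garbage
 * collection run once writeback finishes (see bch_writeback_thread()).
 */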
static void update_gc_after_writeback(struct cache_set *c)
{
	if (c->gc_after_writeback != (BCH_ENABLE_AUTO_GC) ||
	    c->gc_stats.in_use < BCH_AUTO_GC_DIRTY_THRESHOLD)
		return;

	c->gc_after_writeback |= BCH_DO_AUTO_GC;
}

/* Rate limiting */
static uint64_t __calc_target_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;

	/*
	 * This is the size of the cache, minus the amount used for
	 * flash-only devices
	 */
	uint64_t cache_sectors = c->nbuckets * c->cache->sb.bucket_size -
				atomic_long_read(&c->flash_dev_dirty_sectors);

	/*
	 * Unfortunately there is no control of global dirty data.  If the
	 * user states that they want 10% dirty data in the cache, and has,
	 * e.g., 5 backing volumes of equal size, we try and ensure each
	 * backing volume uses about 2% of the cache for dirty data.
	 */
	uint32_t bdev_share =
		div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
				c->cached_dev_sectors);

	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	/* Ensure each backing dev gets at least one dirty share */
	if (bdev_share < 1)
		bdev_share = 1;

	return (cache_dirty_target * bdev_share) >> WRITEBACK_SHARE_SHIFT;
}

static void __update_writeback_rate(struct cached_dev *dc)
{
	/*
	 * PI controller:
	 * Figures out the amount that should be written per second.
	 *
	 * First, the error (number of sectors that are dirty beyond our
	 * target) is calculated.  The error is accumulated (numerically
	 * integrated).
	 *
	 * Then, the proportional value and integral value are scaled
	 * based on configured values.  These are stored as inverses to
	 * avoid fixed point math and to make configuration easy -- e.g.
	 * the default value of 40 for writeback_rate_p_term_inverse
	 * attempts to write at a rate that would retire all the dirty
	 * blocks in 40 seconds.
	 *
	 * The writeback_rate_i_term_inverse value of 10000 means that
	 * 1/10000th of the error is accumulated in the integral term per
	 * second. This acts as a slow, long-term average that is not
	 * subject to variations in usage like the p term.
	 */
	int64_t target = __calc_target_rate(dc);
	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t error = dirty - target;
	int64_t proportional_scaled =
		div_s64(error, dc->writeback_rate_p_term_inverse);
	int64_t integral_scaled;
	uint32_t new_rate;

	if ((error < 0 && dc->writeback_rate_integral > 0) ||
	    (error > 0 && time_before64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))) {
		/*
		 * Only decrease the integral term if it's more than
		 * zero.  Only increase the integral term if the device
		 * is keeping up.  (Don't wind up the integral
		 * ineffectively in either case).
		 *
		 * It's necessary to scale this by
		 * writeback_rate_update_seconds to keep the integral
		 * term dimensioned properly.
		 */
		dc->writeback_rate_integral += error *
			dc->writeback_rate_update_seconds;
	}

	integral_scaled = div_s64(dc->writeback_rate_integral,
			dc->writeback_rate_i_term_inverse);

	new_rate = clamp_t(int32_t, (proportional_scaled + integral_scaled),
			dc->writeback_rate_minimum, NSEC_PER_SEC);

	dc->writeback_rate_proportional = proportional_scaled;
	dc->writeback_rate_integral_scaled = integral_scaled;
	dc->writeback_rate_change = new_rate -
			atomic_long_read(&dc->writeback_rate.rate);
	atomic_long_set(&dc->writeback_rate.rate, new_rate);
	dc->writeback_rate_target = target;
}

static bool idle_counter_exceeded(struct cache_set *c)
{
	int counter, dev_nr;

	/*
	 * If c->idle_counter overflows (idle for a really long time),
	 * reset it to 0 and do not set the maximum rate this time, for
	 * code simplicity.
	 */
	counter = atomic_inc_return(&c->idle_counter);
	if (counter <= 0) {
		atomic_set(&c->idle_counter, 0);
		return false;
	}

	dev_nr = atomic_read(&c->attached_dev_nr);
	if (dev_nr == 0)
		return false;

	/*
	 * c->idle_counter is increased by the writeback threads of all
	 * attached backing devices; in order to represent a rough time
	 * period, the counter should be divided by dev_nr. Otherwise the
	 * idle time could not grow as more backing devices are attached.
	 * The following calculation is equivalent to checking
	 *	(counter / dev_nr) < (dev_nr * 6)
	 */
	if (counter < (dev_nr * dev_nr * 6))
		return false;

	return true;
}

/*
 * Idle_counter is increased every time update_writeback_rate() is
 * called. If all backing devices attached to the same cache set have
 * identical dc->writeback_rate_update_seconds values, it takes about 6
 * rounds of update_writeback_rate() on each backing device before
 * c->at_max_writeback_rate is set to 1, and then the maximum writeback
 * rate is set for each dc->writeback_rate.rate.
 * In order to avoid the extra locking cost of counting the exact number
 * of dirty cached devices, c->attached_dev_nr is used to calculate the
 * idle threshold. It might be bigger if not all cached devices are in
 * write-back mode, but it still works well with limited extra rounds of
 * update_writeback_rate().
 */
static bool set_at_max_writeback_rate(struct cache_set *c,
				       struct cached_dev *dc)
{
	/* Don't set max writeback rate if it is disabled */
	if (!c->idle_max_writeback_rate_enabled)
		return false;

	/* Don't set max writeback rate if gc is running */
	if (!c->gc_mark_valid)
		return false;

	if (!idle_counter_exceeded(c))
		return false;

	if (atomic_read(&c->at_max_writeback_rate) != 1)
		atomic_set(&c->at_max_writeback_rate, 1);

	atomic_long_set(&dc->writeback_rate.rate, INT_MAX);

	/* keep writeback_rate_target as existing value */
	dc->writeback_rate_proportional = 0;
	dc->writeback_rate_integral_scaled = 0;
	dc->writeback_rate_change = 0;

	/*
	 * In case new I/O arrives before set_at_max_writeback_rate()
	 * returns.
	 */
	if (!idle_counter_exceeded(c) ||
	    !atomic_read(&c->at_max_writeback_rate))
		return false;

	return true;
}

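/*
 * Delayed work callback: periodically recompute the writeback rate while
 * the backing device has dirty data, then re-arm itself unless writeback
 * is stopping or I/O on the cache set has been disabled.
 */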
static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);
	struct cache_set *c = dc->disk.c;

	/*
	 * should check BCACHE_DEV_RATE_DW_RUNNING before calling
	 * cancel_delayed_work_sync().
	 */
	set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
	/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
	smp_mb__after_atomic();

	/*
	 * CACHE_SET_IO_DISABLE might be set via sysfs interface,
	 * check it here too.
	 */
	if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) ||
	    test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
		/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
		smp_mb__after_atomic();
		return;
	}

	if (atomic_read(&dc->has_dirty) && dc->writeback_percent) {
		/*
		 * If the whole cache set is idle, set_at_max_writeback_rate()
		 * will set the writeback rate to a maximum number. Then it is
		 * unnecessary to update the writeback rate for an idle cache
		 * set which is already at the maximum writeback rate.
		 */
		if (!set_at_max_writeback_rate(c, dc)) {
			down_read(&dc->writeback_lock);
			__update_writeback_rate(dc);
			update_gc_after_writeback(c);
			up_read(&dc->writeback_lock);
		}
	}

	/*
	 * CACHE_SET_IO_DISABLE might be set via sysfs interface,
	 * check it here too.
	 */
	if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) &&
	    !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
	}

	/*
	 * should check BCACHE_DEV_RATE_DW_RUNNING before calling
	 * cancel_delayed_work_sync().
	 */
	clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags);
	/* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */
	smp_mb__after_atomic();
}

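/*
 * Returns how long (in jiffies) the writeback thread should sleep after
 * writing out 'sectors' sectors, based on the current rate limit. There
 * is no delay while detaching or when writeback_percent is 0.
 */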
static unsigned int writeback_delay(struct cached_dev *dc,
				    unsigned int sectors)
{
	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    !dc->writeback_percent)
		return 0;

	return bch_next_delay(&dc->writeback_rate, sectors);
}

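/*
 * Per-key writeback I/O: the key's data is read from the cache device and
 * then written to the backing device. 'bio' must stay the last member,
 * since its inline bio vecs are allocated right behind it (see the
 * struct_size() allocation in read_dirty()).
 */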
struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
	uint16_t		sequence;
	struct bio		bio;
};

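/*
 * Set up the bio embedded in a dirty_io for the key about to be written
 * back; the bio gets idle I/O priority when writeback_percent is 0.
 */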
static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio, bio->bi_inline_vecs,
		 DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS));
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size	= KEY_SIZE(&w->key) << 9;
	bio->bi_private		= w;
	bch_bio_map(bio, NULL);
}

static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	kfree(io);
}

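/*
 * Final step of a writeback: clear the dirty bit on the key in the btree
 * (unless the I/O failed), drop the keybuf entry and release the
 * in_flight semaphore taken in read_dirty().
 */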
static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	bio_free_pages(&io->bio);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		int ret;
		unsigned int i;
		struct keylist keys;

		bch_keylist_init(&keys);

		bkey_copy(keys.top, &w->key);
		SET_KEY_DIRTY(keys.top, false);
		bch_keylist_push(&keys);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

		if (ret)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(ret
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}

static void dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (bio->bi_status) {
		SET_KEY_DIRTY(&w->key, false);
		bch_count_backing_io_errors(io->dc, bio);
	}

	closure_put(&io->cl);
}

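/*
 * Issue the write to the backing device. Writes are submitted in the
 * order given by io->sequence: if it is not this io's turn yet, park on
 * writeback_ordering_wait and retry from the writeback workqueue.
 */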
static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;

	uint16_t next_sequence;

	if (atomic_read(&dc->writeback_sequence_next) != io->sequence) {
		/* Not our turn to write; wait for a write to complete */
		closure_wait(&dc->writeback_ordering_wait, cl);

		if (atomic_read(&dc->writeback_sequence_next) == io->sequence) {
			/*
			 * Edge case: our turn arrived in indeterminate order
			 * relative to when we were added to the wait list.
			 */
			closure_wake_up(&dc->writeback_ordering_wait);
		}

		continue_at(cl, write_dirty, io->dc->writeback_write_wq);
		return;
	}

	next_sequence = io->sequence + 1;

	/*
	 * IO errors are signalled using the dirty bit on the key.
	 * If we failed to read, we should not attempt to write to the
	 * backing device.  Instead, immediately go to write_dirty_finish
	 * to clean up.
	 */
	if (KEY_DIRTY(&w->key)) {
		dirty_init(w);
		bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
		io->bio.bi_iter.bi_sector = KEY_START(&w->key);
		bio_set_dev(&io->bio, io->dc->bdev);
		io->bio.bi_end_io	= dirty_endio;

		/* I/O request sent to backing device */
		closure_bio_submit(io->dc->disk.c, &io->bio, cl);
	}

	atomic_set(&dc->writeback_sequence_next, next_sequence);
	closure_wake_up(&dc->writeback_ordering_wait);

	continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
}

static void read_dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	/* is_read = 1 */
	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    bio->bi_status, 1,
			    "reading dirty data from cache");

	dirty_endio(bio);
}

static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(io->dc->disk.c, &io->bio, cl);

	continue_at(cl, write_dirty, io->dc->writeback_write_wq);
}

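/*
 * Main writeback pass: walk the refilled keybuf, batch up to
 * MAX_WRITEBACKS_IN_PASS contiguous keys (or MAX_WRITESIZE_IN_PASS
 * sectors), read their data from the cache device and hand each one off
 * to write_dirty(), sleeping between batches according to the rate limit.
 */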
static void read_dirty(struct cached_dev *dc)
{
	unsigned int delay = 0;
	struct keybuf_key *next, *keys[MAX_WRITEBACKS_IN_PASS], *w;
	size_t size;
	int nk, i;
	struct dirty_io *io;
	struct closure cl;
	uint16_t sequence = 0;

	BUG_ON(!llist_empty(&dc->writeback_ordering_wait.list));
	atomic_set(&dc->writeback_sequence_next, sequence);
	closure_init_stack(&cl);

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	next = bch_keybuf_next(&dc->writeback_keys);

	while (!kthread_should_stop() &&
	       !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
	       next) {
		size = 0;
		nk = 0;

		do {
			BUG_ON(ptr_stale(dc->disk.c, &next->key, 0));

			/*
			 * Don't combine too many operations, even if they
			 * are all small.
			 */
			if (nk >= MAX_WRITEBACKS_IN_PASS)
				break;

			/*
			 * If the current operation is very large, don't
			 * further combine operations.
			 */
			if (size >= MAX_WRITESIZE_IN_PASS)
				break;

			/*
			 * Operations are only eligible to be combined
			 * if they are contiguous.
			 *
			 * TODO: add a heuristic willing to fire a
			 * certain amount of non-contiguous IO per pass,
			 * so that we can benefit from backing device
			 * command queueing.
			 */
			if ((nk != 0) && bkey_cmp(&keys[nk-1]->key,
						&START_KEY(&next->key)))
				break;

			size += KEY_SIZE(&next->key);
			keys[nk++] = next;
		} while ((next = bch_keybuf_next(&dc->writeback_keys)));

		/* Now we have gathered a set of 1..5 keys to write back. */
		for (i = 0; i < nk; i++) {
			w = keys[i];

			io = kzalloc(struct_size(io, bio.bi_inline_vecs,
						DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS)),
				     GFP_KERNEL);
			if (!io)
				goto err;

			w->private	= io;
			io->dc		= dc;
			io->sequence    = sequence++;

			dirty_init(w);
			bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
			io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
			bio_set_dev(&io->bio,
				    PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
			io->bio.bi_end_io	= read_dirty_endio;

			if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))
				goto err_free;

			trace_bcache_writeback(&w->key);

			down(&dc->in_flight);

			/*
			 * We've acquired a semaphore for the maximum
			 * simultaneous number of writebacks; from here
			 * everything happens asynchronously.
			 */
			closure_call(&io->cl, read_dirty_submit, NULL, &cl);
		}

		delay = writeback_delay(dc, size);

		while (!kthread_should_stop() &&
		       !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) &&
		       delay) {
			schedule_timeout_interruptible(delay);
			delay = writeback_delay(dc, 0);
		}
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */
	closure_sync(&cl);
}

/* Scan for dirty data */

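/*
 * Account nr_sectors of newly dirtied (or cleaned, if negative) data for
 * the given device, updating the per-stripe dirty counters and the
 * full_dirty_stripes bitmap used by refill_full_stripes().
 */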
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned int stripe_offset, sectors_dirty;
	int stripe;

	if (!d)
		return;

	stripe = offset_to_stripe(d, offset);
	if (stripe < 0)
		return;

	if (UUID_FLASH_ONLY(&c->uuids[inode]))
		atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);

	stripe_offset = offset & (d->stripe_size - 1);

	while (nr_sectors) {
		int s = min_t(unsigned int, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		if (stripe >= d->nr_stripes)
			return;

		sectors_dirty = atomic_add_return(s,
					d->stripe_sectors_dirty + stripe);
		if (sectors_dirty == d->stripe_size)
			set_bit(stripe, d->full_dirty_stripes);
		else
			clear_bit(stripe, d->full_dirty_stripes);

		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	struct cached_dev *dc = container_of(buf,
					     struct cached_dev,
					     writeback_keys);

	BUG_ON(KEY_INODE(k) != dc->disk.id);

	return KEY_DIRTY(k);
}

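/*
 * When partial stripe writes are expensive on the backing device (e.g.
 * RAID5/6), prefer writing back stripes that are completely dirty: scan
 * the full_dirty_stripes bitmap, starting from the last scanned position
 * and wrapping around, and refill the keybuf from those stripes only.
 */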
static void refill_full_stripes(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	unsigned int start_stripe, next_stripe;
	int stripe;
	bool wrapped = false;

	stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
	if (stripe < 0)
		stripe = 0;

	start_stripe = stripe;

	while (1) {
		stripe = find_next_bit(dc->disk.full_dirty_stripes,
				       dc->disk.nr_stripes, stripe);

		if (stripe == dc->disk.nr_stripes)
			goto next;

		next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
						 dc->disk.nr_stripes, stripe);

		buf->last_scanned = KEY(dc->disk.id,
					stripe * dc->disk.stripe_size, 0);

		bch_refill_keybuf(dc->disk.c, buf,
				  &KEY(dc->disk.id,
				       next_stripe * dc->disk.stripe_size, 0),
				  dirty_pred);

		if (array_freelist_empty(&buf->freelist))
			return;

		stripe = next_stripe;
next:
		if (wrapped && stripe > start_stripe)
			return;

		if (stripe == dc->disk.nr_stripes) {
			stripe = 0;
			wrapped = true;
		}
	}
}

/*
 * Returns true if we scanned the entire disk
 */
static bool refill_dirty(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	struct bkey start = KEY(dc->disk.id, 0, 0);
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
	struct bkey start_pos;

	/*
	 * make sure keybuf pos is inside the range for this disk - at bringup
	 * we might not be attached yet so this disk's inode nr isn't
	 * initialized then
	 */
	if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
	    bkey_cmp(&buf->last_scanned, &end) > 0)
		buf->last_scanned = start;

	if (dc->partial_stripes_expensive) {
		refill_full_stripes(dc);
		if (array_freelist_empty(&buf->freelist))
			return false;
	}

	start_pos = buf->last_scanned;
	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

	if (bkey_cmp(&buf->last_scanned, &end) < 0)
		return false;

	/*
	 * If we get to the end start scanning again from the beginning, and
	 * only scan up to where we initially started scanning from:
	 */
	buf->last_scanned = start;
	bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);

	return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
}

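/*
 * Writeback thread main loop: refill the keybuf with dirty keys, write
 * them back via read_dirty(), and once a full scan finds nothing dirty,
 * mark the backing device clean and sleep until woken again. The thread
 * exits when it is stopped, when I/O on the cache set is disabled, or
 * when the device finishes detaching with no dirty data left.
 */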
static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;
	struct cache_set *c = dc->disk.c;
	bool searched_full_index;

	bch_ratelimit_reset(&dc->writeback_rate);

	while (!kthread_should_stop() &&
	       !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
		down_write(&dc->writeback_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		/*
		 * If the bcache device is detaching, skip here and continue
		 * to perform writeback. Otherwise, if there is no dirty data
		 * on the cache, or there is dirty data but writeback is
		 * disabled, the writeback thread should sleep here and wait
		 * for others to wake it up.
		 */
		if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
		    (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
			up_write(&dc->writeback_lock);

			if (kthread_should_stop() ||
			    test_bit(CACHE_SET_IO_DISABLE, &c->flags)) {
				set_current_state(TASK_RUNNING);
				break;
			}

			schedule();
			continue;
		}
		set_current_state(TASK_RUNNING);

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			atomic_set(&dc->has_dirty, 0);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
			/*
			 * If the bcache device is detaching via the sysfs
			 * interface, the writeback thread should stop once
			 * there is no dirty data on the cache. The
			 * BCACHE_DEV_DETACHING flag is set in
			 * bch_cached_dev_detach().
			 */
			if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
				up_write(&dc->writeback_lock);
				break;
			}

			/*
			 * When the dirty data ratio is high (e.g. 50%+), there
			 * might be heavy bucket fragmentation after writeback
			 * finishes, which hurts subsequent write performance.
			 * If users really care about write performance they
			 * may set BCH_ENABLE_AUTO_GC via sysfs; then, when
			 * BCH_DO_AUTO_GC is set, the garbage collection thread
			 * will be woken up here. After gc is done, the shrunk
			 * btree and the discarded free buckets of SSD space
			 * may be helpful for subsequent write requests.
			 */
			if (c->gc_after_writeback ==
			    (BCH_ENABLE_AUTO_GC|BCH_DO_AUTO_GC)) {
				c->gc_after_writeback &= ~BCH_DO_AUTO_GC;
				force_wake_up_gc(c);
			}
		}

		up_write(&dc->writeback_lock);

		read_dirty(dc);

		if (searched_full_index) {
			unsigned int delay = dc->writeback_delay * HZ;

			while (delay &&
			       !kthread_should_stop() &&
			       !test_bit(CACHE_SET_IO_DISABLE, &c->flags) &&
			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
				delay = schedule_timeout_interruptible(delay);

			bch_ratelimit_reset(&dc->writeback_rate);
		}
	}

	if (dc->writeback_write_wq) {
		flush_workqueue(dc->writeback_write_wq);
		destroy_workqueue(dc->writeback_write_wq);
	}
	cached_dev_put(dc);
	wait_for_kthread_stop();

	return 0;
}

/* Init */
#define INIT_KEYS_EACH_TIME	500000

struct sectors_dirty_init {
	struct btree_op	op;
	unsigned int	inode;
	size_t		count;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
				 struct bkey *k)
{
	struct sectors_dirty_init *op = container_of(_op,
						struct sectors_dirty_init, op);
	if (KEY_INODE(k) > op->inode)
		return MAP_DONE;

	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
					     KEY_START(k), KEY_SIZE(k));

	op->count++;
	if (!(op->count % INIT_KEYS_EACH_TIME))
		cond_resched();

	return MAP_CONTINUE;
}

static int bch_root_node_dirty_init(struct cache_set *c,
				     struct bcache_device *d,
				     struct bkey *k)
{
	struct sectors_dirty_init op;
	int ret;

	bch_btree_op_init(&op.op, -1);
	op.inode = d->id;
	op.count = 0;

	ret = bcache_btree(map_keys_recurse,
			   k,
			   c->root,
			   &op.op,
			   &KEY(op.inode, 0, 0),
			   sectors_dirty_init_fn,
			   0);
	if (ret < 0)
		pr_warn("sectors dirty init failed, ret=%d!\n", ret);

	/*
	 * The op may have been added to the cache_set's btree_cache_wait
	 * in mca_cannibalize(); we must ensure it is removed from the
	 * list and that btree_cache_alloc_lock is released before the op
	 * memory is freed.
	 * Otherwise, btree_cache_wait would be corrupted.
	 */
	bch_cannibalize_unlock(c);
	finish_wait(&c->btree_cache_wait, &(&op.op)->wait);

	return ret;
}

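/*
 * Worker thread for the parallel dirty-sector init: threads grab
 * root-node key indexes from the shared bch_dirty_init_state and each
 * walks the subtree below its key via bch_root_node_dirty_init().
 */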
static int bch_dirty_init_thread(void *arg)
{
	struct dirty_init_thrd_info *info = arg;
	struct bch_dirty_init_state *state = info->state;
	struct cache_set *c = state->c;
	struct btree_iter iter;
	struct bkey *k, *p;
	int cur_idx, prev_idx, skip_nr;

	k = p = NULL;
	prev_idx = 0;

	bch_btree_iter_init(&c->root->keys, &iter, NULL);
	k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
	BUG_ON(!k);

	p = k;

	while (k) {
		spin_lock(&state->idx_lock);
		cur_idx = state->key_idx;
		state->key_idx++;
		spin_unlock(&state->idx_lock);

		skip_nr = cur_idx - prev_idx;

		while (skip_nr) {
			k = bch_btree_iter_next_filter(&iter,
						       &c->root->keys,
						       bch_ptr_bad);
			if (k)
				p = k;
			else {
				atomic_set(&state->enough, 1);
				/* Update state->enough earlier */
				smp_mb__after_atomic();
				goto out;
			}
			skip_nr--;
		}

		if (p) {
			if (bch_root_node_dirty_init(c, state->d, p) < 0)
				goto out;
		}

		p = NULL;
		prev_idx = cur_idx;
	}

out:
	/* In order to wake up state->wait in time */
	smp_mb__before_atomic();
	if (atomic_dec_and_test(&state->started))
		wake_up(&state->wait);

	return 0;
}

static int bch_btre_dirty_init_thread_nr(void)
{
	int n = num_online_cpus()/2;

	if (n == 0)
		n = 1;
	else if (n > BCH_DIRTY_INIT_THRD_MAX)
		n = BCH_DIRTY_INIT_THRD_MAX;

	return n;
}

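/*
 * Recompute the dirty-sector counts for a device from the btree. If the
 * root node is a leaf, just scan its keys directly; otherwise spread the
 * root node's keys across up to BCH_DIRTY_INIT_THRD_MAX worker threads
 * and wait for them all to finish.
 */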
void bch_sectors_dirty_init(struct bcache_device *d)
{
	int i;
	struct btree *b = NULL;
	struct bkey *k = NULL;
	struct btree_iter iter;
	struct sectors_dirty_init op;
	struct cache_set *c = d->c;
	struct bch_dirty_init_state state;

retry_lock:
	b = c->root;
	rw_lock(0, b, b->level);
	if (b != c->root) {
		rw_unlock(0, b);
		goto retry_lock;
	}

	/* Just count root keys if no leaf node */
	if (c->root->level == 0) {
		bch_btree_op_init(&op.op, -1);
		op.inode = d->id;
		op.count = 0;

		for_each_key_filter(&c->root->keys,
				    k, &iter, bch_ptr_invalid) {
			if (KEY_INODE(k) != op.inode)
				continue;
			sectors_dirty_init_fn(&op.op, c->root, k);
		}

		rw_unlock(0, b);
		return;
	}

	memset(&state, 0, sizeof(struct bch_dirty_init_state));
	state.c = c;
	state.d = d;
	state.total_threads = bch_btre_dirty_init_thread_nr();
	state.key_idx = 0;
	spin_lock_init(&state.idx_lock);
	atomic_set(&state.started, 0);
	atomic_set(&state.enough, 0);
	init_waitqueue_head(&state.wait);

	for (i = 0; i < state.total_threads; i++) {
		/* Fetch latest state.enough earlier */
		smp_mb__before_atomic();
		if (atomic_read(&state.enough))
			break;

		atomic_inc(&state.started);
		state.infos[i].state = &state;
		state.infos[i].thread =
			kthread_run(bch_dirty_init_thread, &state.infos[i],
				    "bch_dirtcnt[%d]", i);
		if (IS_ERR(state.infos[i].thread)) {
			pr_err("fails to run thread bch_dirty_init[%d]\n", i);
			atomic_dec(&state.started);
			for (--i; i >= 0; i--)
				kthread_stop(state.infos[i].thread);
			goto out;
		}
	}

out:
	/* Must wait for all threads to stop. */
	wait_event(state.wait, atomic_read(&state.started) == 0);
	rw_unlock(0, b);
}

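/*
 * Set default writeback parameters for a newly registered cached device;
 * the writeback thread and workqueue are started later in
 * bch_cached_dev_writeback_start().
 */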
void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata		= true;
	dc->writeback_running		= false;
	dc->writeback_percent		= 10;
	dc->writeback_delay		= 30;
	atomic_long_set(&dc->writeback_rate.rate, 1024);
	dc->writeback_rate_minimum	= 8;

	dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT;
	dc->writeback_rate_p_term_inverse = 40;
	dc->writeback_rate_i_term_inverse = 10000;

	WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}

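/*
 * Create the writeback workqueue and kthread, arm the periodic rate
 * update, and kick the writeback thread for its first pass.
 */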
int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
	dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
						WQ_MEM_RECLAIM, 0);
	if (!dc->writeback_write_wq)
		return -ENOMEM;

	cached_dev_get(dc);
	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread)) {
		cached_dev_put(dc);
		destroy_workqueue(dc->writeback_write_wq);
		return PTR_ERR(dc->writeback_thread);
	}
	dc->writeback_running = true;

	WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags));
	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);

	bch_writeback_queue(dc);

	return 0;
}
1038