// SPDX-License-Identifier: GPL-2.0
/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <trace/events/bcache.h>

/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */

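/*
 * Completion path for the journal read bios below: the submitter waits on a
 * stack closure, so all we need to do here is drop the reference taken at
 * submit time.
 */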
static void journal_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	closure_put(cl);
}

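/*
 * Read all journal entries from one journal bucket, validating the magic and
 * checksum of each jset, and splice the valid ones into @list, which is kept
 * sorted by ascending sequence number (entries older than the newest
 * last_seq seen are dropped as expired).  Returns a negative errno on
 * allocation failure, otherwise 0 if nothing was added or 1 if at least one
 * entry was.
 */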
static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       unsigned int bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	struct closure cl;
	unsigned int len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	closure_init_stack(&cl);

	pr_debug("reading %u\n", bucket_index);

	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);

		bio_reset(bio);
		bio->bi_iter.bi_sector = bucket + offset;
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size = len << 9;

		bio->bi_end_io = journal_read_endio;
		bio->bi_private = &cl;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bch_bio_map(bio, data);

		closure_bio_submit(ca->set, bio, &cl);
		closure_sync(&cl);

		/*
		 * This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */

		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(&ca->sb)) {
				pr_debug("%u: bad magic\n", bucket_index);
				return ret;
			}

			if (bytes > left << 9 ||
			    bytes > PAGE_SIZE << JSET_BITS) {
				pr_info("%u: too big, %zu bytes, offset %u\n",
					bucket_index, bytes, offset);
				return ret;
			}

			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j)) {
				pr_info("%u: bad csum, %zu bytes, offset %u\n",
					bucket_index, bytes, offset);
				return ret;
			}

			blocks = set_blocks(j, block_bytes(ca));

			/*
			 * Nodes in 'list' are in linear increasing order of
			 * i->j.seq, the node on head has the smallest (oldest)
			 * journal seq, the node on tail has the biggest
			 * (latest) journal seq.
			 */

			/*
			 * Check from the oldest jset for last_seq. If
			 * i->j.seq < j->last_seq, it means the oldest jset
			 * in list is expired and useless, remove it from
			 * this list. Otherwise, j is a candidate jset for
			 * further following checks.
			 */
			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			/* iterate list in reverse order (from latest jset) */
			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next_set;

				/*
				 * if j->seq is less than any i->j.last_seq
				 * in list, j is an expired and useless jset.
				 */
				if (j->seq < i->j.last_seq)
					goto next_set;

				/*
				 * 'where' points to the first jset in the
				 * list which is older than j.
				 */
				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

			where = list;
add:
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			memcpy(&i->j, j, bytes);
			/* Add to the location after 'where' points to */
			list_add(&i->list, where);
			ret = 1;

			if (j->seq > ja->seq[bucket_index])
				ja->seq[bucket_index] = j->seq;
next_set:
			offset += blocks * ca->sb.block_size;
			len -= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}

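/*
 * Find the newest journal entries on this cache: probe buckets in golden
 * ratio order until one contains journal entries, binary search between that
 * bucket and the next one checked for where the journal ends, then walk
 * backwards to pick up any older entries that still matter.  Fills in @list
 * for bch_journal_mark()/bch_journal_replay() to consume.
 */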
int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b)							\
	({								\
		ret = journal_read_bucket(ca, list, b);			\
		__set_bit(b, bitmap);					\
		if (ret < 0)						\
			return ret;					\
		ret;							\
	})

	struct cache *ca = c->cache;
	int ret = 0;
	struct journal_device *ja = &ca->journal;
	DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
	unsigned int i, l, r, m;
	uint64_t seq;

	bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
	pr_debug("%u journal buckets\n", ca->sb.njournal_buckets);

	/*
	 * Read journal buckets ordered by golden ratio hash to quickly
	 * find a sequence of buckets with valid journal entries
	 */
	for (i = 0; i < ca->sb.njournal_buckets; i++) {
		/*
		 * We must try index 0 first (i == 0 gives l == 0) for
		 * correctness, since the journal buckets form a circular
		 * buffer which might have wrapped
		 */
		l = (i * 2654435769U) % ca->sb.njournal_buckets;

		if (test_bit(l, bitmap))
			break;

		if (read_bucket(l))
			goto bsearch;
	}

	/*
	 * If that fails, check all the buckets we haven't checked
	 * already
	 */
	pr_debug("falling back to linear search\n");

	for_each_clear_bit(l, bitmap, ca->sb.njournal_buckets)
		if (read_bucket(l))
			goto bsearch;

	/* no journal entries on this device? */
	if (l == ca->sb.njournal_buckets)
		goto out;
bsearch:
	BUG_ON(list_empty(list));

	/* Binary search */
	m = l;
	r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
	pr_debug("starting binary search, l %u r %u\n", l, r);

	while (l + 1 < r) {
		seq = list_entry(list->prev, struct journal_replay,
				 list)->j.seq;

		m = (l + r) >> 1;
		read_bucket(m);

		if (seq != list_entry(list->prev, struct journal_replay,
				      list)->j.seq)
			l = m;
		else
			r = m;
	}

	/*
	 * Read buckets in reverse order until we stop finding more
	 * journal entries
	 */
	pr_debug("finishing up: m %u njournal_buckets %u\n",
		 m, ca->sb.njournal_buckets);
	l = m;

	while (1) {
		if (!l--)
			l = ca->sb.njournal_buckets - 1;

		if (l == m)
			break;

		if (test_bit(l, bitmap))
			continue;

		if (!read_bucket(l))
			break;
	}

	seq = 0;

	for (i = 0; i < ca->sb.njournal_buckets; i++)
		if (ja->seq[i] > seq) {
			seq = ja->seq[i];
			/*
			 * When journal_reclaim() goes to allocate for
			 * the first time, it'll use the bucket after
			 * ja->cur_idx
			 */
			ja->cur_idx = i;
			ja->last_idx = ja->discard_idx = (i + 1) %
				ca->sb.njournal_buckets;
		}

out:
	if (!list_empty(list))
		c->journal.seq = list_entry(list->prev,
					    struct journal_replay,
					    list)->j.seq;

	return 0;
#undef read_bucket
}

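/*
 * Walk the replay list newest-to-oldest, rebuild the journal pin fifo so
 * each replay entry gets a pin refcount, and mark the buckets referenced by
 * the journalled keys (as garbage collection would) so they can't be reused
 * before replay completes.
 */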
void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t p = { 0 };
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */

	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;

		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k))
			if (!__bch_extent_invalid(c, k)) {
				unsigned int j;

				for (j = 0; j < KEY_PTRS(k); j++)
					if (ptr_available(c, k, j))
						atomic_inc(&PTR_BUCKET(c, k, j)->pin);

				bch_initial_mark_key(c, 0, k);
			}
	}
}

static bool is_discard_enabled(struct cache_set *s)
{
	struct cache *ca = s->cache;

	if (ca->discard)
		return true;

	return false;
}

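/*
 * Re-insert the journalled keys into the btree, in exactly the order they
 * were originally journalled.  A gap in the sequence numbers is tolerated
 * (with a warning) when discard is enabled, since discarded journal buckets
 * legitimately disappear; otherwise it is treated as corruption and replay
 * fails with -EIO.  The replay list is freed before returning.
 */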
int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	struct keylist keylist;

	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		if (n != i->j.seq) {
			if (n == start && is_discard_enabled(s))
				pr_info("journal entries %llu-%llu may be discarded! (replaying %llu-%llu)\n",
					n, i->j.seq - 1, start, end);
			else {
				pr_err("journal entries %llu-%llu missing! (replaying %llu-%llu)\n",
				       n, i->j.seq - 1, start, end);
				ret = -EIO;
				goto err;
			}
		}

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bch_keylist_init_single(&keylist, k);

			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&keylist));
			keys++;

			cond_resched();
		}

		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}

	pr_info("journal replay done, %i keys in %i entries, seq %llu\n",
		keys, entries, end);
err:
	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kfree(i);
	}

	return ret;
}

void bch_journal_space_reserve(struct journal *j)
{
	j->do_reserve = true;
}

/* Journalling */

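/*
 * Write out the dirty btree nodes that pin the oldest journal entry, so the
 * entry's refcount can drop to zero and journal_reclaim() can free its
 * bucket.  At most BTREE_FLUSH_NR nodes are flushed per call to bound how
 * long c->bucket_lock is held; any remaining nodes are picked up on the
 * next call.
 */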
static void btree_flush_write(struct cache_set *c)
{
	struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
	unsigned int i, nr;
	int ref_nr;
	atomic_t *fifo_front_p, *now_fifo_front_p;
	size_t mask;

	if (c->journal.btree_flushing)
		return;

	spin_lock(&c->journal.flush_write_lock);
	if (c->journal.btree_flushing) {
		spin_unlock(&c->journal.flush_write_lock);
		return;
	}
	c->journal.btree_flushing = true;
	spin_unlock(&c->journal.flush_write_lock);

	/* get the oldest journal entry and check its refcount */
	spin_lock(&c->journal.lock);
	fifo_front_p = &fifo_front(&c->journal.pin);
	ref_nr = atomic_read(fifo_front_p);
	if (ref_nr <= 0) {
		/*
		 * do nothing if no btree node references
		 * the oldest journal entry
		 */
		spin_unlock(&c->journal.lock);
		goto out;
	}
	spin_unlock(&c->journal.lock);

	mask = c->journal.pin.mask;
	nr = 0;
	atomic_long_inc(&c->flush_write);
	memset(btree_nodes, 0, sizeof(btree_nodes));

	mutex_lock(&c->bucket_lock);
	list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
		/*
		 * It is safe to get now_fifo_front_p without holding
		 * c->journal.lock here, because we don't need the exact
		 * value, just to check whether the front pointer of
		 * c->journal.pin has changed.
		 */
		now_fifo_front_p = &fifo_front(&c->journal.pin);
		/*
		 * If the oldest journal entry is reclaimed and the front
		 * pointer of c->journal.pin changes, it is unnecessary
		 * to scan c->btree_cache anymore, just quit the loop and
		 * flush out what we have already.
		 */
		if (now_fifo_front_p != fifo_front_p)
			break;
		/*
		 * quit this loop if all matching btree nodes are
		 * scanned and recorded in btree_nodes[] already.
		 */
		ref_nr = atomic_read(fifo_front_p);
		if (nr >= ref_nr)
			break;

		if (btree_node_journal_flush(b))
			pr_err("BUG: flush_write bit should not be set here!\n");

		mutex_lock(&b->write_lock);

		if (!btree_node_dirty(b)) {
			mutex_unlock(&b->write_lock);
			continue;
		}

		if (!btree_current_write(b)->journal) {
			mutex_unlock(&b->write_lock);
			continue;
		}

		/*
		 * Only select the btree node which exactly references
		 * the oldest journal entry.
		 *
		 * If the journal entry pointed to by fifo_front_p is
		 * reclaimed in parallel, don't worry:
		 * - the list_for_each_xxx loop will quit when checking
		 *   next now_fifo_front_p.
		 * - If there are matched nodes recorded in btree_nodes[],
		 *   they are clean now (this is why and how the oldest
		 *   journal entry can be reclaimed). These selected nodes
		 *   will be ignored and skipped in the following for-loop.
		 */
		if (((btree_current_write(b)->journal - fifo_front_p) &
		     mask) != 0) {
			mutex_unlock(&b->write_lock);
			continue;
		}

		set_btree_node_journal_flush(b);

		mutex_unlock(&b->write_lock);

		btree_nodes[nr++] = b;
		/*
		 * To avoid holding c->bucket_lock for too long, only scan
		 * for BTREE_FLUSH_NR matched btree nodes at most. If more
		 * btree nodes reference the oldest journal entry, try to
		 * flush them next time when btree_flush_write() is called.
		 */
		if (nr == BTREE_FLUSH_NR)
			break;
	}
	mutex_unlock(&c->bucket_lock);

	for (i = 0; i < nr; i++) {
		b = btree_nodes[i];
		if (!b) {
			pr_err("BUG: btree_nodes[%d] is NULL\n", i);
			continue;
		}

		/* safe to check without holding b->write_lock */
		if (!btree_node_journal_flush(b)) {
			pr_err("BUG: bnode %p: journal_flush bit cleaned\n", b);
			continue;
		}

		mutex_lock(&b->write_lock);
		if (!btree_current_write(b)->journal) {
			clear_bit(BTREE_NODE_journal_flush, &b->flags);
			mutex_unlock(&b->write_lock);
			pr_debug("bnode %p: written by others\n", b);
			continue;
		}

		if (!btree_node_dirty(b)) {
			clear_bit(BTREE_NODE_journal_flush, &b->flags);
			mutex_unlock(&b->write_lock);
			pr_debug("bnode %p: dirty bit cleaned by others\n", b);
			continue;
		}

		__bch_btree_node_write(b, NULL);
		clear_bit(BTREE_NODE_journal_flush, &b->flags);
		mutex_unlock(&b->write_lock);
	}

out:
	spin_lock(&c->journal.flush_write_lock);
	c->journal.btree_flushing = false;
	spin_unlock(&c->journal.flush_write_lock);
}

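/*
 * j->seq is the sequence number of the newest journal entry, and the pin
 * fifo holds one refcount per entry still outstanding, so the oldest
 * outstanding sequence number is j->seq - fifo_used(&j->pin) + 1.
 */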
#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)

static void journal_discard_endio(struct bio *bio)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}

static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(&ja->discard_bio);
}

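/*
 * Advance ja->discard_idx towards ja->last_idx, issuing a discard for each
 * freed journal bucket.  discard_in_flight is a small state machine
 * (READY -> IN_FLIGHT -> DONE) ensuring only one discard bio is outstanding
 * per device; when discard is disabled the index just jumps to last_idx.
 */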
static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}

	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);
		fallthrough;

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;

		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio, bio->bi_inline_vecs, 1);
		bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
		bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
						ca->sb.d[ja->discard_idx]);
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= bucket_bytes(ca);
		bio->bi_end_io		= journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		queue_work(bch_journal_wq, &ja->discard_work);
	}
}

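/*
 * Number of journal buckets available for new writes: the distance from
 * cur_idx to discard_idx around the ring, minus one bucket of slack and,
 * once bch_journal_space_reserve() has been called, one more reserved
 * bucket.
 */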
static unsigned int free_journal_buckets(struct cache_set *c)
{
	struct journal *j = &c->journal;
	struct cache *ca = c->cache;
	struct journal_device *ja = &c->cache->journal;
	unsigned int n;

	/* In case njournal_buckets is not a power of 2 */
	if (ja->cur_idx >= ja->discard_idx)
		n = ca->sb.njournal_buckets + ja->discard_idx - ja->cur_idx;
	else
		n = ja->discard_idx - ja->cur_idx;

	if (n > (1 + j->do_reserve))
		return n - (1 + j->do_reserve);

	return 0;
}

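/*
 * Free up journal space: pop fully-unpinned entries off the front of the
 * pin fifo, advance last_idx past buckets whose newest entry is older than
 * the new last_seq, kick off discards, and if the current write has run out
 * of space, move cur_idx to the next free bucket and point journal.key at
 * it.  Wakes up waiters once the journal is no longer full.
 */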
static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca = c->cache;
	uint64_t last_seq;
	struct journal_device *ja = &ca->journal;
	atomic_t p __maybe_unused;

	atomic_long_inc(&c->reclaim);

	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	while (ja->last_idx != ja->cur_idx &&
	       ja->seq[ja->last_idx] < last_seq)
		ja->last_idx = (ja->last_idx + 1) %
			ca->sb.njournal_buckets;

	do_journal_discard(ca);

	if (c->journal.blocks_free)
		goto out;

	if (!free_journal_buckets(c))
		goto out;

	ja->cur_idx = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
	k->ptr[0] = MAKE_PTR(0,
			     bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
			     ca->sb.nr_this_dev);
	atomic_long_inc(&c->reclaimed_journal_buckets);

	bkey_init(k);
	SET_KEY_PTRS(k, 1);
	c->journal.blocks_free = ca->sb.bucket_size >> c->block_bits;

out:
	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}

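/*
 * Switch the in-memory journal to the other of the two write buffers and
 * open a new journal entry: push a fresh pin (initialized to 1 so the entry
 * can't be reclaimed while it is still open) and bump j->seq.
 */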
void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq	= ++j->seq;
	j->cur->dirty		= false;
	j->cur->need_write	= false;
	j->cur->data->keys	= 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)\n", fifo_used(&j->pin));
}

static void journal_write_endio(struct bio *bio)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(bio->bi_status, w->c, "journal io error");
	closure_put(&w->c->journal.io);
}

static void journal_write(struct closure *cl);

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);
	continue_at_nobarrier(cl, journal_write, bch_journal_wq);
}

static void journal_write_unlock(struct closure *cl)
	__releases(&c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	c->journal.io_in_flight = 0;
	spin_unlock(&c->journal.lock);
}

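/*
 * Build and submit the bios for the current journal entry.  Called with
 * c->journal.lock held, which is dropped before the bios are submitted; if
 * the journal is full we reclaim and retry from journal_write() instead of
 * writing.  One bio is sent per pointer in journal.key (a single cache
 * device here).
 */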
static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
	struct cache *ca = c->cache;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned int i, sectors = set_blocks(w->data, block_bytes(ca)) *
		ca->sb.block_size;

	struct bio *bio;
	struct bio_list list;

	bio_list_init(&list);

	if (!w->need_write) {
		closure_return_with_destructor(cl, journal_write_unlock);
		return;
	} else if (journal_full(&c->journal)) {
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, bch_journal_wq);
		return;
	}

	c->journal.blocks_free -= set_blocks(w->data, block_bytes(ca));

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
	w->data->magic		= jset_magic(&ca->sb);
	w->data->version	= BCACHE_JSET_VERSION;
	w->data->last_seq	= last_seq(&c->journal);
	w->data->csum		= csum_set(w->data);

	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = PTR_CACHE(c, k, i);
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= PTR_OFFSET(k, i);
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size = sectors << 9;

		bio->bi_end_io	= journal_write_endio;
		bio->bi_private = w;
		bio_set_op_attrs(bio, REQ_OP_WRITE,
				 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio, w->data->keys);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}

	/* If KEY_PTRS(k) == 0, this jset would get lost in the air */
	BUG_ON(i == 0);

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(c, bio, cl);

	continue_at(cl, journal_write_done, NULL);
}

static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}

static void journal_try_write(struct cache_set *c)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io;
	struct journal_write *w = c->journal.cur;

	w->need_write = true;

	if (!c->journal.io_in_flight) {
		c->journal.io_in_flight = 1;
		closure_call(cl, journal_write_unlocked, NULL, &c->cl);
	} else {
		spin_unlock(&c->journal.lock);
	}
}

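/*
 * Return the current journal write with room for @nkeys more keys, blocking
 * until space is available: if the current entry is merely too full we force
 * it out early, and if the whole journal is full we reclaim and flush btree
 * nodes.  Returns with c->journal.lock held.
 */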
static struct journal_write *journal_wait_for_write(struct cache_set *c,
						    unsigned int nkeys)
	__acquires(&c->journal.lock)
{
	size_t sectors;
	struct closure cl;
	bool wait = false;
	struct cache *ca = c->cache;

	closure_init_stack(&cl);

	spin_lock(&c->journal.lock);

	while (1) {
		struct journal_write *w = c->journal.cur;

		sectors = __set_blocks(w->data, w->data->keys + nkeys,
				       block_bytes(ca)) * ca->sb.block_size;

		if (sectors <= min_t(size_t,
				     c->journal.blocks_free * ca->sb.block_size,
				     PAGE_SECTORS << JSET_BITS))
			return w;

		if (wait)
			closure_wait(&c->journal.wait, &cl);

		if (!journal_full(&c->journal)) {
			if (wait)
				trace_bcache_journal_entry_full(c);

			/*
			 * XXX: If we were inserting so many keys that they
			 * won't fit in an _empty_ journal write, we'll
			 * deadlock. For now, handle this in
			 * bch_keylist_realloc() - but something to think about.
			 */
			BUG_ON(!w->data->keys);

			journal_try_write(c); /* unlocks */
		} else {
			if (wait)
				trace_bcache_journal_full(c);

			journal_reclaim(c);
			spin_unlock(&c->journal.lock);

			btree_flush_write(c);
		}

		closure_sync(&cl);
		spin_lock(&c->journal.lock);
		wait = true;
	}
}

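/*
 * Delayed-work callback armed by bch_journal(): flushes the current journal
 * entry if it is still dirty once journal_delay_ms has elapsed.
 */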
static void journal_write_work(struct work_struct *work)
{
	struct cache_set *c = container_of(to_delayed_work(work),
					   struct cache_set,
					   journal.work);
	spin_lock(&c->journal.lock);
	if (c->journal.cur->dirty)
		journal_try_write(c);
	else
		spin_unlock(&c->journal.lock);
}

/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then
 * bch_journal() hands those same keys off to btree_insert_async()
 */

atomic_t *bch_journal(struct cache_set *c,
		      struct keylist *keys,
		      struct closure *parent)
{
	struct journal_write *w;
	atomic_t *ret;

	/* No journaling if CACHE_SET_IO_DISABLE set already */
	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags)))
		return NULL;

	if (!CACHE_SYNC(&c->cache->sb))
		return NULL;

	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

	memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
	w->data->keys += bch_keylist_nkeys(keys);

	ret = &fifo_back(&c->journal.pin);
	atomic_inc(ret);

	if (parent) {
		closure_wait(&w->wait, parent);
		journal_try_write(c);
	} else if (!w->dirty) {
		w->dirty = true;
		queue_delayed_work(bch_flush_wq, &c->journal.work,
				   msecs_to_jiffies(c->journal_delay_ms));
		spin_unlock(&c->journal.lock);
	} else {
		spin_unlock(&c->journal.lock);
	}

	return ret;
}

void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct keylist keys;
	atomic_t *ref;

	bch_keylist_init(&keys);

	ref = bch_journal(c, &keys, cl);
	if (ref)
		atomic_dec_bug(ref);
}

void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
}

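/*
 * Allocate the pin fifo and the two journal write buffers (JSET_BITS pages
 * each); everything is freed again by bch_journal_free().
 */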
int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	spin_lock_init(&j->lock);
	spin_lock_init(&j->flush_write_lock);
	INIT_DELAYED_WORK(&j->work, journal_write_work);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)))
		return -ENOMEM;

	return 0;
}