/*
 * Primary bucket allocation code
 *
 * Copyright 2012 Google, Inc.
 *
 * Allocation in bcache is done in terms of buckets:
 *
 * Each bucket has an associated 8 bit gen; this gen corresponds to the gen in
 * btree pointers - they must match for the pointer to be considered valid.
 *
 * Thus (assuming a bucket has no dirty data or metadata in it) we can reuse a
 * bucket simply by incrementing its gen.
 *
 * The gens (along with the priorities; it's really the gens that are
 * important, but the code is named as if it's the priorities) are written in
 * an arbitrary list of buckets on disk, with a pointer to them in the journal
 * header.
 *
 * When we invalidate a bucket, we have to write its new gen to disk and wait
 * for that write to complete before we use it - otherwise after a crash we
 * could have pointers that appeared to be good but pointed to data that had
 * been overwritten.
 *
 * Since the gens and priorities are all stored contiguously on disk, we can
 * batch this up: We fill up the free_inc list with freshly invalidated buckets,
 * call prio_write(), and when prio_write() finishes we pull buckets off the
 * free_inc list and optionally discard them.
 *
 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
 * priorities and gens were being written before we could allocate. c->free is
 * a smaller freelist, and buckets on that list are always ready to be used.
 *
 * If we've got discards enabled, that happens when a bucket moves from the
 * free_inc list to the free list.
 *
 * There is another freelist, because sometimes we have buckets that we know
 * have nothing pointing into them - these we can reuse without waiting for
 * priorities to be rewritten. These come from freed btree nodes and buckets
 * that garbage collection discovered no longer had valid keys pointing into
 * them (because they were overwritten). That's the unused list - buckets on
 * the unused list move to the free list, optionally being discarded in the
 * process.
 *
 * It's also important to ensure that gens don't wrap around - with respect to
 * either the oldest gen in the btree or the gen on disk. This is quite
 * difficult to do in practice, but we explicitly guard against it anyways - if
 * a bucket is in danger of wrapping around we simply skip invalidating it that
 * time around, and we garbage collect or rewrite the priorities sooner than we
 * would have otherwise.
 *
 * bch_bucket_alloc() allocates a single bucket from a specific cache.
 *
 * bch_bucket_alloc_set() allocates one or more buckets from different caches
 * out of a cache set.
 *
 * free_some_buckets() drives all the processes described above. It's called
 * from bch_bucket_alloc() and a few other places that need to make sure free
 * buckets are ready.
 *
 * invalidate_buckets_(lru|fifo)() find buckets that are available to be
 * invalidated, invalidate them, and stick them on the free_inc list - in
 * either lru or fifo order.
 */
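
/*
 * Illustrative sketch (not part of this file's code): pointer validity
 * reduces to a gen comparison. Conceptually, a check along these lines
 * decides whether a btree pointer is still usable:
 *
 *	static inline bool ptr_is_stale(struct cache_set *c,
 *					const struct bkey *k, unsigned i)
 *	{
 *		return PTR_BUCKET(c, k, i)->gen != PTR_GEN(k, i);
 *	}
 *
 * ptr_is_stale() is a hypothetical name; bcache's real helper, ptr_stale(),
 * additionally handles gen wraparound via gen_after(). The point is that
 * bumping a bucket's gen invalidates every pointer into it at once.
 */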

#include "bcache.h"
#include "btree.h"

#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>

/* Bucket heap / gen */

uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
{
	uint8_t ret = ++b->gen;

	ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
	WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);

	return ret;
}

void bch_rescale_priorities(struct cache_set *c, int sectors)
{
	struct cache *ca;
	struct bucket *b;
	unsigned next = c->nbuckets * c->sb.bucket_size / 1024;
	unsigned i;
	int r;

	atomic_sub(sectors, &c->rescale);

	do {
		r = atomic_read(&c->rescale);

		if (r >= 0)
			return;
	} while (atomic_cmpxchg(&c->rescale, r, r + next) != r);

	mutex_lock(&c->bucket_lock);

	c->min_prio = USHRT_MAX;

	for_each_cache(ca, c, i)
		for_each_bucket(b, ca)
			if (b->prio &&
			    b->prio != BTREE_PRIO &&
			    !atomic_read(&b->pin)) {
				b->prio--;
				c->min_prio = min(c->min_prio, b->prio);
			}

	mutex_unlock(&c->bucket_lock);
}

/*
 * Background allocation thread: scans for buckets to be invalidated,
 * invalidates them, rewrites prios/gens (marking them as invalidated on disk),
 * then optionally issues discard commands to the newly free buckets, then puts
 * them on the various freelists.
 */

static inline bool can_inc_bucket_gen(struct bucket *b)
{
	return bucket_gc_gen(b) < BUCKET_GC_GEN_MAX;
}

bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
{
	BUG_ON(!ca->set->gc_mark_valid);

	return (!GC_MARK(b) ||
		GC_MARK(b) == GC_MARK_RECLAIMABLE) &&
	       !atomic_read(&b->pin) &&
	       can_inc_bucket_gen(b);
}

void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	lockdep_assert_held(&ca->set->bucket_lock);
	BUG_ON(GC_MARK(b) && GC_MARK(b) != GC_MARK_RECLAIMABLE);

	if (GC_SECTORS_USED(b))
		trace_bcache_invalidate(ca, b - ca->buckets);

	bch_inc_gen(ca, b);
	b->prio = INITIAL_PRIO;
	atomic_inc(&b->pin);
}

static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
{
	__bch_invalidate_one_bucket(ca, b);

	fifo_push(&ca->free_inc, b - ca->buckets);
}

/*
 * Determines the order in which we reuse buckets: smallest bucket_prio()
 * first. We also take into account the number of sectors of live data in
 * each bucket, and for that multiplication to make sense we have to scale
 * the bucket priorities.
 *
 * Thus, we scale the bucket priorities so that the bucket with the smallest
 * prio is worth 1/8th of what INITIAL_PRIO is worth.
 */

#define bucket_prio(b)							\
({									\
	unsigned min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;	\
									\
	(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);	\
})
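
/*
 * Worked example (assuming INITIAL_PRIO is 32768, as defined in bcache.h):
 * with ca->set->min_prio == 16384, min_prio above is (32768 - 16384) / 8 ==
 * 2048. The coldest bucket (b->prio == 16384) then scores 2048 per sector
 * of live data, while a freshly written bucket (b->prio == 32768) scores
 * 16384 + 2048 == 18432 per sector - 9x as much, so it is far less likely
 * to be reused.
 */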

#define bucket_max_cmp(l, r)	(bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r)	(bucket_prio(l) > bucket_prio(r))

static void invalidate_buckets_lru(struct cache *ca)
{
	struct bucket *b;
	ssize_t i;

	ca->heap.used = 0;

	for_each_bucket(b, ca) {
		if (!bch_can_invalidate_bucket(ca, b))
			continue;

		if (!heap_full(&ca->heap))
			heap_add(&ca->heap, b, bucket_max_cmp);
		else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
			ca->heap.data[0] = b;
			heap_sift(&ca->heap, 0, bucket_max_cmp);
		}
	}

	for (i = ca->heap.used / 2 - 1; i >= 0; --i)
		heap_sift(&ca->heap, i, bucket_min_cmp);

	while (!fifo_full(&ca->free_inc)) {
		if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
			/*
			 * We don't want to be calling invalidate_buckets()
			 * multiple times when it can't do anything
			 */
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}

		bch_invalidate_one_bucket(ca, b);
	}
}

static void invalidate_buckets_fifo(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		if (ca->fifo_last_bucket <  ca->sb.first_bucket ||
		    ca->fifo_last_bucket >= ca->sb.nbuckets)
			ca->fifo_last_bucket = ca->sb.first_bucket;

		b = ca->buckets + ca->fifo_last_bucket++;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets_random(struct cache *ca)
{
	struct bucket *b;
	size_t checked = 0;

	while (!fifo_full(&ca->free_inc)) {
		size_t n;

		get_random_bytes(&n, sizeof(n));

		n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
		n += ca->sb.first_bucket;

		b = ca->buckets + n;

		if (bch_can_invalidate_bucket(ca, b))
			bch_invalidate_one_bucket(ca, b);

		if (++checked >= ca->sb.nbuckets / 2) {
			ca->invalidate_needs_gc = 1;
			wake_up_gc(ca->set);
			return;
		}
	}
}

static void invalidate_buckets(struct cache *ca)
{
	BUG_ON(ca->invalidate_needs_gc);

	switch (CACHE_REPLACEMENT(&ca->sb)) {
	case CACHE_REPLACEMENT_LRU:
		invalidate_buckets_lru(ca);
		break;
	case CACHE_REPLACEMENT_FIFO:
		invalidate_buckets_fifo(ca);
		break;
	case CACHE_REPLACEMENT_RANDOM:
		invalidate_buckets_random(ca);
		break;
	}
}

#define allocator_wait(ca, cond)					\
do {									\
	while (1) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (cond)						\
			break;						\
									\
		mutex_unlock(&(ca)->set->bucket_lock);			\
		if (kthread_should_stop())				\
			return 0;					\
									\
		try_to_freeze();					\
		schedule();						\
		mutex_lock(&(ca)->set->bucket_lock);			\
	}								\
	__set_current_state(TASK_RUNNING);				\
} while (0)
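
/*
 * Example use (a sketch; the real call sites are in bch_allocator_thread()
 * below):
 *
 *	allocator_wait(ca, !fifo_empty(&ca->free_inc));
 *
 * Note that allocator_wait() can only be used from the allocator thread's
 * top-level function: it returns 0 from the enclosing function when the
 * kthread is asked to stop, and it assumes bucket_lock is held on entry.
 */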

static bool bch_allocator_push(struct cache *ca, long bucket)
{
	unsigned i;

	/* Prios/gens are actually the most important reserve */
	if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
		return true;

	for (i = 0; i < RESERVE_NR; i++)
		if (fifo_push(&ca->free[i], bucket))
			return true;

	return false;
}

static int bch_allocator_thread(void *arg)
{
	struct cache *ca = arg;

	mutex_lock(&ca->set->bucket_lock);

	while (1) {
		/*
		 * First, we pull buckets off of the unused and free_inc lists,
		 * possibly issue discards to them, then we add them to the
		 * free list:
		 */
		while (!fifo_empty(&ca->free_inc)) {
			long bucket;

			fifo_pop(&ca->free_inc, bucket);

			if (ca->discard) {
				mutex_unlock(&ca->set->bucket_lock);
				blkdev_issue_discard(ca->bdev,
					bucket_to_sector(ca->set, bucket),
					ca->sb.bucket_size, GFP_KERNEL, 0);
				mutex_lock(&ca->set->bucket_lock);
			}

			allocator_wait(ca, bch_allocator_push(ca, bucket));
			wake_up(&ca->set->btree_cache_wait);
			wake_up(&ca->set->bucket_wait);
		}

		/*
		 * We've run out of free buckets, so we need to find some we
		 * can invalidate. First, invalidate them in memory and add
		 * them to the free_inc list:
		 */
retry_invalidate:
		allocator_wait(ca, ca->set->gc_mark_valid &&
			       !ca->invalidate_needs_gc);
		invalidate_buckets(ca);

		/*
		 * Now, we write their new gens to disk so we can start writing
		 * new stuff to them:
		 */
		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
		if (CACHE_SYNC(&ca->set->sb)) {
			/*
			 * This could deadlock if an allocation with a btree
			 * node locked ever blocked - having the btree node
			 * locked would block garbage collection, but here we're
			 * waiting on garbage collection before we invalidate
			 * and free anything.
			 *
			 * But this should be safe since the btree code always
			 * uses btree_check_reserve() before allocating now, and
			 * if it fails it blocks without btree nodes locked.
			 */
			if (!fifo_full(&ca->free_inc))
				goto retry_invalidate;

			bch_prio_write(ca);
		}
	}
}

/* Allocation */

long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
{
	DEFINE_WAIT(w);
	struct bucket *b;
	long r;

	/* fastpath */
	if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
	    fifo_pop(&ca->free[reserve], r))
		goto out;

	if (!wait) {
		trace_bcache_alloc_fail(ca, reserve);
		return -1;
	}

	do {
		prepare_to_wait(&ca->set->bucket_wait, &w,
				TASK_UNINTERRUPTIBLE);

		mutex_unlock(&ca->set->bucket_lock);
		schedule();
		mutex_lock(&ca->set->bucket_lock);
	} while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
		 !fifo_pop(&ca->free[reserve], r));

	finish_wait(&ca->set->bucket_wait, &w);
out:
	if (ca->alloc_thread)
		wake_up_process(ca->alloc_thread);

	trace_bcache_alloc(ca, reserve);

	if (expensive_debug_checks(ca->set)) {
		size_t iter;
		long i;
		unsigned j;

		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);

		for (j = 0; j < RESERVE_NR; j++)
			fifo_for_each(i, &ca->free[j], iter)
				BUG_ON(i == r);
		fifo_for_each(i, &ca->free_inc, iter)
			BUG_ON(i == r);
	}

	b = ca->buckets + r;

	BUG_ON(atomic_read(&b->pin) != 1);

	SET_GC_SECTORS_USED(b, ca->sb.bucket_size);

	if (reserve <= RESERVE_PRIO) {
		SET_GC_MARK(b, GC_MARK_METADATA);
		SET_GC_MOVE(b, 0);
		b->prio = BTREE_PRIO;
	} else {
		SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
		SET_GC_MOVE(b, 0);
		b->prio = INITIAL_PRIO;
	}

	return r;
}

void __bch_bucket_free(struct cache *ca, struct bucket *b)
{
	SET_GC_MARK(b, 0);
	SET_GC_SECTORS_USED(b, 0);
}

void bch_bucket_free(struct cache_set *c, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		__bch_bucket_free(PTR_CACHE(c, k, i),
				  PTR_BUCKET(c, k, i));
}

int __bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
			   struct bkey *k, int n, bool wait)
{
	int i;

	lockdep_assert_held(&c->bucket_lock);
	BUG_ON(!n || n > c->caches_loaded || n > 8);

	bkey_init(k);

	/* sort by free space/prio of oldest data in caches */

	for (i = 0; i < n; i++) {
		struct cache *ca = c->cache_by_alloc[i];
		long b = bch_bucket_alloc(ca, reserve, wait);

		if (b == -1)
			goto err;

		k->ptr[i] = PTR(ca->buckets[b].gen,
				bucket_to_sector(c, b),
				ca->sb.nr_this_dev);

		SET_KEY_PTRS(k, i + 1);
	}

	return 0;
err:
	bch_bucket_free(c, k);
	bkey_put(c, k);
	return -1;
}

int bch_bucket_alloc_set(struct cache_set *c, unsigned reserve,
			 struct bkey *k, int n, bool wait)
{
	int ret;

	mutex_lock(&c->bucket_lock);
	ret = __bch_bucket_alloc_set(c, reserve, k, n, wait);
	mutex_unlock(&c->bucket_lock);
	return ret;
}
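
/*
 * Sketch of a hypothetical caller, allocating a single data bucket and
 * sleeping until one is available:
 *
 *	struct bkey k;
 *
 *	if (bch_bucket_alloc_set(c, RESERVE_NONE, &k, 1, true))
 *		return -ENOSPC;	// allocation failed
 *
 * On success k has one pointer (KEY_PTRS(k) == 1) referencing the new
 * bucket, and the bucket stays pinned until the key is released with
 * bkey_put().
 */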

/* Sector allocator */

struct open_bucket {
	struct list_head	list;
	unsigned		last_write_point;
	unsigned		sectors_free;
	BKEY_PADDED(key);
};

/*
 * We keep multiple buckets open for writes, and try to segregate different
 * write streams for better cache utilization: first we look for a bucket where
 * the last write to it was sequential with the current write, and failing that
 * we look for a bucket that was last used by the same task.
 *
 * The idea is that if you've got multiple tasks pulling data into the cache at
 * the same time, you'll get better cache utilization if you try to segregate
 * their data and preserve locality.
 *
 * For example, say you've started Firefox at the same time you're copying a
 * bunch of files. Firefox will likely end up being fairly hot and stay in the
 * cache a while, but the data you copied might not be; if you wrote all that
 * data to the same buckets it'd get invalidated at the same time.
 *
 * Both of those tasks will be doing fairly random IO so we can't rely on
 * detecting sequential IO to segregate their data, but going off of the task
 * should be a sane heuristic.
 */
static struct open_bucket *pick_data_bucket(struct cache_set *c,
					    const struct bkey *search,
					    unsigned write_point,
					    struct bkey *alloc)
{
	struct open_bucket *ret, *ret_task = NULL;

	list_for_each_entry_reverse(ret, &c->data_buckets, list)
		if (!bkey_cmp(&ret->key, search))
			goto found;
		else if (ret->last_write_point == write_point)
			ret_task = ret;

	ret = ret_task ?: list_first_entry(&c->data_buckets,
					   struct open_bucket, list);
found:
	if (!ret->sectors_free && KEY_PTRS(alloc)) {
		ret->sectors_free = c->sb.bucket_size;
		bkey_copy(&ret->key, alloc);
		bkey_init(alloc);
	}

	if (!ret->sectors_free)
		ret = NULL;

	return ret;
}

/*
 * Allocates some space in the cache to write to, sets k to point to the newly
 * allocated space, and updates KEY_SIZE(k) and KEY_OFFSET(k) (to point to the
 * end of the newly allocated space).
 *
 * May allocate fewer sectors than @sectors; KEY_SIZE(k) indicates how many
 * sectors were actually allocated.
 *
 * If @wait is true, will not fail.
 */
bool bch_alloc_sectors(struct cache_set *c, struct bkey *k, unsigned sectors,
		       unsigned write_point, unsigned write_prio, bool wait)
{
	struct open_bucket *b;
	BKEY_PADDED(key) alloc;
	unsigned i;

	/*
	 * We might have to allocate a new bucket, which we can't do with a
	 * spinlock held. So if we have to allocate, we drop the lock, allocate
	 * and then retry. KEY_PTRS() indicates whether alloc points to
	 * allocated bucket(s).
	 */

	bkey_init(&alloc.key);
	spin_lock(&c->data_bucket_lock);

	while (!(b = pick_data_bucket(c, k, write_point, &alloc.key))) {
		unsigned watermark = write_prio
			? RESERVE_MOVINGGC
			: RESERVE_NONE;

		spin_unlock(&c->data_bucket_lock);

		if (bch_bucket_alloc_set(c, watermark, &alloc.key, 1, wait))
			return false;

		spin_lock(&c->data_bucket_lock);
	}

	/*
	 * If we had to allocate, we might race and not need to allocate the
	 * second time we call pick_data_bucket(). If we allocated a bucket but
	 * didn't use it, drop the refcount bch_bucket_alloc_set() took:
	 */
	if (KEY_PTRS(&alloc.key))
		bkey_put(c, &alloc.key);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		EBUG_ON(ptr_stale(c, &b->key, i));

	/* Set up the pointer to the space we're allocating: */

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		k->ptr[i] = b->key.ptr[i];

	sectors = min(sectors, b->sectors_free);

	SET_KEY_OFFSET(k, KEY_OFFSET(k) + sectors);
	SET_KEY_SIZE(k, sectors);
	SET_KEY_PTRS(k, KEY_PTRS(&b->key));

	/*
	 * Move b to the end of the lru, and keep track of what this bucket was
	 * last used for:
	 */
	list_move_tail(&b->list, &c->data_buckets);
	bkey_copy_key(&b->key, k);
	b->last_write_point = write_point;

	b->sectors_free	-= sectors;

	for (i = 0; i < KEY_PTRS(&b->key); i++) {
		SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);

		atomic_long_add(sectors,
				&PTR_CACHE(c, &b->key, i)->sectors_written);
	}

	if (b->sectors_free < c->sb.block_size)
		b->sectors_free = 0;

	/*
	 * k takes refcounts on the buckets it points to until it's inserted
	 * into the btree, but if we're done with this bucket we just transfer
	 * the refcount the bucket was allocated with.
	 */
	if (b->sectors_free)
		for (i = 0; i < KEY_PTRS(&b->key); i++)
			atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin);

	spin_unlock(&c->data_bucket_lock);
	return true;
}
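
/*
 * Sketch of a caller (hypothetical; the real callers live in request.c).
 * The key starts out describing where the write begins and comes back
 * describing the space actually allocated:
 *
 *	struct bkey *k = ...;	// insert key for the write
 *
 *	bkey_init(k);
 *	SET_KEY_OFFSET(k, start_sector);
 *
 *	if (!bch_alloc_sectors(c, k, sectors, write_point, write_prio, wait))
 *		return false;	// out of space and wait was false
 *
 *	// KEY_SIZE(k) <= sectors were allocated, ending at KEY_OFFSET(k)
 */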

/* Init */

void bch_open_buckets_free(struct cache_set *c)
{
	struct open_bucket *b;

	while (!list_empty(&c->data_buckets)) {
		b = list_first_entry(&c->data_buckets,
				     struct open_bucket, list);
		list_del(&b->list);
		kfree(b);
	}
}

int bch_open_buckets_alloc(struct cache_set *c)
{
	int i;

	spin_lock_init(&c->data_bucket_lock);

	for (i = 0; i < 6; i++) {
		struct open_bucket *b = kzalloc(sizeof(*b), GFP_KERNEL);

		if (!b)
			return -ENOMEM;

		list_add(&b->list, &c->data_buckets);
	}

	return 0;
}

int bch_cache_allocator_start(struct cache *ca)
{
	struct task_struct *k = kthread_run(bch_allocator_thread,
					    ca, "bcache_allocator");
	if (IS_ERR(k))
		return PTR_ERR(k);

	ca->alloc_thread = k;
	return 0;
}