Lines Matching refs:ca

72 uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)  in bch_inc_gen()  argument
76 ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b)); in bch_inc_gen()
77 WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX); in bch_inc_gen()
79 if (CACHE_SYNC(&ca->set->sb)) { in bch_inc_gen()
80 ca->need_save_prio = max(ca->need_save_prio, in bch_inc_gen()
82 WARN_ON_ONCE(ca->need_save_prio > BUCKET_DISK_GEN_MAX); in bch_inc_gen()
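The bch_inc_gen() lines above increment a bucket's in-memory generation and track, via need_gc and need_save_prio, how far generations have drifted since the last garbage collection and the last on-disk priority write. Below is a minimal userspace sketch of that bookkeeping; the structs and field names are invented stand-ins (the kernel splits these counters between struct cache and struct cache_set, and only updates need_save_prio when CACHE_SYNC() is set), so treat it as a model rather than the real code.

```c
#include <stdint.h>

/* Invented stand-ins for the kernel structs; only the fields the sketch needs. */
struct bucket { uint8_t gen, last_gc, disk_gen; };
struct cache  { unsigned need_gc, need_save_prio; };

/* Generation drift since the last GC / the last on-disk priority write. */
static uint8_t bucket_gc_gen(const struct bucket *b)   { return b->gen - b->last_gc; }
static uint8_t bucket_disk_gen(const struct bucket *b) { return b->gen - b->disk_gen; }

/* Bump the generation and remember the worst drift seen so far, mirroring
 * the max()/WARN_ON_ONCE() bookkeeping in the listing above. */
static uint8_t inc_gen(struct cache *ca, struct bucket *b)
{
	uint8_t ret = ++b->gen;

	if (bucket_gc_gen(b) > ca->need_gc)
		ca->need_gc = bucket_gc_gen(b);
	if (bucket_disk_gen(b) > ca->need_save_prio)
		ca->need_save_prio = bucket_disk_gen(b);

	return ret;
}
```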
90 struct cache *ca; in bch_rescale_priorities() local
109 for_each_cache(ca, c, i) in bch_rescale_priorities()
110 for_each_bucket(b, ca) in bch_rescale_priorities()
126 struct cache *ca; member
136 struct cache *ca = d->ca; in discard_finish() local
141 bdevname(ca->bdev, buf)); in discard_finish()
142 d->ca->discard = 0; in discard_finish()
145 mutex_lock(&ca->set->bucket_lock); in discard_finish()
147 fifo_push(&ca->free, d->bucket); in discard_finish()
148 list_add(&d->list, &ca->discards); in discard_finish()
149 atomic_dec(&ca->discards_in_flight); in discard_finish()
151 mutex_unlock(&ca->set->bucket_lock); in discard_finish()
153 closure_wake_up(&ca->set->bucket_wait); in discard_finish()
154 wake_up(&ca->set->alloc_wait); in discard_finish()
156 closure_put(&ca->set->cl); in discard_finish()
165 static void do_discard(struct cache *ca, long bucket) in do_discard() argument
167 struct discard *d = list_first_entry(&ca->discards, in do_discard()
173 atomic_inc(&ca->discards_in_flight); in do_discard()
174 closure_get(&ca->set->cl); in do_discard()
178 d->bio.bi_sector = bucket_to_sector(ca->set, d->bucket); in do_discard()
179 d->bio.bi_bdev = ca->bdev; in do_discard()
183 d->bio.bi_size = bucket_bytes(ca); in do_discard()
198 bool bch_bucket_add_unused(struct cache *ca, struct bucket *b) in bch_bucket_add_unused() argument
202 if (fifo_used(&ca->free) > ca->watermark[WATERMARK_MOVINGGC] && in bch_bucket_add_unused()
203 CACHE_REPLACEMENT(&ca->sb) == CACHE_REPLACEMENT_FIFO) in bch_bucket_add_unused()
209 fifo_push(&ca->unused, b - ca->buckets)) { in bch_bucket_add_unused()
217 static bool can_invalidate_bucket(struct cache *ca, struct bucket *b) in can_invalidate_bucket() argument
224 static void invalidate_one_bucket(struct cache *ca, struct bucket *b) in invalidate_one_bucket() argument
226 bch_inc_gen(ca, b); in invalidate_one_bucket()
229 fifo_push(&ca->free_inc, b - ca->buckets); in invalidate_one_bucket()
233 (((unsigned) (b->prio - ca->set->min_prio)) * GC_SECTORS_USED(b))
238 static void invalidate_buckets_lru(struct cache *ca) in invalidate_buckets_lru() argument
243 ca->heap.used = 0; in invalidate_buckets_lru()
245 for_each_bucket(b, ca) { in invalidate_buckets_lru()
252 if (fifo_full(&ca->unused)) in invalidate_buckets_lru()
255 if (!can_invalidate_bucket(ca, b)) in invalidate_buckets_lru()
259 bch_bucket_add_unused(ca, b)) in invalidate_buckets_lru()
262 if (!heap_full(&ca->heap)) in invalidate_buckets_lru()
263 heap_add(&ca->heap, b, bucket_max_cmp); in invalidate_buckets_lru()
264 else if (bucket_max_cmp(b, heap_peek(&ca->heap))) { in invalidate_buckets_lru()
265 ca->heap.data[0] = b; in invalidate_buckets_lru()
266 heap_sift(&ca->heap, 0, bucket_max_cmp); in invalidate_buckets_lru()
270 for (i = ca->heap.used / 2 - 1; i >= 0; --i) in invalidate_buckets_lru()
271 heap_sift(&ca->heap, i, bucket_min_cmp); in invalidate_buckets_lru()
273 while (!fifo_full(&ca->free_inc)) { in invalidate_buckets_lru()
274 if (!heap_pop(&ca->heap, b, bucket_min_cmp)) { in invalidate_buckets_lru()
279 ca->invalidate_needs_gc = 1; in invalidate_buckets_lru()
280 bch_queue_gc(ca->set); in invalidate_buckets_lru()
284 invalidate_one_bucket(ca, b); in invalidate_buckets_lru()
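The invalidate_buckets_lru() lines above show a two-pass selection: while scanning every bucket, a bounded heap ordered by bucket_max_cmp keeps only the cheapest candidates, then the same array is re-sifted with bucket_min_cmp so the cheapest buckets are popped and invalidated first. The standalone sketch below reproduces that shape with a plain array heap; the bucket struct, the score function, and K are all made up for the example.

```c
#include <stdio.h>

#define K 4	/* how many candidates to keep; plays the role of free_inc's capacity */

struct bucket { unsigned prio, sectors_used; };

/* Illustrative stand-in for the listing's bucket_prio() macro. */
static unsigned bucket_score(const struct bucket *b)
{
	return b->prio * b->sectors_used;
}

/* cmp(a, b) > 0 means a belongs above b; sift_down restores that order. */
static void sift_down(struct bucket **h, int n, int i,
		      int (*cmp)(const struct bucket *, const struct bucket *))
{
	for (;;) {
		int l = 2 * i + 1, r = l + 1, m = i;

		if (l < n && cmp(h[l], h[m]) > 0)
			m = l;
		if (r < n && cmp(h[r], h[m]) > 0)
			m = r;
		if (m == i)
			return;

		struct bucket *tmp = h[i];
		h[i] = h[m];
		h[m] = tmp;
		i = m;
	}
}

static int max_cmp(const struct bucket *a, const struct bucket *b)
{
	unsigned sa = bucket_score(a), sb = bucket_score(b);
	return (sa > sb) - (sa < sb);	/* root = highest score kept */
}

static int min_cmp(const struct bucket *a, const struct bucket *b)
{
	unsigned sa = bucket_score(a), sb = bucket_score(b);
	return (sa < sb) - (sa > sb);	/* root = lowest score kept */
}

int main(void)
{
	struct bucket all[] = { {3,10}, {1,2}, {7,9}, {2,1}, {5,5}, {4,8}, {6,3} };
	struct bucket *heap[K];
	int n = 0;

	/* Pass 1: scan every bucket, keeping the K lowest-scored in a max-heap. */
	for (int i = 0; i < (int) (sizeof(all) / sizeof(all[0])); i++) {
		if (n < K) {
			heap[n++] = &all[i];
			for (int j = n / 2 - 1; j >= 0; j--)
				sift_down(heap, n, j, max_cmp);
		} else if (max_cmp(&all[i], heap[0]) < 0) {
			heap[0] = &all[i];	/* cheaper than the current worst: replace root */
			sift_down(heap, n, 0, max_cmp);
		}
	}

	/* Pass 2: re-sift as a min-heap and invalidate the cheapest buckets first. */
	for (int j = n / 2 - 1; j >= 0; j--)
		sift_down(heap, n, j, min_cmp);

	while (n) {
		printf("invalidate bucket with score %u\n", bucket_score(heap[0]));
		heap[0] = heap[--n];
		sift_down(heap, n, 0, min_cmp);
	}
	return 0;
}
```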
288 static void invalidate_buckets_fifo(struct cache *ca) in invalidate_buckets_fifo() argument
293 while (!fifo_full(&ca->free_inc)) { in invalidate_buckets_fifo()
294 if (ca->fifo_last_bucket < ca->sb.first_bucket || in invalidate_buckets_fifo()
295 ca->fifo_last_bucket >= ca->sb.nbuckets) in invalidate_buckets_fifo()
296 ca->fifo_last_bucket = ca->sb.first_bucket; in invalidate_buckets_fifo()
298 b = ca->buckets + ca->fifo_last_bucket++; in invalidate_buckets_fifo()
300 if (can_invalidate_bucket(ca, b)) in invalidate_buckets_fifo()
301 invalidate_one_bucket(ca, b); in invalidate_buckets_fifo()
303 if (++checked >= ca->sb.nbuckets) { in invalidate_buckets_fifo()
304 ca->invalidate_needs_gc = 1; in invalidate_buckets_fifo()
305 bch_queue_gc(ca->set); in invalidate_buckets_fifo()
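The invalidate_buckets_fifo() lines above walk the bucket array from a persistent cursor, wrapping at the end, and give up (queueing garbage collection) once a full pass has been checked. A simplified, self-contained version of that scan, with invented types in place of the kernel ones, might look like:

```c
#include <stdbool.h>
#include <stddef.h>

struct bucket { bool reclaimable; };

/* Invented model of the fields the scan needs from struct cache. */
struct cache_model {
	struct bucket *buckets;
	size_t         first_bucket, nbuckets;
	size_t         fifo_last_bucket;	/* scan cursor, persists across calls */
	bool           invalidate_needs_gc;
};

static size_t fifo_invalidate(struct cache_model *ca, size_t want)
{
	size_t invalidated = 0, checked = 0;

	while (invalidated < want) {
		/* Wrap the cursor back to the first usable bucket. */
		if (ca->fifo_last_bucket < ca->first_bucket ||
		    ca->fifo_last_bucket >= ca->nbuckets)
			ca->fifo_last_bucket = ca->first_bucket;

		struct bucket *b = &ca->buckets[ca->fifo_last_bucket++];

		if (b->reclaimable) {
			b->reclaimable = false;	/* stands in for invalidate_one_bucket() */
			invalidated++;
		}

		/* Full pass without filling the request: ask for GC and stop. */
		if (++checked >= ca->nbuckets) {
			ca->invalidate_needs_gc = true;
			break;
		}
	}
	return invalidated;
}
```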
311 static void invalidate_buckets_random(struct cache *ca) in invalidate_buckets_random() argument
316 while (!fifo_full(&ca->free_inc)) { in invalidate_buckets_random()
320 n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket); in invalidate_buckets_random()
321 n += ca->sb.first_bucket; in invalidate_buckets_random()
323 b = ca->buckets + n; in invalidate_buckets_random()
325 if (can_invalidate_bucket(ca, b)) in invalidate_buckets_random()
326 invalidate_one_bucket(ca, b); in invalidate_buckets_random()
328 if (++checked >= ca->sb.nbuckets / 2) { in invalidate_buckets_random()
329 ca->invalidate_needs_gc = 1; in invalidate_buckets_random()
330 bch_queue_gc(ca->set); in invalidate_buckets_random()
336 static void invalidate_buckets(struct cache *ca) in invalidate_buckets() argument
338 if (ca->invalidate_needs_gc) in invalidate_buckets()
341 switch (CACHE_REPLACEMENT(&ca->sb)) { in invalidate_buckets()
343 invalidate_buckets_lru(ca); in invalidate_buckets()
346 invalidate_buckets_fifo(ca); in invalidate_buckets()
349 invalidate_buckets_random(ca); in invalidate_buckets()
354 fifo_used(&ca->free), ca->free.size, in invalidate_buckets()
355 fifo_used(&ca->free_inc), ca->free_inc.size, in invalidate_buckets()
356 fifo_used(&ca->unused), ca->unused.size); in invalidate_buckets()
359 #define allocator_wait(ca, cond) \ argument
364 prepare_to_wait(&ca->set->alloc_wait, \
369 mutex_unlock(&(ca)->set->bucket_lock); \
370 if (test_bit(CACHE_SET_STOPPING_2, &ca->set->flags)) { \
371 finish_wait(&ca->set->alloc_wait, &__wait); \
376 mutex_lock(&(ca)->set->bucket_lock); \
379 finish_wait(&ca->set->alloc_wait, &__wait); \
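allocator_wait() in the lines above is a macro that re-checks a condition in a loop, dropping ca->set->bucket_lock and sleeping on alloc_wait while the condition is false, and bailing out early when the cache set is being stopped. A rough userspace analogue of the same pattern, using a pthread mutex and condition variable instead of the kernel wait queue (all names here are illustrative, not the kernel API), is:

```c
#include <pthread.h>
#include <stdbool.h>

/* Hypothetical stand-in for the allocator state guarded by bucket_lock. */
struct alloc_state {
	pthread_mutex_t lock;
	pthread_cond_t  wait;		/* plays the role of alloc_wait */
	bool            stopping;	/* plays the role of CACHE_SET_STOPPING_2 */
	int             free_slots;
};

/* Sleep, with the lock dropped, until cond(s) holds or the set is stopping.
 * Returns false if woken because the allocator should shut down. */
static bool allocator_wait(struct alloc_state *s,
			   bool (*cond)(struct alloc_state *))
{
	while (!cond(s)) {
		if (s->stopping)
			return false;
		/* pthread_cond_wait() atomically drops the lock while sleeping
		 * and re-acquires it before returning, like the unlock /
		 * schedule / lock dance in the kernel macro above. */
		pthread_cond_wait(&s->wait, &s->lock);
	}
	return true;
}

static bool have_free_slot(struct alloc_state *s)
{
	return s->free_slots > 0;
}
```

The caller is expected to hold s->lock when calling allocator_wait(s, have_free_slot), and other threads signal s->wait after changing the state it tests or setting stopping, which mirrors how the kernel macro is always entered with bucket_lock held.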
384 struct cache *ca = container_of(cl, struct cache, alloc); in bch_allocator_thread() local
386 mutex_lock(&ca->set->bucket_lock); in bch_allocator_thread()
397 if ((!atomic_read(&ca->set->prio_blocked) || in bch_allocator_thread()
398 !CACHE_SYNC(&ca->set->sb)) && in bch_allocator_thread()
399 !fifo_empty(&ca->unused)) in bch_allocator_thread()
400 fifo_pop(&ca->unused, bucket); in bch_allocator_thread()
401 else if (!fifo_empty(&ca->free_inc)) in bch_allocator_thread()
402 fifo_pop(&ca->free_inc, bucket); in bch_allocator_thread()
406 allocator_wait(ca, (int) fifo_free(&ca->free) > in bch_allocator_thread()
407 atomic_read(&ca->discards_in_flight)); in bch_allocator_thread()
409 if (ca->discard) { in bch_allocator_thread()
410 allocator_wait(ca, !list_empty(&ca->discards)); in bch_allocator_thread()
411 do_discard(ca, bucket); in bch_allocator_thread()
413 fifo_push(&ca->free, bucket); in bch_allocator_thread()
414 closure_wake_up(&ca->set->bucket_wait); in bch_allocator_thread()
424 allocator_wait(ca, ca->set->gc_mark_valid && in bch_allocator_thread()
425 (ca->need_save_prio > 64 || in bch_allocator_thread()
426 !ca->invalidate_needs_gc)); in bch_allocator_thread()
427 invalidate_buckets(ca); in bch_allocator_thread()
433 allocator_wait(ca, !atomic_read(&ca->set->prio_blocked)); in bch_allocator_thread()
434 if (CACHE_SYNC(&ca->set->sb) && in bch_allocator_thread()
435 (!fifo_empty(&ca->free_inc) || in bch_allocator_thread()
436 ca->need_save_prio > 64)) in bch_allocator_thread()
437 bch_prio_write(ca); in bch_allocator_thread()
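Taken together, the bch_allocator_thread() lines above describe a pipeline: reclaimed buckets are popped from the unused or free_inc FIFOs, optionally discarded, and only then pushed onto free, where bch_bucket_alloc() can hand them out; when nothing is reclaimable the thread invalidates more buckets and, on sync cache sets, writes priorities. The sketch below compresses one step of that flow onto plain ring buffers; the fifo type and every name in it are invented for the example.

```c
#include <stdbool.h>
#include <stddef.h>

#define FIFO_SIZE 128

/* Tiny ring buffer standing in for the kernel's fifo_* helpers. */
struct fifo { long data[FIFO_SIZE]; size_t head, tail; };

static bool fifo_empty(struct fifo *f) { return f->head == f->tail; }
static bool fifo_full(struct fifo *f)  { return (f->head + 1) % FIFO_SIZE == f->tail; }

static bool fifo_push(struct fifo *f, long v)
{
	if (fifo_full(f))
		return false;
	f->data[f->head] = v;
	f->head = (f->head + 1) % FIFO_SIZE;
	return true;
}

static bool fifo_pop(struct fifo *f, long *v)
{
	if (fifo_empty(f))
		return false;
	*v = f->data[f->tail];
	f->tail = (f->tail + 1) % FIFO_SIZE;
	return true;
}

struct cache_model {
	struct fifo unused, free_inc, free;
	bool        discard;	/* whether buckets are trimmed before reuse */
};

/* One step of the allocator loop: take a reclaimed bucket, optionally
 * discard it, then make it allocatable by pushing it onto "free". */
static bool allocator_step(struct cache_model *ca)
{
	long bucket;

	if (fifo_pop(&ca->unused, &bucket) || fifo_pop(&ca->free_inc, &bucket)) {
		if (ca->discard) {
			/* The real thread issues a TRIM here, as do_discard()
			 * does, and pushes to "free" from the completion. */
		}
		return fifo_push(&ca->free, bucket);
	}
	return false;	/* nothing reclaimed: the real thread invalidates buckets */
}
```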
441 long bch_bucket_alloc(struct cache *ca, unsigned watermark, struct closure *cl) in bch_bucket_alloc() argument
445 wake_up(&ca->set->alloc_wait); in bch_bucket_alloc()
447 if (fifo_used(&ca->free) > ca->watermark[watermark] && in bch_bucket_alloc()
448 fifo_pop(&ca->free, r)) { in bch_bucket_alloc()
449 struct bucket *b = ca->buckets + r; in bch_bucket_alloc()
454 for (iter = 0; iter < prio_buckets(ca) * 2; iter++) in bch_bucket_alloc()
455 BUG_ON(ca->prio_buckets[iter] == (uint64_t) r); in bch_bucket_alloc()
457 fifo_for_each(i, &ca->free, iter) in bch_bucket_alloc()
459 fifo_for_each(i, &ca->free_inc, iter) in bch_bucket_alloc()
461 fifo_for_each(i, &ca->unused, iter) in bch_bucket_alloc()
466 SET_GC_SECTORS_USED(b, ca->sb.bucket_size); in bch_bucket_alloc()
480 atomic_read(&ca->set->prio_blocked), fifo_used(&ca->free), in bch_bucket_alloc()
481 fifo_used(&ca->free_inc), fifo_used(&ca->unused)); in bch_bucket_alloc()
484 closure_wait(&ca->set->bucket_wait, cl); in bch_bucket_alloc()
487 mutex_unlock(&ca->set->bucket_lock); in bch_bucket_alloc()
489 mutex_lock(&ca->set->bucket_lock); in bch_bucket_alloc()
523 struct cache *ca = c->cache_by_alloc[i]; in __bch_bucket_alloc_set() local
524 long b = bch_bucket_alloc(ca, watermark, cl); in __bch_bucket_alloc_set()
529 k->ptr[i] = PTR(ca->buckets[b].gen, in __bch_bucket_alloc_set()
531 ca->sb.nr_this_dev); in __bch_bucket_alloc_set()
555 void bch_cache_allocator_exit(struct cache *ca) in bch_cache_allocator_exit() argument
559 while (!list_empty(&ca->discards)) { in bch_cache_allocator_exit()
560 d = list_first_entry(&ca->discards, struct discard, list); in bch_cache_allocator_exit()
567 int bch_cache_allocator_init(struct cache *ca) in bch_cache_allocator_init() argument
578 ca->watermark[WATERMARK_PRIO] = 0; in bch_cache_allocator_init()
580 ca->watermark[WATERMARK_METADATA] = prio_buckets(ca); in bch_cache_allocator_init()
582 ca->watermark[WATERMARK_MOVINGGC] = 8 + in bch_cache_allocator_init()
583 ca->watermark[WATERMARK_METADATA]; in bch_cache_allocator_init()
585 ca->watermark[WATERMARK_NONE] = ca->free.size / 2 + in bch_cache_allocator_init()
586 ca->watermark[WATERMARK_MOVINGGC]; in bch_cache_allocator_init()
593 d->ca = ca; in bch_cache_allocator_init()
595 list_add(&d->list, &ca->discards); in bch_cache_allocator_init()
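The bch_cache_allocator_init() lines above set tiered watermarks so that priority writes always have headroom over metadata, metadata over moving GC, and moving GC over ordinary allocations. A small self-contained illustration of that reservation scheme follows; the enum, struct, and example values are made up, with only the watermark arithmetic taken from the listing.

```c
#include <stdbool.h>
#include <stdio.h>

enum watermark { WATERMARK_PRIO, WATERMARK_METADATA, WATERMARK_MOVINGGC,
		 WATERMARK_NONE, WATERMARK_MAX };

/* Each tier may only allocate while more than watermark[tier] buckets remain
 * free, so higher-priority users always have a reserve the others cannot eat. */
struct alloc_reserve {
	unsigned watermark[WATERMARK_MAX];
	unsigned free_buckets;
};

static void reserve_init(struct alloc_reserve *r, unsigned free_size,
			 unsigned prio_buckets)
{
	r->watermark[WATERMARK_PRIO]     = 0;
	r->watermark[WATERMARK_METADATA] = prio_buckets;
	r->watermark[WATERMARK_MOVINGGC] = 8 + r->watermark[WATERMARK_METADATA];
	r->watermark[WATERMARK_NONE]     = free_size / 2 +
					   r->watermark[WATERMARK_MOVINGGC];
	r->free_buckets = free_size;
}

static bool can_alloc(const struct alloc_reserve *r, enum watermark w)
{
	return r->free_buckets > r->watermark[w];
}

int main(void)
{
	struct alloc_reserve r;

	reserve_init(&r, 64, 2);
	r.free_buckets = 20;	/* pretend the free FIFO has drained */

	printf("normal write allowed: %d\n", can_alloc(&r, WATERMARK_NONE));
	printf("moving GC allowed:    %d\n", can_alloc(&r, WATERMARK_MOVINGGC));
	printf("metadata allowed:     %d\n", can_alloc(&r, WATERMARK_METADATA));
	return 0;
}
```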