Lines matching references to the identifier c in drivers/md/bcache/btree.c

99 #define PTR_HASH(c, k)							\  argument
100 (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
109 return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache); in write_block()
116 bch_btree_sort(&b->keys, &b->c->sort); in bch_btree_init_next()
118 bch_btree_sort_lazy(&b->keys, &b->c->sort); in bch_btree_init_next()
122 bset_magic(&b->c->cache->sb)); in bch_btree_init_next()
128 void bkey_put(struct cache_set *c, struct bkey *k) in bkey_put() argument
133 if (ptr_available(c, k, i)) in bkey_put()
134 atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin); in bkey_put()
159 iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO); in bch_btree_node_read_done()
160 iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size; in bch_btree_node_read_done()
178 if (b->written + set_blocks(i, block_bytes(b->c->cache)) > in bch_btree_node_read_done()
183 if (i->magic != bset_magic(&b->c->cache->sb)) in bch_btree_node_read_done()
204 b->written += set_blocks(i, block_bytes(b->c->cache)); in bch_btree_node_read_done()
210 i = ((void *) i) + block_bytes(b->c->cache)) in bch_btree_node_read_done()
214 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort); in bch_btree_node_read_done()
224 bset_magic(&b->c->cache->sb)); in bch_btree_node_read_done()
226 mempool_free(iter, &b->c->fill_iter); in bch_btree_node_read_done()
230 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys", in bch_btree_node_read_done()
231 err, PTR_BUCKET_NR(b->c, &b->key, 0), in bch_btree_node_read_done()
253 bio = bch_bbio_alloc(b->c); in bch_btree_node_read()
261 bch_submit_bbio(bio, b->c, &b->key, 0); in bch_btree_node_read()
267 bch_bbio_free(bio, b->c); in bch_btree_node_read()
273 bch_time_stats_update(&b->c->btree_read_time, start_time); in bch_btree_node_read()
277 bch_cache_set_error(b->c, "io error reading bucket %zu", in bch_btree_node_read()
278 PTR_BUCKET_NR(b->c, &b->key, 0)); in bch_btree_node_read()
284 !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked)) in btree_complete_write()
285 wake_up_allocators(b->c); in btree_complete_write()
289 __closure_wake_up(&b->c->journal.wait); in btree_complete_write()
308 bch_bbio_free(b->bio, b->c); in __btree_node_write_done()
334 bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree"); in btree_node_write_endio()
348 b->bio = bch_bbio_alloc(b->c); in do_btree_node_write()
352 b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c->cache)); in do_btree_node_write()
385 bch_submit_bbio(b->bio, b->c, &k.key, 0); in do_btree_node_write()
396 bch_submit_bbio(b->bio, b->c, &k.key, 0); in do_btree_node_write()
421 closure_init(&b->io, parent ?: &b->c->cl); in __bch_btree_node_write()
428 atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size, in __bch_btree_node_write()
429 &b->c->cache->btree_sectors_written); in __bch_btree_node_write()
431 b->written += set_blocks(i, block_bytes(b->c->cache)); in __bch_btree_node_write()
497 journal_pin_cmp(b->c, w->journal, journal_ref)) { in bch_btree_leaf_dirty()
519 #define mca_reserve(c) (((!IS_ERR_OR_NULL(c->root) && c->root->level) \ argument
520 ? c->root->level : 1) * 8 + 16)
521 #define mca_can_free(c) \ argument
522 max_t(int, 0, c->btree_cache_used - mca_reserve(c))
530 b->c->btree_cache_used--; in mca_data_free()
531 list_move(&b->list, &b->c->btree_cache_freed); in mca_data_free()
540 list_move(&b->list, &b->c->btree_cache_freeable); in mca_bucket_free()
552 ilog2(b->c->btree_pages), in mca_data_alloc()
555 b->c->btree_cache_used++; in mca_data_alloc()
556 list_move(&b->list, &b->c->btree_cache); in mca_data_alloc()
558 list_move(&b->list, &b->c->btree_cache_freed); in mca_data_alloc()
583 static struct btree *mca_bucket_alloc(struct cache_set *c, in mca_bucket_alloc() argument
601 b->c = c; in mca_bucket_alloc()
613 lockdep_assert_held(&b->c->bucket_lock); in mca_reap()
670 struct cache_set *c = container_of(shrink, struct cache_set, shrink); in bch_mca_scan() local
676 if (c->shrinker_disabled) in bch_mca_scan()
679 if (c->btree_cache_alloc_lock) in bch_mca_scan()
684 mutex_lock(&c->bucket_lock); in bch_mca_scan()
685 else if (!mutex_trylock(&c->bucket_lock)) in bch_mca_scan()
695 nr /= c->btree_pages; in bch_mca_scan()
698 nr = min_t(unsigned long, nr, mca_can_free(c)); in bch_mca_scan()
701 btree_cache_used = c->btree_cache_used; in bch_mca_scan()
702 list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) { in bch_mca_scan()
715 list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) { in bch_mca_scan()
730 mutex_unlock(&c->bucket_lock); in bch_mca_scan()
731 return freed * c->btree_pages; in bch_mca_scan()
737 struct cache_set *c = container_of(shrink, struct cache_set, shrink); in bch_mca_count() local
739 if (c->shrinker_disabled) in bch_mca_count()
742 if (c->btree_cache_alloc_lock) in bch_mca_count()
745 return mca_can_free(c) * c->btree_pages; in bch_mca_count()
748 void bch_btree_cache_free(struct cache_set *c) in bch_btree_cache_free() argument
755 if (c->shrink.list.next) in bch_btree_cache_free()
756 unregister_shrinker(&c->shrink); in bch_btree_cache_free()
758 mutex_lock(&c->bucket_lock); in bch_btree_cache_free()
761 if (c->verify_data) in bch_btree_cache_free()
762 list_move(&c->verify_data->list, &c->btree_cache); in bch_btree_cache_free()
764 free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb))); in bch_btree_cache_free()
767 list_splice(&c->btree_cache_freeable, in bch_btree_cache_free()
768 &c->btree_cache); in bch_btree_cache_free()
770 while (!list_empty(&c->btree_cache)) { in bch_btree_cache_free()
771 b = list_first_entry(&c->btree_cache, struct btree, list); in bch_btree_cache_free()
785 while (!list_empty(&c->btree_cache_freed)) { in bch_btree_cache_free()
786 b = list_first_entry(&c->btree_cache_freed, in bch_btree_cache_free()
793 mutex_unlock(&c->bucket_lock); in bch_btree_cache_free()
796 int bch_btree_cache_alloc(struct cache_set *c) in bch_btree_cache_alloc() argument
800 for (i = 0; i < mca_reserve(c); i++) in bch_btree_cache_alloc()
801 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL)) in bch_btree_cache_alloc()
804 list_splice_init(&c->btree_cache, in bch_btree_cache_alloc()
805 &c->btree_cache_freeable); in bch_btree_cache_alloc()
808 mutex_init(&c->verify_lock); in bch_btree_cache_alloc()
810 c->verify_ondisk = (void *) in bch_btree_cache_alloc()
812 ilog2(meta_bucket_pages(&c->cache->sb))); in bch_btree_cache_alloc()
813 if (!c->verify_ondisk) { in bch_btree_cache_alloc()
822 c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL); in bch_btree_cache_alloc()
824 if (c->verify_data && in bch_btree_cache_alloc()
825 c->verify_data->keys.set->data) in bch_btree_cache_alloc()
826 list_del_init(&c->verify_data->list); in bch_btree_cache_alloc()
828 c->verify_data = NULL; in bch_btree_cache_alloc()
831 c->shrink.count_objects = bch_mca_count; in bch_btree_cache_alloc()
832 c->shrink.scan_objects = bch_mca_scan; in bch_btree_cache_alloc()
833 c->shrink.seeks = 4; in bch_btree_cache_alloc()
834 c->shrink.batch = c->btree_pages * 2; in bch_btree_cache_alloc()
836 if (register_shrinker(&c->shrink, "md-bcache:%pU", c->set_uuid)) in bch_btree_cache_alloc()
845 static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k) in mca_hash() argument
847 return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)]; in mca_hash()
850 static struct btree *mca_find(struct cache_set *c, struct bkey *k) in mca_find() argument
855 hlist_for_each_entry_rcu(b, mca_hash(c, k), hash) in mca_find()
856 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k)) in mca_find()
864 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op) in mca_cannibalize_lock() argument
866 spin_lock(&c->btree_cannibalize_lock); in mca_cannibalize_lock()
867 if (likely(c->btree_cache_alloc_lock == NULL)) { in mca_cannibalize_lock()
868 c->btree_cache_alloc_lock = current; in mca_cannibalize_lock()
869 } else if (c->btree_cache_alloc_lock != current) { in mca_cannibalize_lock()
871 prepare_to_wait(&c->btree_cache_wait, &op->wait, in mca_cannibalize_lock()
873 spin_unlock(&c->btree_cannibalize_lock); in mca_cannibalize_lock()
876 spin_unlock(&c->btree_cannibalize_lock); in mca_cannibalize_lock()
881 static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op, in mca_cannibalize() argument
886 trace_bcache_btree_cache_cannibalize(c); in mca_cannibalize()
888 if (mca_cannibalize_lock(c, op)) in mca_cannibalize()
891 list_for_each_entry_reverse(b, &c->btree_cache, list) in mca_cannibalize()
895 list_for_each_entry_reverse(b, &c->btree_cache, list) in mca_cannibalize()
909 void bch_cannibalize_unlock(struct cache_set *c) in bch_cannibalize_unlock() argument
911 spin_lock(&c->btree_cannibalize_lock); in bch_cannibalize_unlock()
912 if (c->btree_cache_alloc_lock == current) { in bch_cannibalize_unlock()
913 c->btree_cache_alloc_lock = NULL; in bch_cannibalize_unlock()
914 wake_up(&c->btree_cache_wait); in bch_cannibalize_unlock()
916 spin_unlock(&c->btree_cannibalize_lock); in bch_cannibalize_unlock()
919 static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op, in mca_alloc() argument
926 lockdep_assert_held(&c->bucket_lock); in mca_alloc()
928 if (mca_find(c, k)) in mca_alloc()
934 list_for_each_entry(b, &c->btree_cache_freeable, list) in mca_alloc()
941 list_for_each_entry(b, &c->btree_cache_freed, list) in mca_alloc()
950 b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO); in mca_alloc()
961 list_move(&b->list, &c->btree_cache); in mca_alloc()
963 hlist_add_head_rcu(&b->hash, mca_hash(c, k)); in mca_alloc()
973 &b->c->expensive_debug_checks); in mca_alloc()
976 &b->c->expensive_debug_checks); in mca_alloc()
983 b = mca_cannibalize(c, op, k); in mca_alloc()
1002 struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op, in bch_btree_node_get() argument
1011 b = mca_find(c, k); in bch_btree_node_get()
1017 mutex_lock(&c->bucket_lock); in bch_btree_node_get()
1018 b = mca_alloc(c, op, k, level); in bch_btree_node_get()
1019 mutex_unlock(&c->bucket_lock); in bch_btree_node_get()
1032 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) { in bch_btree_node_get()
1063 mutex_lock(&parent->c->bucket_lock); in btree_node_prefetch()
1064 b = mca_alloc(parent->c, NULL, k, parent->level - 1); in btree_node_prefetch()
1065 mutex_unlock(&parent->c->bucket_lock); in btree_node_prefetch()
1080 BUG_ON(b == b->c->root); in btree_node_free()
1106 mutex_lock(&b->c->bucket_lock); in btree_node_free()
1107 bch_bucket_free(b->c, &b->key); in btree_node_free()
1109 mutex_unlock(&b->c->bucket_lock); in btree_node_free()
1116 struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op, in __bch_btree_node_alloc() argument
1123 mutex_lock(&c->bucket_lock); in __bch_btree_node_alloc()
1127 if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait)) in __bch_btree_node_alloc()
1130 bkey_put(c, &k.key); in __bch_btree_node_alloc()
1131 SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS); in __bch_btree_node_alloc()
1133 b = mca_alloc(c, op, &k.key, level); in __bch_btree_node_alloc()
1138 cache_bug(c, in __bch_btree_node_alloc()
1144 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb)); in __bch_btree_node_alloc()
1146 mutex_unlock(&c->bucket_lock); in __bch_btree_node_alloc()
1151 bch_bucket_free(c, &k.key); in __bch_btree_node_alloc()
1153 mutex_unlock(&c->bucket_lock); in __bch_btree_node_alloc()
1155 trace_bcache_btree_node_alloc_fail(c); in __bch_btree_node_alloc()
1159 static struct btree *bch_btree_node_alloc(struct cache_set *c, in bch_btree_node_alloc() argument
1163 return __bch_btree_node_alloc(c, op, level, op != NULL, parent); in bch_btree_node_alloc()
1169 struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent); in btree_node_alloc_replacement()
1173 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort); in btree_node_alloc_replacement()
1185 mutex_lock(&b->c->bucket_lock); in make_btree_freeing_key()
1187 atomic_inc(&b->c->prio_blocked); in make_btree_freeing_key()
1194 bch_inc_gen(b->c->cache, in make_btree_freeing_key()
1195 PTR_BUCKET(b->c, &b->key, i))); in make_btree_freeing_key()
1197 mutex_unlock(&b->c->bucket_lock); in make_btree_freeing_key()
1202 struct cache_set *c = b->c; in btree_check_reserve() local
1203 struct cache *ca = c->cache; in btree_check_reserve()
1204 unsigned int reserve = (c->root->level - b->level) * 2 + 1; in btree_check_reserve()
1206 mutex_lock(&c->bucket_lock); in btree_check_reserve()
1210 prepare_to_wait(&c->btree_cache_wait, &op->wait, in btree_check_reserve()
1212 mutex_unlock(&c->bucket_lock); in btree_check_reserve()
1216 mutex_unlock(&c->bucket_lock); in btree_check_reserve()
1218 return mca_cannibalize_lock(b->c, op); in btree_check_reserve()
1223 static uint8_t __bch_btree_mark_key(struct cache_set *c, int level, in __bch_btree_mark_key() argument
1239 if (!ptr_available(c, k, i)) in __bch_btree_mark_key()
1242 g = PTR_BUCKET(c, k, i); in __bch_btree_mark_key()
1247 if (ptr_stale(c, k, i)) { in __bch_btree_mark_key()
1248 stale = max(stale, ptr_stale(c, k, i)); in __bch_btree_mark_key()
1254 c, "inconsistent ptrs: mark = %llu, level = %i", in __bch_btree_mark_key()
1275 #define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
1277 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k) in bch_initial_mark_key() argument
1282 if (ptr_available(c, k, i) && in bch_initial_mark_key()
1283 !ptr_stale(c, k, i)) { in bch_initial_mark_key()
1284 struct bucket *b = PTR_BUCKET(c, k, i); in bch_initial_mark_key()
1294 __bch_btree_mark_key(c, level, k); in bch_initial_mark_key()
1297 void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats) in bch_update_bucket_in_use() argument
1299 stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets; in bch_update_bucket_in_use()
1332 if (b->c->gc_always_rewrite) in btree_gc_mark_node()
1376 blocks = btree_default_blocks(b->c) * 2 / 3; in btree_gc_coalesce()
1380 block_bytes(b->c->cache)) > blocks * (nodes - 1)) in btree_gc_coalesce()
1414 block_bytes(b->c->cache)) > blocks) in btree_gc_coalesce()
1430 block_bytes(b->c->cache)) > in btree_gc_coalesce()
1439 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) > in btree_gc_coalesce()
1516 atomic_dec(&b->c->prio_blocked); in btree_gc_coalesce()
1577 static size_t btree_gc_min_nodes(struct cache_set *c) in btree_gc_min_nodes() argument
1595 min_nodes = c->gc_stats.nodes / MAX_GC_TIMES; in btree_gc_min_nodes()
1613 bch_btree_iter_stack_init(&b->keys, &iter, &b->c->gc_done); in btree_gc_recurse()
1622 r->b = bch_btree_node_get(b->c, op, k, b->level - 1, in btree_gc_recurse()
1653 bkey_copy_key(&b->c->gc_done, &last->b->key); in btree_gc_recurse()
1669 if (atomic_read(&b->c->search_inflight) && in btree_gc_recurse()
1670 gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) { in btree_gc_recurse()
1716 __bch_btree_mark_key(b->c, b->level + 1, &b->key); in bch_btree_gc_root()
1724 bkey_copy_key(&b->c->gc_done, &b->key); in bch_btree_gc_root()
1729 static void btree_gc_start(struct cache_set *c) in btree_gc_start() argument
1734 if (!c->gc_mark_valid) in btree_gc_start()
1737 mutex_lock(&c->bucket_lock); in btree_gc_start()
1739 c->gc_mark_valid = 0; in btree_gc_start()
1740 c->gc_done = ZERO_KEY; in btree_gc_start()
1742 ca = c->cache; in btree_gc_start()
1751 mutex_unlock(&c->bucket_lock); in btree_gc_start()
1754 static void bch_btree_gc_finish(struct cache_set *c) in bch_btree_gc_finish() argument
1761 mutex_lock(&c->bucket_lock); in bch_btree_gc_finish()
1763 set_gc_sectors(c); in bch_btree_gc_finish()
1764 c->gc_mark_valid = 1; in bch_btree_gc_finish()
1765 c->need_gc = 0; in bch_btree_gc_finish()
1767 for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++) in bch_btree_gc_finish()
1768 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i), in bch_btree_gc_finish()
1773 for (i = 0; i < c->devices_max_used; i++) { in bch_btree_gc_finish()
1774 struct bcache_device *d = c->devices[i]; in bch_btree_gc_finish()
1778 if (!d || UUID_FLASH_ONLY(&c->uuids[i])) in bch_btree_gc_finish()
1786 SET_GC_MARK(PTR_BUCKET(c, &w->key, j), in bch_btree_gc_finish()
1792 c->avail_nbuckets = 0; in bch_btree_gc_finish()
1794 ca = c->cache; in bch_btree_gc_finish()
1805 c->need_gc = max(c->need_gc, bucket_gc_gen(b)); in bch_btree_gc_finish()
1813 c->avail_nbuckets++; in bch_btree_gc_finish()
1816 mutex_unlock(&c->bucket_lock); in bch_btree_gc_finish()
1819 static void bch_btree_gc(struct cache_set *c) in bch_btree_gc() argument
1827 trace_bcache_gc_start(c); in bch_btree_gc()
1833 btree_gc_start(c); in bch_btree_gc()
1837 ret = bcache_btree_root(gc_root, c, &op, &writes, &stats); in bch_btree_gc()
1846 } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags)); in bch_btree_gc()
1848 bch_btree_gc_finish(c); in bch_btree_gc()
1849 wake_up_allocators(c); in bch_btree_gc()
1851 bch_time_stats_update(&c->btree_gc_time, start_time); in bch_btree_gc()
1855 bch_update_bucket_in_use(c, &stats); in bch_btree_gc()
1856 memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat)); in bch_btree_gc()
1858 trace_bcache_gc_end(c); in bch_btree_gc()
1860 bch_moving_gc(c); in bch_btree_gc()
1863 static bool gc_should_run(struct cache_set *c) in gc_should_run() argument
1865 struct cache *ca = c->cache; in gc_should_run()
1870 if (atomic_read(&c->sectors_to_gc) < 0) in gc_should_run()
1878 struct cache_set *c = arg; in bch_gc_thread() local
1881 wait_event_interruptible(c->gc_wait, in bch_gc_thread()
1883 test_bit(CACHE_SET_IO_DISABLE, &c->flags) || in bch_gc_thread()
1884 gc_should_run(c)); in bch_gc_thread()
1887 test_bit(CACHE_SET_IO_DISABLE, &c->flags)) in bch_gc_thread()
1890 set_gc_sectors(c); in bch_gc_thread()
1891 bch_btree_gc(c); in bch_gc_thread()
1898 int bch_gc_thread_start(struct cache_set *c) in bch_gc_thread_start() argument
1900 c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc"); in bch_gc_thread_start()
1901 return PTR_ERR_OR_ZERO(c->gc_thread); in bch_gc_thread_start()
1913 bch_initial_mark_key(b->c, b->level, k); in bch_btree_check_recurse()
1915 bch_initial_mark_key(b->c, b->level + 1, &b->key); in bch_btree_check_recurse()
1929 b->c->gc_stats.nodes++; in bch_btree_check_recurse()
1948 struct cache_set *c = check_state->c; in bch_btree_check_thread() local
1958 bch_btree_iter_stack_init(&c->root->keys, &iter, NULL); in bch_btree_check_thread()
1959 k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad); in bch_btree_check_thread()
1978 &c->root->keys, in bch_btree_check_thread()
2000 btree_node_prefetch(c->root, p); in bch_btree_check_thread()
2001 c->gc_stats.nodes++; in bch_btree_check_thread()
2003 ret = bcache_btree(check_recurse, p, c->root, &op); in bch_btree_check_thread()
2011 bch_cannibalize_unlock(c); in bch_btree_check_thread()
2012 finish_wait(&c->btree_cache_wait, &(&op)->wait); in bch_btree_check_thread()
2045 int bch_btree_check(struct cache_set *c) in bch_btree_check() argument
2054 for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid) in bch_btree_check()
2055 bch_initial_mark_key(c, c->root->level, k); in bch_btree_check()
2057 bch_initial_mark_key(c, c->root->level + 1, &c->root->key); in bch_btree_check()
2059 if (c->root->level == 0) in bch_btree_check()
2063 check_state.c = c; in bch_btree_check()
2071 rw_lock(0, c->root, c->root->level); in bch_btree_check()
2114 rw_unlock(0, c->root); in bch_btree_check()
2118 void bch_initial_gc_finish(struct cache_set *c) in bch_initial_gc_finish() argument
2120 struct cache *ca = c->cache; in bch_initial_gc_finish()
2123 bch_btree_gc_finish(c); in bch_initial_gc_finish()
2125 mutex_lock(&c->bucket_lock); in bch_initial_gc_finish()
2151 mutex_unlock(&c->bucket_lock); in bch_initial_gc_finish()
2203 bkey_put(b->c, k); in bch_btree_insert_keys()
2255 block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5; in btree_split()
2262 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent); in btree_split()
2267 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL); in btree_split()
2342 bch_time_stats_update(&b->c->btree_split_time, start_time); in btree_split()
2346 bkey_put(b->c, &n2->key); in btree_split()
2350 bkey_put(b->c, &n1->key); in btree_split()
2403 op->lock = b->c->root->level + 1; in bch_btree_insert_node()
2405 } else if (op->lock <= b->c->root->level) { in bch_btree_insert_node()
2406 op->lock = b->c->root->level + 1; in bch_btree_insert_node()
2478 int bch_btree_insert(struct cache_set *c, struct keylist *keys, in bch_btree_insert() argument
2494 ret = bch_btree_map_leaf_nodes(&op.op, c, in bch_btree_insert()
2505 bkey_put(c, k); in bch_btree_insert()
2524 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO); in bch_btree_set_root()
2526 mutex_lock(&b->c->bucket_lock); in bch_btree_set_root()
2528 mutex_unlock(&b->c->bucket_lock); in bch_btree_set_root()
2530 b->c->root = b; in bch_btree_set_root()
2532 bch_journal_meta(b->c, &cl); in bch_btree_set_root()
2567 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c, in __bch_btree_map_nodes() argument
2570 return bcache_btree_root(map_nodes_recurse, c, op, from, fn, flags); in __bch_btree_map_nodes()
2602 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c, in bch_btree_map_keys() argument
2605 return bcache_btree_root(map_keys_recurse, c, op, from, fn, flags); in bch_btree_map_keys()
2678 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf, in bch_refill_keybuf() argument
2692 bch_btree_map_keys(&refill.op, c, &buf->last_scanned, in bch_refill_keybuf()
2778 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c, in bch_keybuf_next_rescan() argument
2795 bch_refill_keybuf(c, buf, end, pred); in bch_keybuf_next_rescan()
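
Note: the PTR_HASH() macro shown at lines 99-100 of the listing above folds the high bits of a key's first packed pointer (which identify the bucket it points at) together with that pointer's generation, so a stale pointer to a reused bucket no longer matches a cached node. Below is a minimal standalone sketch of that arithmetic; the struct names, field layout, and the 8-bit generation mask are simplified stand-ins assumed for illustration, not the real struct bkey / struct cache_set definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for the bcache types: only the fields the hash
     * touches are modelled here (assumption). */
    struct sketch_bkey {
            uint64_t ptr[1];                /* packed bucket pointer */
    };

    struct sketch_cache_set {
            unsigned short bucket_bits;     /* ilog2(bucket size in sectors) */
    };

    /* Stand-in for PTR_GEN(k, 0): the pointer's generation, assumed here to
     * occupy the low byte of the packed pointer. */
    static uint64_t sketch_ptr_gen(const struct sketch_bkey *k)
    {
            return k->ptr[0] & 0xff;
    }

    /* Same shape as PTR_HASH(c, k): the packed pointer shifted down by
     * bucket_bits, OR'd with the pointer's generation. */
    static uint64_t sketch_ptr_hash(const struct sketch_cache_set *c,
                                    const struct sketch_bkey *k)
    {
            return (k->ptr[0] >> c->bucket_bits) | sketch_ptr_gen(k);
    }

    int main(void)
    {
            struct sketch_cache_set c = { .bucket_bits = 13 };
            struct sketch_bkey k = { .ptr = { (42ULL << 13) | 3 } };

            /* Bumping the generation changes the hash even though the high
             * (bucket-identifying) bits stay the same. */
            printf("hash(gen 3) = %llu\n",
                   (unsigned long long)sketch_ptr_hash(&c, &k));
            k.ptr[0] = (42ULL << 13) | 4;
            printf("hash(gen 4) = %llu\n",
                   (unsigned long long)sketch_ptr_hash(&c, &k));
            return 0;
    }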
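
Similarly, the mca_reserve()/mca_can_free() macros at lines 519-522 size the in-memory btree node cache: a reserve of (root level, at least 1) * 8 + 16 nodes is always kept, and only cached nodes beyond that reserve are reported to the shrinker. The following is a minimal sketch of that arithmetic with plain integers and hypothetical helper names standing in for the c->root->level and c->btree_cache_used fields read by the real macros.

    #include <stdio.h>

    /* Hypothetical helpers mirroring the arithmetic of mca_reserve() and
     * mca_can_free(); the real macros read struct cache_set fields. */
    static int sketch_mca_reserve(int root_level)
    {
            /* 8 cached nodes per tree level, plus a fixed slack of 16 */
            return (root_level > 0 ? root_level : 1) * 8 + 16;
    }

    static int sketch_mca_can_free(int btree_cache_used, int root_level)
    {
            int spare = btree_cache_used - sketch_mca_reserve(root_level);

            return spare > 0 ? spare : 0;   /* max_t(int, 0, ...) */
    }

    int main(void)
    {
            /* e.g. a 3-level tree with 100 cached nodes keeps a reserve of
             * 40 and lets the shrinker reclaim up to 60 of them */
            printf("reserve = %d, can_free = %d\n",
                   sketch_mca_reserve(3),
                   sketch_mca_can_free(100, 3));
            return 0;
    }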