Lines Matching refs: c
368 struct cache_set *c = container_of(cl, struct cache_set, sb_write); in bcache_write_super_unlock() local
370 up(&c->sb_write_mutex); in bcache_write_super_unlock()
373 void bcache_write_super(struct cache_set *c) in bcache_write_super() argument
375 struct closure *cl = &c->sb_write; in bcache_write_super()
376 struct cache *ca = c->cache; in bcache_write_super()
380 down(&c->sb_write_mutex); in bcache_write_super()
381 closure_init(cl, &c->cl); in bcache_write_super()
403 struct cache_set *c = container_of(cl, struct cache_set, uuid_write); in uuid_endio() local
405 cache_set_err_on(bio->bi_status, c, "accessing uuids"); in uuid_endio()
406 bch_bbio_free(bio, c); in uuid_endio()
412 struct cache_set *c = container_of(cl, struct cache_set, uuid_write); in uuid_io_unlock() local
414 up(&c->uuid_write_mutex); in uuid_io_unlock()
417 static void uuid_io(struct cache_set *c, blk_opf_t opf, struct bkey *k, in uuid_io() argument
420 struct closure *cl = &c->uuid_write; in uuid_io()
426 down(&c->uuid_write_mutex); in uuid_io()
430 struct bio *bio = bch_bbio_alloc(c); in uuid_io()
437 bch_bio_map(bio, c->uuids); in uuid_io()
439 bch_submit_bbio(bio, c, k, i); in uuid_io()
449 for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) in uuid_io()
452 u - c->uuids, u->uuid, u->label, in uuid_io()
458 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) in uuid_read() argument
462 if (__bch_btree_ptr_invalid(c, k)) in uuid_read()
465 bkey_copy(&c->uuid_bucket, k); in uuid_read()
466 uuid_io(c, REQ_OP_READ, k, cl); in uuid_read()
469 struct uuid_entry_v0 *u0 = (void *) c->uuids; in uuid_read()
470 struct uuid_entry *u1 = (void *) c->uuids; in uuid_read()
481 for (i = c->nr_uuids - 1; in uuid_read()
499 static int __uuid_write(struct cache_set *c) in __uuid_write() argument
503 struct cache *ca = c->cache; in __uuid_write()
509 if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true)) in __uuid_write()
514 uuid_io(c, REQ_OP_WRITE, &k.key, &cl); in __uuid_write()
520 bkey_copy(&c->uuid_bucket, &k.key); in __uuid_write()
521 bkey_put(c, &k.key); in __uuid_write()
525 int bch_uuid_write(struct cache_set *c) in bch_uuid_write() argument
527 int ret = __uuid_write(c); in bch_uuid_write()
530 bch_journal_meta(c, NULL); in bch_uuid_write()
535 static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid) in uuid_find() argument
539 for (u = c->uuids; in uuid_find()
540 u < c->uuids + c->nr_uuids; u++) in uuid_find()
547 static struct uuid_entry *uuid_find_empty(struct cache_set *c) in uuid_find_empty() argument
551 return uuid_find(c, zero_uuid); in uuid_find_empty()
792 if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) { in bcache_device_unlink()
793 struct cache *ca = d->c->cache; in bcache_device_unlink()
795 sysfs_remove_link(&d->c->kobj, d->name); in bcache_device_unlink()
802 static void bcache_device_link(struct bcache_device *d, struct cache_set *c, in bcache_device_link() argument
805 struct cache *ca = c->cache; in bcache_device_link()
813 ret = sysfs_create_link(&d->kobj, &c->kobj, "cache"); in bcache_device_link()
817 ret = sysfs_create_link(&c->kobj, &d->kobj, d->name); in bcache_device_link()
828 atomic_dec(&d->c->attached_dev_nr); in bcache_device_detach()
831 struct uuid_entry *u = d->c->uuids + d->id; in bcache_device_detach()
836 bch_uuid_write(d->c); in bcache_device_detach()
841 d->c->devices[d->id] = NULL; in bcache_device_detach()
842 closure_put(&d->c->caching); in bcache_device_detach()
843 d->c = NULL; in bcache_device_detach()
846 static void bcache_device_attach(struct bcache_device *d, struct cache_set *c, in bcache_device_attach() argument
850 d->c = c; in bcache_device_attach()
851 c->devices[id] = d; in bcache_device_attach()
853 if (id >= c->devices_max_used) in bcache_device_attach()
854 c->devices_max_used = id + 1; in bcache_device_attach()
856 closure_get(&c->caching); in bcache_device_attach()
880 if (d->c) in bcache_device_free()
995 static void calc_cached_dev_sectors(struct cache_set *c) in calc_cached_dev_sectors() argument
1000 list_for_each_entry(dc, &c->cached_devs, list) in calc_cached_dev_sectors()
1003 c->cached_dev_sectors = sectors; in calc_cached_dev_sectors()
1068 if (!d->c && in bch_cached_dev_run()
1138 struct cache_set *c = dc->disk.c; in cached_dev_detach_finish() local
1156 calc_cached_dev_sectors(c); in cached_dev_detach_finish()
1190 int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, in bch_cached_dev_attach() argument
1198 if ((set_uuid && memcmp(set_uuid, c->set_uuid, 16)) || in bch_cached_dev_attach()
1199 (!set_uuid && memcmp(dc->sb.set_uuid, c->set_uuid, 16))) in bch_cached_dev_attach()
1202 if (dc->disk.c) { in bch_cached_dev_attach()
1207 if (test_bit(CACHE_SET_STOPPING, &c->flags)) { in bch_cached_dev_attach()
1212 if (dc->sb.block_size < c->cache->sb.block_size) { in bch_cached_dev_attach()
1220 list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) { in bch_cached_dev_attach()
1229 u = uuid_find(c, dc->sb.uuid); in bch_cached_dev_attach()
1245 u = uuid_find_empty(c); in bch_cached_dev_attach()
1265 bch_uuid_write(c); in bch_cached_dev_attach()
1267 memcpy(dc->sb.set_uuid, c->set_uuid, 16); in bch_cached_dev_attach()
1274 bch_uuid_write(c); in bch_cached_dev_attach()
1277 bcache_device_attach(&dc->disk, c, u - c->uuids); in bch_cached_dev_attach()
1278 list_move(&dc->list, &c->cached_devs); in bch_cached_dev_attach()
1279 calc_cached_dev_sectors(c); in bch_cached_dev_attach()
1319 bcache_device_link(&dc->disk, c, "bdev"); in bch_cached_dev_attach()
1320 atomic_inc(&c->attached_dev_nr); in bch_cached_dev_attach()
1322 if (bch_has_feature_obso_large_bucket(&(c->cache->sb))) { in bch_cached_dev_attach()
1334 dc->disk.c->set_uuid); in bch_cached_dev_attach()
1453 struct cache_set *c; in register_bdev() local
1473 list_for_each_entry(c, &bch_cache_sets, list) in register_bdev()
1474 bch_cached_dev_attach(dc, c, NULL); in register_bdev()
1507 &d->c->flash_dev_dirty_sectors); in flash_dev_free()
1525 static int flash_dev_run(struct cache_set *c, struct uuid_entry *u) in flash_dev_run() argument
1538 if (bcache_device_init(d, block_bytes(c->cache), u->sectors, in flash_dev_run()
1542 bcache_device_attach(d, c, u - c->uuids); in flash_dev_run()
1553 bcache_device_link(d, c, "volume"); in flash_dev_run()
1555 if (bch_has_feature_obso_large_bucket(&c->cache->sb)) { in flash_dev_run()
1568 static int flash_devs_run(struct cache_set *c) in flash_devs_run() argument
1573 for (u = c->uuids; in flash_devs_run()
1574 u < c->uuids + c->nr_uuids && !ret; in flash_devs_run()
1577 ret = flash_dev_run(c, u); in flash_devs_run()
1582 int bch_flash_dev_create(struct cache_set *c, uint64_t size) in bch_flash_dev_create() argument
1586 if (test_bit(CACHE_SET_STOPPING, &c->flags)) in bch_flash_dev_create()
1589 if (!test_bit(CACHE_SET_RUNNING, &c->flags)) in bch_flash_dev_create()
1592 u = uuid_find_empty(c); in bch_flash_dev_create()
1605 bch_uuid_write(c); in bch_flash_dev_create()
1607 return flash_dev_run(c, u); in bch_flash_dev_create()
1629 bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) in bch_cache_set_error() argument
1634 if (c->on_error != ON_ERROR_PANIC && in bch_cache_set_error()
1635 test_bit(CACHE_SET_STOPPING, &c->flags)) in bch_cache_set_error()
1638 if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags)) in bch_cache_set_error()
1652 c->set_uuid, &vaf); in bch_cache_set_error()
1656 if (c->on_error == ON_ERROR_PANIC) in bch_cache_set_error()
1659 bch_cache_set_unregister(c); in bch_cache_set_error()
1666 struct cache_set *c = container_of(kobj, struct cache_set, kobj); in bch_cache_set_release() local
1668 kfree(c); in bch_cache_set_release()
1674 struct cache_set *c = container_of(cl, struct cache_set, cl); in cache_set_free() local
1677 debugfs_remove(c->debug); in cache_set_free()
1679 bch_open_buckets_free(c); in cache_set_free()
1680 bch_btree_cache_free(c); in cache_set_free()
1681 bch_journal_free(c); in cache_set_free()
1684 bch_bset_sort_state_free(&c->sort); in cache_set_free()
1685 free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->cache->sb))); in cache_set_free()
1687 ca = c->cache; in cache_set_free()
1690 c->cache = NULL; in cache_set_free()
1695 if (c->moving_gc_wq) in cache_set_free()
1696 destroy_workqueue(c->moving_gc_wq); in cache_set_free()
1697 bioset_exit(&c->bio_split); in cache_set_free()
1698 mempool_exit(&c->fill_iter); in cache_set_free()
1699 mempool_exit(&c->bio_meta); in cache_set_free()
1700 mempool_exit(&c->search); in cache_set_free()
1701 kfree(c->devices); in cache_set_free()
1703 list_del(&c->list); in cache_set_free()
1706 pr_info("Cache set %pU unregistered\n", c->set_uuid); in cache_set_free()
1709 closure_debug_destroy(&c->cl); in cache_set_free()
1710 kobject_put(&c->kobj); in cache_set_free()
1715 struct cache_set *c = container_of(cl, struct cache_set, caching); in cache_set_flush() local
1716 struct cache *ca = c->cache; in cache_set_flush()
1719 bch_cache_accounting_destroy(&c->accounting); in cache_set_flush()
1721 kobject_put(&c->internal); in cache_set_flush()
1722 kobject_del(&c->kobj); in cache_set_flush()
1724 if (!IS_ERR_OR_NULL(c->gc_thread)) in cache_set_flush()
1725 kthread_stop(c->gc_thread); in cache_set_flush()
1727 if (!IS_ERR(c->root)) in cache_set_flush()
1728 list_add(&c->root->list, &c->btree_cache); in cache_set_flush()
1734 if (!test_bit(CACHE_SET_IO_DISABLE, &c->flags)) in cache_set_flush()
1735 list_for_each_entry(b, &c->btree_cache, list) { in cache_set_flush()
1745 if (c->journal.cur) { in cache_set_flush()
1746 cancel_delayed_work_sync(&c->journal.work); in cache_set_flush()
1748 c->journal.work.work.func(&c->journal.work.work); in cache_set_flush()
1770 static void conditional_stop_bcache_device(struct cache_set *c, in conditional_stop_bcache_device() argument
1776 d->disk->disk_name, c->set_uuid); in conditional_stop_bcache_device()
1812 struct cache_set *c = container_of(cl, struct cache_set, caching); in __cache_set_unregister() local
1819 for (i = 0; i < c->devices_max_used; i++) { in __cache_set_unregister()
1820 d = c->devices[i]; in __cache_set_unregister()
1824 if (!UUID_FLASH_ONLY(&c->uuids[i]) && in __cache_set_unregister()
1825 test_bit(CACHE_SET_UNREGISTERING, &c->flags)) { in __cache_set_unregister()
1828 if (test_bit(CACHE_SET_IO_DISABLE, &c->flags)) in __cache_set_unregister()
1829 conditional_stop_bcache_device(c, d, dc); in __cache_set_unregister()
1840 void bch_cache_set_stop(struct cache_set *c) in bch_cache_set_stop() argument
1842 if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags)) in bch_cache_set_stop()
1844 closure_queue(&c->caching); in bch_cache_set_stop()
1847 void bch_cache_set_unregister(struct cache_set *c) in bch_cache_set_unregister() argument
1849 set_bit(CACHE_SET_UNREGISTERING, &c->flags); in bch_cache_set_unregister()
1850 bch_cache_set_stop(c); in bch_cache_set_unregister()
1860 struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL); in bch_cache_set_alloc() local
1862 if (!c) in bch_cache_set_alloc()
1866 closure_init(&c->cl, NULL); in bch_cache_set_alloc()
1867 set_closure_fn(&c->cl, cache_set_free, system_wq); in bch_cache_set_alloc()
1869 closure_init(&c->caching, &c->cl); in bch_cache_set_alloc()
1870 set_closure_fn(&c->caching, __cache_set_unregister, system_wq); in bch_cache_set_alloc()
1873 closure_set_stopped(&c->cl); in bch_cache_set_alloc()
1874 closure_put(&c->cl); in bch_cache_set_alloc()
1876 kobject_init(&c->kobj, &bch_cache_set_ktype); in bch_cache_set_alloc()
1877 kobject_init(&c->internal, &bch_cache_set_internal_ktype); in bch_cache_set_alloc()
1879 bch_cache_accounting_init(&c->accounting, &c->cl); in bch_cache_set_alloc()
1881 memcpy(c->set_uuid, sb->set_uuid, 16); in bch_cache_set_alloc()
1883 c->cache = ca; in bch_cache_set_alloc()
1884 c->cache->set = c; in bch_cache_set_alloc()
1885 c->bucket_bits = ilog2(sb->bucket_size); in bch_cache_set_alloc()
1886 c->block_bits = ilog2(sb->block_size); in bch_cache_set_alloc()
1887 c->nr_uuids = meta_bucket_bytes(sb) / sizeof(struct uuid_entry); in bch_cache_set_alloc()
1888 c->devices_max_used = 0; in bch_cache_set_alloc()
1889 atomic_set(&c->attached_dev_nr, 0); in bch_cache_set_alloc()
1890 c->btree_pages = meta_bucket_pages(sb); in bch_cache_set_alloc()
1891 if (c->btree_pages > BTREE_MAX_PAGES) in bch_cache_set_alloc()
1892 c->btree_pages = max_t(int, c->btree_pages / 4, in bch_cache_set_alloc()
1895 sema_init(&c->sb_write_mutex, 1); in bch_cache_set_alloc()
1896 mutex_init(&c->bucket_lock); in bch_cache_set_alloc()
1897 init_waitqueue_head(&c->btree_cache_wait); in bch_cache_set_alloc()
1898 spin_lock_init(&c->btree_cannibalize_lock); in bch_cache_set_alloc()
1899 init_waitqueue_head(&c->bucket_wait); in bch_cache_set_alloc()
1900 init_waitqueue_head(&c->gc_wait); in bch_cache_set_alloc()
1901 sema_init(&c->uuid_write_mutex, 1); in bch_cache_set_alloc()
1903 spin_lock_init(&c->btree_gc_time.lock); in bch_cache_set_alloc()
1904 spin_lock_init(&c->btree_split_time.lock); in bch_cache_set_alloc()
1905 spin_lock_init(&c->btree_read_time.lock); in bch_cache_set_alloc()
1907 bch_moving_init_cache_set(c); in bch_cache_set_alloc()
1909 INIT_LIST_HEAD(&c->list); in bch_cache_set_alloc()
1910 INIT_LIST_HEAD(&c->cached_devs); in bch_cache_set_alloc()
1911 INIT_LIST_HEAD(&c->btree_cache); in bch_cache_set_alloc()
1912 INIT_LIST_HEAD(&c->btree_cache_freeable); in bch_cache_set_alloc()
1913 INIT_LIST_HEAD(&c->btree_cache_freed); in bch_cache_set_alloc()
1914 INIT_LIST_HEAD(&c->data_buckets); in bch_cache_set_alloc()
1920 c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL); in bch_cache_set_alloc()
1921 if (!c->devices) in bch_cache_set_alloc()
1924 if (mempool_init_slab_pool(&c->search, 32, bch_search_cache)) in bch_cache_set_alloc()
1927 if (mempool_init_kmalloc_pool(&c->bio_meta, 2, in bch_cache_set_alloc()
1932 if (mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size)) in bch_cache_set_alloc()
1935 if (bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio), in bch_cache_set_alloc()
1939 c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb); in bch_cache_set_alloc()
1940 if (!c->uuids) in bch_cache_set_alloc()
1943 c->moving_gc_wq = alloc_workqueue("bcache_gc", WQ_MEM_RECLAIM, 0); in bch_cache_set_alloc()
1944 if (!c->moving_gc_wq) in bch_cache_set_alloc()
1947 if (bch_journal_alloc(c)) in bch_cache_set_alloc()
1950 if (bch_btree_cache_alloc(c)) in bch_cache_set_alloc()
1953 if (bch_open_buckets_alloc(c)) in bch_cache_set_alloc()
1956 if (bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages))) in bch_cache_set_alloc()
1959 c->congested_read_threshold_us = 2000; in bch_cache_set_alloc()
1960 c->congested_write_threshold_us = 20000; in bch_cache_set_alloc()
1961 c->error_limit = DEFAULT_IO_ERROR_LIMIT; in bch_cache_set_alloc()
1962 c->idle_max_writeback_rate_enabled = 1; in bch_cache_set_alloc()
1963 WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags)); in bch_cache_set_alloc()
1965 return c; in bch_cache_set_alloc()
1967 bch_cache_set_unregister(c); in bch_cache_set_alloc()
1971 static int run_cache_set(struct cache_set *c) in run_cache_set() argument
1975 struct cache *ca = c->cache; in run_cache_set()
1982 c->nbuckets = ca->sb.nbuckets; in run_cache_set()
1983 set_gc_sectors(c); in run_cache_set()
1985 if (CACHE_SYNC(&c->cache->sb)) { in run_cache_set()
1990 if (bch_journal_read(c, &journal)) in run_cache_set()
2014 if (__bch_btree_ptr_invalid(c, k)) in run_cache_set()
2018 c->root = bch_btree_node_get(c, NULL, k, in run_cache_set()
2021 if (IS_ERR(c->root)) in run_cache_set()
2024 list_del_init(&c->root->list); in run_cache_set()
2025 rw_unlock(true, c->root); in run_cache_set()
2027 err = uuid_read(c, j, &cl); in run_cache_set()
2032 if (bch_btree_check(c)) in run_cache_set()
2035 bch_journal_mark(c, &journal); in run_cache_set()
2036 bch_initial_gc_finish(c); in run_cache_set()
2044 bch_journal_next(&c->journal); in run_cache_set()
2061 __uuid_write(c); in run_cache_set()
2064 if (bch_journal_replay(c, &journal)) in run_cache_set()
2076 bch_initial_gc_finish(c); in run_cache_set()
2082 mutex_lock(&c->bucket_lock); in run_cache_set()
2084 mutex_unlock(&c->bucket_lock); in run_cache_set()
2087 if (__uuid_write(c)) in run_cache_set()
2091 c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL); in run_cache_set()
2092 if (IS_ERR(c->root)) in run_cache_set()
2095 mutex_lock(&c->root->write_lock); in run_cache_set()
2096 bkey_copy_key(&c->root->key, &MAX_KEY); in run_cache_set()
2097 bch_btree_node_write(c->root, &cl); in run_cache_set()
2098 mutex_unlock(&c->root->write_lock); in run_cache_set()
2100 bch_btree_set_root(c->root); in run_cache_set()
2101 rw_unlock(true, c->root); in run_cache_set()
2108 SET_CACHE_SYNC(&c->cache->sb, true); in run_cache_set()
2110 bch_journal_next(&c->journal); in run_cache_set()
2111 bch_journal_meta(c, &cl); in run_cache_set()
2115 if (bch_gc_thread_start(c)) in run_cache_set()
2119 c->cache->sb.last_mount = (u32)ktime_get_real_seconds(); in run_cache_set()
2120 bcache_write_super(c); in run_cache_set()
2122 if (bch_has_feature_obso_large_bucket(&c->cache->sb)) in run_cache_set()
2126 bch_cached_dev_attach(dc, c, NULL); in run_cache_set()
2128 flash_devs_run(c); in run_cache_set()
2130 bch_journal_space_reserve(&c->journal); in run_cache_set()
2131 set_bit(CACHE_SET_RUNNING, &c->flags); in run_cache_set()
2142 bch_cache_set_error(c, "%s", err); in run_cache_set()
2151 struct cache_set *c; in register_cache_set() local
2153 list_for_each_entry(c, &bch_cache_sets, list) in register_cache_set()
2154 if (!memcmp(c->set_uuid, ca->sb.set_uuid, 16)) { in register_cache_set()
2155 if (c->cache) in register_cache_set()
2161 c = bch_cache_set_alloc(&ca->sb); in register_cache_set()
2162 if (!c) in register_cache_set()
2166 if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->set_uuid) || in register_cache_set()
2167 kobject_add(&c->internal, &c->kobj, "internal")) in register_cache_set()
2170 if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj)) in register_cache_set()
2173 bch_debug_init_cache_set(c); in register_cache_set()
2175 list_add(&c->list, &bch_cache_sets); in register_cache_set()
2178 if (sysfs_create_link(&ca->kobj, &c->kobj, "set") || in register_cache_set()
2179 sysfs_create_link(&c->kobj, &ca->kobj, buf)) in register_cache_set()
2183 ca->set = c; in register_cache_set()
2187 if (run_cache_set(c) < 0) in register_cache_set()
2192 bch_cache_set_unregister(c); in register_cache_set()
2414 struct cache_set *c, *tc; in bch_is_open_backing() local
2417 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) in bch_is_open_backing()
2418 list_for_each_entry_safe(dc, t, &c->cached_devs, list) in bch_is_open_backing()
2429 struct cache_set *c, *tc; in bch_is_open_cache() local
2431 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) { in bch_is_open_cache()
2432 struct cache *ca = c->cache; in bch_is_open_cache()
2676 struct cache_set *c, *tc; in bch_pending_bdevs_cleanup() local
2689 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) { in bch_pending_bdevs_cleanup()
2690 char *set_uuid = c->set_uuid; in bch_pending_bdevs_cleanup()
2723 struct cache_set *c, *tc; in bcache_reboot() local
2761 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) in bcache_reboot()
2762 bch_cache_set_stop(c); in bcache_reboot()
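
The same idiom recurs throughout the references above: a struct closure is embedded in struct cache_set, the completion callback receives only the closure pointer, and container_of() recovers the enclosing cache_set (see bcache_write_super_unlock(), uuid_endio(), uuid_io_unlock(), cache_set_free(), and cache_set_flush()). Below is a minimal userspace sketch of that pattern, not kernel code: struct closure is reduced to a placeholder, sb_write_unlock() is a hypothetical stand-in for bcache_write_super_unlock(), and container_of() is defined locally to mirror the kernel macro.

/*
 * Standalone illustration (userspace C) of the container_of pattern used
 * by the bcache callbacks listed above.  Simplified for clarity; the real
 * struct closure and semaphore handling live in the kernel sources.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct closure { int dummy; };		/* placeholder for the kernel's struct closure */

struct cache_set {
	char set_uuid[16];
	struct closure sb_write;	/* embedded member, as in bcache */
	struct closure uuid_write;
};

/* The callback receives only the closure; recover the owning cache_set. */
static void sb_write_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, sb_write);

	printf("recovered cache_set at %p\n", (void *)c);
	/* the real bcache_write_super_unlock() does up(&c->sb_write_mutex) here */
}

int main(void)
{
	struct cache_set c = { .set_uuid = "example" };

	sb_write_unlock(&c.sb_write);	/* same shape as a closure completion */
	return 0;
}

The offsetof() arithmetic is what lets bcache hand a single embedded member to the closure machinery and still reach the rest of the cache_set (its semaphores, uuids array, devices table, and so on) from the callback.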