Lines matching references to the identifier c. Each entry gives the source line number, the matching line, and the enclosing function; "argument" or "local" marks the lines where c is declared.

367 	closure_type(c, struct cache_set, sb_write);  in CLOSURE_CALLBACK()
369 up(&c->sb_write_mutex); in CLOSURE_CALLBACK()
372 void bcache_write_super(struct cache_set *c) in bcache_write_super() argument
374 struct closure *cl = &c->sb_write; in bcache_write_super()
375 struct cache *ca = c->cache; in bcache_write_super()
379 down(&c->sb_write_mutex); in bcache_write_super()
380 closure_init(cl, &c->cl); in bcache_write_super()
402 struct cache_set *c = container_of(cl, struct cache_set, uuid_write); in uuid_endio() local
404 cache_set_err_on(bio->bi_status, c, "accessing uuids"); in uuid_endio()
405 bch_bbio_free(bio, c); in uuid_endio()
411 closure_type(c, struct cache_set, uuid_write); in CLOSURE_CALLBACK()
413 up(&c->uuid_write_mutex); in CLOSURE_CALLBACK()
416 static void uuid_io(struct cache_set *c, blk_opf_t opf, struct bkey *k, in uuid_io() argument
419 struct closure *cl = &c->uuid_write; in uuid_io()
425 down(&c->uuid_write_mutex); in uuid_io()
429 struct bio *bio = bch_bbio_alloc(c); in uuid_io()
436 bch_bio_map(bio, c->uuids); in uuid_io()
438 bch_submit_bbio(bio, c, k, i); in uuid_io()
448 for (u = c->uuids; u < c->uuids + c->nr_uuids; u++) in uuid_io()
451 u - c->uuids, u->uuid, u->label, in uuid_io()
457 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl) in uuid_read() argument
461 if (__bch_btree_ptr_invalid(c, k)) in uuid_read()
464 bkey_copy(&c->uuid_bucket, k); in uuid_read()
465 uuid_io(c, REQ_OP_READ, k, cl); in uuid_read()
468 struct uuid_entry_v0 *u0 = (void *) c->uuids; in uuid_read()
469 struct uuid_entry *u1 = (void *) c->uuids; in uuid_read()
480 for (i = c->nr_uuids - 1; in uuid_read()
498 static int __uuid_write(struct cache_set *c) in __uuid_write() argument
502 struct cache *ca = c->cache; in __uuid_write()
508 if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true)) in __uuid_write()
513 uuid_io(c, REQ_OP_WRITE, &k.key, &cl); in __uuid_write()
519 bkey_copy(&c->uuid_bucket, &k.key); in __uuid_write()
520 bkey_put(c, &k.key); in __uuid_write()
524 int bch_uuid_write(struct cache_set *c) in bch_uuid_write() argument
526 int ret = __uuid_write(c); in bch_uuid_write()
529 bch_journal_meta(c, NULL); in bch_uuid_write()
534 static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid) in uuid_find() argument
538 for (u = c->uuids; in uuid_find()
539 u < c->uuids + c->nr_uuids; u++) in uuid_find()
546 static struct uuid_entry *uuid_find_empty(struct cache_set *c) in uuid_find_empty() argument
551 return uuid_find(c, zero_uuid); in uuid_find_empty()
792 if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) { in bcache_device_unlink()
793 struct cache *ca = d->c->cache; in bcache_device_unlink()
795 sysfs_remove_link(&d->c->kobj, d->name); in bcache_device_unlink()
802 static void bcache_device_link(struct bcache_device *d, struct cache_set *c, in bcache_device_link() argument
805 struct cache *ca = c->cache; in bcache_device_link()
813 ret = sysfs_create_link(&d->kobj, &c->kobj, "cache"); in bcache_device_link()
817 ret = sysfs_create_link(&c->kobj, &d->kobj, d->name); in bcache_device_link()
828 atomic_dec(&d->c->attached_dev_nr); in bcache_device_detach()
831 struct uuid_entry *u = d->c->uuids + d->id; in bcache_device_detach()
836 bch_uuid_write(d->c); in bcache_device_detach()
841 d->c->devices[d->id] = NULL; in bcache_device_detach()
842 closure_put(&d->c->caching); in bcache_device_detach()
843 d->c = NULL; in bcache_device_detach()
846 static void bcache_device_attach(struct bcache_device *d, struct cache_set *c, in bcache_device_attach() argument
850 d->c = c; in bcache_device_attach()
851 c->devices[id] = d; in bcache_device_attach()
853 if (id >= c->devices_max_used) in bcache_device_attach()
854 c->devices_max_used = id + 1; in bcache_device_attach()
856 closure_get(&c->caching); in bcache_device_attach()
880 if (d->c) in bcache_device_free()
993 static void calc_cached_dev_sectors(struct cache_set *c) in calc_cached_dev_sectors() argument
998 list_for_each_entry(dc, &c->cached_devs, list) in calc_cached_dev_sectors()
1001 c->cached_dev_sectors = sectors; in calc_cached_dev_sectors()
1066 if (!d->c && in bch_cached_dev_run()
1136 struct cache_set *c = dc->disk.c; in cached_dev_detach_finish() local
1154 calc_cached_dev_sectors(c); in cached_dev_detach_finish()
1188 int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, in bch_cached_dev_attach() argument
1196 if ((set_uuid && memcmp(set_uuid, c->set_uuid, 16)) || in bch_cached_dev_attach()
1197 (!set_uuid && memcmp(dc->sb.set_uuid, c->set_uuid, 16))) in bch_cached_dev_attach()
1200 if (dc->disk.c) { in bch_cached_dev_attach()
1205 if (test_bit(CACHE_SET_STOPPING, &c->flags)) { in bch_cached_dev_attach()
1210 if (dc->sb.block_size < c->cache->sb.block_size) { in bch_cached_dev_attach()
1218 list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) { in bch_cached_dev_attach()
1227 u = uuid_find(c, dc->sb.uuid); in bch_cached_dev_attach()
1243 u = uuid_find_empty(c); in bch_cached_dev_attach()
1263 bch_uuid_write(c); in bch_cached_dev_attach()
1265 memcpy(dc->sb.set_uuid, c->set_uuid, 16); in bch_cached_dev_attach()
1272 bch_uuid_write(c); in bch_cached_dev_attach()
1275 bcache_device_attach(&dc->disk, c, u - c->uuids); in bch_cached_dev_attach()
1276 list_move(&dc->list, &c->cached_devs); in bch_cached_dev_attach()
1277 calc_cached_dev_sectors(c); in bch_cached_dev_attach()
1317 bcache_device_link(&dc->disk, c, "bdev"); in bch_cached_dev_attach()
1318 atomic_inc(&c->attached_dev_nr); in bch_cached_dev_attach()
1320 if (bch_has_feature_obso_large_bucket(&(c->cache->sb))) { in bch_cached_dev_attach()
1332 dc->disk.c->set_uuid); in bch_cached_dev_attach()
1446 struct cache_set *c; in register_bdev() local
1467 list_for_each_entry(c, &bch_cache_sets, list) in register_bdev()
1468 bch_cached_dev_attach(dc, c, NULL); in register_bdev()
1501 &d->c->flash_dev_dirty_sectors); in CLOSURE_CALLBACK()
1519 static int flash_dev_run(struct cache_set *c, struct uuid_entry *u) in flash_dev_run() argument
1532 if (bcache_device_init(d, block_bytes(c->cache), u->sectors, in flash_dev_run()
1536 bcache_device_attach(d, c, u - c->uuids); in flash_dev_run()
1547 bcache_device_link(d, c, "volume"); in flash_dev_run()
1549 if (bch_has_feature_obso_large_bucket(&c->cache->sb)) { in flash_dev_run()
1562 static int flash_devs_run(struct cache_set *c) in flash_devs_run() argument
1567 for (u = c->uuids; in flash_devs_run()
1568 u < c->uuids + c->nr_uuids && !ret; in flash_devs_run()
1571 ret = flash_dev_run(c, u); in flash_devs_run()
1576 int bch_flash_dev_create(struct cache_set *c, uint64_t size) in bch_flash_dev_create() argument
1580 if (test_bit(CACHE_SET_STOPPING, &c->flags)) in bch_flash_dev_create()
1583 if (!test_bit(CACHE_SET_RUNNING, &c->flags)) in bch_flash_dev_create()
1586 u = uuid_find_empty(c); in bch_flash_dev_create()
1599 bch_uuid_write(c); in bch_flash_dev_create()
1601 return flash_dev_run(c, u); in bch_flash_dev_create()
1623 bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) in bch_cache_set_error() argument
1628 if (c->on_error != ON_ERROR_PANIC && in bch_cache_set_error()
1629 test_bit(CACHE_SET_STOPPING, &c->flags)) in bch_cache_set_error()
1632 if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags)) in bch_cache_set_error()
1646 c->set_uuid, &vaf); in bch_cache_set_error()
1650 if (c->on_error == ON_ERROR_PANIC) in bch_cache_set_error()
1653 bch_cache_set_unregister(c); in bch_cache_set_error()
1660 struct cache_set *c = container_of(kobj, struct cache_set, kobj); in bch_cache_set_release() local
1662 kfree(c); in bch_cache_set_release()
1668 closure_type(c, struct cache_set, cl); in CLOSURE_CALLBACK()
1671 debugfs_remove(c->debug); in CLOSURE_CALLBACK()
1673 bch_open_buckets_free(c); in CLOSURE_CALLBACK()
1674 bch_btree_cache_free(c); in CLOSURE_CALLBACK()
1675 bch_journal_free(c); in CLOSURE_CALLBACK()
1678 bch_bset_sort_state_free(&c->sort); in CLOSURE_CALLBACK()
1679 free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->cache->sb))); in CLOSURE_CALLBACK()
1681 ca = c->cache; in CLOSURE_CALLBACK()
1684 c->cache = NULL; in CLOSURE_CALLBACK()
1689 if (c->moving_gc_wq) in CLOSURE_CALLBACK()
1690 destroy_workqueue(c->moving_gc_wq); in CLOSURE_CALLBACK()
1691 bioset_exit(&c->bio_split); in CLOSURE_CALLBACK()
1692 mempool_exit(&c->fill_iter); in CLOSURE_CALLBACK()
1693 mempool_exit(&c->bio_meta); in CLOSURE_CALLBACK()
1694 mempool_exit(&c->search); in CLOSURE_CALLBACK()
1695 kfree(c->devices); in CLOSURE_CALLBACK()
1697 list_del(&c->list); in CLOSURE_CALLBACK()
1700 pr_info("Cache set %pU unregistered\n", c->set_uuid); in CLOSURE_CALLBACK()
1703 closure_debug_destroy(&c->cl); in CLOSURE_CALLBACK()
1704 kobject_put(&c->kobj); in CLOSURE_CALLBACK()
1709 closure_type(c, struct cache_set, caching); in CLOSURE_CALLBACK()
1710 struct cache *ca = c->cache; in CLOSURE_CALLBACK()
1713 bch_cache_accounting_destroy(&c->accounting); in CLOSURE_CALLBACK()
1715 kobject_put(&c->internal); in CLOSURE_CALLBACK()
1716 kobject_del(&c->kobj); in CLOSURE_CALLBACK()
1718 if (!IS_ERR_OR_NULL(c->gc_thread)) in CLOSURE_CALLBACK()
1719 kthread_stop(c->gc_thread); in CLOSURE_CALLBACK()
1721 if (!IS_ERR_OR_NULL(c->root)) in CLOSURE_CALLBACK()
1722 list_add(&c->root->list, &c->btree_cache); in CLOSURE_CALLBACK()
1728 if (!test_bit(CACHE_SET_IO_DISABLE, &c->flags)) in CLOSURE_CALLBACK()
1729 list_for_each_entry(b, &c->btree_cache, list) { in CLOSURE_CALLBACK()
1744 if (c->journal.cur) { in CLOSURE_CALLBACK()
1745 cancel_delayed_work_sync(&c->journal.work); in CLOSURE_CALLBACK()
1747 c->journal.work.work.func(&c->journal.work.work); in CLOSURE_CALLBACK()
1769 static void conditional_stop_bcache_device(struct cache_set *c, in conditional_stop_bcache_device() argument
1775 d->disk->disk_name, c->set_uuid); in conditional_stop_bcache_device()
1811 closure_type(c, struct cache_set, caching); in CLOSURE_CALLBACK()
1818 for (i = 0; i < c->devices_max_used; i++) { in CLOSURE_CALLBACK()
1819 d = c->devices[i]; in CLOSURE_CALLBACK()
1823 if (!UUID_FLASH_ONLY(&c->uuids[i]) && in CLOSURE_CALLBACK()
1824 test_bit(CACHE_SET_UNREGISTERING, &c->flags)) { in CLOSURE_CALLBACK()
1827 if (test_bit(CACHE_SET_IO_DISABLE, &c->flags)) in CLOSURE_CALLBACK()
1828 conditional_stop_bcache_device(c, d, dc); in CLOSURE_CALLBACK()
1839 void bch_cache_set_stop(struct cache_set *c) in bch_cache_set_stop() argument
1841 if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags)) in bch_cache_set_stop()
1843 closure_queue(&c->caching); in bch_cache_set_stop()
1846 void bch_cache_set_unregister(struct cache_set *c) in bch_cache_set_unregister() argument
1848 set_bit(CACHE_SET_UNREGISTERING, &c->flags); in bch_cache_set_unregister()
1849 bch_cache_set_stop(c); in bch_cache_set_unregister()
1859 struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL); in bch_cache_set_alloc() local
1861 if (!c) in bch_cache_set_alloc()
1865 closure_init(&c->cl, NULL); in bch_cache_set_alloc()
1866 set_closure_fn(&c->cl, cache_set_free, system_wq); in bch_cache_set_alloc()
1868 closure_init(&c->caching, &c->cl); in bch_cache_set_alloc()
1869 set_closure_fn(&c->caching, __cache_set_unregister, system_wq); in bch_cache_set_alloc()
1872 closure_set_stopped(&c->cl); in bch_cache_set_alloc()
1873 closure_put(&c->cl); in bch_cache_set_alloc()
1875 kobject_init(&c->kobj, &bch_cache_set_ktype); in bch_cache_set_alloc()
1876 kobject_init(&c->internal, &bch_cache_set_internal_ktype); in bch_cache_set_alloc()
1878 bch_cache_accounting_init(&c->accounting, &c->cl); in bch_cache_set_alloc()
1880 memcpy(c->set_uuid, sb->set_uuid, 16); in bch_cache_set_alloc()
1882 c->cache = ca; in bch_cache_set_alloc()
1883 c->cache->set = c; in bch_cache_set_alloc()
1884 c->bucket_bits = ilog2(sb->bucket_size); in bch_cache_set_alloc()
1885 c->block_bits = ilog2(sb->block_size); in bch_cache_set_alloc()
1886 c->nr_uuids = meta_bucket_bytes(sb) / sizeof(struct uuid_entry); in bch_cache_set_alloc()
1887 c->devices_max_used = 0; in bch_cache_set_alloc()
1888 atomic_set(&c->attached_dev_nr, 0); in bch_cache_set_alloc()
1889 c->btree_pages = meta_bucket_pages(sb); in bch_cache_set_alloc()
1890 if (c->btree_pages > BTREE_MAX_PAGES) in bch_cache_set_alloc()
1891 c->btree_pages = max_t(int, c->btree_pages / 4, in bch_cache_set_alloc()
1894 sema_init(&c->sb_write_mutex, 1); in bch_cache_set_alloc()
1895 mutex_init(&c->bucket_lock); in bch_cache_set_alloc()
1896 init_waitqueue_head(&c->btree_cache_wait); in bch_cache_set_alloc()
1897 spin_lock_init(&c->btree_cannibalize_lock); in bch_cache_set_alloc()
1898 init_waitqueue_head(&c->bucket_wait); in bch_cache_set_alloc()
1899 init_waitqueue_head(&c->gc_wait); in bch_cache_set_alloc()
1900 sema_init(&c->uuid_write_mutex, 1); in bch_cache_set_alloc()
1902 spin_lock_init(&c->btree_gc_time.lock); in bch_cache_set_alloc()
1903 spin_lock_init(&c->btree_split_time.lock); in bch_cache_set_alloc()
1904 spin_lock_init(&c->btree_read_time.lock); in bch_cache_set_alloc()
1906 bch_moving_init_cache_set(c); in bch_cache_set_alloc()
1908 INIT_LIST_HEAD(&c->list); in bch_cache_set_alloc()
1909 INIT_LIST_HEAD(&c->cached_devs); in bch_cache_set_alloc()
1910 INIT_LIST_HEAD(&c->btree_cache); in bch_cache_set_alloc()
1911 INIT_LIST_HEAD(&c->btree_cache_freeable); in bch_cache_set_alloc()
1912 INIT_LIST_HEAD(&c->btree_cache_freed); in bch_cache_set_alloc()
1913 INIT_LIST_HEAD(&c->data_buckets); in bch_cache_set_alloc()
1919 c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL); in bch_cache_set_alloc()
1920 if (!c->devices) in bch_cache_set_alloc()
1923 if (mempool_init_slab_pool(&c->search, 32, bch_search_cache)) in bch_cache_set_alloc()
1926 if (mempool_init_kmalloc_pool(&c->bio_meta, 2, in bch_cache_set_alloc()
1931 if (mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size)) in bch_cache_set_alloc()
1934 if (bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio), in bch_cache_set_alloc()
1938 c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb); in bch_cache_set_alloc()
1939 if (!c->uuids) in bch_cache_set_alloc()
1942 c->moving_gc_wq = alloc_workqueue("bcache_gc", WQ_MEM_RECLAIM, 0); in bch_cache_set_alloc()
1943 if (!c->moving_gc_wq) in bch_cache_set_alloc()
1946 if (bch_journal_alloc(c)) in bch_cache_set_alloc()
1949 if (bch_btree_cache_alloc(c)) in bch_cache_set_alloc()
1952 if (bch_open_buckets_alloc(c)) in bch_cache_set_alloc()
1955 if (bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages))) in bch_cache_set_alloc()
1958 c->congested_read_threshold_us = 2000; in bch_cache_set_alloc()
1959 c->congested_write_threshold_us = 20000; in bch_cache_set_alloc()
1960 c->error_limit = DEFAULT_IO_ERROR_LIMIT; in bch_cache_set_alloc()
1961 c->idle_max_writeback_rate_enabled = 1; in bch_cache_set_alloc()
1962 WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags)); in bch_cache_set_alloc()
1964 return c; in bch_cache_set_alloc()
1966 bch_cache_set_unregister(c); in bch_cache_set_alloc()
1970 static int run_cache_set(struct cache_set *c) in run_cache_set() argument
1974 struct cache *ca = c->cache; in run_cache_set()
1981 c->nbuckets = ca->sb.nbuckets; in run_cache_set()
1982 set_gc_sectors(c); in run_cache_set()
1984 if (CACHE_SYNC(&c->cache->sb)) { in run_cache_set()
1989 if (bch_journal_read(c, &journal)) in run_cache_set()
2013 if (__bch_btree_ptr_invalid(c, k)) in run_cache_set()
2017 c->root = bch_btree_node_get(c, NULL, k, in run_cache_set()
2020 if (IS_ERR(c->root)) in run_cache_set()
2023 list_del_init(&c->root->list); in run_cache_set()
2024 rw_unlock(true, c->root); in run_cache_set()
2026 err = uuid_read(c, j, &cl); in run_cache_set()
2031 if (bch_btree_check(c)) in run_cache_set()
2034 bch_journal_mark(c, &journal); in run_cache_set()
2035 bch_initial_gc_finish(c); in run_cache_set()
2043 bch_journal_next(&c->journal); in run_cache_set()
2060 __uuid_write(c); in run_cache_set()
2063 if (bch_journal_replay(c, &journal)) in run_cache_set()
2075 bch_initial_gc_finish(c); in run_cache_set()
2081 mutex_lock(&c->bucket_lock); in run_cache_set()
2083 mutex_unlock(&c->bucket_lock); in run_cache_set()
2086 if (__uuid_write(c)) in run_cache_set()
2090 c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL); in run_cache_set()
2091 if (IS_ERR(c->root)) in run_cache_set()
2094 mutex_lock(&c->root->write_lock); in run_cache_set()
2095 bkey_copy_key(&c->root->key, &MAX_KEY); in run_cache_set()
2096 bch_btree_node_write(c->root, &cl); in run_cache_set()
2097 mutex_unlock(&c->root->write_lock); in run_cache_set()
2099 bch_btree_set_root(c->root); in run_cache_set()
2100 rw_unlock(true, c->root); in run_cache_set()
2107 SET_CACHE_SYNC(&c->cache->sb, true); in run_cache_set()
2109 bch_journal_next(&c->journal); in run_cache_set()
2110 bch_journal_meta(c, &cl); in run_cache_set()
2114 if (bch_gc_thread_start(c)) in run_cache_set()
2118 c->cache->sb.last_mount = (u32)ktime_get_real_seconds(); in run_cache_set()
2119 bcache_write_super(c); in run_cache_set()
2121 if (bch_has_feature_obso_large_bucket(&c->cache->sb)) in run_cache_set()
2125 bch_cached_dev_attach(dc, c, NULL); in run_cache_set()
2127 flash_devs_run(c); in run_cache_set()
2129 bch_journal_space_reserve(&c->journal); in run_cache_set()
2130 set_bit(CACHE_SET_RUNNING, &c->flags); in run_cache_set()
2141 bch_cache_set_error(c, "%s", err); in run_cache_set()
2150 struct cache_set *c; in register_cache_set() local
2152 list_for_each_entry(c, &bch_cache_sets, list) in register_cache_set()
2153 if (!memcmp(c->set_uuid, ca->sb.set_uuid, 16)) { in register_cache_set()
2154 if (c->cache) in register_cache_set()
2160 c = bch_cache_set_alloc(&ca->sb); in register_cache_set()
2161 if (!c) in register_cache_set()
2165 if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->set_uuid) || in register_cache_set()
2166 kobject_add(&c->internal, &c->kobj, "internal")) in register_cache_set()
2169 if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj)) in register_cache_set()
2172 bch_debug_init_cache_set(c); in register_cache_set()
2174 list_add(&c->list, &bch_cache_sets); in register_cache_set()
2177 if (sysfs_create_link(&ca->kobj, &c->kobj, "set") || in register_cache_set()
2178 sysfs_create_link(&c->kobj, &ca->kobj, buf)) in register_cache_set()
2182 ca->set = c; in register_cache_set()
2186 if (run_cache_set(c) < 0) in register_cache_set()
2191 bch_cache_set_unregister(c); in register_cache_set()
2444 struct cache_set *c, *tc; in bch_is_open_backing() local
2447 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) in bch_is_open_backing()
2448 list_for_each_entry_safe(dc, t, &c->cached_devs, list) in bch_is_open_backing()
2459 struct cache_set *c, *tc; in bch_is_open_cache() local
2461 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) { in bch_is_open_cache()
2462 struct cache *ca = c->cache; in bch_is_open_cache()
2703 struct cache_set *c, *tc; in bch_pending_bdevs_cleanup() local
2716 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) { in bch_pending_bdevs_cleanup()
2717 char *set_uuid = c->set_uuid; in bch_pending_bdevs_cleanup()
2750 struct cache_set *c, *tc; in bcache_reboot() local
2788 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) in bcache_reboot()
2789 bch_cache_set_stop(c); in bcache_reboot()
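
Many of the matches above recover struct cache_set *c from an embedded member pointer, either directly with container_of() (for example the uuid_endio() match at source line 402) or via bcache's closure_type() macro, which performs the same container_of() computation inside a closure callback. The following is a minimal, self-contained user-space C sketch of that idiom, not bcache code: the struct names cache_set_demo, the sb_write member, and the sb_write_done() callback are stand-ins chosen for illustration.

/*
 * User-space illustration of the container_of idiom used throughout
 * the listing: given a pointer to a member embedded in a larger
 * struct, recover a pointer to the enclosing struct.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct closure {			/* stand-in for bcache's struct closure */
	int remaining;
};

struct cache_set_demo {			/* stand-in for struct cache_set */
	char name[16];
	struct closure sb_write;	/* embedded member, as in bcache */
};

/* Completion callback: only the embedded closure pointer is passed in. */
static void sb_write_done(struct closure *cl)
{
	struct cache_set_demo *c =
		container_of(cl, struct cache_set_demo, sb_write);

	printf("superblock write finished for set %s\n", c->name);
}

int main(void)
{
	struct cache_set_demo c = { .name = "demo-set" };

	sb_write_done(&c.sb_write);	/* recovers &c from &c.sb_write */
	return 0;
}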