Lines Matching refs:ca

342 struct bch_dev *ca = c ? bch2_dev_tryget_noerror(c, dev) : NULL;  in __bch2_alloc_v4_to_text()  local
362 if (ca) in __bch2_alloc_v4_to_text()
363 prt_printf(out, "fragmentation %llu\n", alloc_lru_idx_fragmentation(*a, ca)); in __bch2_alloc_v4_to_text()
367 bch2_dev_put(ca); in __bch2_alloc_v4_to_text()
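
The three matches above illustrate the reference-counting discipline used throughout this file: take a device reference with a tryget variant, dereference it only if it came back non-NULL, and drop it with bch2_dev_put(), which is called unconditionally at 367 and so must tolerate NULL. A minimal userspace sketch of that get/use/put pattern (the struct and helpers here are illustrative stand-ins, not the bcachefs API):

    #include <stdio.h>
    #include <stdlib.h>

    struct dev {
        int      refcount;
        unsigned idx;
    };

    /* Hypothetical tryget: returns NULL when no device is there. */
    static struct dev *dev_tryget(struct dev *d)
    {
        if (d)
            d->refcount++;
        return d;
    }

    /* NULL-safe put, matching the unconditional bch2_dev_put() above. */
    static void dev_put(struct dev *d)
    {
        if (d && !--d->refcount)
            free(d);
    }

    static void print_dev(struct dev *maybe)
    {
        struct dev *d = dev_tryget(maybe);

        if (d)          /* dereference only under a held reference */
            printf("dev %u\n", d->idx);

        dev_put(d);     /* put unconditionally, as at 367 */
    }
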
623 struct bch_dev *ca = NULL; in bch2_alloc_read() local
635 ca = bch2_dev_iterate(c, ca, k.k->p.inode); in bch2_alloc_read()
640 if (!ca) { in bch2_alloc_read()
647 for (u64 b = max_t(u64, ca->mi.first_bucket, start); in bch2_alloc_read()
648 b < min_t(u64, ca->mi.nbuckets, end); in bch2_alloc_read()
650 *bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK]; in bch2_alloc_read()
656 ca = bch2_dev_iterate(c, ca, k.k->p.inode); in bch2_alloc_read()
661 if (!ca) { in bch2_alloc_read()
666 if (k.k->p.offset < ca->mi.first_bucket) { in bch2_alloc_read()
667 bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode, ca->mi.first_bucket)); in bch2_alloc_read()
671 if (k.k->p.offset >= ca->mi.nbuckets) { in bch2_alloc_read()
677 *bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen; in bch2_alloc_read()
682 bch2_dev_put(ca); in bch2_alloc_read()
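
bch2_alloc_read() carries one device reference across the whole key scan, letting bch2_dev_iterate() (635, 656) swap it whenever k.k->p.inode moves to another device, and it clamps every bucket to the device's valid range: offsets below ca->mi.first_bucket are skipped forward (666-667) and offsets at or past ca->mi.nbuckets end that device's range (671). A standalone sketch of just the clamping step, assuming only the first_bucket/nbuckets semantics (the kernel code expresses it with max_t/min_t at 647-648):

    #include <stdint.h>

    /* Hypothetical helper: visit the part of [start, end) that names
     * valid buckets, i.e. its intersection with [first_bucket, nbuckets). */
    static void for_each_valid_bucket(uint64_t first_bucket, uint64_t nbuckets,
                                      uint64_t start, uint64_t end,
                                      void (*fn)(uint64_t bucket))
    {
        uint64_t b   = start > first_bucket ? start : first_bucket;
        uint64_t lim = end < nbuckets ? end : nbuckets;

        while (b < lim)
            fn(b++);
    }
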
726 struct bch_dev *ca, in bch2_bucket_do_index() argument
757 need_discard_or_freespace_err_on(ca->mi.freespace_initialized && in bch2_bucket_do_index()
804 static inline int bch2_dev_data_type_accounting_mod(struct btree_trans *trans, struct bch_dev *ca, in bch2_dev_data_type_accounting_mod() argument
814 .dev = ca->dev_idx, in bch2_dev_data_type_accounting_mod()
818 int bch2_alloc_key_to_dev_counters(struct btree_trans *trans, struct bch_dev *ca, in bch2_alloc_key_to_dev_counters() argument
826 int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type, in bch2_alloc_key_to_dev_counters()
827 1, new_sectors, bch2_bucket_sectors_fragmented(ca, *new), flags) ?: in bch2_alloc_key_to_dev_counters()
828 bch2_dev_data_type_accounting_mod(trans, ca, old->data_type, in bch2_alloc_key_to_dev_counters()
829 -1, -old_sectors, -bch2_bucket_sectors_fragmented(ca, *old), flags); in bch2_alloc_key_to_dev_counters()
833 int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type, in bch2_alloc_key_to_dev_counters()
836 bch2_bucket_sectors_fragmented(ca, *new) - in bch2_alloc_key_to_dev_counters()
837 bch2_bucket_sectors_fragmented(ca, *old), flags); in bch2_alloc_key_to_dev_counters()
845 int ret = bch2_dev_data_type_accounting_mod(trans, ca, BCH_DATA_unstriped, in bch2_alloc_key_to_dev_counters()
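
The device-counter updates above come in two shapes: if the data type changed, two accounting mods are issued, +1 bucket and +new sectors against the new type, -1 bucket and -old sectors against the old (826-829); if the type is unchanged, a single mod carries just the deltas (833-837). A toy rendition of that split, against a hypothetical per-type counter table:

    /* Hypothetical per-data-type counters for one device. */
    struct type_counters {
        long buckets;
        long sectors;
    };

    static void mod(struct type_counters *c, long buckets, long sectors)
    {
        c->buckets += buckets;
        c->sectors += sectors;
    }

    static void account_change(struct type_counters *tbl,
                               int old_type, long old_sectors,
                               int new_type, long new_sectors)
    {
        if (old_type != new_type) {
            /* two mods: add the new key's usage, retract the old key's */
            mod(&tbl[new_type],  1,  new_sectors);
            mod(&tbl[old_type], -1, -old_sectors);
        } else {
            /* one mod: the bucket count is unchanged, only sectors move */
            mod(&tbl[new_type], 0, new_sectors - old_sectors);
        }
    }
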
866 struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p); in bch2_trigger_alloc() local
867 if (!ca) in bch2_trigger_alloc()
913 ret = bch2_bucket_do_index(trans, ca, old, old_a, false) ?: in bch2_trigger_alloc()
914 bch2_bucket_do_index(trans, ca, new.s_c, new_a, true); in bch2_trigger_alloc()
933 alloc_lru_idx_fragmentation(*old_a, ca), in bch2_trigger_alloc()
934 alloc_lru_idx_fragmentation(*new_a, ca)); in bch2_trigger_alloc()
944 ret = bch2_alloc_key_to_dev_counters(trans, ca, old_a, new_a, flags); in bch2_trigger_alloc()
1003 u8 *gen = bucket_gen(ca, new.k->p.offset); in bch2_trigger_alloc()
1020 bch2_discard_one_bucket_fast(ca, new.k->p.offset); in bch2_trigger_alloc()
1024 should_invalidate_buckets(ca, bch2_dev_usage_read(ca))) in bch2_trigger_alloc()
1025 bch2_dev_do_invalidates(ca); in bch2_trigger_alloc()
1033 struct bucket *g = gc_bucket(ca, new.k->p.offset); in bch2_trigger_alloc()
1042 bch2_dev_put(ca); in bch2_trigger_alloc()
1098 static bool next_bucket(struct bch_fs *c, struct bch_dev **ca, struct bpos *bucket) in next_bucket() argument
1100 if (*ca) { in next_bucket()
1101 if (bucket->offset < (*ca)->mi.first_bucket) in next_bucket()
1102 bucket->offset = (*ca)->mi.first_bucket; in next_bucket()
1104 if (bucket->offset < (*ca)->mi.nbuckets) in next_bucket()
1107 bch2_dev_put(*ca); in next_bucket()
1108 *ca = NULL; in next_bucket()
1114 *ca = __bch2_next_dev_idx(c, bucket->inode, NULL); in next_bucket()
1115 if (*ca) { in next_bucket()
1116 *bucket = POS((*ca)->dev_idx, (*ca)->mi.first_bucket); in next_bucket()
1117 bch2_dev_get(*ca); in next_bucket()
1120 return *ca != NULL; in next_bucket()
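
next_bucket() is the cursor-advance helper behind the bucket-hole scan: while a device is held, clamp the offset up to first_bucket and stop there if it is still below nbuckets; otherwise drop the reference and step to the next device, restarting at its first bucket. The same control flow in compressed form (the types and iterator below are assumptions, not the bcachefs definitions):

    #include <stdbool.h>
    #include <stdint.h>

    /* Assumed stand-ins for the bcachefs types and helpers: */
    struct dev { uint64_t first_bucket, nbuckets; };
    struct fs;
    struct dev *next_dev(struct fs *, struct dev *);    /* may return NULL */
    void dev_get(struct dev *);
    void dev_put(struct dev *);

    struct cursor {
        struct dev *dev;        /* counted reference, may be NULL */
        uint64_t    bucket;
    };

    /* Returns false once every device has been exhausted. */
    static bool cursor_next(struct fs *fs, struct cursor *cur)
    {
        if (cur->dev) {
            if (cur->bucket < cur->dev->first_bucket)
                cur->bucket = cur->dev->first_bucket;
            if (cur->bucket < cur->dev->nbuckets)
                return true;            /* still on a valid bucket */

            dev_put(cur->dev);          /* done with this device */
            cur->dev = NULL;
        }

        cur->dev = next_dev(fs, cur->dev);
        if (cur->dev) {
            dev_get(cur->dev);
            cur->bucket = cur->dev->first_bucket;
        }
        return cur->dev != NULL;
    }
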
1125 struct bch_dev **ca, struct bkey *hole) in bch2_get_key_or_real_bucket_hole() argument
1134 *ca = bch2_dev_iterate_noerror(c, *ca, k.k->p.inode); in bch2_get_key_or_real_bucket_hole()
1139 if (!*ca || !bucket_valid(*ca, hole_start.offset)) { in bch2_get_key_or_real_bucket_hole()
1140 if (!next_bucket(c, ca, &hole_start)) in bch2_get_key_or_real_bucket_hole()
1147 if (k.k->p.offset > (*ca)->mi.nbuckets) in bch2_get_key_or_real_bucket_hole()
1148 bch2_key_resize(hole, (*ca)->mi.nbuckets - hole_start.offset); in bch2_get_key_or_real_bucket_hole()
1170 struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, alloc_k.k->p); in bch2_check_alloc_key() local
1171 if (fsck_err_on(!ca, in bch2_check_alloc_key()
1176 if (!ca) in bch2_check_alloc_key()
1179 if (!ca->mi.freespace_initialized) in bch2_check_alloc_key()
1247 bch2_dev_put(ca); in bch2_check_alloc_key()
1254 struct bch_dev *ca, in bch2_check_alloc_hole_freespace() argument
1263 if (!ca->mi.freespace_initialized) in bch2_check_alloc_hole_freespace()
1522 struct bch_dev *ca = bch2_dev_tryget_noerror(c, k.k->p.inode); in bch2_check_bucket_gens_key() local
1523 if (!ca) { in bch2_check_bucket_gens_key()
1531 if (fsck_err_on(end <= ca->mi.first_bucket || in bch2_check_bucket_gens_key()
1532 start >= ca->mi.nbuckets, in bch2_check_bucket_gens_key()
1540 for (b = start; b < ca->mi.first_bucket; b++) in bch2_check_bucket_gens_key()
1548 for (b = ca->mi.nbuckets; b < end; b++) in bch2_check_bucket_gens_key()
1568 bch2_dev_put(ca); in bch2_check_bucket_gens_key()
1577 struct bch_dev *ca = NULL; in bch2_check_alloc_info() local
1596 k = bch2_get_key_or_real_bucket_hole(trans, &iter, &ca, &hole); in bch2_check_alloc_info()
1617 ret = bch2_check_alloc_hole_freespace(trans, ca, in bch2_check_alloc_info()
1645 bch2_dev_put(ca); in bch2_check_alloc_info()
1646 ca = NULL; in bch2_check_alloc_info()
1717 struct bch_dev *ca = bch2_dev_tryget_noerror(c, alloc_k.k->p.inode); in bch2_check_alloc_to_lru_ref() local
1718 if (!ca) in bch2_check_alloc_to_lru_ref()
1723 u64 lru_idx = alloc_lru_idx_fragmentation(*a, ca); in bch2_check_alloc_to_lru_ref()
1763 bch2_dev_put(ca); in bch2_check_alloc_to_lru_ref()
1787 static int discard_in_flight_add(struct bch_dev *ca, u64 bucket, bool in_progress) in discard_in_flight_add() argument
1789 struct bch_fs *c = ca->fs; in discard_in_flight_add()
1792 mutex_lock(&ca->discard_buckets_in_flight_lock); in discard_in_flight_add()
1794 darray_find_p(ca->discard_buckets_in_flight, i, i->bucket == bucket); in discard_in_flight_add()
1800 ret = darray_push(&ca->discard_buckets_in_flight, ((struct discard_in_flight) { in discard_in_flight_add()
1805 mutex_unlock(&ca->discard_buckets_in_flight_lock); in discard_in_flight_add()
1809 static void discard_in_flight_remove(struct bch_dev *ca, u64 bucket) in discard_in_flight_remove() argument
1811 mutex_lock(&ca->discard_buckets_in_flight_lock); in discard_in_flight_remove()
1813 darray_find_p(ca->discard_buckets_in_flight, i, i->bucket == bucket); in discard_in_flight_remove()
1816 darray_remove_item(&ca->discard_buckets_in_flight, i); in discard_in_flight_remove()
1817 mutex_unlock(&ca->discard_buckets_in_flight_lock); in discard_in_flight_remove()
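
discard_in_flight_add() and discard_in_flight_remove() bracket a bucket's membership in the per-device discard_buckets_in_flight darray, always under discard_buckets_in_flight_lock; add searches first (darray_find_p) and only pushes when the bucket is absent, remove searches and deletes. A portable pthreads rendering of the same mutex-plus-list pattern, using a fixed array in place of the darray and swap-remove in place of darray's ordered removal (all names illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_IN_FLIGHT 64

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static uint64_t in_flight[MAX_IN_FLIGHT];
    static unsigned nr_in_flight;

    /* Returns false when the bucket was already tracked (or no room),
     * loosely mirroring discard_in_flight_add()'s early exit on a hit. */
    static bool in_flight_add(uint64_t bucket)
    {
        bool added = false;

        pthread_mutex_lock(&lock);
        for (unsigned i = 0; i < nr_in_flight; i++)
            if (in_flight[i] == bucket)
                goto out;
        if (nr_in_flight < MAX_IN_FLIGHT) {
            in_flight[nr_in_flight++] = bucket;
            added = true;
        }
    out:
        pthread_mutex_unlock(&lock);
        return added;
    }

    static void in_flight_remove(uint64_t bucket)
    {
        pthread_mutex_lock(&lock);
        for (unsigned i = 0; i < nr_in_flight; i++)
            if (in_flight[i] == bucket) {
                in_flight[i] = in_flight[--nr_in_flight]; /* swap-remove */
                break;
            }
        pthread_mutex_unlock(&lock);
    }
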
1828 struct bch_dev *ca, in bch2_discard_one_bucket() argument
1880 if (discard_in_flight_add(ca, iter.pos.offset, true)) in bch2_discard_one_bucket()
1890 if (bch2_discard_opt_enabled(c, ca) && !c->opts.nochanges) { in bch2_discard_one_bucket()
1896 blkdev_issue_discard(ca->disk_sb.bdev, in bch2_discard_one_bucket()
1897 k.k->p.offset * ca->mi.bucket_size, in bch2_discard_one_bucket()
1898 ca->mi.bucket_size, in bch2_discard_one_bucket()
1926 discard_in_flight_remove(ca, iter.pos.offset); in bch2_discard_one_bucket()
1936 struct bch_dev *ca = container_of(work, struct bch_dev, discard_work); in bch2_do_discards_work() local
1937 struct bch_fs *c = ca->fs; in bch2_do_discards_work()
1950 POS(ca->dev_idx, 0), in bch2_do_discards_work()
1951 POS(ca->dev_idx, U64_MAX), 0, k, in bch2_do_discards_work()
1952 bch2_discard_one_bucket(trans, ca, &iter, &discard_pos_done, &s, false))); in bch2_do_discards_work()
1954 if (s.need_journal_commit > dev_buckets_available(ca, BCH_WATERMARK_normal)) in bch2_do_discards_work()
1960 enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_dev_do_discards); in bch2_do_discards_work()
1964 void bch2_dev_do_discards(struct bch_dev *ca) in bch2_dev_do_discards() argument
1966 struct bch_fs *c = ca->fs; in bch2_dev_do_discards()
1971 if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE, BCH_DEV_WRITE_REF_dev_do_discards)) in bch2_dev_do_discards()
1974 if (queue_work(c->write_ref_wq, &ca->discard_work)) in bch2_dev_do_discards()
1977 enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_dev_do_discards); in bch2_dev_do_discards()
1984 for_each_member_device(c, ca) in bch2_do_discards()
1985 bch2_dev_do_discards(ca); in bch2_do_discards()
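
bch2_dev_do_discards() is the kickoff template shared by bch2_discard_one_bucket_fast() and bch2_dev_do_invalidates() below: take the device's WRITE io reference, queue the work item, and return if queue_work() accepted it, since the work function drops that reference when it finishes (1960); only when the item was already pending does the kicker drop the reference itself. A sketch of that ownership handoff (the ref helpers are hypothetical; queue_work() is the real kernel API and returns true when the work was newly queued):

    /* Kernel-style sketch; assumes <linux/workqueue.h> and a hypothetical
     * struct dev { struct work_struct work; ... } with ioref helpers. */
    static void kick_dev_work(struct dev *d)
    {
        if (!dev_ioref_tryget(d))          /* hypothetical ref helper */
            return;

        if (queue_work(system_wq, &d->work))
            return;                        /* worker now owns the ref */

        dev_ioref_put(d);                  /* already pending: drop ours */
    }

    static void dev_work_fn(struct work_struct *work)
    {
        struct dev *d = container_of(work, struct dev, work);

        /* ...do the background processing... */

        dev_ioref_put(d);                  /* release the queued ref */
    }
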
1989 struct bch_dev *ca, in bch2_do_discards_fast_one() argument
1996 BTREE_ID_need_discard, POS(ca->dev_idx, bucket), 0); in bch2_do_discards_fast_one()
2004 ca->dev_idx, bucket)) in bch2_do_discards_fast_one()
2007 ret = bch2_discard_one_bucket(trans, ca, &need_discard_iter, discard_pos_done, s, true); in bch2_do_discards_fast_one()
2016 struct bch_dev *ca = container_of(work, struct bch_dev, discard_fast_work); in bch2_do_discards_fast_work() local
2017 struct bch_fs *c = ca->fs; in bch2_do_discards_fast_work()
2027 mutex_lock(&ca->discard_buckets_in_flight_lock); in bch2_do_discards_fast_work()
2028 darray_for_each(ca->discard_buckets_in_flight, i) { in bch2_do_discards_fast_work()
2037 mutex_unlock(&ca->discard_buckets_in_flight_lock); in bch2_do_discards_fast_work()
2043 bch2_do_discards_fast_one(trans, ca, bucket, &discard_pos_done, &s)); in bch2_do_discards_fast_work()
2046 discard_in_flight_remove(ca, bucket); in bch2_do_discards_fast_work()
2055 enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_discard_one_bucket_fast); in bch2_do_discards_fast_work()
2059 static void bch2_discard_one_bucket_fast(struct bch_dev *ca, u64 bucket) in bch2_discard_one_bucket_fast() argument
2061 struct bch_fs *c = ca->fs; in bch2_discard_one_bucket_fast()
2063 if (discard_in_flight_add(ca, bucket, false)) in bch2_discard_one_bucket_fast()
2069 if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE, BCH_DEV_WRITE_REF_discard_one_bucket_fast)) in bch2_discard_one_bucket_fast()
2072 if (queue_work(c->write_ref_wq, &ca->discard_fast_work)) in bch2_discard_one_bucket_fast()
2075 enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_discard_one_bucket_fast); in bch2_discard_one_bucket_fast()
2081 struct bch_dev *ca, in invalidate_one_bp() argument
2102 bch2_bkey_drop_device(bkey_i_to_s(n), ca->dev_idx); in invalidate_one_bp()
2109 struct bch_dev *ca, in invalidate_one_bucket_by_bps() argument
2114 struct bpos bp_start = bucket_pos_to_bp_start(ca, bucket); in invalidate_one_bucket_by_bps()
2115 struct bpos bp_end = bucket_pos_to_bp_end(ca, bucket); in invalidate_one_bucket_by_bps()
2132 invalidate_one_bp(trans, ca, bp, last_flushed); in invalidate_one_bucket_by_bps()
2138 struct bch_dev *ca, in invalidate_one_bucket() argument
2189 bch2_check_bucket_backpointer_mismatch(trans, ca, bucket.offset, in invalidate_one_bucket()
2197 ret = invalidate_one_bucket_by_bps(trans, ca, bucket, gen, last_flushed); in invalidate_one_bucket()
2211 struct bch_dev *ca, bool *wrapped) in next_lru_key() argument
2215 k = bch2_btree_iter_peek_max(trans, iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX)); in next_lru_key()
2217 bch2_btree_iter_set_pos(trans, iter, lru_pos(ca->dev_idx, 0, 0)); in next_lru_key()
2227 struct bch_dev *ca = container_of(work, struct bch_dev, invalidate_work); in bch2_do_invalidates_work() local
2228 struct bch_fs *c = ca->fs; in bch2_do_invalidates_work()
2241 should_invalidate_buckets(ca, bch2_dev_usage_read(ca)); in bch2_do_invalidates_work()
2246 lru_pos(ca->dev_idx, 0, in bch2_do_invalidates_work()
2253 struct bkey_s_c k = next_lru_key(trans, &iter, ca, &wrapped); in bch2_do_invalidates_work()
2260 ret = invalidate_one_bucket(trans, ca, &iter, k, &last_flushed, &nr_to_invalidate); in bch2_do_invalidates_work()
2273 enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_do_invalidates); in bch2_do_invalidates_work()
2277 void bch2_dev_do_invalidates(struct bch_dev *ca) in bch2_dev_do_invalidates() argument
2279 struct bch_fs *c = ca->fs; in bch2_dev_do_invalidates()
2284 if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE, BCH_DEV_WRITE_REF_do_invalidates)) in bch2_dev_do_invalidates()
2287 if (queue_work(c->write_ref_wq, &ca->invalidate_work)) in bch2_dev_do_invalidates()
2290 enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_do_invalidates); in bch2_dev_do_invalidates()
2297 for_each_member_device(c, ca) in bch2_do_invalidates()
2298 bch2_dev_do_invalidates(ca); in bch2_do_invalidates()
2301 int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca, in bch2_dev_freespace_init() argument
2308 struct bpos end = POS(ca->dev_idx, bucket_end); in bch2_dev_freespace_init()
2314 BUG_ON(bucket_end > ca->mi.nbuckets); in bch2_dev_freespace_init()
2317 POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)), in bch2_dev_freespace_init()
2325 bch_info(ca, "%s: currently at %llu/%llu", in bch2_dev_freespace_init()
2326 __func__, iter.pos.offset, ca->mi.nbuckets); in bch2_dev_freespace_init()
2350 ret = bch2_bucket_do_index(trans, ca, k, a, true) ?: in bch2_dev_freespace_init()
2389 bch_err_msg(ca, ret, "initializing free space"); in bch2_dev_freespace_init()
2394 m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); in bch2_dev_freespace_init()
2413 for_each_member_device(c, ca) { in bch2_fs_freespace_init()
2414 if (ca->mi.freespace_initialized) in bch2_fs_freespace_init()
2422 int ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets); in bch2_fs_freespace_init()
2424 bch2_dev_put(ca); in bch2_fs_freespace_init()
2442 int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca) in bch2_dev_remove_alloc() argument
2444 struct bpos start = POS(ca->dev_idx, 0); in bch2_dev_remove_alloc()
2445 struct bpos end = POS(ca->dev_idx, U64_MAX); in bch2_dev_remove_alloc()
2464 bch2_dev_usage_remove(c, ca->dev_idx); in bch2_dev_remove_alloc()
2465 bch_err_msg(ca, ret, "removing dev alloc info"); in bch2_dev_remove_alloc()
2516 for_each_member_device_rcu(c, ca, NULL) { in bch2_recalc_capacity()
2517 struct block_device *bdev = READ_ONCE(ca->disk_sb.bdev); in bch2_recalc_capacity()
2521 if (ca->mi.state != BCH_MEMBER_STATE_rw) in bch2_recalc_capacity()
2543 dev_reserve += ca->nr_btree_reserve * 2; in bch2_recalc_capacity()
2544 dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */ in bch2_recalc_capacity()
2550 dev_reserve *= ca->mi.bucket_size; in bch2_recalc_capacity()
2552 capacity += bucket_to_sector(ca, ca->mi.nbuckets - in bch2_recalc_capacity()
2553 ca->mi.first_bucket); in bch2_recalc_capacity()
2558 ca->mi.bucket_size); in bch2_recalc_capacity()
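
Among the lines matched here, bch2_recalc_capacity() builds the per-device reserve from twice the btree-node reserve (2543) plus nbuckets >> 6 set aside for copygc (2544), scales it to sectors by the bucket size (2550), and counts usable capacity as bucket_to_sector(ca, nbuckets - first_bucket) (2552-2553). Worked through for a hypothetical device with 1,048,576 buckets of 1,024 sectors and nr_btree_reserve = 512: reserve = (512 * 2 + 1,048,576 / 64) * 1,024 = (1,024 + 16,384) * 1,024 = 17,825,792 sectors, against roughly 1,048,576 * 1,024 = 1,073,741,824 sectors of capacity, so the reserve is about 1.7% of the device.
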
2585 for_each_rw_member_rcu(c, ca) in bch2_min_rw_member_capacity()
2586 ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size); in bch2_min_rw_member_capacity()
2590 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca) in bch2_dev_has_open_write_point() argument
2599 ob->dev == ca->dev_idx) in bch2_dev_has_open_write_point()
2607 void bch2_dev_allocator_set_rw(struct bch_fs *c, struct bch_dev *ca, bool rw) in bch2_dev_allocator_set_rw() argument
2614 (ca->mi.data_allowed & BIT(i)))) in bch2_dev_allocator_set_rw()
2615 set_bit(ca->dev_idx, c->rw_devs[i].d); in bch2_dev_allocator_set_rw()
2617 clear_bit(ca->dev_idx, c->rw_devs[i].d); in bch2_dev_allocator_set_rw()
2621 void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca) in bch2_dev_allocator_remove() argument
2626 bch2_dev_allocator_set_rw(c, ca, false); in bch2_dev_allocator_remove()
2635 bch2_open_buckets_stop(c, ca, false); in bch2_dev_allocator_remove()
2652 !bch2_dev_has_open_write_point(c, ca)); in bch2_dev_allocator_remove()
2656 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca) in bch2_dev_allocator_add() argument
2660 bch2_dev_allocator_set_rw(c, ca, true); in bch2_dev_allocator_add()
2664 void bch2_dev_allocator_background_exit(struct bch_dev *ca) in bch2_dev_allocator_background_exit() argument
2666 darray_exit(&ca->discard_buckets_in_flight); in bch2_dev_allocator_background_exit()
2669 void bch2_dev_allocator_background_init(struct bch_dev *ca) in bch2_dev_allocator_background_init() argument
2671 mutex_init(&ca->discard_buckets_in_flight_lock); in bch2_dev_allocator_background_init()
2672 INIT_WORK(&ca->discard_work, bch2_do_discards_work); in bch2_dev_allocator_background_init()
2673 INIT_WORK(&ca->discard_fast_work, bch2_do_discards_fast_work); in bch2_dev_allocator_background_init()
2674 INIT_WORK(&ca->invalidate_work, bch2_do_invalidates_work); in bch2_dev_allocator_background_init()