Lines matching refs: ca
73 for_each_member_device_rcu(c, ca, NULL) in bch2_reset_alloc_cursors()
74 memset(ca->alloc_cursor, 0, sizeof(ca->alloc_cursor)); in bch2_reset_alloc_cursors()
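
Lines 73-74 reset every device's allocation cursors by zeroing the whole array; lines 295 and 384 further down show that the array is indexed by the request's btree-bitmap class. A minimal userspace model of that state, assuming three bitmap classes and a plain array of devices in place of the RCU-protected member list:

#include <stdint.h>
#include <string.h>

#define ALLOC_CURSOR_CLASSES 3	/* assumption: one cursor per btree-bitmap class */

struct dev_model {
	uint64_t alloc_cursor[ALLOC_CURSOR_CLASSES];
};

/* models bch2_reset_alloc_cursors(): zero every device's cursor array so
 * the next allocation scan starts from the first bucket again */
void reset_alloc_cursors(struct dev_model *devs, unsigned nr_devs)
{
	for (unsigned i = 0; i < nr_devs; i++)
		memset(devs[i].alloc_cursor, 0, sizeof(devs[i].alloc_cursor));
}
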
102 struct bch_dev *ca = ob_dev(c, ob); in __bch2_open_bucket_put() local
121 ca->nr_open_buckets--; in __bch2_open_bucket_put()
154 static inline bool is_superblock_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b) in is_superblock_bucket() argument
159 return bch2_is_superblock_bucket(ca, b); in is_superblock_bucket()
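
The helper at lines 154-159 just forwards to bch2_is_superblock_bucket(); the point is that buckets holding a superblock copy must never be handed out by the allocator. A sketch of the underlying membership test, where the layout representation is an assumption for illustration:

#include <stdbool.h>
#include <stdint.h>

struct sb_layout_model {
	unsigned	nr;		/* number of superblock copies */
	const uint64_t *sb_bucket;	/* bucket holding each copy */
};

/* a bucket is off-limits if any superblock copy lives in it */
bool is_superblock_bucket_model(const struct sb_layout_model *l, uint64_t b)
{
	for (unsigned i = 0; i < l->nr; i++)
		if (l->sb_bucket[i] == b)
			return true;
	return false;
}
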
212 struct bch_dev *ca = req->ca; in __try_alloc_bucket() local
214 if (unlikely(is_superblock_bucket(c, ca, bucket))) in __try_alloc_bucket()
217 if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) { in __try_alloc_bucket()
234 if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) { in __try_alloc_bucket()
244 ob->sectors_free = ca->mi.bucket_size; in __try_alloc_bucket()
245 ob->dev = ca->dev_idx; in __try_alloc_bucket()
250 ca->nr_open_buckets++; in __try_alloc_bucket()
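
Lines 212-250 show the admission checks in __try_alloc_bucket(): reject superblock buckets, buckets marked not-for-use, and buckets that are already open, then initialize the open_bucket and bump the per-device open count (the decrement at line 121 is the matching release). A userspace model of that gate, with predicate callbacks standing in for the real helpers:

#include <stdbool.h>
#include <stdint.h>

struct ob_model {
	uint32_t sectors_free;
	uint8_t  dev;
	uint64_t bucket;
};

struct alloc_dev_model {
	bool (*is_superblock_bucket)(uint64_t b);
	bool (*is_nouse)(uint64_t b);	/* models ca->buckets_nouse */
	bool (*is_open)(uint64_t b);	/* models bch2_bucket_is_open() */
	uint32_t bucket_size;
	uint8_t	 dev_idx;
	unsigned nr_open_buckets;
};

/* models __try_alloc_bucket(): gate the bucket, then claim it */
bool try_alloc_bucket_model(struct alloc_dev_model *ca, uint64_t b,
			    struct ob_model *ob)
{
	if (ca->is_superblock_bucket(b) ||
	    ca->is_nouse(b) ||
	    ca->is_open(b))
		return false;

	ob->sectors_free = ca->bucket_size;	/* line 244: whole bucket free */
	ob->dev		 = ca->dev_idx;
	ob->bucket	 = b;
	ca->nr_open_buckets++;			/* paired with the -- at line 121 */
	return true;
}
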
268 if (!may_alloc_bucket(c, req, POS(req->ca->dev_idx, b))) in try_alloc_bucket()
290 struct bch_dev *ca = req->ca; in bch2_bucket_alloc_early() local
294 u64 first_bucket = ca->mi.first_bucket; in bch2_bucket_alloc_early()
295 u64 *dev_alloc_cursor = &ca->alloc_cursor[req->btree_bitmap]; in bch2_bucket_alloc_early()
310 for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor), in bch2_bucket_alloc_early()
314 if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets))) in bch2_bucket_alloc_early()
318 req->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca, in bch2_bucket_alloc_early()
319 bucket_to_sector(ca, bucket), ca->mi.bucket_size)) { in bch2_bucket_alloc_early()
321 bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift) in bch2_bucket_alloc_early()
324 bucket = sector_to_bucket(ca, in bch2_bucket_alloc_early()
325 round_up(bucket_to_sector(ca, bucket) + 1, in bch2_bucket_alloc_early()
326 1ULL << ca->mi.btree_bitmap_shift)); in bch2_bucket_alloc_early()
327 bch2_btree_iter_set_pos(trans, &iter, POS(ca->dev_idx, bucket)); in bch2_bucket_alloc_early()
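
Lines 318-327 are the btree-bitmap skip in bch2_bucket_alloc_early(): when a bucket's sectors are not marked the way the request wants, the iterator jumps to the first bucket of the next bitmap granule instead of testing every bucket in between. A self-contained worked instance of the arithmetic, with illustrative sizes:

#include <stdint.h>
#include <stdio.h>

/* round_up to a power-of-two alignment, as the kernel macro does */
static uint64_t round_up_u64(uint64_t v, uint64_t align)
{
	return (v + align - 1) & ~(align - 1);
}

int main(void)
{
	uint64_t bucket_size  = 1024;	/* sectors per bucket (assumption) */
	uint64_t bitmap_shift = 20;	/* granule = 1 << 20 sectors (assumption) */
	uint64_t bucket       = 3000;

	/* lines 324-326: next bucket at or past the next bitmap granule */
	uint64_t sector = bucket * bucket_size;
	uint64_t next   = round_up_u64(sector + 1, 1ULL << bitmap_shift)
			/ bucket_size;

	printf("skip from bucket %llu to %llu\n",
	       (unsigned long long)bucket, (unsigned long long)next);
	return 0;
}

With a 1024-sector bucket and a 2^20-sector granule, bucket 3000 skips ahead to bucket 3072, the first bucket of the next granule.
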
380 struct bch_dev *ca = req->ca; in bch2_bucket_alloc_freelist() local
384 u64 *dev_alloc_cursor = &ca->alloc_cursor[req->btree_bitmap]; in bch2_bucket_alloc_freelist()
385 u64 alloc_start = max_t(u64, ca->mi.first_bucket, READ_ONCE(*dev_alloc_cursor)); in bch2_bucket_alloc_freelist()
390 POS(ca->dev_idx, alloc_cursor), in bch2_bucket_alloc_freelist()
391 POS(ca->dev_idx, U64_MAX), in bch2_bucket_alloc_freelist()
404 req->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca, in bch2_bucket_alloc_freelist()
405 bucket_to_sector(ca, bucket), ca->mi.bucket_size)) { in bch2_bucket_alloc_freelist()
407 bucket_to_sector(ca, bucket) > 64ULL << ca->mi.btree_bitmap_shift) in bch2_bucket_alloc_freelist()
410 bucket = sector_to_bucket(ca, in bch2_bucket_alloc_freelist()
411 round_up(bucket_to_sector(ca, bucket + 1), in bch2_bucket_alloc_freelist()
412 1ULL << ca->mi.btree_bitmap_shift)); in bch2_bucket_alloc_freelist()
415 bch2_btree_iter_set_pos(trans, &iter, POS(ca->dev_idx, alloc_cursor)); in bch2_bucket_alloc_freelist()
443 if (!ob && alloc_start > ca->mi.first_bucket) { in bch2_bucket_alloc_freelist()
444 alloc_cursor = alloc_start = ca->mi.first_bucket; in bch2_bucket_alloc_freelist()
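
Lines 443-444 implement the wrap-around in bch2_bucket_alloc_freelist(): the scan starts at the saved cursor, and if nothing turns up before the end of the device, it restarts once from first_bucket so buckets freed behind the cursor remain reachable. A minimal model over a plain bitmap; using 0 for "not found" and the cursor-update detail are assumptions:

#include <stdbool.h>
#include <stdint.h>

struct freelist_model {
	bool	 *is_free;	/* is_free[b]: bucket b is on the freelist */
	uint64_t  nbuckets;
};

static uint64_t scan_range(const struct freelist_model *fl,
			   uint64_t start, uint64_t end)
{
	for (uint64_t b = start; b < end; b++)
		if (fl->is_free[b])
			return b;
	return 0;	/* bucket 0 assumed unallocatable, so 0 = not found */
}

uint64_t alloc_with_wrap(const struct freelist_model *fl,
			 uint64_t first_bucket, uint64_t *cursor)
{
	uint64_t start = *cursor > first_bucket ? *cursor : first_bucket;
	uint64_t b = scan_range(fl, start, fl->nbuckets);

	/* lines 443-444: nothing past the cursor, retry once from the start */
	if (!b && start > first_bucket)
		b = scan_range(fl, first_bucket, fl->nbuckets);

	if (b)
		*cursor = b + 1;
	return b;
}
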
460 prt_printf(&buf, "dev\t%s (%u)\n", req->ca->name, req->ca->dev_idx); in trace_bucket_alloc2()
465 prt_printf(&buf, "avail\t%llu\n", dev_buckets_free(req->ca, req->usage, req->watermark)); in trace_bucket_alloc2()
502 struct bch_dev *ca = req->ca; in bch2_bucket_alloc_trans() local
504 bool freespace = READ_ONCE(ca->mi.freespace_initialized); in bch2_bucket_alloc_trans()
511 bch2_dev_usage_read_fast(ca, &req->usage); in bch2_bucket_alloc_trans()
512 avail = dev_buckets_free(ca, req->usage, req->watermark); in bch2_bucket_alloc_trans()
515 min(avail, ca->mi.nbuckets >> 7)) in bch2_bucket_alloc_trans()
516 bch2_dev_do_discards(ca); in bch2_bucket_alloc_trans()
521 if (should_invalidate_buckets(ca, req->usage)) in bch2_bucket_alloc_trans()
522 bch2_dev_do_invalidates(ca); in bch2_bucket_alloc_trans()
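
Lines 511-522 kick background work from the allocation path: the discard worker is woken when some quantity exceeds min(avail, nbuckets >> 7), roughly 0.8% of the device, and the invalidate worker is woken when should_invalidate_buckets() says cached buckets are worth reclaiming. The threshold shape is visible in the listing; comparing it against the need-discard count is an assumption in this sketch:

#include <stdbool.h>
#include <stdint.h>

struct dev_usage_model {
	uint64_t avail;		/* dev_buckets_free() result */
	uint64_t nbuckets;
	uint64_t need_discard;	/* assumption: buckets awaiting discard */
};

void maybe_kick_background_work(const struct dev_usage_model *u,
				bool should_invalidate,
				void (*do_discards)(void),
				void (*do_invalidates)(void))
{
	uint64_t cap	   = u->nbuckets >> 7;	/* ~0.8% of the device */
	uint64_t threshold = u->avail < cap ? u->avail : cap;

	if (u->need_discard > threshold)	/* lines 514-516 */
		do_discards();

	if (should_invalidate)			/* lines 521-522 */
		do_invalidates();
}
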
580 struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca, in bch2_bucket_alloc() argument
589 .ca = ca, in bch2_bucket_alloc()
650 static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca, in bch2_dev_stripe_increment_inlined() argument
663 u64 *v = stripe->next_alloc + ca->dev_idx; in bch2_dev_stripe_increment_inlined()
664 u64 free_space = __dev_buckets_available(ca, *usage, BCH_WATERMARK_normal); in bch2_dev_stripe_increment_inlined()
677 void bch2_dev_stripe_increment(struct bch_dev *ca, in bch2_dev_stripe_increment() argument
682 bch2_dev_usage_read_fast(ca, &usage); in bch2_dev_stripe_increment()
683 bch2_dev_stripe_increment_inlined(ca, stripe, &usage); in bch2_dev_stripe_increment()
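
Lines 663-664 show the stripe state: one next_alloc key per device, bumped using that device's free space at the normal watermark. The idea is that devices are picked in order of smallest key, and each pick adds an amount inversely proportional to free space, so emptier devices accumulate slowly and come up more often. A model of that weighting; the 48-bit fixed point and the rescale step are assumptions based on a reading of the surrounding code, not the exact upstream arithmetic:

#include <stdint.h>

#define NR_DEVS 4	/* illustrative */

struct stripe_model {
	uint64_t next_alloc[NR_DEVS];
};

void stripe_increment_model(struct stripe_model *s, unsigned dev,
			    uint64_t free_buckets)
{
	/* 1/free_space in 48-bit fixed point; a full device gets max penalty */
	uint64_t inv = free_buckets ? (1ULL << 48) / free_buckets : 1ULL << 48;

	if (s->next_alloc[dev] + inv < s->next_alloc[dev])
		s->next_alloc[dev] = UINT64_MAX;	/* saturate on overflow */
	else
		s->next_alloc[dev] += inv;

	/* rescale all keys down so they stay comparable over time */
	uint64_t scale = s->next_alloc[dev] / 4;
	for (unsigned i = 0; i < NR_DEVS; i++)
		s->next_alloc[i] = s->next_alloc[i] < scale
			? 0 : s->next_alloc[i] - scale;
}
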
720 req->ca = bch2_dev_tryget_noerror(c, *i); in bch2_bucket_alloc_set_trans()
721 if (!req->ca) in bch2_bucket_alloc_set_trans()
724 if (!req->ca->mi.durability && req->have_cache) { in bch2_bucket_alloc_set_trans()
725 bch2_dev_put(req->ca); in bch2_bucket_alloc_set_trans()
732 bch2_dev_stripe_increment_inlined(req->ca, stripe, &req->usage); in bch2_bucket_alloc_set_trans()
733 bch2_dev_put(req->ca); in bch2_bucket_alloc_set_trans()
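
Lines 720-733 show the per-device reference discipline in bch2_bucket_alloc_set_trans(): try-get the device, skip durability-0 devices once a cache copy is already held (they would only add another cache copy), and drop the reference on every exit path. A sketch of that pattern with a plain refcount:

#include <stdbool.h>
#include <stddef.h>

struct dev_candidate {
	unsigned durability;
	int	 refs;
};

static struct dev_candidate *dev_tryget(struct dev_candidate *ca)
{
	if (!ca)
		return NULL;
	ca->refs++;
	return ca;
}

static void dev_put(struct dev_candidate *ca)
{
	ca->refs--;
}

bool consider_device(struct dev_candidate *ca, bool have_cache)
{
	ca = dev_tryget(ca);		/* lines 720-721 */
	if (!ca)
		return false;

	/* lines 724-725: a durability-0 device only adds a cache copy;
	 * if we already have one, it contributes nothing */
	if (!ca->durability && have_cache) {
		dev_put(ca);
		return false;
	}

	/* ... attempt the allocation, update the stripe state ... */

	dev_put(ca);			/* line 733: dropped on every path */
	return true;
}
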
810 struct bch_dev *ca = ob_dev(c, ob); in want_bucket() local
818 if (!ca->mi.durability && in want_bucket()
865 struct bch_dev *ca = ob_dev(c, ob); in bucket_alloc_set_partial() local
868 bch2_dev_usage_read_fast(ca, &req->usage); in bucket_alloc_set_partial()
869 avail = dev_buckets_free(ca, req->usage, req->watermark) + ca->nr_partial_buckets; in bucket_alloc_set_partial()
978 struct bch_dev *ca, bool ec) in should_drop_bucket() argument
982 } else if (ca) { in should_drop_bucket()
983 bool drop = ob->dev == ca->dev_idx; in should_drop_bucket()
998 drop |= ob2->dev == ca->dev_idx; in should_drop_bucket()
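
Lines 982-998 decide whether an open bucket belongs to a device being torn down: a plain bucket matches on its own dev index, while an erasure-coded bucket also matches if any sibling bucket of the stripe lives on the device (the drop |= at line 998). A compact model, with the sibling representation an assumption:

#include <stdbool.h>
#include <stdint.h>

struct ob_drop_model {
	uint8_t			     dev;
	unsigned		     nr_ec_siblings;
	const struct ob_drop_model **ec_siblings;
};

bool should_drop_model(const struct ob_drop_model *ob, uint8_t dev_idx)
{
	bool drop = ob->dev == dev_idx;		/* line 983 */

	/* line 998: any stripe sibling on the device forces the drop */
	for (unsigned i = 0; i < ob->nr_ec_siblings; i++)
		drop |= ob->ec_siblings[i]->dev == dev_idx;

	return drop;
}
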
1009 static void bch2_writepoint_stop(struct bch_fs *c, struct bch_dev *ca, in bch2_writepoint_stop() argument
1018 if (should_drop_bucket(ob, c, ca, ec)) in bch2_writepoint_stop()
1026 void bch2_open_buckets_stop(struct bch_fs *c, struct bch_dev *ca, in bch2_open_buckets_stop() argument
1033 bch2_writepoint_stop(c, ca, ec, &c->write_points[i]); in bch2_open_buckets_stop()
1035 bch2_writepoint_stop(c, ca, ec, &c->copygc_write_point); in bch2_open_buckets_stop()
1036 bch2_writepoint_stop(c, ca, ec, &c->rebalance_write_point); in bch2_open_buckets_stop()
1037 bch2_writepoint_stop(c, ca, ec, &c->btree_write_point); in bch2_open_buckets_stop()
1054 if (should_drop_bucket(ob, c, ca, ec)) { in bch2_open_buckets_stop()
1073 bch2_ec_stop_dev(c, ca); in bch2_open_buckets_stop()
1343 struct bch_dev *ca = ob_dev(c, ob); in bch2_alloc_sectors_start_trans() local
1344 u64 offset = bucket_to_sector(ca, ob->bucket) + in bch2_alloc_sectors_start_trans()
1345 ca->mi.bucket_size - in bch2_alloc_sectors_start_trans()
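
Lines 1344-1345 derive the write offset by filling buckets from the front: the next free sector is the bucket's first sector plus the part already consumed, i.e. bucket_size - sectors_free from the start. A worked instance with illustrative sizes:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t bucket_size  = 512;	/* sectors (assumption) */
	uint64_t bucket       = 10;
	uint64_t sectors_free = 100;	/* unused tail of the bucket */

	/* lines 1344-1345: start of bucket + sectors already written */
	uint64_t offset = bucket * bucket_size + bucket_size - sectors_free;

	assert(offset == 10 * 512 + 412);	/* 412 sectors consumed */
	return 0;
}
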
1453 struct bch_dev *ca = ob_dev(c, ob); in bch2_open_bucket_to_text() local
1463 ca->mi.bucket_size - ob->sectors_free, ca->mi.bucket_size); in bch2_open_bucket_to_text()
1472 struct bch_dev *ca) in bch2_open_buckets_to_text() argument
1482 if (ob->valid && (!ca || ob->dev == ca->dev_idx)) in bch2_open_buckets_to_text()
1593 void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca) in bch2_dev_alloc_debug_to_text() argument
1595 struct bch_fs *c = ca->fs; in bch2_dev_alloc_debug_to_text()
1596 struct bch_dev_usage_full stats = bch2_dev_usage_full_read(ca); in bch2_dev_alloc_debug_to_text()
1604 bch2_dev_usage_to_text(out, ca, &stats); in bch2_dev_alloc_debug_to_text()
1610 prt_printf(out, "%s\t%llu\r\n", bch2_watermarks[i], bch2_dev_buckets_reserved(ca, i)); in bch2_dev_alloc_debug_to_text()
1618 prt_printf(out, "open buckets\t%i\r\n", ca->nr_open_buckets); in bch2_dev_alloc_debug_to_text()
1620 should_invalidate_buckets(ca, bch2_dev_usage_read(ca))); in bch2_dev_alloc_debug_to_text()
1640 for_each_online_member_rcu(c, ca) { in bch2_print_allocator_stuck()
1641 prt_printf(&buf, "Dev %u:\n", ca->dev_idx); in bch2_print_allocator_stuck()
1643 bch2_dev_alloc_debug_to_text(&buf, ca); in bch2_print_allocator_stuck()