Lines matching references to req
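
These references appear to come from the bcachefs foreground allocator (fs/bcachefs/alloc_foreground.c is assumed from the function names). As a reading aid, the sketch below collects the struct alloc_request members that the listed lines actually touch; field types, ordering, and anything not referenced in this listing are assumptions, not the authoritative definition.

struct alloc_request {				/* sketch only: members inferred from the req-> accesses below */
	unsigned		nr_replicas;		/* replicas requested */
	unsigned		nr_effective;		/* durability gathered so far (line 695) */
	unsigned		target;			/* target device group, 0 = any (line 1307) */
	bool			ec;			/* erasure-coded allocation (lines 918, 957) */
	bool			have_cache;		/* a zero-durability (cache) device is already in use */
	enum bch_watermark	watermark;
	unsigned		flags;			/* BCH_WRITE_* flags (line 730) */
	enum bch_data_type	data_type;
	struct bch_devs_list	*devs_have;		/* devices already holding this data (line 904) */
	struct write_point	*wp;			/* writepoint being filled */
	struct open_buckets	ptrs;			/* buckets allocated so far */
	struct open_buckets	scratch_ptrs;		/* temporary list used while filtering ptrs */
	struct bch_devs_mask	devs_may_alloc;
	struct dev_alloc_list	devs_sorted;
	struct bch_dev_usage	usage;
	struct bch_dev		*ca;			/* device currently being tried */
	unsigned		btree_bitmap;		/* compared against BTREE_BITMAP_YES/ANY (lines 317, 551) */
	struct {					/* per-attempt statistics, reported by trace_bucket_alloc2() */
		u64		buckets_seen;
		u64		skipped_open;
		u64		need_journal_commit;
		u64		skipped_need_journal_commit;
		u64		skipped_nocow;
		u64		skipped_nouse;
		u64		skipped_mi_btree_bitmap;
	}			counters;
};
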

181 struct alloc_request *req, in may_alloc_bucket() argument
185 req->counters.skipped_open++; in may_alloc_bucket()
194 req->counters.need_journal_commit++; in may_alloc_bucket()
195 req->counters.skipped_need_journal_commit++; in may_alloc_bucket()
200 req->counters.skipped_nocow++; in may_alloc_bucket()
208 struct alloc_request *req, in __try_alloc_bucket() argument
212 struct bch_dev *ca = req->ca; in __try_alloc_bucket()
218 req->counters.skipped_nouse++; in __try_alloc_bucket()
224 if (unlikely(c->open_buckets_nr_free <= bch2_open_buckets_reserved(req->watermark))) { in __try_alloc_bucket()
236 req->counters.skipped_open++; in __try_alloc_bucket()
261 struct alloc_request *req, in try_alloc_bucket() argument
268 if (!may_alloc_bucket(c, req, POS(req->ca->dev_idx, b))) in try_alloc_bucket()
278 return __try_alloc_bucket(c, req, b, gen, cl); in try_alloc_bucket()
286 struct alloc_request *req, in bch2_bucket_alloc_early() argument
290 struct bch_dev *ca = req->ca; in bch2_bucket_alloc_early()
295 u64 *dev_alloc_cursor = &ca->alloc_cursor[req->btree_bitmap]; in bch2_bucket_alloc_early()
317 if (req->btree_bitmap != BTREE_BITMAP_ANY && in bch2_bucket_alloc_early()
318 req->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca, in bch2_bucket_alloc_early()
320 if (req->btree_bitmap == BTREE_BITMAP_YES && in bch2_bucket_alloc_early()
328 req->counters.buckets_seen++; in bch2_bucket_alloc_early()
329 req->counters.skipped_mi_btree_bitmap++; in bch2_bucket_alloc_early()
348 req->counters.buckets_seen++; in bch2_bucket_alloc_early()
350 ob = may_alloc_bucket(c, req, k.k->p) in bch2_bucket_alloc_early()
351 ? __try_alloc_bucket(c, req, k.k->p.offset, a->gen, cl) in bch2_bucket_alloc_early()
377 struct alloc_request *req, in bch2_bucket_alloc_freelist() argument
380 struct bch_dev *ca = req->ca; in bch2_bucket_alloc_freelist()
384 u64 *dev_alloc_cursor = &ca->alloc_cursor[req->btree_bitmap]; in bch2_bucket_alloc_freelist()
400 req->counters.buckets_seen++; in bch2_bucket_alloc_freelist()
403 if (req->btree_bitmap != BTREE_BITMAP_ANY && in bch2_bucket_alloc_freelist()
404 req->btree_bitmap != bch2_dev_btree_bitmap_marked_sectors(ca, in bch2_bucket_alloc_freelist()
406 if (req->btree_bitmap == BTREE_BITMAP_YES && in bch2_bucket_alloc_freelist()
416 req->counters.skipped_mi_btree_bitmap++; in bch2_bucket_alloc_freelist()
420 ob = try_alloc_bucket(trans, req, &iter, cl); in bch2_bucket_alloc_freelist()
452 struct alloc_request *req, in trace_bucket_alloc2() argument
460 prt_printf(&buf, "dev\t%s (%u)\n", req->ca->name, req->ca->dev_idx); in trace_bucket_alloc2()
461 prt_printf(&buf, "watermark\t%s\n", bch2_watermarks[req->watermark]); in trace_bucket_alloc2()
462 prt_printf(&buf, "data type\t%s\n", __bch2_data_types[req->data_type]); in trace_bucket_alloc2()
464 prt_printf(&buf, "free\t%llu\n", req->usage.buckets[BCH_DATA_free]); in trace_bucket_alloc2()
465 prt_printf(&buf, "avail\t%llu\n", dev_buckets_free(req->ca, req->usage, req->watermark)); in trace_bucket_alloc2()
469 prt_printf(&buf, "seen\t%llu\n", req->counters.buckets_seen); in trace_bucket_alloc2()
470 prt_printf(&buf, "open\t%llu\n", req->counters.skipped_open); in trace_bucket_alloc2()
471 prt_printf(&buf, "need journal commit\t%llu\n", req->counters.skipped_need_journal_commit); in trace_bucket_alloc2()
472 prt_printf(&buf, "nocow\t%llu\n", req->counters.skipped_nocow); in trace_bucket_alloc2()
473 prt_printf(&buf, "nouse\t%llu\n", req->counters.skipped_nouse); in trace_bucket_alloc2()
474 prt_printf(&buf, "mi_btree_bitmap\t%llu\n", req->counters.skipped_mi_btree_bitmap); in trace_bucket_alloc2()
497 struct alloc_request *req, in bch2_bucket_alloc_trans() argument
502 struct bch_dev *ca = req->ca; in bch2_bucket_alloc_trans()
508 req->btree_bitmap = req->data_type == BCH_DATA_btree; in bch2_bucket_alloc_trans()
509 memset(&req->counters, 0, sizeof(req->counters)); in bch2_bucket_alloc_trans()
511 bch2_dev_usage_read_fast(ca, &req->usage); in bch2_bucket_alloc_trans()
512 avail = dev_buckets_free(ca, req->usage, req->watermark); in bch2_bucket_alloc_trans()
514 if (req->usage.buckets[BCH_DATA_need_discard] > in bch2_bucket_alloc_trans()
518 if (req->usage.buckets[BCH_DATA_need_gc_gens] > avail) in bch2_bucket_alloc_trans()
521 if (should_invalidate_buckets(ca, req->usage)) in bch2_bucket_alloc_trans()
525 if (req->watermark > BCH_WATERMARK_normal && in bch2_bucket_alloc_trans()
545 ? bch2_bucket_alloc_freelist(trans, req, cl) in bch2_bucket_alloc_trans()
546 : bch2_bucket_alloc_early(trans, req, cl); in bch2_bucket_alloc_trans()
548 if (req->counters.need_journal_commit * 2 > avail) in bch2_bucket_alloc_trans()
551 if (!ob && req->btree_bitmap != BTREE_BITMAP_ANY) { in bch2_bucket_alloc_trans()
552 req->btree_bitmap = BTREE_BITMAP_ANY; in bch2_bucket_alloc_trans()
565 ob->data_type = req->data_type; in bch2_bucket_alloc_trans()
575 trace_bucket_alloc2(c, req, cl, ob); in bch2_bucket_alloc_trans()
586 struct alloc_request req = { in bch2_bucket_alloc() local
593 PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, &req, cl, false))); in bch2_bucket_alloc()
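
Lines 586 and 593 show the single-device entry point building its request on the stack rather than in the transaction (contrast line 1251, where bch2_alloc_sectors_start_trans() allocates req with bch2_trans_kmalloc_nomemzero()). A minimal sketch of how such a caller plausibly looks; the wrapper's exact signature and the transaction macro wrapping line 593 are assumptions beyond what is listed.

/* Sketch only: reconstructed around the on-stack req at line 586 and the call at line 593. */
struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
				      enum bch_watermark watermark,
				      enum bch_data_type data_type,
				      struct closure *cl)
{
	struct open_bucket *ob;
	struct alloc_request req = {
		.watermark	= watermark,
		.data_type	= data_type,
		.ca		= ca,
	};

	/* run the allocation inside a btree transaction (macro name assumed) */
	bch2_trans_do(c,
		PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, &req, cl, false)));
	return ob;
}
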
687 struct alloc_request *req, in add_new_bucket() argument
692 BUG_ON(req->nr_effective >= req->nr_replicas); in add_new_bucket()
694 __clear_bit(ob->dev, req->devs_may_alloc.d); in add_new_bucket()
695 req->nr_effective += durability; in add_new_bucket()
696 req->have_cache |= !durability; in add_new_bucket()
698 ob_push(c, &req->ptrs, ob); in add_new_bucket()
700 if (req->nr_effective >= req->nr_replicas) in add_new_bucket()
708 struct alloc_request *req, in bch2_bucket_alloc_set_trans() argument
715 BUG_ON(req->nr_effective >= req->nr_replicas); in bch2_bucket_alloc_set_trans()
717 bch2_dev_alloc_list(c, stripe, &req->devs_may_alloc, &req->devs_sorted); in bch2_bucket_alloc_set_trans()
719 darray_for_each(req->devs_sorted, i) { in bch2_bucket_alloc_set_trans()
720 req->ca = bch2_dev_tryget_noerror(c, *i); in bch2_bucket_alloc_set_trans()
721 if (!req->ca) in bch2_bucket_alloc_set_trans()
724 if (!req->ca->mi.durability && req->have_cache) { in bch2_bucket_alloc_set_trans()
725 bch2_dev_put(req->ca); in bch2_bucket_alloc_set_trans()
729 struct open_bucket *ob = bch2_bucket_alloc_trans(trans, req, cl, in bch2_bucket_alloc_set_trans()
730 req->flags & BCH_WRITE_alloc_nowait); in bch2_bucket_alloc_set_trans()
732 bch2_dev_stripe_increment_inlined(req->ca, stripe, &req->usage); in bch2_bucket_alloc_set_trans()
733 bch2_dev_put(req->ca); in bch2_bucket_alloc_set_trans()
742 ret = add_new_bucket(c, req, ob); in bch2_bucket_alloc_set_trans()
763 struct alloc_request *req, in bucket_alloc_from_stripe() argument
769 if (req->nr_replicas < 2) in bucket_alloc_from_stripe()
772 if (ec_open_bucket(c, &req->ptrs)) in bucket_alloc_from_stripe()
776 bch2_ec_stripe_head_get(trans, req, 0, cl); in bucket_alloc_from_stripe()
782 bch2_dev_alloc_list(c, &req->wp->stripe, &req->devs_may_alloc, &req->devs_sorted); in bucket_alloc_from_stripe()
784 darray_for_each(req->devs_sorted, i) in bucket_alloc_from_stripe()
795 ret = add_new_bucket(c, req, ob); in bucket_alloc_from_stripe()
807 struct alloc_request *req, in want_bucket() argument
812 if (!test_bit(ob->dev, req->devs_may_alloc.d)) in want_bucket()
815 if (ob->data_type != req->wp->data_type) in want_bucket()
819 (req->wp->data_type == BCH_DATA_btree || req->ec || req->have_cache)) in want_bucket()
822 if (req->ec != (ob->ec != NULL)) in want_bucket()
829 struct alloc_request *req) in bucket_alloc_set_writepoint() argument
835 req->scratch_ptrs.nr = 0; in bucket_alloc_set_writepoint()
837 open_bucket_for_each(c, &req->wp->ptrs, ob, i) { in bucket_alloc_set_writepoint()
838 if (!ret && want_bucket(c, req, ob)) in bucket_alloc_set_writepoint()
839 ret = add_new_bucket(c, req, ob); in bucket_alloc_set_writepoint()
841 ob_push(c, &req->scratch_ptrs, ob); in bucket_alloc_set_writepoint()
843 req->wp->ptrs = req->scratch_ptrs; in bucket_alloc_set_writepoint()
849 struct alloc_request *req) in bucket_alloc_set_partial() argument
864 if (want_bucket(c, req, ob)) { in bucket_alloc_set_partial()
868 bch2_dev_usage_read_fast(ca, &req->usage); in bucket_alloc_set_partial()
869 avail = dev_buckets_free(ca, req->usage, req->watermark) + ca->nr_partial_buckets; in bucket_alloc_set_partial()
881 ret = add_new_bucket(c, req, ob); in bucket_alloc_set_partial()
892 struct alloc_request *req, in __open_bucket_add_buckets() argument
901 req->devs_may_alloc = target_rw_devs(c, req->wp->data_type, req->target); in __open_bucket_add_buckets()
904 darray_for_each(*req->devs_have, i) in __open_bucket_add_buckets()
905 __clear_bit(*i, req->devs_may_alloc.d); in __open_bucket_add_buckets()
907 open_bucket_for_each(c, &req->ptrs, ob, i) in __open_bucket_add_buckets()
908 __clear_bit(ob->dev, req->devs_may_alloc.d); in __open_bucket_add_buckets()
910 ret = bucket_alloc_set_writepoint(c, req); in __open_bucket_add_buckets()
914 ret = bucket_alloc_set_partial(c, req); in __open_bucket_add_buckets()
918 if (req->ec) { in __open_bucket_add_buckets()
919 ret = bucket_alloc_from_stripe(trans, req, _cl); in __open_bucket_add_buckets()
926 ret = bch2_bucket_alloc_set_trans(trans, req, &req->wp->stripe, cl); in __open_bucket_add_buckets()
940 struct alloc_request *req, in open_bucket_add_buckets() argument
945 if (req->ec && !ec_open_bucket(trans->c, &req->ptrs)) { in open_bucket_add_buckets()
946 ret = __open_bucket_add_buckets(trans, req, cl); in open_bucket_add_buckets()
952 if (req->nr_effective >= req->nr_replicas) in open_bucket_add_buckets()
957 swap(ec, req->ec); in open_bucket_add_buckets()
958 ret = __open_bucket_add_buckets(trans, req, cl); in open_bucket_add_buckets()
959 swap(ec, req->ec); in open_bucket_add_buckets()
1209 struct alloc_request *req) in deallocate_extra_replicas() argument
1212 unsigned extra_replicas = req->nr_effective - req->nr_replicas; in deallocate_extra_replicas()
1215 req->scratch_ptrs.nr = 0; in deallocate_extra_replicas()
1217 open_bucket_for_each(c, &req->ptrs, ob, i) { in deallocate_extra_replicas()
1222 ob_push(c, &req->wp->ptrs, ob); in deallocate_extra_replicas()
1224 ob_push(c, &req->scratch_ptrs, ob); in deallocate_extra_replicas()
1228 req->ptrs = req->scratch_ptrs; in deallocate_extra_replicas()
1251 struct alloc_request *req = bch2_trans_kmalloc_nomemzero(trans, sizeof(*req)); in bch2_alloc_sectors_start_trans() local
1252 int ret = PTR_ERR_OR_ZERO(req); in bch2_alloc_sectors_start_trans()
1259 req->nr_replicas = nr_replicas; in bch2_alloc_sectors_start_trans()
1260 req->target = target; in bch2_alloc_sectors_start_trans()
1261 req->ec = erasure_code; in bch2_alloc_sectors_start_trans()
1262 req->watermark = watermark; in bch2_alloc_sectors_start_trans()
1263 req->flags = flags; in bch2_alloc_sectors_start_trans()
1264 req->devs_have = devs_have; in bch2_alloc_sectors_start_trans()
1268 req->ptrs.nr = 0; in bch2_alloc_sectors_start_trans()
1269 req->nr_effective = 0; in bch2_alloc_sectors_start_trans()
1270 req->have_cache = false; in bch2_alloc_sectors_start_trans()
1273 *wp_ret = req->wp = writepoint_find(trans, write_point.v); in bch2_alloc_sectors_start_trans()
1275 req->data_type = req->wp->data_type; in bch2_alloc_sectors_start_trans()
1282 if (req->data_type != BCH_DATA_user) in bch2_alloc_sectors_start_trans()
1283 req->have_cache = true; in bch2_alloc_sectors_start_trans()
1286 ret = open_bucket_add_buckets(trans, req, NULL); in bch2_alloc_sectors_start_trans()
1293 int ret2 = open_bucket_add_buckets(trans, req, cl); in bch2_alloc_sectors_start_trans()
1306 req->have_cache = true; in bch2_alloc_sectors_start_trans()
1307 req->target = 0; in bch2_alloc_sectors_start_trans()
1309 ret = open_bucket_add_buckets(trans, req, cl); in bch2_alloc_sectors_start_trans()
1311 ret = open_bucket_add_buckets(trans, req, cl); in bch2_alloc_sectors_start_trans()
1314 BUG_ON(!ret && req->nr_effective < req->nr_replicas); in bch2_alloc_sectors_start_trans()
1316 if (erasure_code && !ec_open_bucket(c, &req->ptrs)) in bch2_alloc_sectors_start_trans()
1320 req->nr_effective >= nr_replicas_required) in bch2_alloc_sectors_start_trans()
1326 if (req->nr_effective > req->nr_replicas) in bch2_alloc_sectors_start_trans()
1327 deallocate_extra_replicas(c, req); in bch2_alloc_sectors_start_trans()
1330 open_bucket_for_each(c, &req->wp->ptrs, ob, i) in bch2_alloc_sectors_start_trans()
1333 req->wp->ptrs = req->ptrs; in bch2_alloc_sectors_start_trans()
1335 req->wp->sectors_free = UINT_MAX; in bch2_alloc_sectors_start_trans()
1337 open_bucket_for_each(c, &req->wp->ptrs, ob, i) { in bch2_alloc_sectors_start_trans()
1351 req->wp->sectors_free = min(req->wp->sectors_free, ob->sectors_free); in bch2_alloc_sectors_start_trans()
1354 req->wp->sectors_free = rounddown(req->wp->sectors_free, block_sectors(c)); in bch2_alloc_sectors_start_trans()
1357 if (unlikely(!req->wp->sectors_free)) { in bch2_alloc_sectors_start_trans()
1358 bch2_alloc_sectors_done(c, req->wp); in bch2_alloc_sectors_start_trans()
1362 BUG_ON(!req->wp->sectors_free || req->wp->sectors_free == UINT_MAX); in bch2_alloc_sectors_start_trans()
1366 open_bucket_for_each(c, &req->wp->ptrs, ob, i) in bch2_alloc_sectors_start_trans()
1367 if (req->ptrs.nr < ARRAY_SIZE(req->ptrs.v)) in bch2_alloc_sectors_start_trans()
1368 ob_push(c, &req->ptrs, ob); in bch2_alloc_sectors_start_trans()
1371 req->wp->ptrs = req->ptrs; in bch2_alloc_sectors_start_trans()
1373 mutex_unlock(&req->wp->lock); in bch2_alloc_sectors_start_trans()