Lines matching references to 'd':

133 void __check_limbo(struct rdt_mon_domain *d, bool force_free)  in __check_limbo()  argument
157 idx = find_next_bit(d->rmid_busy_llc, idx_limit, cur_idx); in __check_limbo()
162 if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid, in __check_limbo()
177 trace_mon_llc_occupancy_limbo(entry->closid, entry->rmid, d->hdr.id, val); in __check_limbo()
181 clear_bit(idx, d->rmid_busy_llc); in __check_limbo()
191 bool has_busy_rmid(struct rdt_mon_domain *d) in has_busy_rmid() argument
195 return find_first_bit(d->rmid_busy_llc, idx_limit) != idx_limit; in has_busy_rmid()
292 struct rdt_mon_domain *d; in add_rmid_to_limbo() local
303 list_for_each_entry(d, &r->mon_domains, hdr.list) { in add_rmid_to_limbo()
308 if (!has_busy_rmid(d)) in add_rmid_to_limbo()
309 cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL, in add_rmid_to_limbo()
311 set_bit(idx, d->rmid_busy_llc); in add_rmid_to_limbo()
345 static struct mbm_state *get_mbm_state(struct rdt_mon_domain *d, u32 closid, in get_mbm_state() argument
352 return &d->mbm_total[idx]; in get_mbm_state()
354 return &d->mbm_local[idx]; in get_mbm_state()
363 struct rdt_mon_domain *d; in __mon_event_count() local
370 resctrl_arch_reset_rmid(rr->r, rr->d, closid, rmid, rr->evtid); in __mon_event_count()
371 m = get_mbm_state(rr->d, closid, rmid, rr->evtid); in __mon_event_count()
377 if (rr->d) { in __mon_event_count()
379 if (!cpumask_test_cpu(cpu, &rr->d->hdr.cpu_mask)) in __mon_event_count()
381 rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid, in __mon_event_count()
404 list_for_each_entry(d, &rr->r->mon_domains, hdr.list) { in __mon_event_count()
405 if (d->ci_id != rr->ci_id) in __mon_event_count()
407 err = resctrl_arch_rmid_read(rr->r, d, closid, rmid, in __mon_event_count()
438 m = get_mbm_state(rr->d, closid, rmid, rr->evtid); in mbm_bw_count()
493 struct rdt_ctrl_domain *d; in get_ctrl_domain_from_cpu() local
497 list_for_each_entry(d, &r->ctrl_domains, hdr.list) { in get_ctrl_domain_from_cpu()
499 if (cpumask_test_cpu(cpu, &d->hdr.cpu_mask)) in get_ctrl_domain_from_cpu()
500 return d; in get_ctrl_domain_from_cpu()
606 static void mbm_update_one_event(struct rdt_resource *r, struct rdt_mon_domain *d, in mbm_update_one_event() argument
612 rr.d = d; in mbm_update_one_event()
633 static void mbm_update(struct rdt_resource *r, struct rdt_mon_domain *d, in mbm_update() argument
641 mbm_update_one_event(r, d, closid, rmid, QOS_L3_MBM_TOTAL_EVENT_ID); in mbm_update()
644 mbm_update_one_event(r, d, closid, rmid, QOS_L3_MBM_LOCAL_EVENT_ID); in mbm_update()
654 struct rdt_mon_domain *d; in cqm_handle_limbo() local
659 d = container_of(work, struct rdt_mon_domain, cqm_limbo.work); in cqm_handle_limbo()
661 __check_limbo(d, false); in cqm_handle_limbo()
663 if (has_busy_rmid(d)) { in cqm_handle_limbo()
664 d->cqm_work_cpu = cpumask_any_housekeeping(&d->hdr.cpu_mask, in cqm_handle_limbo()
666 schedule_delayed_work_on(d->cqm_work_cpu, &d->cqm_limbo, in cqm_handle_limbo()
699 struct rdt_mon_domain *d; in mbm_handle_overflow() local
714 d = container_of(work, struct rdt_mon_domain, mbm_over.work); in mbm_handle_overflow()
717 mbm_update(r, d, prgrp->closid, prgrp->mon.rmid); in mbm_handle_overflow()
721 mbm_update(r, d, crgrp->closid, crgrp->mon.rmid); in mbm_handle_overflow()
724 update_mba_bw(prgrp, d); in mbm_handle_overflow()
731 d->mbm_work_cpu = cpumask_any_housekeeping(&d->hdr.cpu_mask, in mbm_handle_overflow()
733 schedule_delayed_work_on(d->mbm_work_cpu, &d->mbm_over, delay); in mbm_handle_overflow()
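The excerpts above all revolve around 'd', the monitoring (or control) domain being operated on. The first group (__check_limbo(), has_busy_rmid(), add_rmid_to_limbo()) shows the limbo pattern: a freed RMID whose cache occupancy may still be non-zero is marked busy in the domain's rmid_busy_llc bitmap, re-read periodically, and released once its occupancy drops; cqm_handle_limbo() and mbm_handle_overflow() are the delayed-work handlers that reschedule themselves on a housekeeping CPU while work remains. As a rough illustration only, here is a minimal userspace C sketch of that limbo pattern. The bitmap layout, read_occupancy(), OCCUPANCY_THRESHOLD and the busy-wait loop are illustrative stand-ins, not the kernel's implementation.

/*
 * Minimal userspace sketch of the "limbo RMID" pattern visible above:
 * freed RMIDs whose LLC occupancy may still be non-zero are marked busy
 * in a per-domain bitmap and re-checked until occupancy drops below a
 * threshold.  All names and values here are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

#define RMID_MAX            8
#define OCCUPANCY_THRESHOLD 64      /* hypothetical "clean enough" limit */

struct mon_domain {
	unsigned long rmid_busy;                /* one bit per RMID in limbo */
	unsigned long occupancy[RMID_MAX];      /* fake per-RMID occupancy */
};

/* Stand-in for resctrl_arch_rmid_read(): occupancy decays on each read. */
static unsigned long read_occupancy(struct mon_domain *d, unsigned int rmid)
{
	d->occupancy[rmid] /= 2;
	return d->occupancy[rmid];
}

/* Analogue of add_rmid_to_limbo(): mark the RMID busy in this domain. */
static void add_rmid_to_limbo(struct mon_domain *d, unsigned int rmid,
			      unsigned long occupancy)
{
	d->occupancy[rmid] = occupancy;
	d->rmid_busy |= 1UL << rmid;
}

/* Analogue of has_busy_rmid(): any set bit means more limbo work to do. */
static bool has_busy_rmid(struct mon_domain *d)
{
	return d->rmid_busy != 0;
}

/* Analogue of __check_limbo(): clear RMIDs whose occupancy has dropped. */
static void check_limbo(struct mon_domain *d)
{
	for (unsigned int rmid = 0; rmid < RMID_MAX; rmid++) {
		if (!(d->rmid_busy & (1UL << rmid)))
			continue;
		if (read_occupancy(d, rmid) < OCCUPANCY_THRESHOLD)
			d->rmid_busy &= ~(1UL << rmid);
	}
}

int main(void)
{
	struct mon_domain d = { 0 };
	int pass = 0;

	add_rmid_to_limbo(&d, 3, 1000);
	add_rmid_to_limbo(&d, 5, 300);

	/* Stand-in for the delayed work that cqm_handle_limbo() reschedules. */
	while (has_busy_rmid(&d)) {
		check_limbo(&d);
		printf("pass %d: busy mask 0x%lx\n", ++pass, d.rmid_busy);
	}
	return 0;
}

The real code differs mainly in how the recheck is driven: instead of a loop, cqm_handle_limbo() and mbm_handle_overflow() pick a CPU from the domain's cpu_mask via cpumask_any_housekeeping() and requeue themselves with schedule_delayed_work_on(), as the lines for cqm_work_cpu and mbm_work_cpu above show.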