Lines matching refs: cpuc
2191 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in __intel_pmu_disable_all() local
2195 if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) in __intel_pmu_disable_all()
2208 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in __intel_pmu_enable_all() local
2209 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl); in __intel_pmu_enable_all()
2213 if (cpuc->fixed_ctrl_val != cpuc->active_fixed_ctrl_val) { in __intel_pmu_enable_all()
2214 wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, cpuc->fixed_ctrl_val); in __intel_pmu_enable_all()
2215 cpuc->active_fixed_ctrl_val = cpuc->fixed_ctrl_val; in __intel_pmu_enable_all()
2219 intel_ctrl & ~cpuc->intel_ctrl_guest_mask); in __intel_pmu_enable_all()
2221 if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { in __intel_pmu_enable_all()
2223 cpuc->events[INTEL_PMC_IDX_FIXED_BTS]; in __intel_pmu_enable_all()
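
The __intel_pmu_enable_all() hits above show a write-avoidance idiom: the desired fixed-counter control value lives in cpuc->fixed_ctrl_val, and the MSR is rewritten only when it differs from the cached active_fixed_ctrl_val. A minimal user-space sketch of that pattern (stubbed wrmsr, hypothetical names; not the kernel code itself):

#include <stdint.h>
#include <stdio.h>

struct pmu_state {
	uint64_t fixed_ctrl_val;        /* value we want in the MSR */
	uint64_t active_fixed_ctrl_val; /* last value actually written */
};

static void wrmsr_stub(uint32_t msr, uint64_t val)
{
	printf("wrmsr(0x%x) <- 0x%llx\n", msr, (unsigned long long)val);
}

static void enable_all(struct pmu_state *st)
{
	/* MSR writes are slow; touch the hardware only on change. */
	if (st->fixed_ctrl_val != st->active_fixed_ctrl_val) {
		wrmsr_stub(0x38d /* IA32_FIXED_CTR_CTRL */, st->fixed_ctrl_val);
		st->active_fixed_ctrl_val = st->fixed_ctrl_val;
	}
}

int main(void)
{
	struct pmu_state st = { .fixed_ctrl_val = 0xb0 };

	enable_all(&st); /* writes the MSR */
	enable_all(&st); /* skipped: nothing changed */
	return 0;
}
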
2242 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in __intel_pmu_snapshot_branch_stack() local
2247 memcpy(entries, cpuc->lbr_entries, sizeof(struct perf_branch_entry) * cnt); in __intel_pmu_snapshot_branch_stack()
2295 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_nhm_workaround() local
2328 event = cpuc->events[i]; in intel_pmu_nhm_workaround()
2342 event = cpuc->events[i]; in intel_pmu_nhm_workaround()
2360 static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on) in intel_set_tfa() argument
2364 if (cpuc->tfa_shadow != val) { in intel_set_tfa()
2365 cpuc->tfa_shadow = val; in intel_set_tfa()
2370 static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr) in intel_tfa_commit_scheduling() argument
2376 intel_set_tfa(cpuc, true); in intel_tfa_commit_scheduling()
2381 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_tfa_pmu_enable_all() local
2387 if (!test_bit(3, cpuc->active_mask)) in intel_tfa_pmu_enable_all()
2388 intel_set_tfa(cpuc, false); in intel_tfa_pmu_enable_all()
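
The TFA hits reflect the TSX-force-abort erratum handling: general-purpose counter 3 and TSX are mutually exclusive on affected parts, so the MSR bit tracks counter 3 usage, again through a shadow copy (cpuc->tfa_shadow) to skip redundant writes. An illustrative stand-alone sketch with invented names:

#include <stdbool.h>
#include <stdint.h>

struct tfa_state {
	uint64_t active_mask; /* bit n set => counter n in use */
	uint64_t tfa_shadow;  /* last value written to the TFA MSR */
};

void set_tfa(struct tfa_state *st, bool on)
{
	uint64_t val = on ? 1 : 0; /* bit 0 forces TSX transactions to abort */

	if (st->tfa_shadow != val) {
		st->tfa_shadow = val;
		/* wrmsr(MSR_TSX_FORCE_ABORT, val) would go here */
	}
}

void tfa_enable_all(struct tfa_state *st)
{
	/* TSX may only be re-allowed while counter 3 is free. */
	if (!(st->active_mask & (1ULL << 3)))
		set_tfa(st, false);
}
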
2414 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_set_masks() local
2417 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask); in intel_set_masks()
2419 __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask); in intel_set_masks()
2421 __set_bit(idx, (unsigned long *)&cpuc->intel_cp_status); in intel_set_masks()
2426 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_clear_masks() local
2428 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask); in intel_clear_masks()
2429 __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask); in intel_clear_masks()
2430 __clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status); in intel_clear_masks()
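
intel_set_masks()/intel_clear_masks() update several u64 bookkeeping masks through the generic bitops by casting to unsigned long *, which is safe on the x86-64 target where unsigned long is 64 bits. A hypothetical user-space analog of the same bookkeeping:

#include <stdbool.h>
#include <stdint.h>

struct masks {
	uint64_t guest_mask; /* counters the host must not touch */
	uint64_t host_mask;  /* counters the guest must not see */
	uint64_t cp_status;  /* checkpointed counters */
};

static inline void set_bit64(int idx, uint64_t *addr)   { *addr |= 1ULL << idx; }
static inline void clear_bit64(int idx, uint64_t *addr) { *addr &= ~(1ULL << idx); }

void set_masks(struct masks *m, int idx,
	       bool exclude_host, bool exclude_guest, bool checkpointed)
{
	if (exclude_host)
		set_bit64(idx, &m->guest_mask);
	if (exclude_guest)
		set_bit64(idx, &m->host_mask);
	if (checkpointed)
		set_bit64(idx, &m->cp_status);
}

void clear_masks(struct masks *m, int idx)
{
	/* Clearing is unconditional: one index leaves every mask at once. */
	clear_bit64(idx, &m->guest_mask);
	clear_bit64(idx, &m->host_mask);
	clear_bit64(idx, &m->cp_status);
}
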
2435 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_disable_fixed() local
2441 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_disable_fixed() local
2447 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx)) in intel_pmu_disable_fixed()
2455 cpuc->fixed_ctrl_val &= ~mask; in intel_pmu_disable_fixed()
2604 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in update_saved_topdown_regs() local
2611 for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) { in update_saved_topdown_regs()
2614 other = cpuc->events[idx]; in update_saved_topdown_regs()
2629 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_update_topdown_event() local
2643 for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) { in intel_update_topdown_event()
2646 other = cpuc->events[idx]; in intel_update_topdown_event()
2656 if (event && !test_bit(event->hw.idx, cpuc->active_mask)) { in intel_update_topdown_event()
2704 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_read_topdown_event() local
2707 if ((cpuc->txn_flags & PERF_PMU_TXN_READ) && in intel_pmu_read_topdown_event()
2728 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_enable_fixed() local
2734 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_enable_fixed() local
2739 if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx)) in intel_pmu_enable_fixed()
2774 cpuc->fixed_ctrl_val &= ~mask; in intel_pmu_enable_fixed()
2775 cpuc->fixed_ctrl_val |= bits; in intel_pmu_enable_fixed()
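
intel_pmu_enable_fixed()/intel_pmu_disable_fixed() do a read-modify-write on cpuc->fixed_ctrl_val: MSR_ARCH_PERFMON_FIXED_CTR_CTRL packs a 4-bit control field per fixed counter, so the counter's old field is masked out before the new bits are or-ed in. A sketch of that field arithmetic (layout per the SDM; the surrounding code is invented):

#include <stdint.h>

#define FIXED_FIELD_BITS 4   /* control bits per fixed counter */
#define FIXED_EN_OS  0x1ULL  /* count in ring 0 */
#define FIXED_EN_USR 0x2ULL  /* count in ring 3 */

void fixed_ctrl_enable(uint64_t *fixed_ctrl_val, int idx, uint64_t bits)
{
	uint64_t mask = 0xfULL << (idx * FIXED_FIELD_BITS);

	bits <<= idx * FIXED_FIELD_BITS;
	*fixed_ctrl_val &= ~mask; /* drop the counter's old field */
	*fixed_ctrl_val |= bits;  /* install the new one */
}

void fixed_ctrl_disable(uint64_t *fixed_ctrl_val, int idx)
{
	*fixed_ctrl_val &= ~(0xfULL << (idx * FIXED_FIELD_BITS));
}
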
2857 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_reset() local
2858 int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed); in intel_pmu_reset()
2859 int num_counters = hybrid(cpuc->pmu, num_counters); in intel_pmu_reset()
2875 if (fixed_counter_disabled(idx, cpuc->pmu)) in intel_pmu_reset()
2912 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_handle_guest_pebs() local
2913 u64 guest_pebs_idxs = cpuc->pebs_enabled & ~cpuc->intel_ctrl_host_mask; in x86_pmu_handle_guest_pebs()
2926 event = cpuc->events[bit]; in x86_pmu_handle_guest_pebs()
2942 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in handle_pmi_common() local
2945 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl); in handle_pmi_common()
2978 status &= ~(cpuc->pebs_enabled & x86_pmu.pebs_capable); in handle_pmi_common()
2984 u64 pebs_enabled = cpuc->pebs_enabled; in handle_pmi_common()
2998 if (pebs_enabled != cpuc->pebs_enabled) in handle_pmi_common()
2999 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled); in handle_pmi_common()
3024 status |= cpuc->intel_cp_status; in handle_pmi_common()
3027 struct perf_event *event = cpuc->events[bit]; in handle_pmi_common()
3031 if (!test_bit(bit, cpuc->active_mask)) in handle_pmi_common()
3040 perf_sample_save_brstack(&data, event, &cpuc->lbr_stack); in handle_pmi_common()
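
handle_pmi_common() walks the GLOBAL_STATUS overflow bits, maps each set bit to cpuc->events[bit], and skips bits whose counter is no longer in cpuc->active_mask (the overflow may have raced with event teardown). A self-contained sketch of that loop, using __builtin_ctzll as a stand-in for for_each_set_bit:

#include <stdint.h>
#include <stdio.h>

void handle_overflow(int bit)
{
	printf("counter %d overflowed\n", bit);
}

int handle_status(uint64_t status, uint64_t active_mask)
{
	int handled = 0;

	while (status) {
		int bit = __builtin_ctzll(status); /* lowest set bit */

		status &= status - 1; /* clear it */
		handled++;
		if (!(active_mask & (1ULL << bit)))
			continue; /* counter raced with event teardown */
		handle_overflow(bit);
	}
	return handled;
}

int main(void)
{
	/* counters 0 and 3 overflowed, but only counter 0 is still active */
	return handle_status(0x9, 0x1) == 2 ? 0 : 1;
}
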
3055 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_pmu_handle_irq() local
3056 bool late_ack = hybrid_bit(cpuc->pmu, late_ack); in intel_pmu_handle_irq()
3057 bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack); in intel_pmu_handle_irq()
3067 pmu_enabled = cpuc->enabled; in intel_pmu_handle_irq()
3079 cpuc->enabled = 0; in intel_pmu_handle_irq()
3116 cpuc->enabled = pmu_enabled; in intel_pmu_handle_irq()
3156 static int intel_alt_er(struct cpu_hw_events *cpuc, in intel_alt_er() argument
3159 struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs); in intel_alt_er()
3201 __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc, in __intel_shared_reg_get_constraints() argument
3215 if (reg->alloc && !cpuc->is_fake) in __intel_shared_reg_get_constraints()
3219 era = &cpuc->shared_regs->regs[idx]; in __intel_shared_reg_get_constraints()
3238 if (!cpuc->is_fake) { in __intel_shared_reg_get_constraints()
3264 idx = intel_alt_er(cpuc, idx, reg->config); in __intel_shared_reg_get_constraints()
3276 __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc, in __intel_shared_reg_put_constraints() argument
3289 if (!reg->alloc || cpuc->is_fake) in __intel_shared_reg_put_constraints()
3292 era = &cpuc->shared_regs->regs[reg->idx]; in __intel_shared_reg_put_constraints()
3302 intel_shared_regs_constraints(struct cpu_hw_events *cpuc, in intel_shared_regs_constraints() argument
3310 c = __intel_shared_reg_get_constraints(cpuc, event, xreg); in intel_shared_regs_constraints()
3316 d = __intel_shared_reg_get_constraints(cpuc, event, breg); in intel_shared_regs_constraints()
3318 __intel_shared_reg_put_constraints(cpuc, xreg); in intel_shared_regs_constraints()
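
__intel_shared_reg_get_constraints() hands out an extra MSR shared between hyper-threads only if it is unused or already programmed to the identical value, tracked by a refcount; the put path drops the reference. A simplified sketch with the locking, the cpuc->is_fake paths, and the intel_alt_er() fallback omitted:

#include <stdbool.h>
#include <stdint.h>

struct shared_reg {
	uint64_t config; /* value currently programmed into the MSR */
	int refcnt;      /* events on this core using it */
};

bool shared_reg_get(struct shared_reg *era, uint64_t config)
{
	if (era->refcnt == 0 || era->config == config) {
		era->config = config;
		era->refcnt++;
		return true; /* compatible: share the register */
	}
	return false;        /* conflict: caller tries an alternate reg */
}

void shared_reg_put(struct shared_reg *era)
{
	era->refcnt--;
}
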
3326 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx, in x86_get_event_constraints() argument
3329 struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints); in x86_get_event_constraints()
3341 return &hybrid_var(cpuc->pmu, unconstrained); in x86_get_event_constraints()
3345 __intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, in __intel_get_event_constraints() argument
3358 c = intel_shared_regs_constraints(cpuc, event); in __intel_get_event_constraints()
3366 return x86_get_event_constraints(cpuc, idx, event); in __intel_get_event_constraints()
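
x86_get_event_constraints() scans the PMU's constraint table for an entry whose code matches the event and falls back to the per-PMU "unconstrained" entry. A sketch of that lookup; the table contents here are invented for illustration:

#include <stdint.h>

struct constraint {
	uint64_t code;   /* event code this entry matches */
	uint64_t cmask;  /* config bits participating in the match */
	uint64_t idxmsk; /* counters the event may use */
};

static const struct constraint table[] = {
	{ 0x00c0, 0xff, 0x1 }, /* e.g. pin one event code to counter 0 */
	{ 0, 0, 0 },           /* terminator: cmask == 0 */
};

static const struct constraint unconstrained = { 0, 0, ~0ULL };

const struct constraint *get_constraint(uint64_t config)
{
	const struct constraint *c;

	for (c = table; c->cmask; c++) {
		if ((config & c->cmask) == c->code)
			return c;
	}
	return &unconstrained; /* no rule applies: any counter will do */
}
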
3370 intel_start_scheduling(struct cpu_hw_events *cpuc) in intel_start_scheduling() argument
3372 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; in intel_start_scheduling()
3374 int tid = cpuc->excl_thread_id; in intel_start_scheduling()
3379 if (cpuc->is_fake || !is_ht_workaround_enabled()) in intel_start_scheduling()
3399 static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr) in intel_commit_scheduling() argument
3401 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; in intel_commit_scheduling()
3402 struct event_constraint *c = cpuc->event_constraint[idx]; in intel_commit_scheduling()
3404 int tid = cpuc->excl_thread_id; in intel_commit_scheduling()
3406 if (cpuc->is_fake || !is_ht_workaround_enabled()) in intel_commit_scheduling()
3426 intel_stop_scheduling(struct cpu_hw_events *cpuc) in intel_stop_scheduling() argument
3428 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; in intel_stop_scheduling()
3430 int tid = cpuc->excl_thread_id; in intel_stop_scheduling()
3435 if (cpuc->is_fake || !is_ht_workaround_enabled()) in intel_stop_scheduling()
3453 dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx) in dyn_constraint() argument
3455 WARN_ON_ONCE(!cpuc->constraint_list); in dyn_constraint()
3463 cx = &cpuc->constraint_list[idx]; in dyn_constraint()
3482 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event, in intel_get_excl_constraints() argument
3485 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; in intel_get_excl_constraints()
3487 int tid = cpuc->excl_thread_id; in intel_get_excl_constraints()
3494 if (cpuc->is_fake || !is_ht_workaround_enabled()) in intel_get_excl_constraints()
3511 c = dyn_constraint(cpuc, c, idx); in intel_get_excl_constraints()
3532 if (!cpuc->n_excl++) in intel_get_excl_constraints()
3582 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, in intel_get_event_constraints() argument
3587 c1 = cpuc->event_constraint[idx]; in intel_get_event_constraints()
3594 c2 = __intel_get_event_constraints(cpuc, idx, event); in intel_get_event_constraints()
3602 if (cpuc->excl_cntrs) in intel_get_event_constraints()
3603 return intel_get_excl_constraints(cpuc, event, idx, c2); in intel_get_event_constraints()
3608 static void intel_put_excl_constraints(struct cpu_hw_events *cpuc, in intel_put_excl_constraints() argument
3612 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs; in intel_put_excl_constraints()
3613 int tid = cpuc->excl_thread_id; in intel_put_excl_constraints()
3619 if (cpuc->is_fake) in intel_put_excl_constraints()
3627 if (!--cpuc->n_excl) in intel_put_excl_constraints()
3654 intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc, in intel_put_shared_regs_event_constraints() argument
3661 __intel_shared_reg_put_constraints(cpuc, reg); in intel_put_shared_regs_event_constraints()
3665 __intel_shared_reg_put_constraints(cpuc, reg); in intel_put_shared_regs_event_constraints()
3668 static void intel_put_event_constraints(struct cpu_hw_events *cpuc, in intel_put_event_constraints() argument
3671 intel_put_shared_regs_event_constraints(cpuc, event); in intel_put_event_constraints()
3678 if (cpuc->excl_cntrs) in intel_put_event_constraints()
3679 intel_put_excl_constraints(cpuc, event); in intel_put_event_constraints()
4030 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in intel_guest_get_msrs() local
4031 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; in intel_guest_get_msrs()
4033 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl); in intel_guest_get_msrs()
4034 u64 pebs_mask = cpuc->pebs_enabled & x86_pmu.pebs_capable; in intel_guest_get_msrs()
4041 .host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask, in intel_guest_get_msrs()
4042 .guest = intel_ctrl & (~cpuc->intel_ctrl_host_mask | ~pebs_mask), in intel_guest_get_msrs()
4059 .host = cpuc->pebs_enabled, in intel_guest_get_msrs()
4070 .host = (unsigned long)cpuc->ds, in intel_guest_get_msrs()
4077 .host = cpuc->pebs_data_cfg, in intel_guest_get_msrs()
4085 .host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask, in intel_guest_get_msrs()
4086 .guest = pebs_mask & ~cpuc->intel_ctrl_host_mask, in intel_guest_get_msrs()
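
intel_guest_get_msrs() fills cpuc->guest_switch_msrs with one {msr, host, guest} triple per register that must differ across VM entry/exit; the hits above show GLOBAL_CTRL and the PEBS MSRs being split by the host/guest counter masks. A reduced sketch covering just a GLOBAL_CTRL-style entry (PEBS handling omitted; names hypothetical):

#include <stdint.h>

struct guest_switch_msr {
	uint32_t msr;
	uint64_t host, guest;
};

int build_switch_list(struct guest_switch_msr *arr, uint64_t intel_ctrl,
		      uint64_t guest_mask, uint64_t host_mask)
{
	arr[0] = (struct guest_switch_msr){
		.msr   = 0x38f, /* IA32_PERF_GLOBAL_CTRL */
		/* host keeps everything except counters lent to the guest */
		.host  = intel_ctrl & ~guest_mask,
		/* guest runs everything except host-only counters */
		.guest = intel_ctrl & ~host_mask,
	};
	return 1; /* entries filled */
}
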
4105 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in core_guest_get_msrs() local
4106 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; in core_guest_get_msrs()
4110 struct perf_event *event = cpuc->events[idx]; in core_guest_get_msrs()
4115 if (!test_bit(idx, cpuc->active_mask)) in core_guest_get_msrs()
4139 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in core_pmu_enable_all() local
4143 struct hw_perf_event *hwc = &cpuc->events[idx]->hw; in core_pmu_enable_all()
4145 if (!test_bit(idx, cpuc->active_mask) || in core_pmu_enable_all()
4146 cpuc->events[idx]->attr.exclude_host) in core_pmu_enable_all()
4215 hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx, in hsw_get_event_constraints() argument
4220 c = intel_get_event_constraints(cpuc, idx, event); in hsw_get_event_constraints()
4233 icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, in icl_get_event_constraints() argument
4244 return hsw_get_event_constraints(cpuc, idx, event); in icl_get_event_constraints()
4248 spr_get_event_constraints(struct cpu_hw_events *cpuc, int idx, in spr_get_event_constraints() argument
4253 c = icl_get_event_constraints(cpuc, idx, event); in spr_get_event_constraints()
4273 glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx, in glp_get_event_constraints() argument
4282 c = intel_get_event_constraints(cpuc, idx, event); in glp_get_event_constraints()
4288 tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx, in tnt_get_event_constraints() argument
4293 c = intel_get_event_constraints(cpuc, idx, event); in tnt_get_event_constraints()
4313 tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx, in tfa_get_event_constraints() argument
4316 struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event); in tfa_get_event_constraints()
4322 c = dyn_constraint(cpuc, c, idx); in tfa_get_event_constraints()
4331 adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, in adl_get_event_constraints() argument
4337 return spr_get_event_constraints(cpuc, idx, event); in adl_get_event_constraints()
4339 return tnt_get_event_constraints(cpuc, idx, event); in adl_get_event_constraints()
4346 cmt_get_event_constraints(struct cpu_hw_events *cpuc, int idx, in cmt_get_event_constraints() argument
4351 c = intel_get_event_constraints(cpuc, idx, event); in cmt_get_event_constraints()
4379 rwc_get_event_constraints(struct cpu_hw_events *cpuc, int idx, in rwc_get_event_constraints() argument
4384 c = spr_get_event_constraints(cpuc, idx, event); in rwc_get_event_constraints()
4403 mtl_get_event_constraints(struct cpu_hw_events *cpuc, int idx, in mtl_get_event_constraints() argument
4409 return rwc_get_event_constraints(cpuc, idx, event); in mtl_get_event_constraints()
4411 return cmt_get_event_constraints(cpuc, idx, event); in mtl_get_event_constraints()
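
The adl/mtl wrappers route constraint lookup by core type: big-core events go through the spr/rwc paths, small-core events through tnt/cmt. A toy rendering of that dispatch, with invented types and counter masks:

enum hybrid_type { HYBRID_BIG, HYBRID_SMALL };

struct constraint { unsigned long idxmsk; };

static const struct constraint big_unconstrained   = { 0xff }; /* 8 GP counters */
static const struct constraint small_unconstrained = { 0x3f }; /* 6 GP counters */

const struct constraint *hybrid_get_constraints(enum hybrid_type type)
{
	if (type == HYBRID_BIG)
		return &big_unconstrained;  /* spr/rwc-style path */
	return &small_unconstrained;        /* tnt/cmt-style path */
}
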
4531 int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu) in intel_cpuc_prepare() argument
4533 cpuc->pebs_record_size = x86_pmu.pebs_record_size; in intel_cpuc_prepare()
4536 cpuc->shared_regs = allocate_shared_regs(cpu); in intel_cpuc_prepare()
4537 if (!cpuc->shared_regs) in intel_cpuc_prepare()
4544 cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu)); in intel_cpuc_prepare()
4545 if (!cpuc->constraint_list) in intel_cpuc_prepare()
4550 cpuc->excl_cntrs = allocate_excl_cntrs(cpu); in intel_cpuc_prepare()
4551 if (!cpuc->excl_cntrs) in intel_cpuc_prepare()
4554 cpuc->excl_thread_id = 0; in intel_cpuc_prepare()
4560 kfree(cpuc->constraint_list); in intel_cpuc_prepare()
4561 cpuc->constraint_list = NULL; in intel_cpuc_prepare()
4564 kfree(cpuc->shared_regs); in intel_cpuc_prepare()
4565 cpuc->shared_regs = NULL; in intel_cpuc_prepare()
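
intel_cpuc_prepare() allocates up to three per-CPU structures and unwinds with cascading goto labels when a later allocation fails, as the freeing hits above suggest. A hypothetical user-space analog of the same unwinding shape:

#include <stdlib.h>

struct cpuc_like {
	void *shared_regs;
	void *constraint_list;
	void *excl_cntrs;
};

int prepare(struct cpuc_like *c)
{
	c->shared_regs = malloc(64);
	if (!c->shared_regs)
		goto err;

	c->constraint_list = malloc(64);
	if (!c->constraint_list)
		goto err_shared_regs;

	c->excl_cntrs = malloc(64);
	if (!c->excl_cntrs)
		goto err_constraint_list;

	return 0;

err_constraint_list:
	free(c->constraint_list);
	c->constraint_list = NULL;
err_shared_regs:
	free(c->shared_regs);
	c->shared_regs = NULL;
err:
	return -1; /* -ENOMEM in the kernel */
}
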
4610 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); in init_hybrid_pmu() local
4625 cpuc->pmu = NULL; in init_hybrid_pmu()
4651 cpuc->pmu = &pmu->pmu; in init_hybrid_pmu()
4658 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); in intel_pmu_cpu_starting() local
4671 cpuc->lbr_sel = NULL; in intel_pmu_cpu_starting()
4674 WARN_ON_ONCE(cpuc->tfa_shadow); in intel_pmu_cpu_starting()
4675 cpuc->tfa_shadow = ~0ULL; in intel_pmu_cpu_starting()
4676 intel_set_tfa(cpuc, false); in intel_pmu_cpu_starting()
4701 if (!cpuc->shared_regs) in intel_pmu_cpu_starting()
4710 cpuc->kfree_on_online[0] = cpuc->shared_regs; in intel_pmu_cpu_starting()
4711 cpuc->shared_regs = pc; in intel_pmu_cpu_starting()
4715 cpuc->shared_regs->core_id = core_id; in intel_pmu_cpu_starting()
4716 cpuc->shared_regs->refcnt++; in intel_pmu_cpu_starting()
4720 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR]; in intel_pmu_cpu_starting()
4730 cpuc->kfree_on_online[1] = cpuc->excl_cntrs; in intel_pmu_cpu_starting()
4731 cpuc->excl_cntrs = c; in intel_pmu_cpu_starting()
4733 cpuc->excl_thread_id = 1; in intel_pmu_cpu_starting()
4737 cpuc->excl_cntrs->core_id = core_id; in intel_pmu_cpu_starting()
4738 cpuc->excl_cntrs->refcnt++; in intel_pmu_cpu_starting()
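
intel_pmu_cpu_starting() shows the sibling handshake for per-core shared state: the first thread of a core claims the structure it preallocated; a later sibling that finds an existing one with its core_id adopts it, bumps the refcount, and queues its own copy on kfree_on_online. A single-threaded sketch of that adoption (the kernel relies on hotplug serialization rather than locks here):

#include <stddef.h>

struct shared {
	int core_id;         /* which physical core owns this copy */
	int refcnt;
	struct shared *next; /* stand-in for the kernel's per-CPU search */
};

struct shared *adopt_shared(struct shared *mine, struct shared *pool,
			    int core_id, struct shared **free_me)
{
	struct shared *s;

	for (s = pool; s; s = s->next) {
		if (s->core_id == core_id) {
			*free_me = mine; /* kfree_on_online in the kernel */
			s->refcnt++;
			return s;        /* share the sibling's copy */
		}
	}
	mine->core_id = core_id; /* first thread on this core: claim ours */
	mine->refcnt = 1;
	*free_me = NULL;
	return mine;
}
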
4742 static void free_excl_cntrs(struct cpu_hw_events *cpuc) in free_excl_cntrs() argument
4746 c = cpuc->excl_cntrs; in free_excl_cntrs()
4750 cpuc->excl_cntrs = NULL; in free_excl_cntrs()
4753 kfree(cpuc->constraint_list); in free_excl_cntrs()
4754 cpuc->constraint_list = NULL; in free_excl_cntrs()
4762 void intel_cpuc_finish(struct cpu_hw_events *cpuc) in intel_cpuc_finish() argument
4766 pc = cpuc->shared_regs; in intel_cpuc_finish()
4770 cpuc->shared_regs = NULL; in intel_cpuc_finish()
4773 free_excl_cntrs(cpuc); in intel_cpuc_finish()
4778 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); in intel_pmu_cpu_dead() local
4780 intel_cpuc_finish(cpuc); in intel_pmu_cpu_dead()
4782 if (is_hybrid() && cpuc->pmu) in intel_pmu_cpu_dead()
4783 cpumask_clear_cpu(cpu, &hybrid_pmu(cpuc->pmu)->supported_cpus); in intel_pmu_cpu_dead()
5376 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in update_tfa_sched() local
5382 if (test_bit(3, cpuc->active_mask)) in update_tfa_sched()