Lines matching refs:cpuc (identifier cross-reference listing: each entry gives the source line number, the matching line of code, the enclosing function, and a trailing "local" or "argument" note where the match is the cpuc declaration itself)

681 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_disable_all() local
685 struct hw_perf_event *hwc = &cpuc->events[idx]->hw; in x86_pmu_disable_all()
688 if (!test_bit(idx, cpuc->active_mask)) in x86_pmu_disable_all()
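The x86_pmu_disable_all() hits above show the standard counter walk: take the per-CPU cpu_hw_events with this_cpu_ptr(), then skip every index whose bit is not set in cpuc->active_mask before touching cpuc->events[idx]. Below is a minimal user-space sketch of that bitmap-guarded walk; the struct and function names are illustrative stand-ins, not kernel symbols, and a plain unsigned long replaces the kernel's bitmap and test_bit().

#include <stdio.h>

#define NUM_COUNTERS 8                         /* illustrative counter count */

struct fake_cpuc {                             /* hypothetical stand-in for cpu_hw_events */
    unsigned long active_mask;                 /* one bit per live counter */
    int events[NUM_COUNTERS];                  /* stand-in for cpuc->events[idx] */
};

/* Walk every counter, act only on those marked active (cf. x86_pmu_disable_all()). */
static void disable_all(struct fake_cpuc *cpuc)
{
    for (int idx = 0; idx < NUM_COUNTERS; idx++) {
        if (!(cpuc->active_mask & (1UL << idx)))   /* kernel: test_bit(idx, cpuc->active_mask) */
            continue;
        printf("would disable counter %d (event %d)\n", idx, cpuc->events[idx]);
    }
}

int main(void)
{
    struct fake_cpuc cpuc = { .active_mask = 0x5, .events = { 10, 0, 30 } };  /* counters 0 and 2 live */
    disable_all(&cpuc);
    return 0;
}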
721 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_disable() local
726 if (!cpuc->enabled) in x86_pmu_disable()
729 cpuc->n_added = 0; in x86_pmu_disable()
730 cpuc->enabled = 0; in x86_pmu_disable()
738 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_enable_all() local
742 struct hw_perf_event *hwc = &cpuc->events[idx]->hw; in x86_pmu_enable_all()
744 if (!test_bit(idx, cpuc->active_mask)) in x86_pmu_enable_all()
768 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); in x86_get_pmu() local
774 if (WARN_ON_ONCE(!cpuc->pmu)) in x86_get_pmu()
777 return cpuc->pmu; in x86_get_pmu()
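x86_get_pmu() resolves the per-CPU cpu_hw_events for a given CPU and returns its cached cpuc->pmu pointer, warning if none has been installed. A hedged user-space analog follows, with a plain array standing in for per_cpu() and all names illustrative.

#include <stdio.h>

#define NR_CPUS 4

struct pmu { const char *name; };

struct fake_cpuc {                             /* illustrative slice of cpu_hw_events */
    struct pmu *pmu;                           /* which PMU owns this CPU's counters */
};

static struct fake_cpuc cpu_hw_events[NR_CPUS];  /* stand-in for the real per-CPU variable */

/* cf. x86_get_pmu(): look up the per-CPU state and hand back its pmu */
static struct pmu *get_pmu(unsigned int cpu)
{
    struct fake_cpuc *cpuc = &cpu_hw_events[cpu];

    if (!cpuc->pmu) {                          /* the kernel uses WARN_ON_ONCE() here */
        fprintf(stderr, "cpu %u has no pmu registered\n", cpu);
        return NULL;
    }
    return cpuc->pmu;
}

int main(void)
{
    static struct pmu core = { .name = "core" };
    struct pmu *p;

    cpu_hw_events[0].pmu = &core;
    p = get_pmu(0);
    printf("%s\n", p ? p->name : "(none)");
    return 0;
}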
978 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign) in x86_schedule_events() argument
980 int num_counters = hybrid(cpuc->pmu, num_counters); in x86_schedule_events()
994 n0 = cpuc->n_events; in x86_schedule_events()
995 if (cpuc->txn_flags & PERF_PMU_TXN_ADD) in x86_schedule_events()
996 n0 -= cpuc->n_txn; in x86_schedule_events()
998 static_call_cond(x86_pmu_start_scheduling)(cpuc); in x86_schedule_events()
1001 c = cpuc->event_constraint[i]; in x86_schedule_events()
1015 c = static_call(x86_pmu_get_event_constraints)(cpuc, i, cpuc->event_list[i]); in x86_schedule_events()
1016 cpuc->event_constraint[i] = c; in x86_schedule_events()
1029 hwc = &cpuc->event_list[i]->hw; in x86_schedule_events()
1030 c = cpuc->event_constraint[i]; in x86_schedule_events()
1068 if (is_ht_workaround_enabled() && !cpuc->is_fake && in x86_schedule_events()
1069 READ_ONCE(cpuc->excl_cntrs->exclusive_present)) in x86_schedule_events()
1077 gpmax = num_counters - cpuc->n_pair; in x86_schedule_events()
1081 unsched = perf_assign_events(cpuc->event_constraint, n, wmin, in x86_schedule_events()
1097 static_call_cond(x86_pmu_commit_scheduling)(cpuc, i, assign[i]); in x86_schedule_events()
1100 e = cpuc->event_list[i]; in x86_schedule_events()
1105 static_call_cond(x86_pmu_put_event_constraints)(cpuc, e); in x86_schedule_events()
1107 cpuc->event_constraint[i] = NULL; in x86_schedule_events()
1111 static_call_cond(x86_pmu_stop_scheduling)(cpuc); in x86_schedule_events()
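x86_schedule_events() fetches a counter constraint for each collected event via the get_event_constraints static call, then asks perf_assign_events() to place the events on at most gpmax counters. The guiding idea is that tightly constrained events must be placed before flexible ones. Below is a simplified, greedy stand-in for that placement; it is not the kernel's solver (which also handles fake cpucs, the HT workaround and paired events), and every name in it is illustrative.

#include <stdbool.h>
#include <stdio.h>

#define NUM_COUNTERS 4

struct constraint {
    unsigned int idxmsk;    /* bitmask of counters this event may use */
    int weight;             /* popcount of idxmsk, i.e. how flexible the event is */
};

/* Greedy sketch: place the most constrained events first so they are not starved. */
static bool schedule_events(const struct constraint *c, int n, int *assign)
{
    unsigned int used = 0;

    for (int w = 1; w <= NUM_COUNTERS; w++) {       /* weight 1 (one legal counter) first */
        for (int i = 0; i < n; i++) {
            if (c[i].weight != w)
                continue;
            unsigned int free = c[i].idxmsk & ~used;
            if (!free)
                return false;                       /* unschedulable with this greedy pass */
            int idx = __builtin_ctz(free);          /* lowest free counter (gcc/clang builtin) */
            used |= 1u << idx;
            assign[i] = idx;
        }
    }
    return true;
}

int main(void)
{
    struct constraint c[] = {
        { 0x1, 1 },   /* must live on counter 0 */
        { 0xf, 4 },   /* any of the four counters */
        { 0x3, 2 },   /* counter 0 or 1 */
    };
    int assign[3];

    if (schedule_events(c, 3, assign))
        for (int i = 0; i < 3; i++)
            printf("event %d -> counter %d\n", i, assign[i]);
    return 0;
}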
1116 static int add_nr_metric_event(struct cpu_hw_events *cpuc, in add_nr_metric_event() argument
1120 if (cpuc->n_metric == INTEL_TD_METRIC_NUM) in add_nr_metric_event()
1122 cpuc->n_metric++; in add_nr_metric_event()
1123 cpuc->n_txn_metric++; in add_nr_metric_event()
1129 static void del_nr_metric_event(struct cpu_hw_events *cpuc, in del_nr_metric_event() argument
1133 cpuc->n_metric--; in del_nr_metric_event()
1136 static int collect_event(struct cpu_hw_events *cpuc, struct perf_event *event, in collect_event() argument
1139 union perf_capabilities intel_cap = hybrid(cpuc->pmu, intel_cap); in collect_event()
1141 if (intel_cap.perf_metrics && add_nr_metric_event(cpuc, event)) in collect_event()
1144 if (n >= max_count + cpuc->n_metric) in collect_event()
1147 cpuc->event_list[n] = event; in collect_event()
1149 cpuc->n_pair++; in collect_event()
1150 cpuc->n_txn_pair++; in collect_event()
1160 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp) in collect_events() argument
1162 int num_counters = hybrid(cpuc->pmu, num_counters); in collect_events()
1163 int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed); in collect_events()
1170 n = cpuc->n_events; in collect_events()
1171 if (!cpuc->n_events) in collect_events()
1172 cpuc->pebs_output = 0; in collect_events()
1174 if (!cpuc->is_fake && leader->attr.precise_ip) { in collect_events()
1186 if (cpuc->pebs_output && in collect_events()
1187 cpuc->pebs_output != is_pebs_pt(leader) + 1) in collect_events()
1190 cpuc->pebs_output = is_pebs_pt(leader) + 1; in collect_events()
1194 if (collect_event(cpuc, leader, max_count, n)) in collect_events()
1206 if (collect_event(cpuc, event, max_count, n)) in collect_events()
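collect_events() appends the group leader and, when dogrp is set, each sibling into cpuc->event_list starting at the current cpuc->n_events, while collect_event() refuses to grow past max_count (adjusted by cpuc->n_metric for topdown metric events). The bounded-append core, stripped of the PEBS and metric handling and with illustrative names:

#include <stdio.h>

#define MAX_SLOTS 4                            /* illustrative counter budget */

struct collector {                             /* illustrative slice of cpu_hw_events */
    int n;                                     /* cf. cpuc->n_events */
    const char *list[MAX_SLOTS];               /* cf. cpuc->event_list */
};

/* cf. collect_event(): fail instead of overflowing the event list */
static int collect_one(struct collector *c, const char *ev, int max_count)
{
    if (c->n >= max_count)
        return -1;                             /* the kernel returns -EINVAL */
    c->list[c->n++] = ev;
    return 0;
}

int main(void)
{
    struct collector c = { 0 };
    const char *group[] = { "leader", "sibling-1", "sibling-2" };

    for (int i = 0; i < 3; i++)
        if (collect_one(&c, group[i], MAX_SLOTS))
            printf("group does not fit\n");

    printf("collected %d events\n", c.n);
    return 0;
}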
1215 struct cpu_hw_events *cpuc, int i) in x86_assign_hw_event() argument
1220 idx = hwc->idx = cpuc->assign[i]; in x86_assign_hw_event()
1222 hwc->last_tag = ++cpuc->tags[i]; in x86_assign_hw_event()
1275 struct cpu_hw_events *cpuc, in match_prev_assignment() argument
1278 return hwc->idx == cpuc->assign[i] && in match_prev_assignment()
1280 hwc->last_tag == cpuc->tags[i]; in match_prev_assignment()
1287 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_enable() local
1290 int i, added = cpuc->n_added; in x86_pmu_enable()
1295 if (cpuc->enabled) in x86_pmu_enable()
1298 if (cpuc->n_added) { in x86_pmu_enable()
1299 int n_running = cpuc->n_events - cpuc->n_added; in x86_pmu_enable()
1307 event = cpuc->event_list[i]; in x86_pmu_enable()
1317 match_prev_assignment(hwc, cpuc, i)) in x86_pmu_enable()
1333 for (i = 0; i < cpuc->n_events; i++) { in x86_pmu_enable()
1334 event = cpuc->event_list[i]; in x86_pmu_enable()
1337 if (!match_prev_assignment(hwc, cpuc, i)) in x86_pmu_enable()
1338 x86_assign_hw_event(event, cpuc, i); in x86_pmu_enable()
1351 cpuc->n_added = 0; in x86_pmu_enable()
1355 cpuc->enabled = 1; in x86_pmu_enable()
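x86_pmu_enable() reprograms the hardware in two passes: it first stops every already-running event whose slot or tag no longer matches (match_prev_assignment()), then walks the whole list, adopts any new assignment via x86_assign_hw_event() (which bumps cpuc->tags[i]), starts the events, and finally clears n_added and sets cpuc->enabled. A compressed, runnable sketch of that two-pass shape, with no hardware access and purely illustrative types:

#include <stdbool.h>
#include <stdio.h>

#define MAX_EVENTS 8

struct ev {                                    /* illustrative slice of hw_perf_event */
    int idx;                                   /* counter the event last ran on */
    unsigned long tag;                         /* cf. hwc->last_tag */
    const char *name;
};

struct state {                                 /* illustrative slice of cpu_hw_events */
    int n_events, n_added;
    struct ev *list[MAX_EVENTS];
    int assign[MAX_EVENTS];
    unsigned long tags[MAX_EVENTS];
    bool enabled;
};

/* cf. match_prev_assignment(): reuse old programming only if slot and tag agree */
static bool matches_prev(struct ev *e, struct state *s, int i)
{
    return e->idx == s->assign[i] && e->tag == s->tags[i];
}

/* cf. x86_pmu_enable(): stop moved events, then (re)assign and start everything */
static void enable_all(struct state *s)
{
    int n_running = s->n_events - s->n_added;

    for (int i = 0; i < n_running; i++)        /* pass 1: stop events whose slot changed */
        if (!matches_prev(s->list[i], s, i))
            printf("stop %s\n", s->list[i]->name);

    for (int i = 0; i < s->n_events; i++) {    /* pass 2: adopt the new slot, then start */
        if (!matches_prev(s->list[i], s, i)) {
            s->list[i]->idx = s->assign[i];
            s->list[i]->tag = ++s->tags[i];    /* cf. x86_assign_hw_event() */
        }
        printf("start %s on counter %d\n", s->list[i]->name, s->list[i]->idx);
    }
    s->n_added = 0;
    s->enabled = true;
}

int main(void)
{
    struct ev a = { .idx = 0, .name = "cycles" };
    struct ev b = { .name = "instructions" };
    struct state s = { .n_events = 2, .n_added = 1,
                       .list = { &a, &b }, .assign = { 0, 1 } };

    enable_all(&s);
    return 0;
}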
1441 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_add() local
1448 n0 = cpuc->n_events; in x86_pmu_add()
1449 ret = n = collect_events(cpuc, event, false); in x86_pmu_add()
1465 if (cpuc->txn_flags & PERF_PMU_TXN_ADD) in x86_pmu_add()
1468 ret = static_call(x86_pmu_schedule_events)(cpuc, n, assign); in x86_pmu_add()
1475 memcpy(cpuc->assign, assign, n*sizeof(int)); in x86_pmu_add()
1482 cpuc->n_events = n; in x86_pmu_add()
1483 cpuc->n_added += n - n0; in x86_pmu_add()
1484 cpuc->n_txn += n - n0; in x86_pmu_add()
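x86_pmu_add() collects the new event into the software list, and only when it is not inside a PERF_PMU_TXN_ADD transaction does it schedule immediately and copy the result into cpuc->assign; inside a transaction, scheduling is deferred to commit. Either way the counters n_events, n_added and n_txn are bumped at the end. A flow sketch with stub collect/schedule helpers follows; all names and return codes are stand-ins.

#include <string.h>
#include <stdio.h>

#define MAX_EVENTS 8

struct pmu_state {                                 /* illustrative slice of cpu_hw_events */
    int n_events, n_added, n_txn;
    int txn_add;                                   /* cf. cpuc->txn_flags & PERF_PMU_TXN_ADD */
    int assign[MAX_EVENTS];
};

/* stubs standing in for collect_events() and the schedule_events static call */
static int collect(struct pmu_state *s)
{
    return s->n_events + 1;                        /* pretend one event was collected */
}

static int schedule(struct pmu_state *s, int n, int *assign)
{
    (void)s;
    for (int i = 0; i < n; i++)
        assign[i] = i;                             /* trivial placement always succeeds */
    return 0;
}

/* cf. x86_pmu_add(): collect, maybe schedule now, commit the assignment only on success */
static int pmu_add(struct pmu_state *s)
{
    int assign[MAX_EVENTS];
    int n0 = s->n_events;
    int n = collect(s);

    if (n < 0)
        return n;

    if (!s->txn_add) {                             /* inside a txn, scheduling waits for commit */
        if (schedule(s, n, assign))
            return -1;
        memcpy(s->assign, assign, n * sizeof(int));
    }

    s->n_events = n;
    s->n_added += n - n0;
    s->n_txn += n - n0;
    return 0;
}

int main(void)
{
    struct pmu_state s = { 0 };
    int ret = pmu_add(&s);

    printf("add returned %d, n_events now %d\n", ret, s.n_events);
    return 0;
}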
1499 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_start() local
1515 cpuc->events[idx] = event; in x86_pmu_start()
1516 __set_bit(idx, cpuc->active_mask); in x86_pmu_start()
1526 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); in perf_event_print_debug() local
1527 int num_counters = hybrid(cpuc->pmu, num_counters); in perf_event_print_debug()
1528 int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed); in perf_event_print_debug()
1529 struct event_constraint *pebs_constraints = hybrid(cpuc->pmu, pebs_constraints); in perf_event_print_debug()
1558 pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask); in perf_event_print_debug()
1574 if (fixed_counter_disabled(idx, cpuc->pmu)) in perf_event_print_debug()
1586 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_stop() local
1589 if (test_bit(hwc->idx, cpuc->active_mask)) { in x86_pmu_stop()
1591 __clear_bit(hwc->idx, cpuc->active_mask); in x86_pmu_stop()
1592 cpuc->events[hwc->idx] = NULL; in x86_pmu_stop()
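x86_pmu_stop() only tears a counter down if its bit is still set in cpuc->active_mask; it then clears that bit and drops the cpuc->events[] back-pointer so the interrupt path (which checks the same mask and array, see the x86_pmu_handle_irq() hits further down) no longer dereferences the event. A minimal sketch of that ordering, with illustrative names:

#include <stdio.h>

#define NUM_COUNTERS 8

struct stop_state {                                /* illustrative slice of cpu_hw_events */
    unsigned long active_mask;
    const char *events[NUM_COUNTERS];
};

/* cf. x86_pmu_stop(): only tear down counters we actually own */
static void stop_event(struct stop_state *s, int idx)
{
    if (s->active_mask & (1UL << idx)) {
        s->active_mask &= ~(1UL << idx);           /* kernel: __clear_bit() */
        s->events[idx] = NULL;                     /* the interrupt path skips this slot now */
        printf("stopped counter %d\n", idx);
    }
}

int main(void)
{
    struct stop_state s = { .active_mask = 1UL << 3, .events = { [3] = "cycles" } };

    stop_event(&s, 3);
    stop_event(&s, 3);                             /* the second stop is a no-op */
    return 0;
}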
1609 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_del() local
1610 union perf_capabilities intel_cap = hybrid(cpuc->pmu, intel_cap); in x86_pmu_del()
1621 if (cpuc->txn_flags & PERF_PMU_TXN_ADD) in x86_pmu_del()
1624 __set_bit(event->hw.idx, cpuc->dirty); in x86_pmu_del()
1631 for (i = 0; i < cpuc->n_events; i++) { in x86_pmu_del()
1632 if (event == cpuc->event_list[i]) in x86_pmu_del()
1636 if (WARN_ON_ONCE(i == cpuc->n_events)) /* called ->del() without ->add() ? */ in x86_pmu_del()
1640 if (i >= cpuc->n_events - cpuc->n_added) in x86_pmu_del()
1641 --cpuc->n_added; in x86_pmu_del()
1643 static_call_cond(x86_pmu_put_event_constraints)(cpuc, event); in x86_pmu_del()
1646 while (++i < cpuc->n_events) { in x86_pmu_del()
1647 cpuc->event_list[i-1] = cpuc->event_list[i]; in x86_pmu_del()
1648 cpuc->event_constraint[i-1] = cpuc->event_constraint[i]; in x86_pmu_del()
1650 cpuc->event_constraint[i-1] = NULL; in x86_pmu_del()
1651 --cpuc->n_events; in x86_pmu_del()
1653 del_nr_metric_event(cpuc, event); in x86_pmu_del()
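x86_pmu_del() locates the event in cpuc->event_list, releases its constraint, then compacts the list by shifting every later entry (and its cached constraint) down one slot before decrementing cpuc->n_events. The array-compaction step in isolation, with illustrative types:

#include <stdio.h>

#define MAX_EVENTS 8

struct ev_list {                                   /* illustrative slice of cpu_hw_events */
    int n;                                         /* cf. cpuc->n_events */
    const char *events[MAX_EVENTS];                /* cf. cpuc->event_list */
};

/* cf. the tail of x86_pmu_del(): close the gap left by the removed event */
static void remove_at(struct ev_list *l, int i)
{
    while (++i < l->n)
        l->events[i - 1] = l->events[i];
    l->events[i - 1] = NULL;                       /* the kernel also NULLs the constraint slot */
    l->n--;
}

int main(void)
{
    struct ev_list l = { .n = 3, .events = { "a", "b", "c" } };

    remove_at(&l, 1);                              /* drop "b" */
    for (int i = 0; i < l.n; i++)
        printf("%s\n", l.events[i]);
    return 0;
}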
1669 struct cpu_hw_events *cpuc; in x86_pmu_handle_irq() local
1674 cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_handle_irq()
1687 if (!test_bit(idx, cpuc->active_mask)) in x86_pmu_handle_irq()
1690 event = cpuc->events[idx]; in x86_pmu_handle_irq()
1707 data.br_stack = &cpuc->lbr_stack; in x86_pmu_handle_irq()
1761 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); in x86_pmu_prepare_cpu() local
1765 cpuc->kfree_on_online[i] = NULL; in x86_pmu_prepare_cpu()
1780 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); in x86_pmu_online_cpu() local
1784 kfree(cpuc->kfree_on_online[i]); in x86_pmu_online_cpu()
1785 cpuc->kfree_on_online[i] = NULL; in x86_pmu_online_cpu()
2223 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_start_txn() local
2225 WARN_ON_ONCE(cpuc->txn_flags); /* txn already in flight */ in x86_pmu_start_txn()
2227 cpuc->txn_flags = txn_flags; in x86_pmu_start_txn()
2245 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_cancel_txn() local
2247 WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */ in x86_pmu_cancel_txn()
2249 txn_flags = cpuc->txn_flags; in x86_pmu_cancel_txn()
2250 cpuc->txn_flags = 0; in x86_pmu_cancel_txn()
2274 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in x86_pmu_commit_txn() local
2278 WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */ in x86_pmu_commit_txn()
2280 if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) { in x86_pmu_commit_txn()
2281 cpuc->txn_flags = 0; in x86_pmu_commit_txn()
2285 n = cpuc->n_events; in x86_pmu_commit_txn()
2290 ret = static_call(x86_pmu_schedule_events)(cpuc, n, assign); in x86_pmu_commit_txn()
2298 memcpy(cpuc->assign, assign, n*sizeof(int)); in x86_pmu_commit_txn()
2300 cpuc->txn_flags = 0; in x86_pmu_commit_txn()
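The three transaction hooks form a small state machine around cpuc->txn_flags: x86_pmu_start_txn() records the flags and warns if a transaction is already in flight, x86_pmu_cancel_txn() forgets everything collected since start (the n_txn counters) and clears the flags, and x86_pmu_commit_txn() reschedules all collected events, copying the assignment only on success before clearing the flags. A user-space sketch of that state machine follows; it leaves out the PMU disable/enable bracketing and non-ADD transactions, and its names are illustrative.

#include <assert.h>
#include <stdio.h>

#define TXN_ADD 0x1                                /* stand-in for PERF_PMU_TXN_ADD */

struct txn_state {                                 /* illustrative slice of cpu_hw_events */
    unsigned int txn_flags;
    int n_events, n_txn;
};

static void start_txn(struct txn_state *s, unsigned int flags)
{
    assert(!s->txn_flags);                         /* kernel: WARN_ON_ONCE(cpuc->txn_flags) */
    s->txn_flags = flags;
}

static void cancel_txn(struct txn_state *s)
{
    assert(s->txn_flags);                          /* no txn in flight? */
    if (s->txn_flags & TXN_ADD)
        s->n_events -= s->n_txn;                   /* forget everything added since start */
    s->txn_flags = 0;
    s->n_txn = 0;
}

static int commit_txn(struct txn_state *s)
{
    assert(s->txn_flags);
    /* here the kernel reschedules all n_events and copies the assignment on success */
    s->txn_flags = 0;
    s->n_txn = 0;
    return 0;
}

int main(void)
{
    struct txn_state s = { 0 };

    start_txn(&s, TXN_ADD);
    s.n_events = 2;                                /* pretend two events were added ... */
    s.n_txn = 2;                                   /* ... during this transaction */
    cancel_txn(&s);
    printf("after cancel: %d events\n", s.n_events);

    start_txn(&s, TXN_ADD);
    printf("commit returned %d\n", commit_txn(&s));
    return 0;
}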
2312 static void free_fake_cpuc(struct cpu_hw_events *cpuc) in free_fake_cpuc() argument
2314 intel_cpuc_finish(cpuc); in free_fake_cpuc()
2315 kfree(cpuc); in free_fake_cpuc()
2320 struct cpu_hw_events *cpuc; in allocate_fake_cpuc() local
2323 cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL); in allocate_fake_cpuc()
2324 if (!cpuc) in allocate_fake_cpuc()
2326 cpuc->is_fake = 1; in allocate_fake_cpuc()
2337 cpuc->pmu = event_pmu; in allocate_fake_cpuc()
2339 if (intel_cpuc_prepare(cpuc, cpu)) in allocate_fake_cpuc()
2342 return cpuc; in allocate_fake_cpuc()
2344 free_fake_cpuc(cpuc); in allocate_fake_cpuc()
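allocate_fake_cpuc() builds a throwaway cpu_hw_events with is_fake = 1, used by the event-validation paths to check whether a group could ever be scheduled, and unwinds through free_fake_cpuc() on any failure. The allocate, flag, prepare-or-unwind shape in user space, with calloc/malloc standing in for kzalloc()/intel_cpuc_prepare() and all names illustrative:

#include <stdlib.h>
#include <stdio.h>

struct fake_cpuc {                                 /* illustrative slice of cpu_hw_events */
    int is_fake;                                   /* scheduler relaxes checks when set */
    void *extra;                                   /* stand-in for state intel_cpuc_prepare() adds */
};

/* cf. free_fake_cpuc(): undo the extra allocations, then the struct itself */
static void free_fake(struct fake_cpuc *cpuc)
{
    free(cpuc->extra);                             /* cf. intel_cpuc_finish() */
    free(cpuc);
}

/* cf. allocate_fake_cpuc(): allocate, flag as fake, prepare, unwind on failure */
static struct fake_cpuc *alloc_fake(void)
{
    struct fake_cpuc *cpuc = calloc(1, sizeof(*cpuc));   /* cf. kzalloc(GFP_KERNEL) */

    if (!cpuc)
        return NULL;                               /* the kernel returns ERR_PTR(-ENOMEM) */
    cpuc->is_fake = 1;

    cpuc->extra = malloc(64);                      /* cf. intel_cpuc_prepare() */
    if (!cpuc->extra)
        goto error;

    return cpuc;
error:
    free_fake(cpuc);
    return NULL;
}

int main(void)
{
    struct fake_cpuc *cpuc = alloc_fake();

    printf("fake cpuc: %s\n", cpuc ? "ok" : "failed");
    if (cpuc)
        free_fake(cpuc);
    return 0;
}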
2475 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); in perf_clear_dirty_counters() local
2479 for (i = 0; i < cpuc->n_events; i++) in perf_clear_dirty_counters()
2480 __clear_bit(cpuc->assign[i], cpuc->dirty); in perf_clear_dirty_counters()
2482 if (bitmap_empty(cpuc->dirty, X86_PMC_IDX_MAX)) in perf_clear_dirty_counters()
2485 for_each_set_bit(i, cpuc->dirty, X86_PMC_IDX_MAX) { in perf_clear_dirty_counters()
2488 if ((i - INTEL_PMC_IDX_FIXED) >= hybrid(cpuc->pmu, num_counters_fixed)) in perf_clear_dirty_counters()
2497 bitmap_zero(cpuc->dirty, X86_PMC_IDX_MAX); in perf_clear_dirty_counters()
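perf_clear_dirty_counters() first clears the dirty bits owned by still-assigned events, then zeroes whatever remains set, skipping fixed-counter indexes beyond what the active hybrid PMU provides, and finally wipes cpuc->dirty. The two-phase bitmap walk with plain masks instead of bitmap ops and MSR writes (names illustrative):

#include <stdio.h>

#define MAX_IDX 16
#define FIXED_BASE 8                               /* stand-in for INTEL_PMC_IDX_FIXED */

struct dirty_state {                               /* illustrative slice of cpu_hw_events */
    unsigned int dirty;                            /* one bit per counter left dirty */
    int n_events;
    int assign[4];                                 /* counters still owned by live events */
};

static void clear_dirty(struct dirty_state *s, int num_fixed)
{
    /* phase 1: counters still assigned to live events are not really dirty */
    for (int i = 0; i < s->n_events; i++)
        s->dirty &= ~(1u << s->assign[i]);

    /* phase 2: zero what is left, respecting the fixed-counter budget */
    for (int i = 0; i < MAX_IDX; i++) {
        if (!(s->dirty & (1u << i)))
            continue;
        if (i >= FIXED_BASE && (i - FIXED_BASE) >= num_fixed)
            continue;                              /* this fixed counter does not exist */
        printf("zeroing counter %d\n", i);         /* kernel: write 0 to the counter MSR */
    }
    s->dirty = 0;                                  /* cf. bitmap_zero() */
}

int main(void)
{
    struct dirty_state s = { .dirty = 0x103, .n_events = 1, .assign = { 0 } };

    clear_dirty(&s, 0);                            /* assume no fixed counters */
    return 0;
}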