Lines matching refs: x86_pmu

47 struct x86_pmu x86_pmu __read_mostly;
63 DEFINE_STATIC_CALL_NULL(x86_pmu_handle_irq, *x86_pmu.handle_irq);
64 DEFINE_STATIC_CALL_NULL(x86_pmu_disable_all, *x86_pmu.disable_all);
65 DEFINE_STATIC_CALL_NULL(x86_pmu_enable_all, *x86_pmu.enable_all);
66 DEFINE_STATIC_CALL_NULL(x86_pmu_enable, *x86_pmu.enable);
67 DEFINE_STATIC_CALL_NULL(x86_pmu_disable, *x86_pmu.disable);
69 DEFINE_STATIC_CALL_NULL(x86_pmu_assign, *x86_pmu.assign);
71 DEFINE_STATIC_CALL_NULL(x86_pmu_add, *x86_pmu.add);
72 DEFINE_STATIC_CALL_NULL(x86_pmu_del, *x86_pmu.del);
73 DEFINE_STATIC_CALL_NULL(x86_pmu_read, *x86_pmu.read);
75 DEFINE_STATIC_CALL_NULL(x86_pmu_set_period, *x86_pmu.set_period);
76 DEFINE_STATIC_CALL_NULL(x86_pmu_update, *x86_pmu.update);
77 DEFINE_STATIC_CALL_NULL(x86_pmu_limit_period, *x86_pmu.limit_period);
79 DEFINE_STATIC_CALL_NULL(x86_pmu_schedule_events, *x86_pmu.schedule_events);
80 DEFINE_STATIC_CALL_NULL(x86_pmu_get_event_constraints, *x86_pmu.get_event_constraints);
81 DEFINE_STATIC_CALL_NULL(x86_pmu_put_event_constraints, *x86_pmu.put_event_constraints);
83 DEFINE_STATIC_CALL_NULL(x86_pmu_start_scheduling, *x86_pmu.start_scheduling);
84 DEFINE_STATIC_CALL_NULL(x86_pmu_commit_scheduling, *x86_pmu.commit_scheduling);
85 DEFINE_STATIC_CALL_NULL(x86_pmu_stop_scheduling, *x86_pmu.stop_scheduling);
87 DEFINE_STATIC_CALL_NULL(x86_pmu_sched_task, *x86_pmu.sched_task);
88 DEFINE_STATIC_CALL_NULL(x86_pmu_swap_task_ctx, *x86_pmu.swap_task_ctx);
90 DEFINE_STATIC_CALL_NULL(x86_pmu_drain_pebs, *x86_pmu.drain_pebs);
91 DEFINE_STATIC_CALL_NULL(x86_pmu_pebs_aliases, *x86_pmu.pebs_aliases);
93 DEFINE_STATIC_CALL_NULL(x86_pmu_filter, *x86_pmu.filter);
99 DEFINE_STATIC_CALL_RET0(x86_pmu_guest_get_msrs, *x86_pmu.guest_get_msrs);
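
The block above declares one static-call trampoline per x86_pmu method. Each starts out as a no-op (the _NULL variants) or a return-0 stub (the _RET0 variant for guest_get_msrs) and is patched to the vendor implementation at init time by x86_pmu_static_call_update(), which appears later in this listing. A minimal userspace analogy of that two-phase binding, using plain function pointers (assumption: this models only the semantics; the kernel's static_call machinery patches the call sites themselves rather than loading a pointer):

#include <stdio.h>

/* DEFINE_STATIC_CALL_RET0 analogue: a return-0 stub bound up front */
static int ret0(void *arg) { (void)arg; return 0; }

/* the two "trampolines": one unbound, one defaulting to ret0 */
static int (*pmu_handle_irq)(void *regs);            /* _NULL variant */
static int (*pmu_guest_get_msrs)(void *arg) = ret0;  /* _RET0 variant */

static int intel_handle_irq(void *regs) { (void)regs; return 1; }

/* static_call_update() analogue, run once at driver init */
static void pmu_static_call_update(void)
{
    pmu_handle_irq = intel_handle_irq;
}

int main(void)
{
    printf("guest_get_msrs stub -> %d\n", pmu_guest_get_msrs(NULL));
    pmu_static_call_update();
    printf("handle_irq bound    -> %d\n", pmu_handle_irq(NULL));
    return 0;
}
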
118 int shift = 64 - x86_pmu.cntval_bits; in x86_perf_event_update()
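
x86_perf_event_update() uses shift = 64 - cntval_bits to sign-extend a raw value read from an N-bit hardware counter into 64 bits, so the delta between two reads comes out right even when the counter wrapped in between. A runnable sketch, assuming a 48-bit counter as on many Intel parts (note the arithmetic right shift of a signed value, which the kernel also relies on):

#include <stdio.h>
#include <stdint.h>

#define CNTVAL_BITS 48  /* assumption: 48-bit general-purpose counter */

/* Mirrors the delta computation in x86_perf_event_update(). */
static int64_t counter_delta(uint64_t prev_raw, uint64_t new_raw)
{
    int shift = 64 - CNTVAL_BITS;
    int64_t delta = (int64_t)(new_raw << shift) -
                    (int64_t)(prev_raw << shift);
    return delta >> shift; /* arithmetic shift restores magnitude */
}

int main(void)
{
    uint64_t max = (1ULL << CNTVAL_BITS) - 1;
    /* counter wrapped from near the top back past zero: delta is 16 */
    printf("delta = %lld\n", (long long)counter_delta(max - 5, 10));
    return 0;
}
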
196 int i, num_counters = x86_pmu.num_counters; in get_possible_num_counters()
201 for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) in get_possible_num_counters()
202 num_counters = max_t(int, num_counters, x86_pmu.hybrid_pmu[i].num_counters); in get_possible_num_counters()
358 return x86_pmu.handle_irq != NULL; in x86_pmu_initialized()
441 if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt) in x86_add_exclusive()
444 if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) { in x86_add_exclusive()
446 for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) { in x86_add_exclusive()
447 if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i])) in x86_add_exclusive()
450 atomic_inc(&x86_pmu.lbr_exclusive[what]); in x86_add_exclusive()
470 if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt) in x86_del_exclusive()
473 atomic_dec(&x86_pmu.lbr_exclusive[what]); in x86_del_exclusive()
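
x86_add_exclusive()/x86_del_exclusive() arbitrate between mutually exclusive LBR users (e.g. LBR vs. PT, unless lbr_pt_coexist): the fast path is atomic_inc_not_zero() on the caller's own slot, and only if that slot was zero does a slow path check that no conflicting class holds a reference before taking the first one. A simplified sketch of the same pattern with C11 atomics and a pthread mutex standing in for the kernel's locking (assumption: two classes, condensed error handling; compile with -pthread):

#include <stdatomic.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum { EXCL_LBR, EXCL_PT, EXCL_MAX };

static atomic_int excl[EXCL_MAX];
static pthread_mutex_t excl_lock = PTHREAD_MUTEX_INITIALIZER;

/* atomic_inc_not_zero() analogue */
static bool inc_not_zero(atomic_int *v)
{
    int old = atomic_load(v);
    while (old != 0)
        if (atomic_compare_exchange_weak(v, &old, old + 1))
            return true;
    return false;
}

static bool add_exclusive(int what)
{
    if (inc_not_zero(&excl[what]))  /* already own it: just add a ref */
        return true;

    pthread_mutex_lock(&excl_lock); /* slow path: scan for conflicts */
    for (int i = 0; i < EXCL_MAX; i++) {
        if (i != what && atomic_load(&excl[i])) {
            pthread_mutex_unlock(&excl_lock);
            return false;           /* a conflicting user holds it */
        }
    }
    atomic_fetch_add(&excl[what], 1);
    pthread_mutex_unlock(&excl_lock);
    return true;
}

static void del_exclusive(int what)
{
    atomic_fetch_sub(&excl[what], 1);
}

int main(void)
{
    printf("LBR: %d\n", add_exclusive(EXCL_LBR)); /* 1: granted */
    printf("PT:  %d\n", add_exclusive(EXCL_PT));  /* 0: conflict */
    del_exclusive(EXCL_LBR);
    printf("PT:  %d\n", add_exclusive(EXCL_PT));  /* 1: granted now */
    return 0;
}
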
483 hwc->sample_period = x86_pmu.max_period; in x86_setup_perfctr()
494 if (attr->config >= x86_pmu.max_events) in x86_setup_perfctr()
497 attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events); in x86_setup_perfctr()
502 config = x86_pmu.event_map(attr->config); in x86_setup_perfctr()
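
Note the pairing above: attr->config is first bounds-checked against max_events and then clamped with array_index_nospec() before being fed to event_map(), so a mispredicted bounds check cannot speculatively index past the table (Spectre v1). perf_get_hw_event_config() at the end of this listing uses the same clamp. A minimal sketch of the mask-based clamp, modeled on the generic C fallback of array_index_mask_nospec() (assumption: x86 actually uses a cmp/sbb asm sequence, and the signed right shift below is arithmetic, as the kernel also assumes):

#include <stdio.h>

/* All-ones when index < size, zero otherwise, with no branch. */
static unsigned long index_mask_nospec(unsigned long index, unsigned long size)
{
    return ~(long)(index | (size - 1 - index)) >> (8 * sizeof(long) - 1);
}

static unsigned long index_nospec(unsigned long index, unsigned long size)
{
    return index & index_mask_nospec(index, size);
}

int main(void)
{
    unsigned long max_events = 10;
    printf("%lu\n", index_nospec(7, max_events));  /* 7: in range */
    printf("%lu\n", index_nospec(12, max_events)); /* 0: clamped  */
    return 0;
}
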
550 if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) { in x86_pmu_max_precise()
554 if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2) in x86_pmu_max_precise()
557 if (x86_pmu.pebs_prec_dist) in x86_pmu_max_precise()
579 if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) { in x86_pmu_hw_config()
626 if (event->attr.sample_period && x86_pmu.limit_period) { in x86_pmu_hw_config()
628 x86_pmu.limit_period(event, &left); in x86_pmu_hw_config()
676 return x86_pmu.hw_config(event); in __x86_pmu_event_init()
684 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in x86_pmu_disable_all()
741 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in x86_pmu_enable_all()
758 for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { in is_x86_event()
759 if (event->pmu == &x86_pmu.hybrid_pmu[i].pmu) in is_x86_event()
1076 if (x86_pmu.flags & PMU_FL_PAIR) { in x86_schedule_events()
1399 if (left > x86_pmu.max_period) in x86_perf_event_set_period()
1400 left = x86_pmu.max_period; in x86_perf_event_set_period()
1412 wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); in x86_perf_event_set_period()
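
To make a counter raise a PMI after `left` more events, x86_perf_event_set_period() clamps `left` to max_period and programs the counter with the two's-complement value (u64)(-left), masked to the counter width, so the counter counts up and overflows after exactly `left` increments. A sketch assuming a 48-bit counter and modelling wrmsrl() as a plain store (max_period here is an assumed placeholder, not the driver's actual value):

#include <stdio.h>
#include <stdint.h>

#define CNTVAL_BITS 48
#define CNTVAL_MASK ((1ULL << CNTVAL_BITS) - 1)

static uint64_t pmc; /* stand-in for the hardware counter MSR */

/* Mirrors the clamp and write in x86_perf_event_set_period(). */
static void program_period(int64_t left, int64_t max_period)
{
    if (left > max_period)
        left = max_period;
    pmc = (uint64_t)(-left) & CNTVAL_MASK; /* counts up to overflow */
}

int main(void)
{
    program_period(1000, CNTVAL_MASK >> 1);
    printf("programmed: %#llx, events to overflow: %llu\n",
           (unsigned long long)pmc,
           (unsigned long long)((CNTVAL_MASK + 1) - pmc)); /* 1000 */
    return 0;
}
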
1538 if (x86_pmu.version >= 2) { in perf_event_print_debug()
1553 if (x86_pmu.lbr_nr) { in perf_event_print_debug()
1686 for (idx = 0; idx < x86_pmu.num_counters; idx++) { in x86_pmu_handle_irq()
1693 if (val & (1ULL << (x86_pmu.cntval_bits - 1))) in x86_pmu_handle_irq()
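
The overflow test in x86_pmu_handle_irq() follows from the -left programming above: a counter that has genuinely overflowed has counted up past zero, so the top bit of its cntval_bits-wide value is clear; if the sign bit is still set the counter is still on its way up and the handler skips it. A tiny continuation of the previous sketch (same 48-bit assumption):

#include <stdio.h>
#include <stdint.h>

#define CNTVAL_BITS 48

/* Inverse of the skip condition in x86_pmu_handle_irq(): sign bit
 * still set means "has not overflowed yet". */
static int has_overflowed(uint64_t val)
{
    return !(val & (1ULL << (CNTVAL_BITS - 1)));
}

int main(void)
{
    printf("%d\n", has_overflowed(0xFFFFFFFFF000ULL)); /* 0: counting */
    printf("%d\n", has_overflowed(0x000000000005ULL)); /* 1: wrapped  */
    return 0;
}
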
1723 if (!x86_pmu.apic || !x86_pmu_initialized()) in perf_events_lapic_init()
1766 if (x86_pmu.cpu_prepare) in x86_pmu_prepare_cpu()
1767 return x86_pmu.cpu_prepare(cpu); in x86_pmu_prepare_cpu()
1773 if (x86_pmu.cpu_dead) in x86_pmu_dead_cpu()
1774 x86_pmu.cpu_dead(cpu); in x86_pmu_dead_cpu()
1792 if (x86_pmu.cpu_starting) in x86_pmu_starting_cpu()
1793 x86_pmu.cpu_starting(cpu); in x86_pmu_starting_cpu()
1799 if (x86_pmu.cpu_dying) in x86_pmu_dying_cpu()
1800 x86_pmu.cpu_dying(cpu); in x86_pmu_dying_cpu()
1809 x86_pmu.apic = 0; in pmu_check_apic()
1834 if (pmu_attr->id < x86_pmu.max_events) in events_sysfs_show()
1835 config = x86_pmu.event_map(pmu_attr->id); in events_sysfs_show()
1841 return x86_pmu.events_sysfs_show(page, config); in events_sysfs_show()
1893 for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { in events_hybrid_sysfs_show()
1894 if (!(x86_pmu.hybrid_pmu[i].cpu_type & pmu_attr->pmu_type)) in events_hybrid_sysfs_show()
1896 if (x86_pmu.hybrid_pmu[i].cpu_type & pmu->cpu_type) { in events_hybrid_sysfs_show()
1947 if (idx >= x86_pmu.max_events) in is_visible()
1952 return pmu_attr->event_str || x86_pmu.event_map(idx) ? attr->mode : 0; in is_visible()
2005 static_call_update(x86_pmu_handle_irq, x86_pmu.handle_irq); in x86_pmu_static_call_update()
2006 static_call_update(x86_pmu_disable_all, x86_pmu.disable_all); in x86_pmu_static_call_update()
2007 static_call_update(x86_pmu_enable_all, x86_pmu.enable_all); in x86_pmu_static_call_update()
2008 static_call_update(x86_pmu_enable, x86_pmu.enable); in x86_pmu_static_call_update()
2009 static_call_update(x86_pmu_disable, x86_pmu.disable); in x86_pmu_static_call_update()
2011 static_call_update(x86_pmu_assign, x86_pmu.assign); in x86_pmu_static_call_update()
2013 static_call_update(x86_pmu_add, x86_pmu.add); in x86_pmu_static_call_update()
2014 static_call_update(x86_pmu_del, x86_pmu.del); in x86_pmu_static_call_update()
2015 static_call_update(x86_pmu_read, x86_pmu.read); in x86_pmu_static_call_update()
2017 static_call_update(x86_pmu_set_period, x86_pmu.set_period); in x86_pmu_static_call_update()
2018 static_call_update(x86_pmu_update, x86_pmu.update); in x86_pmu_static_call_update()
2019 static_call_update(x86_pmu_limit_period, x86_pmu.limit_period); in x86_pmu_static_call_update()
2021 static_call_update(x86_pmu_schedule_events, x86_pmu.schedule_events); in x86_pmu_static_call_update()
2022 static_call_update(x86_pmu_get_event_constraints, x86_pmu.get_event_constraints); in x86_pmu_static_call_update()
2023 static_call_update(x86_pmu_put_event_constraints, x86_pmu.put_event_constraints); in x86_pmu_static_call_update()
2025 static_call_update(x86_pmu_start_scheduling, x86_pmu.start_scheduling); in x86_pmu_static_call_update()
2026 static_call_update(x86_pmu_commit_scheduling, x86_pmu.commit_scheduling); in x86_pmu_static_call_update()
2027 static_call_update(x86_pmu_stop_scheduling, x86_pmu.stop_scheduling); in x86_pmu_static_call_update()
2029 static_call_update(x86_pmu_sched_task, x86_pmu.sched_task); in x86_pmu_static_call_update()
2030 static_call_update(x86_pmu_swap_task_ctx, x86_pmu.swap_task_ctx); in x86_pmu_static_call_update()
2032 static_call_update(x86_pmu_drain_pebs, x86_pmu.drain_pebs); in x86_pmu_static_call_update()
2033 static_call_update(x86_pmu_pebs_aliases, x86_pmu.pebs_aliases); in x86_pmu_static_call_update()
2035 static_call_update(x86_pmu_guest_get_msrs, x86_pmu.guest_get_msrs); in x86_pmu_static_call_update()
2036 static_call_update(x86_pmu_filter, x86_pmu.filter); in x86_pmu_static_call_update()
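
x86_pmu_static_call_update() is the other half of the DEFINE_STATIC_CALL block at the top of this listing: once vendor init code has filled in x86_pmu, each trampoline is patched to point at the real method (or left as its no-op/return-0 default when the field is NULL). Hot paths then dispatch through static_call()/static_call_cond() instead of an indirect load through x86_pmu, which avoids retpoline overhead; for example, the NMI path in this file dispatches via static_call(x86_pmu_handle_irq)(regs).
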
2047 pr_info("... version: %d\n", x86_pmu.version); in x86_pmu_show_pmu_cap()
2048 pr_info("... bit width: %d\n", x86_pmu.cntval_bits); in x86_pmu_show_pmu_cap()
2050 pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask); in x86_pmu_show_pmu_cap()
2051 pr_info("... max period: %016Lx\n", x86_pmu.max_period); in x86_pmu_show_pmu_cap()
2074 x86_pmu.name = "HYGON"; in init_hw_perf_events()
2092 if (!check_hw_exists(&pmu, x86_pmu.num_counters, x86_pmu.num_counters_fixed)) in init_hw_perf_events()
2095 pr_cont("%s PMU driver.\n", x86_pmu.name); in init_hw_perf_events()
2097 x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */ in init_hw_perf_events()
2099 for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next) in init_hw_perf_events()
2102 if (!x86_pmu.intel_ctrl) in init_hw_perf_events()
2103 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1; in init_hw_perf_events()
2109 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, in init_hw_perf_events()
2110 0, x86_pmu.num_counters, 0, 0); in init_hw_perf_events()
2112 x86_pmu_format_group.attrs = x86_pmu.format_attrs; in init_hw_perf_events()
2114 if (!x86_pmu.events_sysfs_show) in init_hw_perf_events()
2117 pmu.attr_update = x86_pmu.attr_update; in init_hw_perf_events()
2120 x86_pmu_show_pmu_cap(x86_pmu.num_counters, in init_hw_perf_events()
2121 x86_pmu.num_counters_fixed, in init_hw_perf_events()
2122 x86_pmu.intel_ctrl); in init_hw_perf_events()
2125 if (!x86_pmu.read) in init_hw_perf_events()
2126 x86_pmu.read = _x86_pmu_read; in init_hw_perf_events()
2128 if (!x86_pmu.guest_get_msrs) in init_hw_perf_events()
2129 x86_pmu.guest_get_msrs = (void *)&__static_call_return0; in init_hw_perf_events()
2131 if (!x86_pmu.set_period) in init_hw_perf_events()
2132 x86_pmu.set_period = x86_perf_event_set_period; in init_hw_perf_events()
2134 if (!x86_pmu.update) in init_hw_perf_events()
2135 x86_pmu.update = x86_perf_event_update; in init_hw_perf_events()
2167 for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) { in init_hw_perf_events()
2168 hybrid_pmu = &x86_pmu.hybrid_pmu[i]; in init_hw_perf_events()
2172 hybrid_pmu->pmu.attr_update = x86_pmu.attr_update; in init_hw_perf_events()
2182 if (i < x86_pmu.num_hybrid_pmus) { in init_hw_perf_events()
2184 perf_pmu_unregister(&x86_pmu.hybrid_pmu[j].pmu); in init_hw_perf_events()
2186 kfree(x86_pmu.hybrid_pmu); in init_hw_perf_events()
2187 x86_pmu.hybrid_pmu = NULL; in init_hw_perf_events()
2188 x86_pmu.num_hybrid_pmus = 0; in init_hw_perf_events()
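
The hybrid registration path above shows the standard partial-failure rollback: if registering hybrid pmu i fails (i < num_hybrid_pmus on exit from the loop), every previously registered pmu j < i is unregistered and the hybrid state is cleared before taking the common error path. The same idiom in miniature (assumption: toy_register/toy_unregister are hypothetical stand-ins):

#include <stdio.h>
#include <stdbool.h>

#define NR_PMUS 3

static bool toy_register(int i)   { return i != 2; /* pmu 2 fails */ }
static void toy_unregister(int i) { printf("unregister pmu %d\n", i); }

int main(void)
{
    int i, j;

    for (i = 0; i < NR_PMUS; i++)
        if (!toy_register(i))
            break;

    if (i < NR_PMUS) {              /* partial failure: roll back */
        for (j = 0; j < i; j++)
            toy_unregister(j);
        return 1;
    }
    return 0;
}
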
2202 memset(&x86_pmu, 0, sizeof(x86_pmu)); in init_hw_perf_events()
2361 c = x86_pmu.get_event_constraints(fake_cpuc, 0, event); in validate_event()
2366 if (x86_pmu.put_event_constraints) in validate_event()
2367 x86_pmu.put_event_constraints(fake_cpuc, event); in validate_event()
2430 ret = x86_pmu.schedule_events(fake_cpuc, n, NULL); in validate_group()
2466 if (READ_ONCE(x86_pmu.attr_rdpmc) && in x86_pmu_event_init()
2547 return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc); in get_attr_rdpmc()
2564 if (x86_pmu.attr_rdpmc_broken) in set_attr_rdpmc()
2567 if (val != x86_pmu.attr_rdpmc) { in set_attr_rdpmc()
2575 else if (x86_pmu.attr_rdpmc == 0) in set_attr_rdpmc()
2580 else if (x86_pmu.attr_rdpmc == 2) in set_attr_rdpmc()
2584 x86_pmu.attr_rdpmc = val; in set_attr_rdpmc()
2641 if (x86_pmu.check_microcode) in perf_check_microcode()
2642 x86_pmu.check_microcode(); in perf_check_microcode()
2647 if (x86_pmu.check_period && x86_pmu.check_period(event, value)) in x86_pmu_check_period()
2650 if (value && x86_pmu.limit_period) { in x86_pmu_check_period()
2652 x86_pmu.limit_period(event, &left); in x86_pmu_check_period()
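
x86_pmu_check_period() cross-checks a user-supplied period against the optional limit_period quirk: the quirk may adjust the period to a hardware-valid value, and if the adjusted value ends up larger than what the user asked for, the request is rejected rather than silently widened. A small sketch of that contract (assumption: the minimum-period quirk below is a hypothetical example):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical limit_period quirk: hardware needs a period >= 128. */
static void toy_limit_period(int64_t *left)
{
    if (*left < 128)
        *left = 128;
}

/* Mirrors the validation in x86_pmu_check_period(). */
static int check_period(uint64_t value)
{
    if (value) {
        int64_t left = value;
        toy_limit_period(&left);
        if (left > (int64_t)value)
            return -1; /* quirk would widen the period: reject */
    }
    return 0;
}

int main(void)
{
    printf("period 4096: %d\n", check_period(4096)); /* 0: ok       */
    printf("period 32:   %d\n", check_period(32));   /* -1: rejected */
    return 0;
}
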
2665 if (x86_pmu.aux_output_match) in x86_pmu_aux_output_match()
2666 return x86_pmu.aux_output_match(event); in x86_pmu_aux_output_match()
2721 userpg->pmc_width = x86_pmu.cntval_bits; in arch_perf_update_userpage()
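
These last pieces connect to user space: attr_rdpmc backs the /sys/devices/cpu/rdpmc knob (0 = user-space RDPMC disabled, 1 = allowed only while the task has a perf event mmap'd, 2 = always allowed), and arch_perf_update_userpage() exports the counter width through the perf mmap page so readers can sign-extend raw RDPMC values. A self-monitoring sketch using the documented perf_event_open() mmap interface (assumptions: x86, the event is user-readable on this kernel, and the documented seqlock retry loop around the read is condensed away):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static uint64_t rdpmc(uint32_t ecx)
{
    uint32_t lo, hi;
    __asm__ volatile("rdpmc" : "=a"(lo), "=d"(hi) : "c"(ecx));
    return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
    struct perf_event_attr attr;
    struct perf_event_mmap_page *pc;
    int fd;

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.type = PERF_TYPE_HARDWARE;
    attr.config = PERF_COUNT_HW_INSTRUCTIONS;
    attr.exclude_kernel = 1;

    fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
    if (fd < 0)
        return 1;

    pc = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
    if (pc == MAP_FAILED || !pc->cap_user_rdpmc || !pc->index)
        return 1; /* rdpmc not permitted or event not scheduled */

    /* pc->index is the RDPMC selector + 1 */
    uint64_t raw = rdpmc(pc->index - 1);

    /* sign-extend from pmc_width bits, the value exported above */
    int shift = 64 - pc->pmc_width;
    int64_t count = (int64_t)(raw << shift) >> shift;

    printf("instructions so far: %lld\n",
           (long long)(count + pc->offset));
    return 0;
}
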
2989 cap->version = x86_pmu.version; in perf_get_x86_pmu_capability()
2990 cap->num_counters_gp = x86_pmu.num_counters; in perf_get_x86_pmu_capability()
2991 cap->num_counters_fixed = x86_pmu.num_counters_fixed; in perf_get_x86_pmu_capability()
2992 cap->bit_width_gp = x86_pmu.cntval_bits; in perf_get_x86_pmu_capability()
2993 cap->bit_width_fixed = x86_pmu.cntval_bits; in perf_get_x86_pmu_capability()
2994 cap->events_mask = (unsigned int)x86_pmu.events_maskl; in perf_get_x86_pmu_capability()
2995 cap->events_mask_len = x86_pmu.events_mask_len; in perf_get_x86_pmu_capability()
2996 cap->pebs_ept = x86_pmu.pebs_ept; in perf_get_x86_pmu_capability()
3002 int max = x86_pmu.max_events; in perf_get_hw_event_config()
3005 return x86_pmu.event_map(array_index_nospec(hw_event, max)); in perf_get_hw_event_config()