/tools/perf/pmu-events/arch/x86/
  mapfile.csv
       3  GenuineIntel-6-BE,v1.31,alderlaken,core
       4  GenuineIntel-6-C[56],v1.09,arrowlake,core
       6  GenuineIntel-6-(3D|47),v30,broadwell,core
       7  GenuineIntel-6-56,v12,broadwellde,core
       8  GenuineIntel-6-4F,v23,broadwellx,core
      13  GenuineIntel-6-5[CF],v13,goldmont,core
      18  GenuineIntel-6-3F,v29,haswellx,core
      21  GenuineIntel-6-3A,v24,ivybridge,core
      22  GenuineIntel-6-3E,v24,ivytown,core
      23  GenuineIntel-6-2D,v24,jaketown,core
      [all …]
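
The four CSV columns are the CPUID pattern, table version, JSON event directory, and PMU type (the arm64 and riscv mapfiles below carry comments to that effect). Below is a minimal sketch of the lookup such a table implies, assuming the patterns are matched as anchored POSIX extended regexes; it is not perf's actual loader, and the rows are just the ones copied from the listing above.

```c
#include <regex.h>
#include <stdio.h>

/* One mapfile.csv row: CPUID pattern, table version, event dir, PMU type. */
struct map_row {
	const char *cpuid_pat;
	const char *version;
	const char *dir;
	const char *type;
};

/* Pick the event directory for a CPUID string by testing each row's
 * pattern as an anchored POSIX ERE (anchoring is an assumption here). */
static const char *lookup_events_dir(const struct map_row *rows, int n,
				     const char *cpuid)
{
	for (int i = 0; i < n; i++) {
		regex_t re;
		char pat[256];
		int hit;

		/* Anchor so "3D" cannot match in the middle of a longer id. */
		snprintf(pat, sizeof(pat), "^%s$", rows[i].cpuid_pat);
		if (regcomp(&re, pat, REG_EXTENDED | REG_NOSUB))
			continue;
		hit = regexec(&re, cpuid, 0, NULL, 0) == 0;
		regfree(&re);
		if (hit)
			return rows[i].dir;
	}
	return NULL;
}

int main(void)
{
	const struct map_row rows[] = {
		{ "GenuineIntel-6-(3D|47)", "v30", "broadwell",   "core" },
		{ "GenuineIntel-6-56",      "v12", "broadwellde", "core" },
	};
	const char *dir = lookup_events_dir(rows, 2, "GenuineIntel-6-3D");

	printf("%s\n", dir ? dir : "no match");
	return 0;
}
```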
|
/tools/perf/pmu-events/arch/arm64/
  mapfile.csv
      11  # Type is core, uncore etc
      15  0x00000000410fd020,v1,arm/cortex-a34,core
      16  0x00000000410fd030,v1,arm/cortex-a53,core
      17  0x00000000420f1000,v1,arm/cortex-a53,core
      18  0x00000000410fd040,v1,arm/cortex-a35,core
      19  0x00000000410fd050,v1,arm/cortex-a55,core
      24  0x00000000410fd090,v1,arm/cortex-a73,core
      25  0x00000000410fd0a0,v1,arm/cortex-a75,core
      32  0x00000000410fd440,v1,arm/cortex-x1,core
      33  0x00000000410fd4c0,v1,arm/cortex-x1,core
      [all …]
|
/tools/perf/tests/
  parse-events.c
      39  __u32 type = evsel->core.attr.type;  in test_config()
      40  __u64 config = evsel->core.attr.config;  in test_config()
     247  evsel->core.attr.bp_type);  in test__checkevent_breakpoint()
     249  evsel->core.attr.bp_len);  in test__checkevent_breakpoint()
     765  !evsel->core.attr.exclude_user);  in test__checkevent_pmu_events_mix()
    1633  evsel->core.attr.bp_type);  in test__checkevent_breakpoint_len()
    1635  evsel->core.attr.bp_len);  in test__checkevent_breakpoint_len()
    1648  evsel->core.attr.bp_type);  in test__checkevent_breakpoint_len_w()
    1650  evsel->core.attr.bp_len);  in test__checkevent_breakpoint_len_w()
    2780  TEST_ASSERT_VAL("wrong type", evsel1->core.attr.type == evsel2->core.attr.type);  in test__checkevent_pmu_events_alias()
    [all …]
|
  task-exit.c
      80  perf_evlist__set_maps(&evlist->core, cpus, threads);  in test__task_exit()
      89  evsel->core.attr.task = 1;  in test__task_exit()
      91  evsel->core.attr.sample_freq = 1000000;  in test__task_exit()
      93  evsel->core.attr.sample_freq = 1;  in test__task_exit()
      95  evsel->core.attr.inherit = 0;  in test__task_exit()
      96  evsel->core.attr.watermark = 0;  in test__task_exit()
      97  evsel->core.attr.wakeup_events = 1;  in test__task_exit()
      98  evsel->core.attr.exclude_kernel = 1;  in test__task_exit()
     118  if (perf_mmap__read_init(&md->core) < 0)  in test__task_exit()
     125  perf_mmap__consume(&md->core);  in test__task_exit()
    [all …]
|
  keep-tracking.c
      40  for (i = 0; i < evlist->core.nr_mmaps; i++) {  in find_comm()
      42  if (perf_mmap__read_init(&md->core) < 0)  in find_comm()
      44  while ((event = perf_mmap__read_event(&md->core)) != NULL) {  in find_comm()
      50  perf_mmap__consume(&md->core);  in find_comm()
      52  perf_mmap__read_done(&md->core);  in find_comm()
      90  perf_evlist__set_maps(&evlist->core, cpus, threads);  in test__keep_tracking()
      99  evsel->core.attr.comm = 1;  in test__keep_tracking()
     100  evsel->core.attr.disabled = 1;  in test__keep_tracking()
     101  evsel->core.attr.enable_on_exec = 0;  in test__keep_tracking()
|
  perf-time-to-tsc.c
     102  perf_evlist__set_maps(&evlist->core, cpus, threads);  in test__perf_time_to_tsc()
     110  evsel->core.attr.comm = 1;  in test__perf_time_to_tsc()
     111  evsel->core.attr.disabled = 1;  in test__perf_time_to_tsc()
     112  evsel->core.attr.enable_on_exec = 0;  in test__perf_time_to_tsc()
     126  pc = evlist->mmap[0].core.base;  in test__perf_time_to_tsc()
     148  for (i = 0; i < evlist->core.nr_mmaps; i++) {  in test__perf_time_to_tsc()
     150  if (perf_mmap__read_init(&md->core) < 0)  in test__perf_time_to_tsc()
     153  while ((event = perf_mmap__read_event(&md->core)) != NULL) {  in test__perf_time_to_tsc()
     173  perf_mmap__consume(&md->core);  in test__perf_time_to_tsc()
     176  perf_mmap__read_done(&md->core);  in test__perf_time_to_tsc()
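
The three tests above share one libperf idiom: configure the event bits on evsel->core.attr, mmap the evlist, then drain each ring with perf_mmap__read_init() / perf_mmap__read_event() / perf_mmap__consume() / perf_mmap__read_done(). A condensed sketch of that flow against libperf's public API, modeled on the library's documented sampling example; the event choice, sample frequency, and sleep interval are arbitrary.

```c
#include <linux/perf_event.h>
#include <perf/core.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/event.h>
#include <perf/mmap.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr = {
		.type        = PERF_TYPE_SOFTWARE,
		.config      = PERF_COUNT_SW_CPU_CLOCK,
		.freq        = 1,
		.sample_freq = 1000,			/* arbitrary */
		.sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME,
		.disabled    = 1,
	};
	struct perf_cpu_map *cpus = perf_cpu_map__new(NULL); /* all online CPUs */
	struct perf_evlist *evlist = perf_evlist__new();
	struct perf_evsel *evsel = perf_evsel__new(&attr);
	struct perf_mmap *map;
	union perf_event *event;

	perf_evlist__add(evlist, evsel);
	perf_evlist__set_maps(evlist, cpus, NULL);
	if (perf_evlist__open(evlist) < 0)
		return 1;
	perf_evlist__mmap(evlist, 4);		/* 4 data pages per ring */
	perf_evlist__enable(evlist);
	usleep(100000);
	perf_evlist__disable(evlist);

	/* The same init/read/consume/done dance the tests above perform. */
	perf_evlist__for_each_mmap(evlist, map, false) {
		if (perf_mmap__read_init(map) < 0)
			continue;
		while ((event = perf_mmap__read_event(map)) != NULL) {
			printf("event type %u\n", event->header.type);
			perf_mmap__consume(map);
		}
		perf_mmap__read_done(map);
	}

	perf_evlist__close(evlist);
	perf_evlist__delete(evlist);
	perf_cpu_map__put(cpus);
	return 0;
}
```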
|
/tools/perf/arch/x86/util/
  topdown.c
      38  return evsel->core.attr.type == PERF_TYPE_RAW &&  in arch_is_topdown_slots()
      39  evsel->core.attr.config == TOPDOWN_SLOTS &&  in arch_is_topdown_slots()
      40  evsel->core.attr.config1 == 0;  in arch_is_topdown_slots()
      46  return evsel->core.attr.type == PERF_TYPE_RAW &&  in arch_is_topdown_metrics()
      48  evsel->core.attr.config1 == 0;  in arch_is_topdown_metrics()
      72  if (evsel->core.leader != leader->core.leader)  in arch_topdown_sample_read()
      93  evsel->core.attr.config = TOPDOWN_SLOTS;  in topdown_insert_slots_event()
      94  evsel->core.cpus = perf_cpu_map__get(metric_event->core.cpus);  in topdown_insert_slots_event()
      95  evsel->core.pmu_cpus = perf_cpu_map__get(metric_event->core.pmu_cpus);  in topdown_insert_slots_event()
      96  evsel->core.is_pmu_core = true;  in topdown_insert_slots_event()
    [all …]
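
arch_is_topdown_slots() keys off a fixed raw encoding for the slots event. A sketch of the same check on a bare perf_event_attr; the 0x0400 value for TOPDOWN_SLOTS is an assumption here (event=0x00, umask=0x4), not taken from the listing.

```c
#include <linux/perf_event.h>
#include <stdbool.h>

/* Assumed encoding of the fixed "slots" counter (event=0x00, umask=0x4). */
#define TOPDOWN_SLOTS 0x0400

/* Mirrors the check above: a raw event whose config selects the slots
 * counter and whose config1 has not been repurposed. */
static bool is_topdown_slots(const struct perf_event_attr *attr)
{
	return attr->type == PERF_TYPE_RAW &&
	       attr->config == TOPDOWN_SLOTS &&
	       attr->config1 == 0;
}
```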
|
  evsel.c
      52  u64 event = evsel->core.attr.config & PERF_HW_EVENT_MASK;  in arch_evsel__hw_name()
      53  u64 pmu = evsel->core.attr.config >> PERF_PMU_TYPE_SHIFT;  in arch_evsel__hw_name()
     110  if (!evsel->core.attr.precise_ip &&  in arch_evsel__open_strerror()
     115  if (evsel->core.attr.exclude_kernel || evsel->core.attr.exclude_user ||  in arch_evsel__open_strerror()
     116  evsel->core.attr.exclude_hv || evsel->core.attr.exclude_idle ||  in arch_evsel__open_strerror()
     117  evsel->core.attr.exclude_host || evsel->core.attr.exclude_guest) {  in arch_evsel__open_strerror()
|
  evlist.c
      64  if (lhs->core.leader != rhs->core.leader) {  in arch_evlist__cmp()
      82  return lhs->core.idx - rhs->core.idx;  in arch_evlist__cmp()
      93  list_for_each_entry(pos, list, core.node) {  in arch_evlist__add_required_events()
|
/tools/testing/selftests/kvm/lib/riscv/
  processor.c
     222  struct kvm_riscv_core core;  in vcpu_arch_dump() local
     224  core.mode = vcpu_get_reg(vcpu, RISCV_CORE_REG(mode));  in vcpu_arch_dump()
     259  " MODE: 0x%lx\n", core.mode);  in vcpu_arch_dump()
     262  core.regs.pc, core.regs.ra, core.regs.sp, core.regs.gp);  in vcpu_arch_dump()
     265  core.regs.tp, core.regs.t0, core.regs.t1, core.regs.t2);  in vcpu_arch_dump()
     268  core.regs.s0, core.regs.s1, core.regs.a0, core.regs.a1);  in vcpu_arch_dump()
     271  core.regs.a2, core.regs.a3, core.regs.a4, core.regs.a5);  in vcpu_arch_dump()
     274  core.regs.a6, core.regs.a7, core.regs.s2, core.regs.s3);  in vcpu_arch_dump()
     277  core.regs.s4, core.regs.s5, core.regs.s6, core.regs.s7);  in vcpu_arch_dump()
     280  core.regs.s8, core.regs.s9, core.regs.s10, core.regs.s11);  in vcpu_arch_dump()
    [all …]
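
vcpu_get_reg() with a RISCV_CORE_REG() id is the selftest library's way of reading one guest register. A sketch of the underlying UAPI call, KVM_GET_ONE_REG on a vCPU fd; get_one_reg() and the caller-supplied id are illustrative, not selftest-library names.

```c
#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Read one vCPU register via KVM_GET_ONE_REG. The register id would be
 * built with RISCV_CORE_REG()-style macros in the selftests; here it is
 * whatever the caller supplies. */
static int get_one_reg(int vcpu_fd, uint64_t id, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = id,
		.addr = (uint64_t)(unsigned long)val, /* user buffer address */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
```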
|
/tools/perf/arch/x86/tests/
  hybrid.c
      12  return (evsel->core.attr.config & PERF_HW_EVENT_MASK) == expected_config;  in test_config()
      22  return (evsel->core.attr.config >> PERF_PMU_TYPE_SHIFT) == expected_config;  in test_hybrid_type()
      29  TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->core.nr_entries);  in test__hybrid_hw_event_with_pmu()
      41  TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries);  in test__hybrid_hw_group_event()
      99  TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user);  in test__hybrid_group_modifier1()
     116  perf_evlist__for_each_evsel(&evlist->core, evsel) {  in test__hybrid_raw1()
     153  TEST_ASSERT_VAL("wrong config", 10 == evsel->core.attr.config);  in test__checkevent_pmu()
     154  TEST_ASSERT_VAL("wrong config1", 1 == evsel->core.attr.config1);  in test__checkevent_pmu()
     155  TEST_ASSERT_VAL("wrong config2", 3 == evsel->core.attr.config2);  in test__checkevent_pmu()
     156  TEST_ASSERT_VAL("wrong config3", 0 == evsel->core.attr.config3);  in test__checkevent_pmu()
    [all …]
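
test_config() and test_hybrid_type() split attr.config the way the hybrid encoding defines it: for hardware events, the low 32 bits hold the generic event id and the high 32 bits the PMU type. A sketch of that encoding; the fallback definitions cover older uapi headers, and the PMU type number 8 is made up.

```c
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>

#ifndef PERF_PMU_TYPE_SHIFT		/* present in recent uapi headers */
#define PERF_PMU_TYPE_SHIFT	32
#define PERF_HW_EVENT_MASK	0xffffffffULL
#endif

int main(void)
{
	uint32_t pmu_type = 8;	/* hypothetical cpu_core PMU type number */
	/* High 32 bits: PMU type. Low 32 bits: generic hardware event id. */
	uint64_t config = ((uint64_t)pmu_type << PERF_PMU_TYPE_SHIFT) |
			  (PERF_COUNT_HW_CPU_CYCLES & PERF_HW_EVENT_MASK);

	printf("event id %llu on PMU %llu\n",
	       (unsigned long long)(config & PERF_HW_EVENT_MASK),
	       (unsigned long long)(config >> PERF_PMU_TYPE_SHIFT));
	return 0;
}
```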
|
/tools/perf/pmu-events/arch/s390/
  mapfile.csv
       2  ^IBM.209[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z10,core
       3  ^IBM.281[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z196,core
       4  ^IBM.282[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_zec12,core
       5  ^IBM.296[45].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z13,core
       6  ^IBM.390[67].*[13]\.[1-5].[[:xdigit:]]+$,3,cf_z14,core
       7  ^IBM.856[12].*3\.6.[[:xdigit:]]+$,3,cf_z15,core
       8  ^IBM.393[12].*$,3,cf_z16,core
       9  ^IBM.917[56].*$,3,cf_z17,core
|
/tools/perf/util/
  mmap.c
     278  if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {  in mmap__mmap()
     294  map->core.flush = mp->flush;  in mmap__mmap()
     332  size = md->core.end - md->core.start;  in perf_mmap__push()
     334  if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {  in perf_mmap__push()
     335  buf = &data[md->core.start & md->core.mask];  in perf_mmap__push()
     336  size = md->core.mask + 1 - (md->core.start & md->core.mask);  in perf_mmap__push()
     337  md->core.start += size;  in perf_mmap__push()
     345  buf = &data[md->core.start & md->core.mask];  in perf_mmap__push()
     346  size = md->core.end - md->core.start;  in perf_mmap__push()
     347  md->core.start += size;  in perf_mmap__push()
    [all …]
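
The perf_mmap__push() lines show the classic power-of-two ring split: when the region [start, end) crosses the ring's end, copy the chunk up to the boundary first, then the wrapped remainder. A standalone sketch of just that arithmetic (not perf's function, which pushes each chunk through a callback):

```c
#include <stddef.h>
#include <string.h>

/* Copy [start, end) out of a power-of-two ring buffer into `out`.
 * `mask` is ring size minus one; start/end are free-running offsets. */
static void ring_copy(const unsigned char *data, size_t mask,
		      size_t start, size_t end, unsigned char *out)
{
	size_t size = end - start;

	if ((start & mask) + size != (end & mask)) {
		/* Wrapped: copy the tail chunk up to the ring's end first. */
		size_t chunk = mask + 1 - (start & mask);

		memcpy(out, &data[start & mask], chunk);
		out += chunk;
		start += chunk;
	}
	/* Remaining (or entire, if no wrap) linear chunk. */
	memcpy(out, &data[start & mask], end - start);
}
```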
|
  sideband_evlist.c
      25  evsel = evsel__new_idx(attr, evlist->core.nr_entries);  in evlist__add_sb_event()
      58  for (i = 0; i < evlist->core.nr_mmaps; i++) {  in perf_evlist__poll_thread()
      62  if (perf_mmap__read_init(&map->core))  in perf_evlist__poll_thread()
      72  perf_mmap__consume(&map->core);  in perf_evlist__poll_thread()
      75  perf_mmap__read_done(&map->core);  in perf_evlist__poll_thread()
      89  evsel->core.attr.sample_id_all = 1;  in evlist__set_cb()
      90  evsel->core.attr.watermark = 1;  in evlist__set_cb()
      91  evsel->core.attr.wakeup_watermark = 1;  in evlist__set_cb()
     107  if (evlist->core.nr_entries > 1) {  in evlist__start_sb_thread()
     117  if (evsel__open(counter, evlist->core.user_requested_cpus,  in evlist__start_sb_thread()
    [all …]
|
  bpf_counter_cgroup.c
      67  BUG_ON(evlist->core.nr_entries % nr_cgroups != 0);  in bperf_load_program()
      74  map_size = evlist->core.nr_entries / nr_cgroups;  in bperf_load_program()
      77  map_size = evlist->core.nr_entries;  in bperf_load_program()
     123  perf_cpu_map__for_each_cpu(cpu, j, evsel->core.cpus) {  in bperf_load_program()
     125  __u32 idx = evsel->core.idx * total_cpus + cpu.cpu;  in bperf_load_program()
     212  if (evsel->core.idx)  in bperf_cgrp__enable()
     223  if (evsel->core.idx)  in bperf_cgrp__disable()
     240  if (evsel->core.idx)  in bperf_cgrp__read()
     252  __u32 idx = evsel->core.idx;  in bperf_cgrp__read()
     263  perf_cpu_map__for_each_cpu(cpu, i, evsel->core.cpus) {  in bperf_cgrp__read()
    [all …]
|
  evlist.c
      79  perf_evlist__init(&evlist->core);  in evlist__init()
     168  list_del_init(&pos->core.node);  in evlist__purge()
     173  evlist->core.nr_entries = 0;  in evlist__purge()
     200  perf_evlist__add(&evlist->core, &entry->core);  in evlist__add()
     211  perf_evlist__remove(&evlist->core, &evsel->core);  in evlist__remove()
     818  return &maps[idx].core;  in perf_evlist__mmap_cb_get()
     980  return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);  in evlist__mmap_ex()
    1311  if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)  in evlist__valid_sample_id_all()
    1401  if (evlist->core.threads == NULL && evlist->core.user_requested_cpus == NULL) {  in evlist__open()
    1410  err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);  in evlist__open()
    [all …]
|
  evsel.h
      46  struct perf_evsel core;  member
     234  return perf_evsel__cpus(&evsel->core);  in evsel__cpus()
     430  return list_entry(evsel->core.node.next, struct evsel, core.node);  in evsel__next()
     435  return list_entry(evsel->core.node.prev, struct evsel, core.node);  in evsel__prev()
     447  return evsel->core.leader == &evsel->core;  in evsel__is_group_leader()
     486  return evsel->core.idx - evsel->core.leader->idx;  in evsel__group_idx()
     491  for ((_evsel) = list_entry((_leader)->core.node.next, struct evsel, core.node); \
     493  (_evsel)->core.leader == &(_leader)->core; \
     494  (_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))
     503  (_evsel)->core.leader == &(_leader)->core; \
    [all …]
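
evsel.h is the template for this whole listing: struct evsel embeds libperf's struct perf_evsel as its core member (line 46), tool code hands &evsel->core to libperf, and relationships such as group leadership are expressed on the embedded structs (line 447). A self-contained sketch of that embedding pattern; perf_evsel_like and evsel_like are illustrative stand-in types, not perf's.

```c
#include <stddef.h>

struct perf_evsel_like {		/* stands in for libperf's perf_evsel */
	int idx;
	struct perf_evsel_like *leader;
};

struct evsel_like {			/* stands in for perf's struct evsel */
	struct perf_evsel_like core;	/* library-visible part, embedded */
	const char *name;		/* tool-only state lives alongside */
};

/* Map a library pointer back to the enclosing tool struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct evsel_like *evsel_from_core(struct perf_evsel_like *core)
{
	return container_of(core, struct evsel_like, core);
}

/* Mirrors evsel__is_group_leader(): a leader points at its own core. */
static int is_group_leader(const struct evsel_like *evsel)
{
	return evsel->core.leader == &evsel->core;
}
```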
|
  evsel.c
     492  evsel->core.cpus = perf_cpu_map__get(orig->core.cpus);  in evsel__clone()
     493  evsel->core.pmu_cpus = perf_cpu_map__get(orig->core.pmu_cpus);  in evsel__clone()
     494  evsel->core.threads = perf_thread_map__get(orig->core.threads);  in evsel__clone()
     495  evsel->core.nr_members = orig->core.nr_members;  in evsel__clone()
     496  evsel->core.system_wide = orig->core.system_wide;  in evsel__clone()
     497  evsel->core.requires_cpu = orig->core.requires_cpu;  in evsel__clone()
     498  evsel->core.is_pmu_core = orig->core.is_pmu_core;  in evsel__clone()
     540  evsel->core.leader = orig->core.leader;  in evsel__clone()
    3890  if (perf_evlist__id_add_fd(&evlist->core, &evsel->core,  in store_evsel_ids()
    3943  return evsel->core.leader == &leader->core;  in evsel__has_leader()
    [all …]
|
  perf_api_probe.c
      32  fd = sys_perf_event_open(&evsel->core.attr, pid, cpu.cpu, -1, flags);  in perf_do_probe_api()
      46  fd = sys_perf_event_open(&evsel->core.attr, pid, cpu.cpu, -1, flags);  in perf_do_probe_api()
      84  evsel->core.attr.sample_type |= PERF_SAMPLE_IDENTIFIER;  in perf_probe_sample_identifier()
      89  evsel->core.attr.comm_exec = 1;  in perf_probe_comm_exec()
      94  evsel->core.attr.context_switch = 1;  in perf_probe_context_switch()
      99  evsel->core.attr.text_poke = 1;  in perf_probe_text_poke()
     104  evsel->core.attr.build_id = 1;  in perf_probe_build_id()
     109  evsel->core.attr.cgroup = 1;  in perf_probe_cgroup()
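
Each perf_probe_*() helper sets a single opt-in attr bit and lets perf_do_probe_api() try to open the event; if the running kernel rejects the attribute, the feature is reported as unsupported. A sketch of that probing idiom with a raw perf_event_open() syscall; the helper name and the choice of a software dummy event are illustrative.

```c
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Probe whether the running kernel understands attr.comm_exec by trying
 * to open a harmless dummy event with that one bit set. */
static int kernel_supports_comm_exec(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size      = sizeof(attr);
	attr.type      = PERF_TYPE_SOFTWARE;
	attr.config    = PERF_COUNT_SW_DUMMY;
	attr.comm_exec = 1;		/* the feature bit under probe */

	/* pid = 0 (self), cpu = -1 (any), no group, no flags. */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 0;
	close(fd);
	return 1;
}
```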
|
  record.c
      55  struct perf_event_attr *attr = &evsel->core.attr;  in evsel__config_leader_sampling()
      91  attr->sample_type = read_sampler->core.attr.sample_type |  in evsel__config_leader_sampling()
      92  leader->core.attr.sample_type;  in evsel__config_leader_sampling()
     102  if (perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0).cpu < 0)  in evlist__config()
     110  evsel->core.attr.comm_exec = 1;  in evlist__config()
     125  } else if (evlist->core.nr_entries > 1) {  in evlist__config()
     129  if (evsel->core.attr.sample_type == first->core.attr.sample_type)  in evlist__config()
     240  if (!evlist || perf_cpu_map__is_any_cpu_or_is_empty(evlist->core.user_requested_cpus)) {  in evlist__can_select_event()
     248  cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0);  in evlist__can_select_event()
     252  fd = sys_perf_event_open(&evsel->core.attr, pid, cpu.cpu, -1,  in evlist__can_select_event()
|
  tool_pmu.c
     101  return (enum tool_pmu_event)evsel->core.attr.config;  in evsel__tool_event()
     106  return tool_pmu__event_to_str(evsel->core.attr.config);  in evsel__tool_pmu_event_name()
     230  if (evsel->core.attr.sample_period) /* no sampling */  in evsel__tool_pmu_open()
     245  if (!evsel->cgrp && !evsel->core.system_wide)  in evsel__tool_pmu_open()
     253  if (evsel->core.attr.sample_period) {  in evsel__tool_pmu_open()
     279  cpu = perf_cpu_map__cpu(evsel->core.cpus, idx);  in evsel__tool_pmu_open()
     355  *result = perf_cpu_map__nr(evsel->core.cpus);  in tool_pmu__read_event()
     362  if (!perf_cpu_map__is_empty(evsel->core.pmu_cpus))  in tool_pmu__read_event()
     363  *result = perf_cpu_map__nr(evsel->core.pmu_cpus);  in tool_pmu__read_event()
     381  perf_cpu_map__intersect(online, evsel->core.cpus);  in tool_pmu__read_event()
    [all …]
|
/tools/perf/pmu-events/arch/riscv/
  mapfile.csv
       5  # MVENDORID JEDEC code of the core provider
      13  # Type is core, uncore etc
      17  0x489-0x8000000000000007-0x[[:xdigit:]]+,v1,sifive/bullet,core
      18  0x489-0x8000000000000[1-9a-e]07-0x[78ac][[:xdigit:]]+,v1,sifive/bullet-07,core
      19  0x489-0x8000000000000[1-9a-e]07-0xd[[:xdigit:]]+,v1,sifive/bullet-0d,core
      20  0x489-0x8000000000000008-0x[[:xdigit:]]+,v1,sifive/p550,core
      21  0x489-0x8000000000000[1-6]08-0x[9b][[:xdigit:]]+,v1,sifive/p650,core
      22  0x5b7-0x0-0x0,v1,thead/c900-legacy,core
      23  0x67e-0x80000000db0000[89]0-0x[[:xdigit:]]+,v1,starfive/dubhe-80,core
      24  0x31e-0x8000000000008a45-0x[[:xdigit:]]+,v1,andes/ax45,core
|
/tools/perf/pmu-events/arch/powerpc/
  mapfile.csv
      10  # Type is core, uncore etc
      14  0x004[bcd][[:xdigit:]]{4},1,power8,core
      15  0x0066[[:xdigit:]]{4},1,power8,core
      16  0x004e[[:xdigit:]]{4},1,power9,core
      17  0x0080[[:xdigit:]]{4},1,power10,core
      18  0x0082[[:xdigit:]]{4},1,power10,core
      19  0x00ffffff,1,compat,core
|
/tools/testing/selftests/bpf/
  test_kmod.sh
      28  sysctl -w net.core.bpf_jit_enable=$1 2>&1 > /dev/null
      29  sysctl -w net.core.bpf_jit_harden=$2 2>&1 > /dev/null
      56  JE=`sysctl -n net.core.bpf_jit_enable`
      57  JH=`sysctl -n net.core.bpf_jit_harden`
      62  sysctl -w net.core.bpf_jit_enable=$JE 2>&1 > /dev/null
      63  sysctl -w net.core.bpf_jit_harden=$JH 2>&1 > /dev/null
|
/tools/power/x86/intel-speed-select/
  Build
       1  …-speed-select-y += isst-config.o isst-core.o isst-display.o isst-daemon.o hfi-events.o isst-core-…
|