| /tools/perf/util/ |
| counts.c |
    12  struct perf_counts *counts = zalloc(sizeof(*counts));  in perf_counts__new()  [local]
    14  if (counts) {  in perf_counts__new()
    19  free(counts);  in perf_counts__new()
    23  counts->values = values;  in perf_counts__new()
    28  free(counts);  in perf_counts__new()
    32  counts->loaded = values;  in perf_counts__new()
    35  return counts;  in perf_counts__new()
    40  if (counts) {  in perf_counts__delete()
    43  free(counts);  in perf_counts__delete()
    49  xyarray__reset(counts->loaded);  in perf_counts__reset()
    [all …]
|
| counts.h |
    20  perf_counts(struct perf_counts *counts, int cpu_map_idx, int thread)  in perf_counts()  [argument]
    22  return xyarray__entry(counts->values, cpu_map_idx, thread);  in perf_counts()
    26  perf_counts__is_loaded(struct perf_counts *counts, int cpu_map_idx, int thread)  in perf_counts__is_loaded()  [argument]
    28  return *((bool *) xyarray__entry(counts->loaded, cpu_map_idx, thread));  in perf_counts__is_loaded()
    32  perf_counts__set_loaded(struct perf_counts *counts, int cpu_map_idx, int thread, bool loaded)  in perf_counts__set_loaded()  [argument]
    34  *((bool *) xyarray__entry(counts->loaded, cpu_map_idx, thread)) = loaded;  in perf_counts__set_loaded()
    38  void perf_counts__delete(struct perf_counts *counts);
    39  void perf_counts__reset(struct perf_counts *counts);
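The counts.h entry above is the accessor layer itself: perf_counts() indexes a per-event xyarray of struct perf_counts_values by (cpu_map_idx, thread), and the loaded helpers record whether a slot was filled from a saved value rather than a fresh read. Below is a minimal usage sketch, assuming a tools/perf build context for the headers; the helper name dump_one_count is invented for illustration and is not part of the indexed files.

```c
/*
 * Sketch only: assumes an evsel whose ->counts xyarray has already been
 * allocated and filled (for example after a counter read), and tools/perf
 * include paths for the headers below.
 */
#include <inttypes.h>
#include <stdio.h>

#include "util/counts.h"
#include "util/evsel.h"

static void dump_one_count(struct evsel *evsel, int cpu_map_idx, int thread)
{
	/* perf_counts() indexes the xyarray by (cpu_map_idx, thread). */
	struct perf_counts_values *v =
		perf_counts(evsel->counts, cpu_map_idx, thread);

	printf("val=%" PRIu64 " ena=%" PRIu64 " run=%" PRIu64 " loaded=%d\n",
	       v->val, v->ena, v->run,
	       perf_counts__is_loaded(evsel->counts, cpu_map_idx, thread));
}
```

The same perf_counts(evsel->counts, idx, 0)->val pattern shows up in the openat-syscall tests indexed under /tools/perf/tests/ below.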
|
| stat.c |
    158  struct perf_counts *counts;  in evsel__alloc_prev_raw_counts()  [local]
    161  if (counts)  in evsel__alloc_prev_raw_counts()
    164  return counts ? 0 : -ENOMEM;  in evsel__alloc_prev_raw_counts()
    443  ps_aggr->counts.val = 0;  in process_counter_values()
    444  ps_aggr->counts.ena = 0;  in process_counter_values()
    593  counts.val += aggr->counts.val;  in evsel__update_percore_stats()
    594  counts.ena += aggr->counts.ena;  in evsel__update_percore_stats()
    595  counts.run += aggr->counts.run;  in evsel__update_percore_stats()
    606  aggr->counts.val = counts.val;  in evsel__update_percore_stats()
    607  aggr->counts.ena = counts.ena;  in evsel__update_percore_stats()
    [all …]
|
| bpf_counter.c |
    245  counts = perf_counts(evsel->counts, idx, 0);  in bpf_program_profiler__read()
    246  counts->val = 0;  in bpf_program_profiler__read()
    247  counts->ena = 0;  in bpf_program_profiler__read()
    248  counts->run = 0;  in bpf_program_profiler__read()
    267  counts = perf_counts(evsel->counts, idx, 0);  in bpf_program_profiler__read()
    659  counts = perf_counts(evsel->counts, j, 0);  in bperf__read()
    668  counts = perf_counts(evsel->counts, i, 0);  in bperf__read()
    675  counts = perf_counts(evsel->counts, 0, i);  in bperf__read()
    676  counts->val = 0;  in bperf__read()
    677  counts->ena = 0;  in bperf__read()
    [all …]
|
| branch.c |
    27  st->counts[flags->type]++;  in branch_type_count()
    118  total += st->counts[i];  in branch_type_stat_display()
    152  if (st->counts[i] > 0)  in branch_type_stat_display()
    156  (double)st->counts[i] / (double)total);  in branch_type_stat_display()
    180  total += st->counts[i];  in branch_type_str()
    198  if (st->counts[i] > 0)  in branch_type_str()
|
| bpf_counter_cgroup.c |
    236  struct perf_counts_values *counts;  in bperf_cgrp__read()  [local]
    264  counts = perf_counts(evsel->counts, i, 0);  in bperf_cgrp__read()
    265  counts->val = values[cpu.cpu].counter;  in bperf_cgrp__read()
    266  counts->ena = values[cpu.cpu].enabled;  in bperf_cgrp__read()
    267  counts->run = values[cpu.cpu].running;  in bperf_cgrp__read()
|
| /tools/lib/perf/tests/ |
| test-evsel.c |
    75  perf_evsel__read(evsel, 0, 0, &counts);  in test_stat_thread()
    108  perf_evsel__read(evsel, 0, 0, &counts);  in test_stat_thread_enable()
    114  perf_evsel__read(evsel, 0, 0, &counts);  in test_stat_thread_enable()
    175  start = counts.val;  in test_stat_user_read()
    180  end = counts.val;  in test_stat_user_read()
    198  struct perf_counts_values counts;  in test_stat_read_format_single()  [local]
    212  memset(&counts, -1, sizeof(counts));  in test_stat_read_format_single()
    221  __T("failed to read ID", counts.id);  in test_stat_read_format_single()
    233  struct perf_counts_values counts;  in test_stat_read_format_group()  [local]
    258  memset(&counts, -1, sizeof(counts));  in test_stat_read_format_group()
    [all …]
|
| test-evlist.c |
    528  min = counts[0].val;  in test_stat_multiplexing()
    531  i, counts[i].val, counts[i].run, counts[i].ena);  in test_stat_multiplexing()
    536  counts[i].val,  in test_stat_multiplexing()
    537  (double)counts[i].run / (double)counts[i].ena * 100.0,  in test_stat_multiplexing()
    538  counts[i].run, counts[i].ena);  in test_stat_multiplexing()
    545  if (counts[i].val > max)  in test_stat_multiplexing()
    546  max = counts[i].val;  in test_stat_multiplexing()
    548  if (counts[i].val < min)  in test_stat_multiplexing()
    549  min = counts[i].val;  in test_stat_multiplexing()
    551  avg += counts[i].val;  in test_stat_multiplexing()
    [all …]
|
| /tools/testing/selftests/bpf/prog_tests/ |
| sock_iter_batch.c |
    31  if (!counts[i].cookie) {  in insert()
    41  counts[insert].cookie = cookie;  in insert()
    42  counts[insert].count++;  in insert()
    44  return counts[insert].count;  in insert()
    83  if (cookie == counts[i].cookie)  in was_seen()
    170  count = counts[i].count;  in get_seen_count()
    303  counts, counts_len);  in remove_seen_established()
    374  counts, counts_len);  in remove_unseen_established()
    381  counts, counts_len);  in remove_unseen_established()
    798  counts = calloc(tc->max_socks, sizeof(*counts));  in do_resume_test()
    [all …]
|
| /tools/testing/selftests/bpf/progs/ |
| test_btf_newkv.c |
    22  struct ipv_counts *counts;  in test_long_fname_2()  [local]
    25  counts = bpf_map_lookup_elem(&btf_map, &key);  in test_long_fname_2()
    26  if (!counts)  in test_long_fname_2()
    29  counts->v6++;  in test_long_fname_2()
|
| test_btf_nokv.c |
    21  struct ipv_counts *counts;  in test_long_fname_2()  [local]
    24  counts = bpf_map_lookup_elem(&btf_map, &key);  in test_long_fname_2()
    25  if (!counts)  in test_long_fname_2()
    28  counts->v6++;  in test_long_fname_2()
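Both test_btf_newkv.c and test_btf_nokv.c above use the same lookup-then-increment idiom against a BTF-described map. The sketch below reproduces that pattern; only struct ipv_counts, the btf_map name, and the v6 field come from the fragments, while the map definition, section name, and program body are assumptions made so the example stands alone.

```c
// Illustrative BPF program, not an excerpt from the selftests above.
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct ipv_counts {
	unsigned int v4;
	unsigned int v6;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct ipv_counts);
} btf_map SEC(".maps");

SEC("socket")
int count_v6(void *ctx)
{
	int key = 0;
	struct ipv_counts *counts;

	counts = bpf_map_lookup_elem(&btf_map, &key);
	if (!counts)	/* the verifier requires this NULL check */
		return 0;

	counts->v6++;
	return 0;
}

char _license[] SEC("license") = "GPL";
```

The NULL check before the increment is what lets the verifier accept the pointer dereference that follows.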
|
| /tools/lib/perf/Documentation/examples/ |
| counting.c |
    24  struct perf_counts_values counts;  in main()  [local]
    73  perf_evsel__read(evsel, 0, 0, &counts);  in main()
    75  counts.val, counts.ena, counts.run);  in main()
|
| /tools/testing/kunit/ |
| kunit_parser.py |
    46  self.counts = TestCounts()
    59  self.counts.errors += 1
    112  self.passed += counts.passed
    113  self.failed += counts.failed
    114  self.crashed += counts.crashed
    116  self.errors += counts.errors
    681  counts = test.counts
    684  counts.add_subtest_counts(t.counts)
    685  if counts.total() == 0:
    686  counts.add_status(status)
    [all …]
|
| kunit_tool_test.py |
    145  self.assertEqual(result.counts.errors, 0)
    152  self.assertEqual(result.counts.errors, 0)
    159  self.assertEqual(result.counts.errors, 0)
    166  self.assertEqual(result.counts.errors, 0)
    175  self.assertEqual(result.counts.errors, 1)
    195  self.assertEqual(result.counts.errors, 1)
    218  self.assertEqual(result.counts.errors, 1)
    257  self.assertEqual(result.counts.errors, 0)
    265  self.assertEqual(result.counts.errors, 0)
    273  self.assertEqual(result.counts.errors, 0)
    [all …]
|
| /tools/lib/perf/Documentation/ |
| libperf-counting.txt |
    37  * reads and displays event counts
    171  Now we need to get the counts from events, following code iterates through the
    172  events list and read counts:
    177  82 perf_evsel__read(evsel, 0, 0, &counts);
    179  84 counts.val, counts.ena, counts.run);
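The libperf-counting.txt excerpt above documents the read step of the shipped counting.c example: once an event is opened, perf_evsel__read() fills a struct perf_counts_values with the raw value plus the enabled and running times. The sketch below is a condensed, single-event version of that flow for the current process; it is an illustration of the API, not the two-event example the document actually walks through.

```c
// Minimal libperf counting sketch; build with something like: cc ... -lperf
#include <stdio.h>
#include <linux/perf_event.h>
#include <perf/threadmap.h>
#include <perf/evsel.h>

int main(void)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_SOFTWARE,
		.config		= PERF_COUNT_SW_TASK_CLOCK,
		.read_format	= PERF_FORMAT_TOTAL_TIME_ENABLED |
				  PERF_FORMAT_TOTAL_TIME_RUNNING,
	};
	struct perf_counts_values counts = { .val = 0 };
	struct perf_thread_map *threads;
	struct perf_evsel *evsel;

	/* Measure the current process: pid 0 at index 0 of a dummy map. */
	threads = perf_thread_map__new_dummy();
	perf_thread_map__set_pid(threads, 0, 0);

	evsel = perf_evsel__new(&attr);
	if (perf_evsel__open(evsel, NULL, threads) < 0)
		return 1;

	/* cpu_map_idx 0, thread 0: fills val/ena/run for this counter. */
	perf_evsel__read(evsel, 0, 0, &counts);
	printf("count %llu, enabled %llu, running %llu\n",
	       (unsigned long long)counts.val,
	       (unsigned long long)counts.ena,
	       (unsigned long long)counts.run);

	perf_evsel__close(evsel);
	perf_evsel__delete(evsel);
	perf_thread_map__put(threads);
	return 0;
}
```

The val/ena/run fields printed here are the same three fields the counting.c fragment above prints at its line 75.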
|
| /tools/perf/tests/ |
| mmap-basic.c |
    251  struct perf_counts_values counts = { .val = 0 };  in test_stat_user_read()  [local]
    324  perf_evsel__read(evsel, 0, 0, &counts);  in test_stat_user_read()
    325  if (counts.val == 0) {  in test_stat_user_read()
    337  perf_evsel__read(evsel, 0, 0, &counts);  in test_stat_user_read()
    338  start = counts.val;  in test_stat_user_read()
    342  perf_evsel__read(evsel, 0, 0, &counts);  in test_stat_user_read()
    343  end = counts.val;  in test_stat_user_read()
|
| openat-syscall.c |
    58  if (perf_counts(evsel->counts, 0, 0)->val != nr_openat_calls) {  in test__openat_syscall_event()
    60  nr_openat_calls, perf_counts(evsel->counts, 0, 0)->val);  in test__openat_syscall_event()
|
| openat-syscall-all-cpus.c |
    108  if (perf_counts(evsel->counts, idx, 0)->val != expected) {  in test__openat_syscall_event_on_all_cpus()
    110  expected, cpu.cpu, perf_counts(evsel->counts, idx, 0)->val);  in test__openat_syscall_event_on_all_cpus()
|
| /tools/perf/scripts/python/bin/ |
| syscall-counts-report |
    10  perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/syscall-counts.py $comm
|
| syscall-counts-by-pid-report |
    10  perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/syscall-counts-by-pid.py $comm
|
| /tools/memory-model/scripts/ |
| cmplitmushist.sh |
    73  echo Matching Observation result and counts: $2
    108  echo Matching Observation result and counts: $obsline 1>&2
|
| /tools/perf/Documentation/ |
| perf-stat.txt |
    52  'percore' is an event qualifier that sums up the event counts for both
    156  Do not aggregate counts across all monitored CPUs.
    405  Aggregate counts per processor die for system-wide mode measurements.
    437  Do not aggregate/merge counts across monitored CPUs or PMUs.
    440  stat will, by default, aggregate the event counts and show the result
    442  individual events and counts.
    452  suffix of _0, _1, etc. By default the event counts will all be
    460  Merge core event counts from all core PMUs. In hybrid or big.LITTLE
    462  separately. This option forces core PMU counts to be combined to give
    553  for all hardware threads in a core and show the counts per core.
    [all …]
|
| /tools/bpf/bpftool/skeleton/ |
| profiler.bpf.c |
    39  } counts SEC(".maps");
    116  count = bpf_map_lookup_elem(&counts, &zero);  in BPF_PROG()
|
| /tools/perf/python/ |
| counting.py |
    27  counts = evsel.read(cpu, thread)
|
| /tools/usb/ |
| ffs-test.c |
    233  const __le32 counts[];  in descs_to_legacy()  [member]
    235  const __le32 *counts = in->counts;  in descs_to_legacy()  [local]
    255  ret = le32_to_cpu(*counts); \  in descs_to_legacy()
    257  ++counts; \  in descs_to_legacy()
    267  descs_start = (const void *)counts;  in descs_to_legacy()
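The ffs-test.c fragments above walk a flexible array of little-endian 32-bit descriptor counts with le32_to_cpu(). The sketch below shows the same byte-order handling in a standalone form, using le32toh() from <endian.h> in place of the kernel tools helper; the function and variable names are invented for the example.

```c
// Standalone illustration of parsing consecutive __le32 counters from a blob.
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void dump_le32_counts(const void *blob, unsigned int n)
{
	const uint8_t *p = blob;
	unsigned int i;

	for (i = 0; i < n; ++i) {
		uint32_t le;

		/* memcpy keeps the access safe even if the blob is unaligned. */
		memcpy(&le, p + i * sizeof(le), sizeof(le));
		printf("count[%u] = %u\n", i, le32toh(le));
	}
}
```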
|