// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/types.h>
#include <sys/prctl.h>
#include <perf/cpumap.h>
#include <perf/evlist.h>
#include <perf/mmap.h>

#include "debug.h"
#include "parse-events.h"
#include "evlist.h"
#include "evsel.h"
#include "thread_map.h"
#include "record.h"
#include "tsc.h"
#include "mmap.h"
#include "tests.h"
#include "util/sample.h"

/*
 * TSC conversion in perf is only supported on x86_64/i386 and Arm64, so
 * enable this test only for those architectures.
 */
#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__)
#define TSC_IS_SUPPORTED 1
#else
#define TSC_IS_SUPPORTED 0
#endif

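/*
 * Helper macros: on failure they log the failing expression and jump to the
 * common cleanup label out_err.
 */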
#define CHECK__(x) {				\
	while ((x) < 0) {			\
		pr_debug(#x " failed!\n");	\
		goto out_err;			\
	}					\
}

#define CHECK_NOT_NULL__(x) {			\
	while ((x) == NULL) {			\
		pr_debug(#x " failed!\n");	\
		goto out_err;			\
	}					\
}

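/* Report TEST_SKIP up front on architectures without TSC support. */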
static int test__tsc_is_supported(struct test_suite *test __maybe_unused,
				  int subtest __maybe_unused)
{
	if (!TSC_IS_SUPPORTED) {
		pr_debug("Test not supported on this architecture\n");
		return TEST_SKIP;
	}

	return TEST_OK;
}

/**
 * test__perf_time_to_tsc - test converting perf time to TSC.
 *
 * This function implements a test that checks that the conversion of perf time
 * to and from TSC is consistent with the order of events.  If the test passes,
 * %TEST_OK is returned, otherwise %TEST_FAIL is returned.  If TSC conversion
 * is not supported, %TEST_SKIP is returned.
 */
static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	struct record_opts opts = {
		.mmap_pages = UINT_MAX,
		.user_freq = UINT_MAX,
		.user_interval = ULLONG_MAX,
		.target = {
			.uses_mmap = true,
		},
		.sample_time = true,
	};
	struct perf_thread_map *threads = NULL;
	struct perf_cpu_map *cpus = NULL;
	struct evlist *evlist = NULL;
	struct evsel *evsel = NULL;
	int err = TEST_FAIL, ret, i;
	const char *comm1, *comm2;
	struct perf_tsc_conversion tc;
	struct perf_event_mmap_page *pc;
	union perf_event *event;
	u64 test_tsc, comm1_tsc, comm2_tsc;
	u64 test_time, comm1_time = 0, comm2_time = 0;
	struct mmap *md;

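	/*
	 * Profile only the current process, across all online CPUs, using a
	 * freshly created event list.
	 */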
	threads = thread_map__new(-1, getpid(), UINT_MAX);
	CHECK_NOT_NULL__(threads);

	cpus = perf_cpu_map__new(NULL);
	CHECK_NOT_NULL__(cpus);

	evlist = evlist__new();
	CHECK_NOT_NULL__(evlist);

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	CHECK__(parse_event(evlist, "cycles:u"));

	evlist__config(evlist, &opts, NULL);

	/* On hybrid systems, "cycles:u" creates two events, one per core PMU */
	evlist__for_each_entry(evlist, evsel) {
		evsel->core.attr.comm = 1;
		evsel->core.attr.disabled = 1;
		evsel->core.attr.enable_on_exec = 0;
	}

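	/*
	 * Open the events but do not enable them yet.  ENOENT means the
	 * cycles event is unavailable here, so skip the test in that case.
	 */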
	ret = evlist__open(evlist);
	if (ret < 0) {
		if (ret == -ENOENT)
			err = TEST_SKIP;
		else
			pr_debug("evlist__open() failed\n");
		goto out_err;
	}

	CHECK__(evlist__mmap(evlist, UINT_MAX));

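	/*
	 * The TSC conversion parameters are published by the kernel in the
	 * first mmapped perf_event_mmap_page.
	 */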
	pc = evlist->mmap[0].core.base;
	ret = perf_read_tsc_conversion(pc, &tc);
	if (ret) {
		if (ret == -EOPNOTSUPP) {
			pr_debug("perf_read_tsc_conversion is not supported in current kernel\n");
			err = TEST_SKIP;
		}
		goto out_err;
	}

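	/*
	 * Generate two PERF_RECORD_COMM events that bracket a raw TSC
	 * reading: if the time conversion is consistent, the rdtsc() value
	 * must fall between the timestamps of the two comm events.
	 */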
	evlist__enable(evlist);

	comm1 = "Test COMM 1";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm1, 0, 0, 0));

	test_tsc = rdtsc();

	comm2 = "Test COMM 2";
	CHECK__(prctl(PR_SET_NAME, (unsigned long)comm2, 0, 0, 0));

	evlist__disable(evlist);

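	/*
	 * Scan every mmapped ring buffer for the two comm events from this
	 * process and record their sample timestamps.
	 */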
	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		md = &evlist->mmap[i];
		if (perf_mmap__read_init(&md->core) < 0)
			continue;

		while ((event = perf_mmap__read_event(&md->core)) != NULL) {
			struct perf_sample sample;

			if (event->header.type != PERF_RECORD_COMM ||
			    (pid_t)event->comm.pid != getpid() ||
			    (pid_t)event->comm.tid != getpid())
				goto next_event;

			if (strcmp(event->comm.comm, comm1) == 0) {
				CHECK_NOT_NULL__(evsel = evlist__event2evsel(evlist, event));
				CHECK__(evsel__parse_sample(evsel, event, &sample));
				comm1_time = sample.time;
			}
			if (strcmp(event->comm.comm, comm2) == 0) {
				CHECK_NOT_NULL__(evsel = evlist__event2evsel(evlist, event));
				CHECK__(evsel__parse_sample(evsel, event, &sample));
				comm2_time = sample.time;
			}
next_event:
			perf_mmap__consume(&md->core);
		}
		perf_mmap__read_done(&md->core);
	}

	if (!comm1_time || !comm2_time)
		goto out_err;

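	/*
	 * Convert in both directions and require the rdtsc() reading to sit
	 * strictly between the two comm events in each time domain.
	 */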
	test_time = tsc_to_perf_time(test_tsc, &tc);
	comm1_tsc = perf_time_to_tsc(comm1_time, &tc);
	comm2_tsc = perf_time_to_tsc(comm2_time, &tc);

	pr_debug("1st event perf time %"PRIu64" tsc %"PRIu64"\n",
		 comm1_time, comm1_tsc);
	pr_debug("rdtsc time %"PRIu64" tsc %"PRIu64"\n",
		 test_time, test_tsc);
	pr_debug("2nd event perf time %"PRIu64" tsc %"PRIu64"\n",
		 comm2_time, comm2_tsc);

	if (test_time <= comm1_time ||
	    test_time >= comm2_time)
		goto out_err;

	if (test_tsc <= comm1_tsc ||
	    test_tsc >= comm2_tsc)
		goto out_err;

	err = TEST_OK;

out_err:
	evlist__delete(evlist);
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);
	return err;
}

static strutest_case time_to_tsc_tests[] = {
	TEST_CASE_REASON("TSC support", tsc_is_supported,
			 "This architecture does not support"),
	TEST_CASE_REASON("Perf time to TSC", perf_time_to_tsc,
			 "perf_read_tsc_conversion is not supported"),
	{ .name = NULL, }
};

struct test_suite suite__perf_time_to_tsc = {
	.desc = "Convert perf time to TSC",
	.test_cases = time_to_tsc_tests,
};