// SPDX-License-Identifier: GPL-2.0
#include "util/bpf_counter.h"
#include "util/debug.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/off_cpu.h"
#include "util/perf-hooks.h"
#include "util/record.h"
#include "util/session.h"
#include "util/target.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/cgroup.h"
#include "util/strlist.h"
#include <bpf/bpf.h>

#include "bpf_skel/off_cpu.skel.h"

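/*
 * Support for off-cpu profiling with 'perf record --off-cpu'.  A BPF
 * program attached to the sched_switch tracepoint accumulates off-cpu
 * time per stack trace, and off_cpu_write() below converts the result
 * into synthetic PERF_RECORD_SAMPLEs at the end of the record session.
 */
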
#define MAX_STACKS 32
#define MAX_PROC 4096
/*
 * We don't need an actual timestamp, we just want the off-cpu samples
 * to sort after everything else; the low 32 bits are incremented per
 * sample in off_cpu_write() to keep them ordered among themselves.
 */
#define OFF_CPU_TIMESTAMP (~0ull << 32)

static struct off_cpu_bpf *skel;

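/*
 * Key of the off_cpu BPF map; it must match the key layout used by the
 * BPF program in bpf_skel/off_cpu.bpf.c.
 */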
struct off_cpu_key {
	u32 pid;
	u32 tgid;
	u32 stack_id;
	u32 state;
	u64 cgroup_id;
};

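/*
 * Buffer for one synthesized PERF_RECORD_SAMPLE: a perf_event_header
 * followed by u64 sample fields, 1024 bytes in total.
 */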
union off_cpu_data {
	struct perf_event_header hdr;
	u64 array[1024 / sizeof(u64)];
};

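/*
 * Add a software bpf-output event named OFFCPU_EVENT to the evlist so
 * that the synthesized samples are parsed with callchains on the
 * report side.
 */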
static int off_cpu_config(struct evlist *evlist)
{
	struct evsel *evsel;
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config	= PERF_COUNT_SW_BPF_OUTPUT,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	char *evname = strdup(OFFCPU_EVENT);

	if (evname == NULL)
		return -ENOMEM;

	evsel = evsel__new(&attr);
	if (!evsel) {
		free(evname);
		return -ENOMEM;
	}

	evsel->core.attr.freq = 1;
	evsel->core.attr.sample_period = 1;
	/* off-cpu analysis depends on stack trace */
	evsel->core.attr.sample_type = PERF_SAMPLE_CALLCHAIN;

	evlist__add(evlist, evsel);

	free(evsel->name);
	evsel->name = evname;

	return 0;
}

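/*
 * "record_start" hook: if no explicit cpu/task filter was given, filter
 * on the forked workload's pid, then enable BPF-side collection.
 */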
static void off_cpu_start(void *arg)
{
	struct evlist *evlist = arg;

	/* update task filter for the given workload */
	if (!skel->bss->has_cpu && !skel->bss->has_task &&
	    perf_thread_map__pid(evlist->core.threads, 0) != -1) {
		int fd;
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		skel->bss->uses_tgid = 1;
		fd = bpf_map__fd(skel->maps.task_filter);
		pid = perf_thread_map__pid(evlist->core.threads, 0);
		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
	}

	skel->bss->enabled = 1;
}

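/* "record_end" hook: stop collection and tear down the BPF skeleton */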
static void off_cpu_finish(void *arg __maybe_unused)
{
	skel->bss->enabled = 0;
	off_cpu_bpf__destroy(skel);
}

/* the v5.18 kernel added a prev_state arg, so check the signature */
static void check_sched_switch_args(void)
{
	const struct btf *btf = bpf_object__btf(skel->obj);
	const struct btf_type *t1, *t2, *t3;
	u32 type_id;

	type_id = btf__find_by_name_kind(btf, "btf_trace_sched_switch",
					 BTF_KIND_TYPEDEF);
	if ((s32)type_id < 0)
		return;

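	/*
	 * Walk the chain: btf_trace_sched_switch (typedef) -> pointer ->
	 * function prototype, then count the prototype's parameters to
	 * tell the old and new tracepoint signatures apart.
	 */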
	t1 = btf__type_by_id(btf, type_id);
	if (t1 == NULL)
		return;

	t2 = btf__type_by_id(btf, t1->type);
	if (t2 == NULL || !btf_is_ptr(t2))
		return;

	t3 = btf__type_by_id(btf, t2->type);
	if (t3 && btf_is_func_proto(t3) && btf_vlen(t3) == 4) {
		/* new format: pass prev_state as 4th arg */
		skel->rodata->has_prev_state = true;
	}
}

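/*
 * Open the BPF skeleton, size and populate the cpu/task/cgroup filter
 * maps according to the target, and register the record hooks.  Called
 * before the workload starts.
 */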
int off_cpu_prepare(struct evlist *evlist, struct target *target,
		    struct record_opts *opts)
{
	int err, fd, i;
	int ncpus = 1, ntasks = 1, ncgrps = 1;
	struct strlist *pid_slist = NULL;
	struct str_node *pos;

	if (off_cpu_config(evlist) < 0) {
		pr_err("Failed to config off-cpu BPF event\n");
		return -1;
	}

	skel = off_cpu_bpf__open();
	if (!skel) {
		pr_err("Failed to open off-cpu BPF skeleton\n");
		return -1;
	}

	/* don't need to set cpu filter for system-wide mode */
	if (target->cpu_list) {
		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	}

	if (target->pid) {
		pid_slist = strlist__new(target->pid, NULL);
		if (!pid_slist) {
			pr_err("Failed to create a strlist for pid\n");
			return -1;
		}

		ntasks = 0;
		strlist__for_each_entry(pos, pid_slist) {
			char *end_ptr;
			int pid = strtol(pos->s, &end_ptr, 10);

			if (pid == INT_MIN || pid == INT_MAX ||
			    (*end_ptr != '\0' && *end_ptr != ','))
				continue;

			ntasks++;
		}

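		/*
		 * Size the map for at least MAX_PROC entries, presumably
		 * to leave headroom beyond the pids given on the command
		 * line.
		 */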
		if (ntasks < MAX_PROC)
			ntasks = MAX_PROC;

		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	} else if (target__has_task(target)) {
		ntasks = perf_thread_map__nr(evlist->core.threads);
		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	} else if (target__none(target)) {
		bpf_map__set_max_entries(skel->maps.task_filter, MAX_PROC);
	}

	if (evlist__first(evlist)->cgrp) {
		ncgrps = evlist->core.nr_entries - 1; /* excluding a dummy */
		bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);

		if (!cgroup_is_v2("perf_event"))
			skel->rodata->uses_cgroup_v1 = true;
	}

	if (opts->record_cgroup) {
		skel->rodata->needs_cgroup = true;

		if (!cgroup_is_v2("perf_event"))
			skel->rodata->uses_cgroup_v1 = true;
	}

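	/*
	 * Everything up to this point must be decided before the load
	 * below: rodata values and map sizes are frozen once the
	 * skeleton is loaded.
	 */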
	set_max_rlimit();
	check_sched_switch_args();

	err = off_cpu_bpf__load(skel);
	if (err) {
		pr_err("Failed to load off-cpu skeleton\n");
		goto out;
	}

	if (target->cpu_list) {
		u32 cpu;
		u8 val = 1;

		skel->bss->has_cpu = 1;
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target->pid) {
		u8 val = 1;

		skel->bss->has_task = 1;
		skel->bss->uses_tgid = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		strlist__for_each_entry(pos, pid_slist) {
			char *end_ptr;
			u32 tgid;
			int pid = strtol(pos->s, &end_ptr, 10);

			if (pid == INT_MIN || pid == INT_MAX ||
			    (*end_ptr != '\0' && *end_ptr != ','))
				continue;

			tgid = pid;
			bpf_map_update_elem(fd, &tgid, &val, BPF_ANY);
		}
	} else if (target__has_task(target)) {
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

	if (evlist__first(evlist)->cgrp) {
		struct evsel *evsel;
		u8 val = 1;

		skel->bss->has_cgroup = 1;
		fd = bpf_map__fd(skel->maps.cgroup_filter);

		evlist__for_each_entry(evlist, evsel) {
			struct cgroup *cgrp = evsel->cgrp;

			if (cgrp == NULL)
				continue;

			if (!cgrp->id && read_cgroup_id(cgrp) < 0) {
				pr_err("Failed to read cgroup id of %s\n",
				       cgrp->name);
				goto out;
			}

			bpf_map_update_elem(fd, &cgrp->id, &val, BPF_ANY);
		}
	}

	err = off_cpu_bpf__attach(skel);
	if (err) {
		pr_err("Failed to attach off-cpu BPF skeleton\n");
		goto out;
	}

	if (perf_hooks__set_hook("record_start", off_cpu_start, evlist) ||
	    perf_hooks__set_hook("record_end", off_cpu_finish, evlist)) {
		pr_err("Failed to set off-cpu perf hooks\n");
		goto out;
	}

	return 0;

out:
	off_cpu_bpf__destroy(skel);
	return -1;
}

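/*
 * Called at the end of the record session: convert each entry of the
 * off_cpu map into a synthetic PERF_RECORD_SAMPLE and append it to the
 * data file.  Returns the number of bytes written.
 */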
int off_cpu_write(struct perf_session *session)
{
	int bytes = 0, size;
	int fd, stack;
	u64 sample_type, val, sid = 0;
	struct evsel *evsel;
	struct perf_data_file *file = &session->data->file;
	struct off_cpu_key prev, key;
	union off_cpu_data data = {
		.hdr = {
			.type = PERF_RECORD_SAMPLE,
			.misc = PERF_RECORD_MISC_USER,
		},
	};
	u64 tstamp = OFF_CPU_TIMESTAMP;

	skel->bss->enabled = 0;

	evsel = evlist__find_evsel_by_str(session->evlist, OFFCPU_EVENT);
	if (evsel == NULL) {
		pr_err("%s evsel not found\n", OFFCPU_EVENT);
		return 0;
	}

	sample_type = evsel->core.attr.sample_type;

	if (sample_type & ~OFFCPU_SAMPLE_TYPES) {
		pr_err("unsupported sample type: %llx\n",
		       (unsigned long long)sample_type);
		return -1;
	}

	if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER)) {
		if (evsel->core.id)
			sid = evsel->core.id[0];
	}

	fd = bpf_map__fd(skel->maps.off_cpu);
	stack = bpf_map__fd(skel->maps.stacks);
	memset(&prev, 0, sizeof(prev));

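	/*
	 * For example, with sample_type = IDENTIFIER | TID | TIME |
	 * PERIOD | CALLCHAIN, one record in data.array is laid out as:
	 *
	 *   [0]   perf_event_header    (type, misc, size)
	 *   [1]   sample id
	 *   [2]   pid/tid
	 *   [3]   dummy timestamp
	 *   [4]   off-cpu time         (period)
	 *   [5]   callchain nr
	 *   [6]   PERF_CONTEXT_USER
	 *   [7..] callchain entries
	 */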
	while (!bpf_map_get_next_key(fd, &prev, &key)) {
		int n = 1; /* start from perf_event_header */
		int ip_pos = -1;

		bpf_map_lookup_elem(fd, &key, &val);

		if (sample_type & PERF_SAMPLE_IDENTIFIER)
			data.array[n++] = sid;
		if (sample_type & PERF_SAMPLE_IP) {
			ip_pos = n;
			data.array[n++] = 0; /* will be updated */
		}
		if (sample_type & PERF_SAMPLE_TID)
			data.array[n++] = (u64)key.pid << 32 | key.tgid;
		if (sample_type & PERF_SAMPLE_TIME)
			data.array[n++] = tstamp;
		if (sample_type & PERF_SAMPLE_ID)
			data.array[n++] = sid;
		if (sample_type & PERF_SAMPLE_CPU)
			data.array[n++] = 0;
		if (sample_type & PERF_SAMPLE_PERIOD)
			data.array[n++] = val;
		if (sample_type & PERF_SAMPLE_CALLCHAIN) {
			int len = 0;

			/* data.array[n] is callchain->nr (updated later) */
			data.array[n + 1] = PERF_CONTEXT_USER;
			data.array[n + 2] = 0;

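			/*
			 * The stacks map entry is zero-padded after the
			 * last frame, so count entries up to the first 0.
			 */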
			bpf_map_lookup_elem(stack, &key.stack_id, &data.array[n + 2]);
			while (data.array[n + 2 + len])
				len++;

			/* update length of callchain */
			data.array[n] = len + 1;

			/* update sample ip with the first callchain entry */
			if (ip_pos >= 0)
				data.array[ip_pos] = data.array[n + 2];

			/* calculate sample callchain data array length */
			n += len + 2;
		}
		if (sample_type & PERF_SAMPLE_CGROUP)
			data.array[n++] = key.cgroup_id;

		size = n * sizeof(u64);
		data.hdr.size = size;
		bytes += size;

		if (perf_data_file__write(file, &data, size) < 0) {
			pr_err("failed to write perf data, error: %m\n");
			return bytes;
		}

		prev = key;
		/* increase dummy timestamp to sort later samples */
		tstamp++;
	}
	return bytes;
}