Lines matching refs:trace — symbol cross-references for "trace" in perf's builtin-trace.c. Each entry gives the source line number, the matching line, and how the symbol is used there ("argument", "local", or "struct"); long lines are truncated with "…".
142 struct trace { struct
224 static void trace__load_vmlinux_btf(struct trace *trace __maybe_unused) in trace__load_vmlinux_btf() argument
227 if (trace->btf != NULL) in trace__load_vmlinux_btf()
230 trace->btf = btf__load_vmlinux_btf(); in trace__load_vmlinux_btf()
232 fprintf(trace->output, trace->btf ? "vmlinux BTF loaded\n" : in trace__load_vmlinux_btf()
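
The three lines above are the whole lazy loader: bail out if trace->btf is already set, otherwise load the kernel's BTF and log the outcome. A minimal standalone sketch of the same guard, assuming libbpf is available; cached_btf and load_vmlinux_btf_once() are illustrative stand-ins for trace->btf and trace__load_vmlinux_btf():

    #include <bpf/btf.h>
    #include <stdbool.h>
    #include <stdio.h>

    static struct btf *cached_btf;      /* stand-in for trace->btf */

    static void load_vmlinux_btf_once(FILE *out, bool verbose)
    {
        if (cached_btf != NULL)
            return;                     /* already loaded, reuse the handle */
        cached_btf = btf__load_vmlinux_btf();   /* typically /sys/kernel/btf/vmlinux */
        if (verbose)
            fprintf(out, cached_btf ? "vmlinux BTF loaded\n" : "BTF loading failed\n");
    }
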
951 struct btf *btf = arg->trace->btf; in syscall_arg__strtoul_btf_enum()
973 trace__load_vmlinux_btf(arg->trace); in syscall_arg__strtoul_btf_type()
975 btf = arg->trace->btf; in syscall_arg__strtoul_btf_type()
1040 dump_data_opts.skip_names = !arg->trace->show_arg_names; in btf_struct_scnprintf()
1059 static size_t trace__btf_scnprintf(struct trace *trace, struct syscall_arg *arg, char *bf, in trace__btf_scnprintf() argument
1064 if (trace->btf == NULL) in trace__btf_scnprintf()
1069 syscall_arg_fmt__cache_btf_enum(arg_fmt, trace->btf, type); in trace__btf_scnprintf()
1077 return btf_enum_scnprintf(arg_fmt->type, trace->btf, bf, size, val); in trace__btf_scnprintf()
1079 return btf_struct_scnprintf(arg_fmt->type, trace->btf, bf, size, arg); in trace__btf_scnprintf()
1085 static size_t trace__btf_scnprintf(struct trace *trace __maybe_unused, struct syscall_arg *arg __ma… in trace__btf_scnprintf()
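
The formatters above resolve a type by name once, cache it in the per-argument fmt, then render values through it. A sketch of the enum half with plain libbpf calls, assuming a loaded btf handle; btf_enum_value_name() is an illustrative helper, not perf's API:

    #include <bpf/btf.h>

    /* Map an integer to its enumerator name, or NULL if unknown;
     * enum_name is the bare type name, without the "enum " prefix. */
    static const char *btf_enum_value_name(const struct btf *btf,
                                           const char *enum_name, int val)
    {
        __s32 id = btf__find_by_name(btf, enum_name);
        const struct btf_type *t;
        const struct btf_enum *e;

        if (id < 0)
            return NULL;
        t = btf__type_by_id(btf, id);
        if (!btf_is_enum(t))
            return NULL;
        e = btf_enum(t);
        for (int i = 0; i < btf_vlen(t); i++, e++)
            if (e->val == val)
                return btf__name_by_offset(btf, e->name_off);
        return NULL;
    }
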
1672 struct trace *trace) in thread__fd_path() argument
1676 if (ttrace == NULL || trace->fd_path_disabled) in thread__fd_path()
1683 if (!trace->live) in thread__fd_path()
1685 ++trace->stats.proc_getname; in thread__fd_path()
1697 const char *path = thread__fd_path(arg->thread, fd, arg->trace); in syscall_arg__scnprintf_fd()
1705 size_t pid__scnprintf_fd(struct trace *trace, pid_t pid, int fd, char *bf, size_t size) in pid__scnprintf_fd() argument
1708 struct thread *thread = machine__find_thread(trace->host, pid, pid); in pid__scnprintf_fd()
1711 const char *path = thread__fd_path(thread, fd, trace); in pid__scnprintf_fd()
1768 if (!arg->trace->vfs_getname) in syscall_arg__scnprintf_filename()
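
thread__fd_path() serves fd→path queries for the fd beautifier; on a live session the lookup ultimately reads /proc (the proc_getname stat above counts those). A rough standalone sketch of that resolution step:

    #include <stdio.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Resolve an fd of a running process via /proc/<pid>/fd/<fd>. */
    static ssize_t fd_path(pid_t pid, int fd, char *buf, size_t size)
    {
        char link[64];
        ssize_t n;

        if (size == 0)
            return -1;
        snprintf(link, sizeof(link), "/proc/%d/fd/%d", (int)pid, fd);
        n = readlink(link, buf, size - 1);
        if (n > 0)
            buf[n] = '\0';              /* readlink(2) does not NUL-terminate */
        return n;
    }
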
1801 static bool trace__filter_duration(struct trace *trace, double t) in trace__filter_duration() argument
1803 return t < (trace->duration_filter * NSEC_PER_MSEC); in trace__filter_duration()
1806 static size_t __trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp) in __trace__fprintf_tstamp() argument
1808 double ts = (double)(tstamp - trace->base_time) / NSEC_PER_MSEC; in __trace__fprintf_tstamp()
1819 static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp) in trace__fprintf_tstamp() argument
1822 return __trace__fprintf_tstamp(trace, tstamp, fp); in trace__fprintf_tstamp()
1843 static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp) in trace__fprintf_comm_tid() argument
1847 if (trace->multiple_threads) { in trace__fprintf_comm_tid()
1848 if (trace->show_comm) in trace__fprintf_comm_tid()
1856 static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thread, in trace__fprintf_entry_head() argument
1861 if (trace->show_tstamp) in trace__fprintf_entry_head()
1862 printed = trace__fprintf_tstamp(trace, tstamp, fp); in trace__fprintf_entry_head()
1863 if (trace->show_duration) in trace__fprintf_entry_head()
1865 return printed + trace__fprintf_comm_tid(trace, thread, fp); in trace__fprintf_entry_head()
1868 static int trace__process_event(struct trace *trace, struct machine *machine, in trace__process_event() argument
1875 color_fprintf(trace->output, PERF_COLOR_RED, in trace__process_event()
1892 struct trace *trace = container_of(tool, struct trace, tool); in trace__tool_process() local
1893 return trace__process_event(trace, machine, event, sample); in trace__tool_process()
1914 static int trace__symbols_init(struct trace *trace, struct evlist *evlist) in trace__symbols_init() argument
1921 trace->host = machine__new_host(); in trace__symbols_init()
1922 if (trace->host == NULL) in trace__symbols_init()
1927 err = trace_event__register_resolver(trace->host, trace__machine__resolve_kernel_addr); in trace__symbols_init()
1931 err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target, in trace__symbols_init()
1941 static void trace__symbols__exit(struct trace *trace) in trace__symbols__exit() argument
1943 machine__exit(trace->host); in trace__symbols__exit()
1944 trace->host = NULL; in trace__symbols__exit()
2065 static int trace__read_syscall_info(struct trace *trace, int id) in trace__read_syscall_info() argument
2069 const char *name = syscalltbl__name(trace->sctbl, id); in trace__read_syscall_info()
2073 if (trace->syscalls.table == NULL) { in trace__read_syscall_info()
2074 trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc)); in trace__read_syscall_info()
2075 if (trace->syscalls.table == NULL) in trace__read_syscall_info()
2079 if (id > trace->sctbl->syscalls.max_id || (id == 0 && trace->syscalls.table == NULL)) { in trace__read_syscall_info()
2081 struct syscall *table = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc)); in trace__read_syscall_info()
2087 if (trace->syscalls.table == NULL) in trace__read_syscall_info()
2090 …memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof… in trace__read_syscall_info()
2092 trace->syscalls.table = table; in trace__read_syscall_info()
2093 trace->sctbl->syscalls.max_id = id; in trace__read_syscall_info()
2096 sc = trace->syscalls.table + id; in trace__read_syscall_info()
2147 trace__load_vmlinux_btf(trace); in trace__read_syscall_info()
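
Lines 2079-2093 grow the syscall table on demand when an id beyond the build-time max shows up. A standalone sketch of that realloc-and-zero pattern; struct sc_entry, table_grow() and a max_id of -1 for the empty table are illustrative assumptions:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    struct sc_entry { const char *name; };

    static int table_grow(struct sc_entry **tbl, int *max_id, int id)
    {
        struct sc_entry *t;

        if (id <= *max_id)
            return 0;                   /* already covered */
        t = realloc(*tbl, (id + 1) * sizeof(*t));
        if (t == NULL)
            return -ENOMEM;
        /* zero only the slots that did not exist before */
        memset(t + *max_id + 1, 0, (id - *max_id) * sizeof(*t));
        *tbl = t;
        *max_id = id;
        return 0;
    }
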
2171 static int trace__validate_ev_qualifier(struct trace *trace) in trace__validate_ev_qualifier() argument
2176 size_t nr_used = 0, nr_allocated = strlist__nr_entries(trace->ev_qualifier); in trace__validate_ev_qualifier()
2178 trace->ev_qualifier_ids.entries = malloc(nr_allocated * in trace__validate_ev_qualifier()
2179 sizeof(trace->ev_qualifier_ids.entries[0])); in trace__validate_ev_qualifier()
2181 if (trace->ev_qualifier_ids.entries == NULL) { in trace__validate_ev_qualifier()
2183 trace->output); in trace__validate_ev_qualifier()
2188 strlist__for_each_entry(pos, trace->ev_qualifier) { in trace__validate_ev_qualifier()
2190 int id = syscalltbl__id(trace->sctbl, sc), match_next = -1; in trace__validate_ev_qualifier()
2193 id = syscalltbl__strglobmatch_first(trace->sctbl, sc, &match_next); in trace__validate_ev_qualifier()
2208 trace->ev_qualifier_ids.entries[nr_used++] = id; in trace__validate_ev_qualifier()
2213 id = syscalltbl__strglobmatch_next(trace->sctbl, sc, &match_next); in trace__validate_ev_qualifier()
2220 entries = realloc(trace->ev_qualifier_ids.entries, in trace__validate_ev_qualifier()
2221 nr_allocated * sizeof(trace->ev_qualifier_ids.entries[0])); in trace__validate_ev_qualifier()
2224 fputs("\nError:\t Not enough memory for parsing\n", trace->output); in trace__validate_ev_qualifier()
2227 trace->ev_qualifier_ids.entries = entries; in trace__validate_ev_qualifier()
2229 trace->ev_qualifier_ids.entries[nr_used++] = id; in trace__validate_ev_qualifier()
2233 trace->ev_qualifier_ids.nr = nr_used; in trace__validate_ev_qualifier()
2234 qsort(trace->ev_qualifier_ids.entries, nr_used, sizeof(int), intcmp); in trace__validate_ev_qualifier()
2240 zfree(&trace->ev_qualifier_ids.entries); in trace__validate_ev_qualifier()
2241 trace->ev_qualifier_ids.nr = 0; in trace__validate_ev_qualifier()
2245 static __maybe_unused bool trace__syscall_enabled(struct trace *trace, int id) in trace__syscall_enabled() argument
2249 if (trace->ev_qualifier_ids.nr == 0) in trace__syscall_enabled()
2252 in_ev_qualifier = bsearch(&id, trace->ev_qualifier_ids.entries, in trace__syscall_enabled()
2253 trace->ev_qualifier_ids.nr, sizeof(int), intcmp) != NULL; in trace__syscall_enabled()
2256 return !trace->not_ev_qualifier; in trace__syscall_enabled()
2258 return trace->not_ev_qualifier; in trace__syscall_enabled()
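
The qualifier list is sorted once with qsort() (line 2234) so that trace__syscall_enabled() can answer membership with bsearch(), inverting the result for negated qualifiers. A self-contained sketch of that check:

    #include <stdbool.h>
    #include <stdlib.h>

    static int intcmp(const void *a, const void *b)
    {
        return *(const int *)a - *(const int *)b;
    }

    /* ids must have been sorted with qsort(ids, nr, sizeof(int), intcmp) */
    static bool syscall_enabled(int id, const int *ids, size_t nr, bool negated)
    {
        bool found;

        if (nr == 0)
            return true;                /* no qualifier: trace everything */
        found = bsearch(&id, ids, nr, sizeof(int), intcmp) != NULL;
        return negated ? !found : found;
    }
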
2314 struct trace *trace, struct thread *thread) in syscall__scnprintf_args() argument
2327 .trace = trace, in syscall__scnprintf_args()
2329 .show_string_prefix = trace->show_string_prefix, in syscall__scnprintf_args()
2364 if (val == 0 && !trace->show_zeros && in syscall__scnprintf_args()
2371 if (trace->show_arg_names) in syscall__scnprintf_args()
2376 if (trace->force_btf || default_scnprintf == NULL || default_scnprintf == SCA_PTR) { in syscall__scnprintf_args()
2377 btf_printed = trace__btf_scnprintf(trace, &arg, bf + printed, in syscall__scnprintf_args()
2411 typedef int (*tracepoint_handler)(struct trace *trace, struct evsel *evsel,
2415 static struct syscall *trace__syscall_info(struct trace *trace, argument
2434 fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
2443 if (id > trace->sctbl->syscalls.max_id) {
2445 if (id >= trace->sctbl->syscalls.max_id) {
2451 err = trace__read_syscall_info(trace, id);
2457 if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
2458 (err = trace__read_syscall_info(trace, id)) != 0)
2461 if (trace->syscalls.table && trace->syscalls.table[id].nonexistent)
2464 return &trace->syscalls.table[id];
2469 …fprintf(trace->output, "Problems reading syscall %d: %d (%s)", id, -err, str_error_r(-err, sbuf, s…
2470 if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
2471 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
2472 fputs(" information\n", trace->output);
2537 static int trace__printf_interrupted_entry(struct trace *trace) argument
2543 if (trace->failure_only || trace->current == NULL)
2546 ttrace = thread__priv(trace->current);
2551 …printed = trace__fprintf_entry_head(trace, trace->current, 0, false, ttrace->entry_time, trace->o…
2552 printed += len = fprintf(trace->output, "%s)", ttrace->entry_str);
2554 if (len < trace->args_alignment - 4)
2555 printed += fprintf(trace->output, "%-*s", trace->args_alignment - 4 - len, " ");
2557 printed += fprintf(trace->output, " ...\n");
2560 ++trace->nr_events_printed;
2565 static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel, argument
2570 if (trace->print_sample) {
2573 printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
2616 static int trace__sys_enter(struct trace *trace, struct evsel *evsel, argument
2627 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2633 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2634 ttrace = thread__trace(thread, trace->output);
2638 trace__fprintf_sample(trace, evsel, sample, thread);
2648 if (!(trace->duration_filter || trace->summary_only || trace->min_stack))
2649 trace__printf_interrupted_entry(trace);
2660 if (evsel != trace->syscalls.events.sys_enter)
2661 …augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_sy…
2667 args, augmented_args, augmented_args_size, trace, thread);
2670 if (!(trace->duration_filter || trace->summary_only || trace->failure_only || trace->min_stack)) {
2673 trace__fprintf_entry_head(trace, thread, 0, false, ttrace->entry_time, trace->output);
2674 printed = fprintf(trace->output, "%s)", ttrace->entry_str);
2675 if (trace->args_alignment > printed)
2676 alignment = trace->args_alignment - printed;
2677 fprintf(trace->output, "%*s= ?\n", alignment, " ");
2685 if (trace->current != thread) {
2686 thread__put(trace->current);
2687 trace->current = thread__get(thread);
2695 static int trace__fprintf_sys_enter(struct trace *trace, struct evsel *evsel, argument
2701 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2709 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2710 ttrace = thread__trace(thread, trace->output);
2719 …augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_sy…
2720 …syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, th…
2721 fprintf(trace->output, "%s", msg);
2728 static int trace__resolve_callchain(struct trace *trace, struct evsel *evsel, argument
2735 trace->max_stack;
2739 if (machine__resolve(trace->host, &al, sample) < 0)
2748 static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sample) argument
2755 …chain(sample, 38, print_opts, get_tls_callchain_cursor(), symbol_conf.bt_stop_list, trace->output);
2765 static int trace__sys_exit(struct trace *trace, struct evsel *evsel, argument
2774 int alignment = trace->args_alignment;
2775 struct syscall *sc = trace__syscall_info(trace, evsel, id);
2781 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2782 ttrace = thread__trace(thread, trace->output);
2786 trace__fprintf_sample(trace, evsel, sample, thread);
2790 if (trace->summary)
2791 thread__update_stats(thread, ttrace, id, sample, ret, trace->errno_summary);
2793 if (!trace->fd_path_disabled && sc->is_open && ret >= 0 && ttrace->filename.pending_open) {
2796 ++trace->stats.vfs_getname;
2801 if (trace__filter_duration(trace, duration))
2804 } else if (trace->duration_filter)
2810 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor);
2812 if (cursor->nr < trace->min_stack)
2818 if (trace->summary_only || (ret >= 0 && trace->failure_only))
2821 …trace__fprintf_entry_head(trace, thread, duration, duration_calculated, ttrace->entry_time, trace-…
2824 printed = fprintf(trace->output, "%s", ttrace->entry_str);
2826 printed += fprintf(trace->output, " ... [");
2827 color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
2829 printed += fprintf(trace->output, "]: %s()", sc->name);
2839 fprintf(trace->output, ")%*s= ", alignment, " ");
2845 fprintf(trace->output, "%ld", ret);
2852 fprintf(trace->output, "-1 %s (%s)", e, emsg);
2855 fprintf(trace->output, "0 (Timeout)");
2861 .trace = trace,
2865 fprintf(trace->output, "%s", bf);
2867 fprintf(trace->output, "%#lx", ret);
2869 struct thread *child = machine__find_thread(trace->host, ret, ret);
2872 fprintf(trace->output, "%ld", ret);
2874 fprintf(trace->output, " (%s)", thread__comm_str(child));
2880 fputc('\n', trace->output);
2886 if (++trace->nr_events_printed == trace->max_events && trace->max_events != ULONG_MAX)
2890 trace__fprintf_callchain(trace, sample);
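
On the exit side, negative returns are printed as "-1 ERRNAME (message)" (line 2852). A rough glibc-only equivalent: strerrorname_np() needs glibc >= 2.32, whereas perf uses its own arch-aware errno tables (perf_env__arch_strerrno below):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>

    /* Print a raw syscall return in the "-1 ENOENT (...)" shape. */
    static void print_sys_ret(FILE *fp, long ret)
    {
        if (ret < 0)
            fprintf(fp, "-1 %s (%s)\n",
                    strerrorname_np((int)-ret), strerror((int)-ret));
        else
            fprintf(fp, "%ld\n", ret);
    }
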
2901 static int trace__vfs_getname(struct trace *trace, struct evsel *evsel, argument
2905 struct thread *thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
2962 static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel, argument
2968 struct thread *thread = machine__findnew_thread(trace->host,
2971 struct thread_trace *ttrace = thread__trace(thread, trace->output);
2977 trace->runtime_ms += runtime_ms;
2983 fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
3016 static void bpf_output__fprintf(struct trace *trace, argument
3020 bpf_output__printer, NULL, trace->output);
3021 ++trace->nr_events_printed;
3024 static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel, struct perf_sample… argument
3041 .trace = trace,
3043 .show_string_prefix = trace->show_string_prefix,
3073 if (val == 0 && !trace->show_zeros && !arg->show_zero && arg->strtoul != STUL_BTF_TYPE)
3078 if (trace->show_arg_names)
3081 …btf_printed = trace__btf_scnprintf(trace, &syscall_arg, bf + printed, size - printed, val, field->…
3090 return printed + fprintf(trace->output, "%s", bf);
3093 static int trace__event_handler(struct trace *trace, struct evsel *evsel, argument
3108 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
3113 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor);
3115 if (cursor->nr < trace->min_stack)
3121 trace__printf_interrupted_entry(trace);
3122 trace__fprintf_tstamp(trace, sample->time, trace->output);
3124 if (trace->trace_syscalls && trace->show_duration)
3125 fprintf(trace->output, "( ): ");
3128 trace__fprintf_comm_tid(trace, thread, trace->output);
3130 if (evsel == trace->syscalls.events.bpf_output) {
3132 struct syscall *sc = trace__syscall_info(trace, evsel, id);
3135 fprintf(trace->output, "%s(", sc->name);
3136 trace__fprintf_sys_enter(trace, evsel, sample);
3137 fputc(')', trace->output);
3148 fprintf(trace->output, "%s(", evsel->name);
3151 bpf_output__fprintf(trace, sample);
3154 trace__fprintf_sys_enter(trace, evsel, sample)) {
3155 if (trace->libtraceevent_print) {
3158 trace->output);
3160 trace__fprintf_tp_fields(trace, evsel, sample, thread, NULL, 0);
3166 fprintf(trace->output, ")\n");
3169 trace__fprintf_callchain(trace, sample);
3173 ++trace->nr_events_printed;
3201 static int trace__pgfault(struct trace *trace, argument
3214 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
3219 callchain_ret = trace__resolve_callchain(trace, evsel, sample, cursor);
3221 if (cursor->nr < trace->min_stack)
3227 ttrace = thread__trace(thread, trace->output);
3236 if (trace->summary_only)
3241 trace__fprintf_entry_head(trace, thread, 0, true, sample->time, trace->output);
3243 fprintf(trace->output, "%sfault [",
3247 print_location(trace->output, sample, &al, false, true);
3249 fprintf(trace->output, "] => ");
3262 print_location(trace->output, sample, &al, true, false);
3264 fprintf(trace->output, " (%c%c)\n", map_type, al.level);
3267 trace__fprintf_callchain(trace, sample);
3271 ++trace->nr_events_printed;
3280 static void trace__set_base_time(struct trace *trace, argument
3292 if (trace->base_time == 0 && !trace->full_time &&
3294 trace->base_time = sample->time;
3303 struct trace *trace = container_of(tool, struct trace, tool); local
3309 thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
3313 trace__set_base_time(trace, evsel, sample);
3316 ++trace->nr_events;
3317 handler(trace, evsel, event, sample);
3324 static int trace__record(struct trace *trace, int argc, const char **argv) argument
3356 if (trace->trace_syscalls) {
3374 if (trace->trace_pgfaults & TRACE_PFMAJ)
3378 if (trace->trace_pgfaults & TRACE_PFMIN)
3392 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
3455 static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *s… argument
3461 trace__process_event(trace, trace->host, event, sample);
3465 evsel = evlist__id2evsel(trace->evlist, sample->id);
3467 fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
3471 if (evswitch__discard(&trace->evswitch, evsel))
3474 trace__set_base_time(trace, evsel, sample);
3478 …fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
3483 handler(trace, evsel, event, sample);
3486 if (trace->nr_events_printed >= trace->max_events && trace->max_events != ULONG_MAX)
3490 static int trace__add_syscall_newtp(struct trace *trace) argument
3493 struct evlist *evlist = trace->evlist;
3510 evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
3511 evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
3516 if (callchain_param.enabled && !trace->kernel_syscallchains) {
3525 trace->syscalls.events.sys_enter = sys_enter;
3526 trace->syscalls.events.sys_exit = sys_exit;
3539 static int trace__set_ev_qualifier_tp_filter(struct trace *trace) argument
3543 char *filter = asprintf_expr_inout_ints("id", !trace->not_ev_qualifier,
3544 trace->ev_qualifier_ids.nr,
3545 trace->ev_qualifier_ids.entries);
3550 if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) {
3551 sys_exit = trace->syscalls.events.sys_exit;
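
trace__set_ev_qualifier_tp_filter() compiles the id list into a tracepoint filter via asprintf_expr_inout_ints(), producing e.g. "id == 3 || id == 42" (or "id != 3 && id != 42" when negated) and appending it to the sys_enter/sys_exit events. A sketch of such a builder; build_id_filter() is illustrative, not perf's helper:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    static char *build_id_filter(const char *var, bool in, const int *ids, size_t nr)
    {
        size_t cap = nr * 32 + 2, len = 0; /* ~32 bytes per term is plenty */
        char *s = malloc(cap);

        if (s == NULL)
            return NULL;
        s[0] = '\0';
        for (size_t i = 0; i < nr; i++)
            len += snprintf(s + len, cap - len, "%s%s %s %d",
                            i ? (in ? " || " : " && ") : "",
                            var, in ? "==" : "!=", ids[i]);
        return s;
    }

Calling it as build_id_filter("id", !negated, entries, nr) would mirror the arguments at lines 3543-3545.
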
3581 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name) argument
3586 if (trace->skel->obj == NULL)
3589 bpf_object__for_each_program(pos, trace->skel->obj) {
3600 static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc, argument
3608 prog = trace__find_bpf_program_by_title(trace, default_prog_name);
3613 prog = trace__find_bpf_program_by_title(trace, default_prog_name);
3620 prog = trace__find_bpf_program_by_title(trace, prog_name);
3630 return trace->skel->progs.syscall_unaugmented;
3633 static void trace__init_syscall_bpf_progs(struct trace *trace, int id) argument
3635 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3640 …sc->bpf_prog.sys_enter = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.…
3641 …sc->bpf_prog.sys_exit = trace__find_syscall_bpf_prog(trace, sc, sc->fmt ? sc->fmt->bpf_prog_name.…
3644 static int trace__bpf_prog_sys_enter_fd(struct trace *trace, int id) argument
3646 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3647 …return sc ? bpf_program__fd(sc->bpf_prog.sys_enter) : bpf_program__fd(trace->skel->progs.syscall_u…
3650 static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id) argument
3652 struct syscall *sc = trace__syscall_info(trace, NULL, id);
3653 …return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->skel->progs.syscall_un…
3656 static int trace__bpf_sys_enter_beauty_map(struct trace *trace, int key, unsigned int *beauty_array) argument
3659 struct syscall *sc = trace__syscall_info(trace, NULL, key);
3668 trace__load_vmlinux_btf(trace);
3669 if (trace->btf == NULL)
3694 if (syscall_arg_fmt__cache_btf_struct(&sc->arg_fmt[i], trace->btf, name))
3741 static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *s… argument
3755 for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) {
3756 int id = syscalltbl__id_at_idx(trace->sctbl, i);
3757 struct syscall *pair = trace__syscall_info(trace, NULL, id);
3762 pair->bpf_prog.sys_enter == trace->skel->progs.syscall_unaugmented)
3827 …pair_prog = trace__find_syscall_bpf_prog(trace, pair, pair->fmt ? pair->fmt->bpf_prog_name.sys_ent…
3828 if (pair_prog == trace->skel->progs.syscall_unaugmented)
3841 static int trace__init_syscalls_bpf_prog_array_maps(struct trace *trace) argument
3843 int map_enter_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_enter);
3844 int map_exit_fd = bpf_map__fd(trace->skel->maps.syscalls_sys_exit);
3845 int beauty_map_fd = bpf_map__fd(trace->skel->maps.beauty_map_enter);
3849 for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) {
3850 int prog_fd, key = syscalltbl__id_at_idx(trace->sctbl, i);
3852 if (!trace__syscall_enabled(trace, key))
3855 trace__init_syscall_bpf_progs(trace, key);
3858 prog_fd = trace__bpf_prog_sys_enter_fd(trace, key);
3862 prog_fd = trace__bpf_prog_sys_exit_fd(trace, key);
3869 err = trace__bpf_sys_enter_beauty_map(trace, key, (unsigned int *)beauty_array);
3905 for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) {
3906 int key = syscalltbl__id_at_idx(trace->sctbl, i);
3907 struct syscall *sc = trace__syscall_info(trace, NULL, key);
3918 if (sc->bpf_prog.sys_enter != trace->skel->progs.syscall_unaugmented)
3925 pair_prog = trace__find_usable_bpf_prog_entry(trace, sc);
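
The sys_enter/sys_exit maps filled above are BPF_MAP_TYPE_PROG_ARRAY tables: the key is the syscall id, the value the fd of the program the kernel should tail-call for it (falling back to syscall_unaugmented). A sketch of one such update, assuming libbpf >= 0.8 for bpf_map__update_elem():

    #include <bpf/libbpf.h>

    static int prog_array_set(const struct bpf_map *map, int key,
                              const struct bpf_program *prog)
    {
        int fd = bpf_program__fd(prog);

        return bpf_map__update_elem(map, &key, sizeof(key),
                                    &fd, sizeof(fd), BPF_ANY);
    }
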
3945 static int trace__set_ev_qualifier_filter(struct trace *trace) argument
3947 if (trace->syscalls.events.sys_enter)
3948 return trace__set_ev_qualifier_tp_filter(trace);
3970 static int trace__set_filter_loop_pids(struct trace *trace) argument
3976 struct thread *thread = machine__find_thread(trace->host, pids[0], pids[0]);
3979 struct thread *parent = machine__find_thread(trace->host,
3994 err = evlist__append_tp_filter_pids(trace->evlist, nr, pids);
3995 if (!err && trace->filter_pids.map)
3996 err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
4001 static int trace__set_filter_pids(struct trace *trace) argument
4010 if (trace->filter_pids.nr > 0) {
4011 err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
4012 trace->filter_pids.entries);
4013 if (!err && trace->filter_pids.map) {
4014 err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
4015 trace->filter_pids.entries);
4017 } else if (perf_thread_map__pid(trace->evlist->core.threads, 0) == -1) {
4018 err = trace__set_filter_loop_pids(trace);
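
Both pid-filter paths end in bpf_map__set_filter_pids(), which in this file amounts to marking each pid in a pid-keyed BPF hash map so the BPF side can drop events from perf itself and the pids feeding its output. A sketch of that loop (the bool value type is an assumption):

    #include <bpf/libbpf.h>
    #include <stdbool.h>
    #include <sys/types.h>

    static int set_filter_pids(const struct bpf_map *map, size_t nr, pid_t *pids)
    {
        bool value = true;
        int err = 0;

        for (size_t i = 0; i < nr && !err; i++)
            err = bpf_map__update_elem(map, &pids[i], sizeof(*pids),
                                       &value, sizeof(value), BPF_ANY);
        return err;
    }
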
4024 static int __trace__deliver_event(struct trace *trace, union perf_event *event) argument
4026 struct evlist *evlist = trace->evlist;
4031 fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
4033 trace__handle_event(trace, event, &sample);
4038 static int __trace__flush_events(struct trace *trace) argument
4040 u64 first = ordered_events__first_time(&trace->oe.data);
4041 u64 flush = trace->oe.last - NSEC_PER_SEC;
4045 return ordered_events__flush_time(&trace->oe.data, flush);
4050 static int trace__flush_events(struct trace *trace) argument
4052 return !trace->sort_events ? 0 : __trace__flush_events(trace);
4055 static int trace__deliver_event(struct trace *trace, union perf_event *event) argument
4059 if (!trace->sort_events)
4060 return __trace__deliver_event(trace, event);
4062 err = evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
4066 err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0, NULL);
4070 return trace__flush_events(trace);
4076 struct trace *trace = container_of(oe, struct trace, oe.data); local
4078 return __trace__deliver_event(trace, event->event);
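
With --sort-events, delivery is decoupled from the ring buffers: events are queued by timestamp (ordered_events__queue) and flushed once they fall more than a second behind the newest timestamp seen, so stragglers from other CPUs can still be merged in order. A sketch of that deadline computation; flush_deadline() is an illustrative helper, nanosecond timestamps assumed:

    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* Returns the timestamp up to which the queue may be flushed,
     * or 0 when nothing is old enough (or the queue is empty). */
    static uint64_t flush_deadline(uint64_t newest_seen, uint64_t oldest_queued)
    {
        uint64_t flush = newest_seen > NSEC_PER_SEC ?
                         newest_seen - NSEC_PER_SEC : 0;

        if (oldest_queued == 0 || oldest_queued >= flush)
            return 0;
        return flush;
    }
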
4099 static int trace__expand_filter(struct trace *trace, struct evsel *evsel) argument
4152 .trace = trace,
4200 static int trace__expand_filters(struct trace *trace, struct evsel **err_evsel) argument
4202 struct evlist *evlist = trace->evlist;
4209 if (trace__expand_filter(trace, evsel)) {
4218 static int trace__run(struct trace *trace, int argc, const char **argv) argument
4220 struct evlist *evlist = trace->evlist;
4227 trace->live = true;
4229 if (!trace->raw_augmented_syscalls) {
4230 if (trace->trace_syscalls && trace__add_syscall_newtp(trace))
4233 if (trace->trace_syscalls)
4234 trace->vfs_getname = evlist__add_vfs_getname(evlist);
4237 if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
4241 evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
4245 if ((trace->trace_pgfaults & TRACE_PFMIN)) {
4249 evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
4254 trace->opts.ignore_missing_thread = trace->opts.target.uid != UINT_MAX || trace->opts.target.pid;
4256 if (trace->sched &&
4284 if (trace->cgroup)
4285 evlist__set_default_cgroup(trace->evlist, trace->cgroup);
4287 err = evlist__create_maps(evlist, &trace->opts.target);
4289 fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
4293 err = trace__symbols_init(trace, evlist);
4295 fprintf(trace->output, "Problems initializing symbol libraries!\n");
4299 evlist__config(evlist, &trace->opts, &callchain_param);
4302 err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL);
4304 fprintf(trace->output, "Couldn't run the workload!\n");
4314 if (trace->syscalls.events.bpf_output) {
4321 perf_cpu_map__for_each_cpu(cpu, i, trace->syscalls.events.bpf_output->core.cpus) {
4322 bpf_map__update_elem(trace->skel->maps.__augmented_syscalls__,
4324 xyarray__entry(trace->syscalls.events.bpf_output->core.fd,
4330 err = trace__set_filter_pids(trace);
4335 if (trace->skel && trace->skel->progs.sys_enter)
4336 trace__init_syscalls_bpf_prog_array_maps(trace);
4339 if (trace->ev_qualifier_ids.nr > 0) {
4340 err = trace__set_ev_qualifier_filter(trace);
4344 if (trace->syscalls.events.sys_exit) {
4346 trace->syscalls.events.sys_exit->filter);
4361 trace->fd_path_disabled = !trace__syscall_enabled(trace, syscalltbl__id(trace->sctbl, "close"));
4363 err = trace__expand_filters(trace, &evsel);
4366 err = evlist__apply_filters(evlist, &evsel, &trace->opts.target);
4370 err = evlist__mmap(evlist, trace->opts.mmap_pages);
4374 if (!target__none(&trace->opts.target) && !trace->opts.target.initial_delay)
4380 if (trace->opts.target.initial_delay) {
4381 usleep(trace->opts.target.initial_delay * 1000);
4385 trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
4398 evsel->core.attr.sample_max_stack = trace->max_stack;
4401 before = trace->nr_events;
4412 ++trace->nr_events;
4414 err = trace__deliver_event(trace, event);
4431 if (trace->nr_events == before) {
4440 if (trace__flush_events(trace))
4448 thread__zput(trace->current);
4452 if (trace->sort_events)
4453 ordered_events__flush(&trace->oe.data, OE_FLUSH__FINAL);
4456 if (trace->summary)
4457 trace__fprintf_thread_summary(trace, trace->output);
4459 if (trace->show_tool_stats) {
4460 fprintf(trace->output, "Stats:\n "
4463 trace->stats.vfs_getname,
4464 trace->stats.proc_getname);
4469 trace__symbols__exit(trace);
4472 cgroup__put(trace->cgroup);
4473 trace->evlist = NULL;
4474 trace->live = false;
4495 fprintf(trace->output, "%s\n", errbuf);
4499 fprintf(trace->output,
4506 fprintf(trace->output, "Not enough memory to run!\n");
4510 fprintf(trace->output, "errno=%d,%s\n", errno, strerror(errno));
4514 static int trace__replay(struct trace *trace) argument
4522 .force = trace->force,
4528 trace->tool.sample = trace__process_sample;
4529 trace->tool.mmap = perf_event__process_mmap;
4530 trace->tool.mmap2 = perf_event__process_mmap2;
4531 trace->tool.comm = perf_event__process_comm;
4532 trace->tool.exit = perf_event__process_exit;
4533 trace->tool.fork = perf_event__process_fork;
4534 trace->tool.attr = perf_event__process_attr;
4535 trace->tool.tracing_data = perf_event__process_tracing_data;
4536 trace->tool.build_id = perf_event__process_build_id;
4537 trace->tool.namespaces = perf_event__process_namespaces;
4539 trace->tool.ordered_events = true;
4540 trace->tool.ordering_requires_timestamps = true;
4543 trace->multiple_threads = true;
4545 session = perf_session__new(&data, &trace->tool);
4549 if (trace->opts.target.pid)
4550 symbol_conf.pid_list_str = strdup(trace->opts.target.pid);
4552 if (trace->opts.target.tid)
4553 symbol_conf.tid_list_str = strdup(trace->opts.target.tid);
4558 trace->host = &session->machines.host;
4565 trace->syscalls.events.sys_enter = evsel;
4578 trace->syscalls.events.sys_exit = evsel;
4602 else if (trace->summary)
4603 trace__fprintf_thread_summary(trace, trace->output);
4635 struct trace *trace, FILE *fp) argument
4663 sc = &trace->syscalls.table[syscall_stats_entry->syscall];
4669 if (trace->errno_summary && stats->nr_failures) {
4674 …fprintf(fp, "\t\t\t\t%s: %d\n", perf_env__arch_strerrno(trace->host->env, e + 1), stats->errnos[e]…
4686 static size_t trace__fprintf_thread(FILE *fp, struct thread *thread, struct trace *trace) argument
4695 ratio = (double)ttrace->nr_events / trace->nr_events * 100.0;
4704 if (trace->sched)
4709 printed += thread__dump_stats(ttrace, trace, fp);
4737 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp) argument
4742 if (machine__thread_list(trace->host, &threads) == 0) {
4748 printed += trace__fprintf_thread(fp, pos->thread, trace);
4757 struct trace *trace = opt->value; local
4759 trace->duration_filter = atof(str);
4768 struct trace *trace = opt->value; local
4778 i = trace->filter_pids.nr = intlist__nr_entries(list) + 1;
4779 trace->filter_pids.entries = calloc(i, sizeof(pid_t));
4781 if (trace->filter_pids.entries == NULL)
4784 trace->filter_pids.entries[0] = getpid();
4786 for (i = 1; i < trace->filter_pids.nr; ++i)
4787 trace->filter_pids.entries[i] = intlist__entry(list, i - 1)->i;
4795 static int trace__open_output(struct trace *trace, const char *filename) argument
4807 trace->output = fopen(filename, "w");
4809 return trace->output == NULL ? -errno : 0;
4905 struct trace *trace = (struct trace *)opt->value; local
4918 trace->not_ev_qualifier = true;
4926 if (syscalltbl__id(trace->sctbl, s) >= 0 ||
4927 syscalltbl__strglobmatch_first(trace->sctbl, s, &idx) >= 0) {
4963 trace->ev_qualifier = strlist__new(lists[1], &slist_config);
4964 if (trace->ev_qualifier == NULL) {
4965 fputs("Not enough memory to parse event qualifier", trace->output);
4969 if (trace__validate_ev_qualifier(trace))
4971 trace->trace_syscalls = true;
4978 .evlistp = &trace->evlist,
4997 struct trace *trace = opt->value; local
4999 if (!list_empty(&trace->evlist->core.entries)) {
5001 .value = &trace->evlist,
5005 trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
5012 struct trace *trace = arg; local
5016 trace->perfconfig_events = strdup(value);
5017 if (trace->perfconfig_events == NULL) {
5022 trace->show_tstamp = perf_config_bool(var, value);
5024 trace->show_duration = perf_config_bool(var, value);
5026 trace->show_arg_names = perf_config_bool(var, value);
5027 if (!trace->show_arg_names)
5028 trace->show_zeros = true;
5031 if (!trace->show_arg_names && !new_show_zeros) {
5035 trace->show_zeros = new_show_zeros;
5037 trace->show_string_prefix = perf_config_bool(var, value);
5039 trace->opts.no_inherit = perf_config_bool(var, value);
5043 trace->args_alignment = args_alignment;
5046 trace->libtraceevent_print = true;
5048 trace->libtraceevent_print = false;
5054 static void trace__exit(struct trace *trace) argument
5058 strlist__delete(trace->ev_qualifier);
5059 zfree(&trace->ev_qualifier_ids.entries);
5060 if (trace->syscalls.table) {
5061 for (i = 0; i <= trace->sctbl->syscalls.max_id; i++)
5062 syscall__exit(&trace->syscalls.table[i]);
5063 zfree(&trace->syscalls.table);
5065 syscalltbl__delete(trace->sctbl);
5066 zfree(&trace->perfconfig_events);
5090 struct trace trace = { local
5114 OPT_CALLBACK('e', "event", &trace, "event",
5117 OPT_CALLBACK(0, "filter", &trace.evlist, "filter",
5119 OPT_BOOLEAN(0, "comm", &trace.show_comm,
5121 OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
5122 OPT_CALLBACK(0, "expr", &trace, "expr", "list of syscalls/events to trace",
5126 OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
5128 OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
5130 OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
5132 OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
5134 OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
5136 OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
5138 OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
5140 OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
5142 OPT_CALLBACK(0, "duration", &trace, "float",
5145 OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
5147 OPT_BOOLEAN('T', "time", &trace.full_time,
5149 OPT_BOOLEAN(0, "failure", &trace.failure_only,
5151 OPT_BOOLEAN('s', "summary", &trace.summary_only,
5153 OPT_BOOLEAN('S', "with-summary", &trace.summary,
5155 OPT_BOOLEAN(0, "errno-summary", &trace.errno_summary,
5157 OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
5159 OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
5160 OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
5161 OPT_CALLBACK(0, "call-graph", &trace.opts,
5164 OPT_BOOLEAN(0, "libtraceevent_print", &trace.libtraceevent_print,
5166 OPT_BOOLEAN(0, "kernel-syscall-graph", &trace.kernel_syscallchains,
5168 OPT_ULONG(0, "max-events", &trace.max_events,
5170 OPT_UINTEGER(0, "min-stack", &trace.min_stack,
5173 OPT_UINTEGER(0, "max-stack", &trace.max_stack,
5177 OPT_BOOLEAN(0, "sort-events", &trace.sort_events,
5179 OPT_BOOLEAN(0, "print-sample", &trace.print_sample,
5183 OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
5185 OPT_INTEGER('D', "delay", &trace.opts.target.initial_delay,
5188 OPT_BOOLEAN(0, "force-btf", &trace.force_btf, "Prefer btf_dump general pretty printer"
5190 OPTS_EVSWITCH(&trace.evswitch),
5210 trace.evlist = evlist__new();
5211 trace.sctbl = syscalltbl__new();
5213 if (trace.evlist == NULL || trace.sctbl == NULL) {
5228 err = perf_config(trace__config, &trace);
5246 if (!trace.trace_syscalls && !trace.trace_pgfaults &&
5247 trace.evlist->core.nr_entries == 0 /* Was --events used? */) {
5248 trace.trace_syscalls = true;
5256 if (trace.perfconfig_events != NULL) {
5260 err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err);
5262 parse_events_error__print(&parse_err, trace.perfconfig_events);
5268 if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
5274 if (!trace.trace_syscalls)
5282 trace.skel = augmented_raw_syscalls_bpf__open();
5283 if (!trace.skel) {
5292 bpf_object__for_each_program(prog, trace.skel->obj) {
5293 if (prog != trace.skel->progs.sys_enter && prog != trace.skel->progs.sys_exit)
5297 err = augmented_raw_syscalls_bpf__load(trace.skel);
5303 augmented_raw_syscalls_bpf__attach(trace.skel);
5304 trace__add_syscall_newtp(&trace);
5308 err = bpf__setup_bpf_output(trace.evlist);
5314 trace.syscalls.events.bpf_output = evlist__last(trace.evlist);
5315 assert(evsel__name_is(trace.syscalls.events.bpf_output, "__augmented_syscalls__"));
5320 if (trace.trace_pgfaults) {
5321 trace.opts.sample_address = true;
5322 trace.opts.sample_time = true;
5325 if (trace.opts.mmap_pages == UINT_MAX)
5328 if (trace.max_stack == UINT_MAX) {
5329 trace.max_stack = input_name ? PERF_MAX_STACK_DEPTH : sysctl__max_stack();
5334 if ((trace.min_stack || max_stack_user_set) && !callchain_param.enabled) {
5335 record_opts__parse_callchain(&trace.opts, &callchain_param, "dwarf", false);
5341 trace.opts.mmap_pages = perf_event_mlock_kb_in_pages() * 4;
5346 if (trace.evlist->core.nr_entries > 0) {
5349 evlist__set_default_evsel_handler(trace.evlist, trace__event_handler);
5350 if (evlist__set_syscall_tp_fields(trace.evlist, &use_btf)) {
5356 trace__load_vmlinux_btf(&trace);
5359 if (trace.sort_events) {
5360 ordered_events__init(&trace.oe.data, ordered_events__deliver_event, &trace);
5361 ordered_events__set_copy_on_queue(&trace.oe.data, true);
5375 if (trace.syscalls.events.bpf_output) {
5376 evlist__for_each_entry(trace.evlist, evsel) {
5380 trace.raw_augmented_syscalls = true;
5384 if (trace.syscalls.events.bpf_output->priv == NULL &&
5386 struct evsel *augmented = trace.syscalls.events.bpf_output;
5433 if (trace.raw_augmented_syscalls)
5434 trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
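
The (6 + 1) * sizeof(long) + sc->id.offset size above follows from the raw_syscalls:sys_enter record layout: after the common tracepoint header (sc->id.offset bytes) come the syscall number and six arguments, and anything the BPF program appended, the augmentation, sits past them. A sketch of that layout, with the header elided:

    /* raw_syscalls:sys_enter payload, past the common tracepoint header */
    struct sys_enter_record {
        long          id;               /* syscall number: the "+1" long */
        unsigned long args[6];          /* the six argument registers */
        /* augmented data (copied strings, structs, ...) starts here */
    };
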
5442 return trace__record(&trace, argc-1, &argv[1]);
5445 if (trace.errno_summary && !trace.summary && !trace.summary_only)
5446 trace.summary_only = true;
5449 if (trace.summary_only)
5450 trace.summary = trace.summary_only;
5453 err = trace__open_output(&trace, output_name);
5460 err = evswitch__init(&trace.evswitch, trace.evlist, stderr);
5464 err = target__validate(&trace.opts.target);
5466 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
5467 fprintf(trace.output, "%s", bf);
5471 err = target__parse_uid(&trace.opts.target);
5473 target__strerror(&trace.opts.target, err, bf, sizeof(bf));
5474 fprintf(trace.output, "%s", bf);
5478 if (!argc && target__none(&trace.opts.target))
5479 trace.opts.target.system_wide = true;
5482 err = trace__replay(&trace);
5484 err = trace__run(&trace, argc, argv);
5488 fclose(trace.output);
5490 trace__exit(&trace);
5492 augmented_raw_syscalls_bpf__destroy(trace.skel);