Lines Matching refs:syscalls
152 } syscalls; member
2073 if (trace->syscalls.table == NULL) { in trace__read_syscall_info()
2074 trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc)); in trace__read_syscall_info()
2075 if (trace->syscalls.table == NULL) in trace__read_syscall_info()
2079 if (id > trace->sctbl->syscalls.max_id || (id == 0 && trace->syscalls.table == NULL)) { in trace__read_syscall_info()
2081 struct syscall *table = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc)); in trace__read_syscall_info()
2087 if (trace->syscalls.table == NULL) in trace__read_syscall_info()
2090 memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc)); in trace__read_syscall_info()
2092 trace->syscalls.table = table; in trace__read_syscall_info()
2093 trace->sctbl->syscalls.max_id = id; in trace__read_syscall_info()
2096 sc = trace->syscalls.table + id; in trace__read_syscall_info()
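The trace__read_syscall_info() hits above grow the per-id syscall table on demand: the table is calloc()'d up to the known max_id the first time, and when a larger id turns up it is realloc()'d, the new tail is zeroed with memset(), and max_id is bumped. A minimal standalone sketch of that pattern, using hypothetical names (struct syscall_desc, struct desc_table, desc_table__reserve) rather than perf's own types:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-ins for perf's struct syscall and trace->syscalls.table. */
struct syscall_desc {
	const char *name;
	bool	    nonexistent;
};

struct desc_table {
	struct syscall_desc *entries;
	int		     max_id;
};

/* Grow the table so that 'id' is a valid index, zeroing any new slots. */
static int desc_table__reserve(struct desc_table *tbl, int id)
{
	if (tbl->entries == NULL) {
		tbl->entries = calloc(id + 1, sizeof(*tbl->entries));
		if (tbl->entries == NULL)
			return -1;
		tbl->max_id = id;
		return 0;
	}

	if (id > tbl->max_id) {
		struct syscall_desc *entries;

		entries = realloc(tbl->entries, (id + 1) * sizeof(*entries));
		if (entries == NULL)
			return -1;
		/* realloc() leaves the new tail uninitialized, so clear the
		 * slots between the old max_id and the new id, as the memset
		 * hit above does. */
		memset(entries + tbl->max_id + 1, 0,
		       (id - tbl->max_id) * sizeof(*entries));
		tbl->entries = entries;
		tbl->max_id  = id;
	}

	return 0;
}

This is a sketch of the idea only; perf additionally seeds the initial size from trace->sctbl->syscalls.max_id and fills each slot lazily from the syscall table.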
2443 if (id > trace->sctbl->syscalls.max_id) {
2445 if (id >= trace->sctbl->syscalls.max_id) {
2457 if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
2461 if (trace->syscalls.table && trace->syscalls.table[id].nonexistent)
2464 return &trace->syscalls.table[id];
2470 if (id <= trace->sctbl->syscalls.max_id && trace->syscalls.table[id].name != NULL)
2471 fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
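The lookup hits just above bounds-check the id against max_id, treat entries flagged nonexistent as misses, and fall back to printing only the raw id when no name is known. Continuing the hypothetical desc_table sketch, the same checks could look like:

/* Return the descriptor for 'id', or NULL so the caller can fall back to
 * printing the bare syscall number. */
static struct syscall_desc *desc_table__lookup(struct desc_table *tbl, int id)
{
	if (tbl->entries == NULL || id > tbl->max_id)
		return NULL;		/* out of range for the current table */
	if (tbl->entries[id].nonexistent)
		return NULL;		/* known hole in the syscall table */
	return &tbl->entries[id];
}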
2660 if (evsel != trace->syscalls.events.sys_enter)
3130 if (evsel == trace->syscalls.events.bpf_output) {
3525 trace->syscalls.events.sys_enter = sys_enter;
3526 trace->syscalls.events.sys_exit = sys_exit;
3550 if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) {
3551 sys_exit = trace->syscalls.events.sys_exit;
3755 for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) {
3849 for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) {
3905 for (int i = 0; i < trace->sctbl->syscalls.nr_entries; ++i) {
3947 if (trace->syscalls.events.sys_enter)
4314 if (trace->syscalls.events.bpf_output) {
4321 perf_cpu_map__for_each_cpu(cpu, i, trace->syscalls.events.bpf_output->core.cpus) {
4324 xyarray__entry(trace->syscalls.events.bpf_output->core.fd,
4344 if (trace->syscalls.events.sys_exit) {
4346 trace->syscalls.events.sys_exit->filter);
4565 trace->syscalls.events.sys_enter = evsel;
4578 trace->syscalls.events.sys_exit = evsel;
4663 sc = &trace->syscalls.table[syscall_stats_entry->syscall];
5060 if (trace->syscalls.table) {
5061 for (i = 0; i <= trace->sctbl->syscalls.max_id; i++)
5062 syscall__exit(&trace->syscalls.table[i]);
5063 zfree(&trace->syscalls.table);
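The teardown hits above walk every slot up to max_id, run syscall__exit() on each, and then zfree() the table. The matching step for the hypothetical desc_table sketch (whose entries own nothing, so only the array itself needs freeing):

static void desc_table__delete(struct desc_table *tbl)
{
	/* perf first releases per-entry state (syscall__exit()) for each slot
	 * up to and including max_id, then frees and NULLs the array. */
	free(tbl->entries);
	tbl->entries = NULL;
	tbl->max_id  = -1;
}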
5314 trace.syscalls.events.bpf_output = evlist__last(trace.evlist);
5315 assert(evsel__name_is(trace.syscalls.events.bpf_output, "__augmented_syscalls__"));
5375 if (trace.syscalls.events.bpf_output) {
5384 if (trace.syscalls.events.bpf_output->priv == NULL &&
5386 struct evsel *augmented = trace.syscalls.events.bpf_output;