Lines Matching refs:rec

126 static bool switch_output_signal(struct record *rec)  in switch_output_signal()  argument
128 return rec->switch_output.signal && in switch_output_signal()
132 static bool switch_output_size(struct record *rec) in switch_output_size() argument
134 return rec->switch_output.size && in switch_output_size()
136 (rec->bytes_written >= rec->switch_output.size); in switch_output_size()
139 static bool switch_output_time(struct record *rec) in switch_output_time() argument
141 return rec->switch_output.time && in switch_output_time()
145 static bool record__output_max_size_exceeded(struct record *rec) in record__output_max_size_exceeded() argument
147 return rec->output_max_size && in record__output_max_size_exceeded()
148 (rec->bytes_written >= rec->output_max_size); in record__output_max_size_exceeded()
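The predicates above (switch_output_signal, switch_output_size, switch_output_time, record__output_max_size_exceeded) share one guard pattern: a zero threshold means the feature is off, and a non-zero threshold fires once rec->bytes_written reaches it. A minimal standalone sketch of that pattern follows; the *_sketch types and names are stand-ins, not the perf internals.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the perf-internal structures above. */
struct switch_output_sketch { unsigned long long size; };
struct record_sketch {
	unsigned long long bytes_written;
	unsigned long long output_max_size;
	struct switch_output_sketch switch_output;
};

/* Same shape as switch_output_size(): a zero threshold disables the check. */
static bool switch_output_size_sketch(const struct record_sketch *rec)
{
	return rec->switch_output.size &&
	       (rec->bytes_written >= rec->switch_output.size);
}

/* Same shape as record__output_max_size_exceeded(). */
static bool output_max_size_exceeded_sketch(const struct record_sketch *rec)
{
	return rec->output_max_size &&
	       (rec->bytes_written >= rec->output_max_size);
}

int main(void)
{
	struct record_sketch rec = {
		.bytes_written   = 8 * 1024,
		.output_max_size = 0,			/* limit disabled */
		.switch_output   = { .size = 4 * 1024 },
	};

	if (output_max_size_exceeded_sketch(&rec))
		printf("stop recording: output size limit reached\n");
	if (switch_output_size_sketch(&rec))
		printf("rotate the output file\n");
	return 0;
}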
151 static int record__write(struct record *rec, struct mmap *map __maybe_unused, in record__write() argument
154 struct perf_data_file *file = &rec->session->data->file; in record__write()
161 rec->bytes_written += size; in record__write()
163 if (record__output_max_size_exceeded(rec) && !done) { in record__write()
166 rec->bytes_written >> 10); in record__write()
170 if (switch_output_size(rec)) in record__write()
176 static int record__aio_enabled(struct record *rec);
177 static int record__comp_enabled(struct record *rec);
289 struct record *rec; member
312 if (record__comp_enabled(aio->rec)) { in record__aio_pushfn()
313 size = zstd_compress(aio->rec->session, aio->data + aio->size, in record__aio_pushfn()
339 static int record__aio_push(struct record *rec, struct mmap *map, off_t *off) in record__aio_push() argument
342 int trace_fd = rec->session->data->file.fd; in record__aio_push()
343 struct record_aio aio = { .rec = rec, .size = 0 }; in record__aio_push()
356 rec->samples++; in record__aio_push()
360 rec->bytes_written += aio.size; in record__aio_push()
361 if (switch_output_size(rec)) in record__aio_push()
386 static void record__aio_mmap_read_sync(struct record *rec) in record__aio_mmap_read_sync() argument
389 struct evlist *evlist = rec->evlist; in record__aio_mmap_read_sync()
392 if (!record__aio_enabled(rec)) in record__aio_mmap_read_sync()
426 static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused, in record__aio_push()
441 static void record__aio_mmap_read_sync(struct record *rec __maybe_unused) in record__aio_mmap_read_sync()
446 static int record__aio_enabled(struct record *rec) in record__aio_enabled() argument
448 return rec->opts.nr_cblocks > 0; in record__aio_enabled()
507 static int record__comp_enabled(struct record *rec) in record__comp_enabled() argument
509 return rec->opts.comp_level > 0; in record__comp_enabled()
517 struct record *rec = container_of(tool, struct record, tool); in process_synthesized_event() local
518 return record__write(rec, NULL, event, event->header.size); in process_synthesized_event()
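process_synthesized_event() above recovers its struct record from the struct perf_tool pointer with container_of(); the same idiom appears in process_sample_event(), record__process_signal_event() and process_timestamp_boundary() further down. A standalone sketch of the idiom, with hypothetical *_sketch types:

#include <stddef.h>
#include <stdio.h>

/* The classic container_of: step back from a member to its enclosing object. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tool_sketch { int placeholder; };

struct record_sketch {
	int samples;
	struct tool_sketch tool;	/* embedded member handed to callbacks */
};

/* A callback that only receives the embedded member... */
static void callback(struct tool_sketch *tool)
{
	/* ...but can still reach the enclosing record, as the perf callbacks do. */
	struct record_sketch *rec = container_of(tool, struct record_sketch, tool);
	rec->samples++;
}

int main(void)
{
	struct record_sketch rec = { .samples = 0 };

	callback(&rec.tool);
	printf("samples = %d\n", rec.samples);
	return 0;
}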
537 struct record *rec = to; in record__pushfn() local
539 if (record__comp_enabled(rec)) { in record__pushfn()
540 size = zstd_compress(rec->session, map->data, mmap__mmap_len(map), bf, size); in record__pushfn()
544 rec->samples++; in record__pushfn()
545 return record__write(rec, map, bf, size); in record__pushfn()
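When record__comp_enabled() is true, record__pushfn() above (and record__aio_pushfn() earlier) runs the ring-buffer chunk through the perf-internal zstd_compress() wrapper before record__write(). As a rough illustration of what that wrapper ultimately does, here is a standalone libzstd call; the payload, buffer size and level are made up, and this is plain ZSTD_compress(), not perf's wrapper:

/* Build with: cc zstd_sketch.c -lzstd */
#include <stdio.h>
#include <string.h>
#include <zstd.h>

int main(void)
{
	const char src[] = "example ring-buffer payload payload payload payload";
	char dst[256];

	/* One-shot compression; perf picks the level from -z/--compression-level. */
	size_t n = ZSTD_compress(dst, sizeof(dst), src, strlen(src), 1);
	if (ZSTD_isError(n)) {
		fprintf(stderr, "zstd: %s\n", ZSTD_getErrorName(n));
		return 1;
	}
	printf("compressed %zu -> %zu bytes\n", strlen(src), n);
	return 0;
}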
601 struct record *rec = container_of(tool, struct record, tool); in record__process_auxtrace() local
602 struct perf_data *data = &rec->data; in record__process_auxtrace()
614 err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index, in record__process_auxtrace()
625 record__write(rec, map, event, event->header.size); in record__process_auxtrace()
626 record__write(rec, map, data1, len1); in record__process_auxtrace()
628 record__write(rec, map, data2, len2); in record__process_auxtrace()
629 record__write(rec, map, &pad, padding); in record__process_auxtrace()
634 static int record__auxtrace_mmap_read(struct record *rec, in record__auxtrace_mmap_read() argument
639 ret = auxtrace_mmap__read(map, rec->itr, &rec->tool, in record__auxtrace_mmap_read()
645 rec->samples++; in record__auxtrace_mmap_read()
650 static int record__auxtrace_mmap_read_snapshot(struct record *rec, in record__auxtrace_mmap_read_snapshot() argument
655 ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool, in record__auxtrace_mmap_read_snapshot()
657 rec->opts.auxtrace_snapshot_size); in record__auxtrace_mmap_read_snapshot()
662 rec->samples++; in record__auxtrace_mmap_read_snapshot()
667 static int record__auxtrace_read_snapshot_all(struct record *rec) in record__auxtrace_read_snapshot_all() argument
672 for (i = 0; i < rec->evlist->core.nr_mmaps; i++) { in record__auxtrace_read_snapshot_all()
673 struct mmap *map = &rec->evlist->mmap[i]; in record__auxtrace_read_snapshot_all()
678 if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) { in record__auxtrace_read_snapshot_all()
687 static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit) in record__read_auxtrace_snapshot() argument
690 if (record__auxtrace_read_snapshot_all(rec) < 0) { in record__read_auxtrace_snapshot()
693 if (auxtrace_record__snapshot_finish(rec->itr, on_exit)) in record__read_auxtrace_snapshot()
700 static int record__auxtrace_snapshot_exit(struct record *rec) in record__auxtrace_snapshot_exit() argument
706 auxtrace_record__snapshot_start(rec->itr)) in record__auxtrace_snapshot_exit()
709 record__read_auxtrace_snapshot(rec, true); in record__auxtrace_snapshot_exit()
716 static int record__auxtrace_init(struct record *rec) in record__auxtrace_init() argument
720 if (!rec->itr) { in record__auxtrace_init()
721 rec->itr = auxtrace_record__init(rec->evlist, &err); in record__auxtrace_init()
726 err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts, in record__auxtrace_init()
727 rec->opts.auxtrace_snapshot_opts); in record__auxtrace_init()
731 err = auxtrace_parse_sample_options(rec->itr, rec->evlist, &rec->opts, in record__auxtrace_init()
732 rec->opts.auxtrace_sample_opts); in record__auxtrace_init()
736 auxtrace_regroup_aux_output(rec->evlist); in record__auxtrace_init()
738 return auxtrace_parse_filters(rec->evlist); in record__auxtrace_init()
744 int record__auxtrace_mmap_read(struct record *rec __maybe_unused, in record__auxtrace_mmap_read()
751 void record__read_auxtrace_snapshot(struct record *rec __maybe_unused, in record__read_auxtrace_snapshot()
763 int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused) in record__auxtrace_snapshot_exit()
768 static int record__auxtrace_init(struct record *rec __maybe_unused) in record__auxtrace_init()
843 static int record__mmap_evlist(struct record *rec, in record__mmap_evlist() argument
846 struct record_opts *opts = &rec->opts; in record__mmap_evlist()
879 static int record__mmap(struct record *rec) in record__mmap() argument
881 return record__mmap_evlist(rec, rec->evlist); in record__mmap()
884 static int record__open(struct record *rec) in record__open() argument
888 struct evlist *evlist = rec->evlist; in record__open()
889 struct perf_session *session = rec->session; in record__open()
890 struct record_opts *opts = &rec->opts; in record__open()
964 rc = record__mmap(rec); in record__open()
974 static void set_timestamp_boundary(struct record *rec, u64 sample_time) in set_timestamp_boundary() argument
976 if (rec->evlist->first_sample_time == 0) in set_timestamp_boundary()
977 rec->evlist->first_sample_time = sample_time; in set_timestamp_boundary()
980 rec->evlist->last_sample_time = sample_time; in set_timestamp_boundary()
989 struct record *rec = container_of(tool, struct record, tool); in process_sample_event() local
991 set_timestamp_boundary(rec, sample->time); in process_sample_event()
993 if (rec->buildid_all) in process_sample_event()
996 rec->samples++; in process_sample_event()
1000 static int process_buildids(struct record *rec) in process_buildids() argument
1002 struct perf_session *session = rec->session; in process_buildids()
1004 if (perf_data__size(&rec->data) == 0) in process_buildids()
1024 if (rec->buildid_all && !rec->timestamp_boundary) in process_buildids()
1025 rec->tool.sample = NULL; in process_buildids()
1064 static void record__adjust_affinity(struct record *rec, struct mmap *map) in record__adjust_affinity() argument
1066 if (rec->opts.affinity != PERF_AFFINITY_SYS && in record__adjust_affinity()
1067 !bitmap_equal(rec->affinity_mask.bits, map->affinity_mask.bits, in record__adjust_affinity()
1068 rec->affinity_mask.nbits)) { in record__adjust_affinity()
1069 bitmap_zero(rec->affinity_mask.bits, rec->affinity_mask.nbits); in record__adjust_affinity()
1070 bitmap_or(rec->affinity_mask.bits, rec->affinity_mask.bits, in record__adjust_affinity()
1071 map->affinity_mask.bits, rec->affinity_mask.nbits); in record__adjust_affinity()
1072 sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&rec->affinity_mask), in record__adjust_affinity()
1073 (cpu_set_t *)rec->affinity_mask.bits); in record__adjust_affinity()
1075 mmap_cpu_mask__scnprintf(&rec->affinity_mask, "thread"); in record__adjust_affinity()
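record__adjust_affinity() above ORs the mmap's CPU mask into rec->affinity_mask and pins the recording thread to it with sched_setaffinity() when --affinity is not 'sys', so each buffer is drained from a nearby CPU. A standalone sketch of just the syscall part (glibc, _GNU_SOURCE; the choice of CPU 0 is arbitrary):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);		/* arbitrary: pin the calling thread to CPU 0 */

	if (sched_setaffinity(0, sizeof(mask), &mask) != 0) {
		perror("sched_setaffinity");
		return 1;
	}
	printf("pinned to CPU 0\n");
	return 0;
}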
1110 static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist, in record__mmap_read_evlist() argument
1113 u64 bytes_written = rec->bytes_written; in record__mmap_read_evlist()
1117 int trace_fd = rec->data.file.fd; in record__mmap_read_evlist()
1130 if (record__aio_enabled(rec)) in record__mmap_read_evlist()
1138 record__adjust_affinity(rec, map); in record__mmap_read_evlist()
1143 if (!record__aio_enabled(rec)) { in record__mmap_read_evlist()
1144 if (perf_mmap__push(map, rec, record__pushfn) < 0) { in record__mmap_read_evlist()
1151 if (record__aio_push(rec, map, &off) < 0) { in record__mmap_read_evlist()
1163 if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode && in record__mmap_read_evlist()
1164 !rec->opts.auxtrace_sample_mode && in record__mmap_read_evlist()
1165 record__auxtrace_mmap_read(rec, map) != 0) { in record__mmap_read_evlist()
1171 if (record__aio_enabled(rec)) in record__mmap_read_evlist()
1178 if (bytes_written != rec->bytes_written) in record__mmap_read_evlist()
1179 rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event)); in record__mmap_read_evlist()
1187 static int record__mmap_read_all(struct record *rec, bool synch) in record__mmap_read_all() argument
1191 err = record__mmap_read_evlist(rec, rec->evlist, false, synch); in record__mmap_read_all()
1195 return record__mmap_read_evlist(rec, rec->evlist, true, synch); in record__mmap_read_all()
1198 static void record__init_features(struct record *rec) in record__init_features() argument
1200 struct perf_session *session = rec->session; in record__init_features()
1206 if (rec->no_buildid) in record__init_features()
1209 if (!have_tracepoints(&rec->evlist->core.entries)) in record__init_features()
1212 if (!rec->opts.branch_stack) in record__init_features()
1215 if (!rec->opts.full_auxtrace) in record__init_features()
1218 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns)) in record__init_features()
1221 if (!rec->opts.use_clockid) in record__init_features()
1225 if (!record__comp_enabled(rec)) in record__init_features()
1232 record__finish_output(struct record *rec) in record__finish_output() argument
1234 struct perf_data *data = &rec->data; in record__finish_output()
1240 rec->session->header.data_size += rec->bytes_written; in record__finish_output()
1243 if (!rec->no_buildid) { in record__finish_output()
1244 process_buildids(rec); in record__finish_output()
1246 if (rec->buildid_all) in record__finish_output()
1247 dsos__hit_all(rec->session); in record__finish_output()
1249 perf_session__write_header(rec->session, rec->evlist, fd, true); in record__finish_output()
1254 static int record__synthesize_workload(struct record *rec, bool tail) in record__synthesize_workload() argument
1258 bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP; in record__synthesize_workload()
1260 if (rec->opts.tail_synthesize != tail) in record__synthesize_workload()
1263 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid); in record__synthesize_workload()
1267 err = perf_event__synthesize_thread_map(&rec->tool, thread_map, in record__synthesize_workload()
1269 &rec->session->machines.host, in record__synthesize_workload()
1271 rec->opts.sample_address); in record__synthesize_workload()
1276 static int record__synthesize(struct record *rec, bool tail);
1279 record__switch_output(struct record *rec, bool at_exit) in record__switch_output() argument
1281 struct perf_data *data = &rec->data; in record__switch_output()
1288 record__aio_mmap_read_sync(rec); in record__switch_output()
1290 record__synthesize(rec, true); in record__switch_output()
1291 if (target__none(&rec->opts.target)) in record__switch_output()
1292 record__synthesize_workload(rec, true); in record__switch_output()
1294 rec->samples = 0; in record__switch_output()
1295 record__finish_output(rec); in record__switch_output()
1303 rec->session->header.data_offset, in record__switch_output()
1306 rec->bytes_written = 0; in record__switch_output()
1307 rec->session->header.data_size = 0; in record__switch_output()
1314 if (rec->switch_output.num_files) { in record__switch_output()
1315 int n = rec->switch_output.cur_file + 1; in record__switch_output()
1317 if (n >= rec->switch_output.num_files) in record__switch_output()
1319 rec->switch_output.cur_file = n; in record__switch_output()
1320 if (rec->switch_output.filenames[n]) { in record__switch_output()
1321 remove(rec->switch_output.filenames[n]); in record__switch_output()
1322 zfree(&rec->switch_output.filenames[n]); in record__switch_output()
1324 rec->switch_output.filenames[n] = new_filename; in record__switch_output()
1331 record__synthesize(rec, false); in record__switch_output()
1342 if (target__none(&rec->opts.target)) in record__switch_output()
1343 record__synthesize_workload(rec, false); in record__switch_output()
1378 static const struct perf_event_mmap_page *record__pick_pc(struct record *rec) in record__pick_pc() argument
1380 const struct perf_event_mmap_page *pc = evlist__pick_pc(rec->evlist); in record__pick_pc()
1386 static int record__synthesize(struct record *rec, bool tail) in record__synthesize() argument
1388 struct perf_session *session = rec->session; in record__synthesize()
1390 struct perf_data *data = &rec->data; in record__synthesize()
1391 struct record_opts *opts = &rec->opts; in record__synthesize()
1392 struct perf_tool *tool = &rec->tool; in record__synthesize()
1396 if (rec->opts.tail_synthesize != tail) in record__synthesize()
1405 rec->bytes_written += err; in record__synthesize()
1408 err = perf_event__synth_time_conv(record__pick_pc(rec), tool, in record__synthesize()
1414 if (rec->opts.auxtrace_sample_mode || rec->opts.full_auxtrace) { in record__synthesize()
1422 if (rec->opts.full_auxtrace) { in record__synthesize()
1423 err = perf_event__synthesize_auxtrace_info(rec->itr, tool, in record__synthesize()
1429 if (!evlist__exclude_kernel(rec->evlist)) { in record__synthesize()
1448 err = perf_event__synthesize_extra_attr(&rec->tool, in record__synthesize()
1449 rec->evlist, in record__synthesize()
1455 err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads, in record__synthesize()
1463 err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus, in record__synthesize()
1475 if (rec->opts.synth & PERF_SYNTH_CGROUP) { in record__synthesize()
1482 if (rec->opts.nr_threads_synthesize > 1) { in record__synthesize()
1487 if (rec->opts.synth & PERF_SYNTH_TASK) { in record__synthesize()
1488 bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP; in record__synthesize()
1491 rec->evlist->core.threads, in record__synthesize()
1493 rec->opts.nr_threads_synthesize); in record__synthesize()
1496 if (rec->opts.nr_threads_synthesize > 1) in record__synthesize()
1505 struct record *rec = data; in record__process_signal_event() local
1506 pthread_kill(rec->thread_id, SIGUSR2); in record__process_signal_event()
1510 static int record__setup_sb_evlist(struct record *rec) in record__setup_sb_evlist() argument
1512 struct record_opts *opts = &rec->opts; in record__setup_sb_evlist()
1514 if (rec->sb_evlist != NULL) { in record__setup_sb_evlist()
1520 evlist__set_cb(rec->sb_evlist, record__process_signal_event, rec); in record__setup_sb_evlist()
1521 rec->thread_id = pthread_self(); in record__setup_sb_evlist()
1525 if (rec->sb_evlist == NULL) { in record__setup_sb_evlist()
1526 rec->sb_evlist = evlist__new(); in record__setup_sb_evlist()
1528 if (rec->sb_evlist == NULL) { in record__setup_sb_evlist()
1534 if (evlist__add_bpf_sb_event(rec->sb_evlist, &rec->session->header.env)) { in record__setup_sb_evlist()
1540 if (evlist__start_sb_thread(rec->sb_evlist, &rec->opts.target)) { in record__setup_sb_evlist()
1548 static int record__init_clock(struct record *rec) in record__init_clock() argument
1550 struct perf_session *session = rec->session; in record__init_clock()
1555 if (!rec->opts.use_clockid) in record__init_clock()
1558 if (rec->opts.use_clockid && rec->opts.clockid_res_ns) in record__init_clock()
1559 session->header.env.clock.clockid_res_ns = rec->opts.clockid_res_ns; in record__init_clock()
1561 session->header.env.clock.clockid = rec->opts.clockid; in record__init_clock()
1568 if (clock_gettime(rec->opts.clockid, &ref_clockid)) { in record__init_clock()
1585 static void hit_auxtrace_snapshot_trigger(struct record *rec) in hit_auxtrace_snapshot_trigger() argument
1590 if (auxtrace_record__snapshot_start(rec->itr)) in hit_auxtrace_snapshot_trigger()
1595 static void record__uniquify_name(struct record *rec) in record__uniquify_name() argument
1598 struct evlist *evlist = rec->evlist; in record__uniquify_name()
1621 static int __cmd_record(struct record *rec, int argc, const char **argv) in __cmd_record() argument
1627 struct perf_tool *tool = &rec->tool; in __cmd_record()
1628 struct record_opts *opts = &rec->opts; in __cmd_record()
1629 struct perf_data *data = &rec->data; in __cmd_record()
1642 if (rec->opts.record_namespaces) in __cmd_record()
1645 if (rec->opts.record_cgroup) { in __cmd_record()
1654 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) { in __cmd_record()
1656 if (rec->opts.auxtrace_snapshot_mode) in __cmd_record()
1658 if (rec->switch_output.enabled) in __cmd_record()
1671 rec->session = session; in __cmd_record()
1673 if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) { in __cmd_record()
1684 err = evlist__add_wakeup_eventfd(rec->evlist, done_fd); in __cmd_record()
1693 session->header.env.comp_level = rec->opts.comp_level; in __cmd_record()
1695 if (rec->opts.kcore && in __cmd_record()
1701 if (record__init_clock(rec)) in __cmd_record()
1704 record__init_features(rec); in __cmd_record()
1707 err = evlist__prepare_workload(rec->evlist, &opts->target, argv, data->is_pipe, in __cmd_record()
1722 if (data->is_pipe && rec->evlist->core.nr_entries == 1) in __cmd_record()
1723 rec->opts.sample_id = true; in __cmd_record()
1725 record__uniquify_name(rec); in __cmd_record()
1727 if (record__open(rec) != 0) { in __cmd_record()
1733 if (rec->opts.kcore) { in __cmd_record()
1755 if (rec->tool.ordered_events && !evlist__sample_id_all(rec->evlist)) { in __cmd_record()
1757 rec->tool.ordered_events = false; in __cmd_record()
1760 if (!rec->evlist->core.nr_groups) in __cmd_record()
1768 err = perf_session__write_header(session, rec->evlist, fd, false); in __cmd_record()
1774 if (!rec->no_buildid in __cmd_record()
1781 err = record__setup_sb_evlist(rec); in __cmd_record()
1785 err = record__synthesize(rec, false); in __cmd_record()
1789 if (rec->realtime_prio) { in __cmd_record()
1792 param.sched_priority = rec->realtime_prio; in __cmd_record()
1806 evlist__enable(rec->evlist); in __cmd_record()
1829 rec->evlist->workload.pid, in __cmd_record()
1849 rec->evlist->workload.pid, in __cmd_record()
1854 evlist__start_workload(rec->evlist); in __cmd_record()
1857 if (evlist__initialize_ctlfd(rec->evlist, opts->ctl_fd, opts->ctl_fd_ack)) in __cmd_record()
1864 evlist__enable(rec->evlist); in __cmd_record()
1873 unsigned long long hits = rec->samples; in __cmd_record()
1884 evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING); in __cmd_record()
1886 if (record__mmap_read_all(rec, false) < 0) { in __cmd_record()
1896 record__read_auxtrace_snapshot(rec, false); in __cmd_record()
1914 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING) in __cmd_record()
1923 evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING); in __cmd_record()
1929 fd = record__switch_output(rec, false); in __cmd_record()
1938 if (rec->switch_output.time) in __cmd_record()
1939 alarm(rec->switch_output.time); in __cmd_record()
1942 if (hits == rec->samples) { in __cmd_record()
1945 err = evlist__poll(rec->evlist, -1); in __cmd_record()
1954 if (evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0) in __cmd_record()
1958 if (evlist__ctlfd_process(rec->evlist, &cmd) > 0) { in __cmd_record()
1961 hit_auxtrace_snapshot_trigger(rec); in __cmd_record()
1962 evlist__ctlfd_ack(rec->evlist); in __cmd_record()
1985 evlist__disable(rec->evlist); in __cmd_record()
1994 record__auxtrace_snapshot_exit(rec); in __cmd_record()
2000 evlist__scnprintf_evsels(rec->evlist, sizeof(strevsels), strevsels); in __cmd_record()
2011 if (target__none(&rec->opts.target)) in __cmd_record()
2012 record__synthesize_workload(rec, true); in __cmd_record()
2015 evlist__finalize_ctlfd(rec->evlist); in __cmd_record()
2016 record__mmap_read_all(rec, true); in __cmd_record()
2017 record__aio_mmap_read_sync(rec); in __cmd_record()
2019 if (rec->session->bytes_transferred && rec->session->bytes_compressed) { in __cmd_record()
2020 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed; in __cmd_record()
2028 kill(rec->evlist->workload.pid, SIGTERM); in __cmd_record()
2041 record__synthesize(rec, true); in __cmd_record()
2043 rec->samples = 0; in __cmd_record()
2046 if (!rec->timestamp_filename) { in __cmd_record()
2047 record__finish_output(rec); in __cmd_record()
2049 fd = record__switch_output(rec, true); in __cmd_record()
2061 const char *postfix = rec->timestamp_filename ? in __cmd_record()
2064 if (rec->samples && !rec->opts.full_auxtrace) in __cmd_record()
2066 " (%" PRIu64 " samples)", rec->samples); in __cmd_record()
2075 rec->session->bytes_transferred / 1024.0 / 1024.0, in __cmd_record()
2090 evlist__stop_sb_thread(rec->sb_evlist); in __cmd_record()
2154 struct record *rec = cb; in perf_record_config() local
2158 rec->no_buildid_cache = false; in perf_record_config()
2160 rec->no_buildid_cache = true; in perf_record_config()
2162 rec->no_buildid = true; in perf_record_config()
2164 rec->buildid_mmap = true; in perf_record_config()
2175 rec->opts.nr_cblocks = strtol(value, NULL, 0); in perf_record_config()
2176 if (!rec->opts.nr_cblocks) in perf_record_config()
2177 rec->opts.nr_cblocks = nr_cblocks_default; in perf_record_config()
2279 static void switch_output_size_warn(struct record *rec) in switch_output_size_warn() argument
2281 u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages); in switch_output_size_warn()
2282 struct switch_output *s = &rec->switch_output; in switch_output_size_warn()
2296 static int switch_output_setup(struct record *rec) in switch_output_setup() argument
2298 struct switch_output *s = &rec->switch_output; in switch_output_setup()
2320 if (rec->switch_output_event_set) in switch_output_setup()
2351 rec->timestamp_filename = true; in switch_output_setup()
2354 if (s->size && !rec->opts.no_buffering) in switch_output_setup()
2355 switch_output_size_warn(rec); in switch_output_setup()
2397 struct record *rec = container_of(tool, struct record, tool); in process_timestamp_boundary() local
2399 set_timestamp_boundary(rec, sample->time); in process_timestamp_boundary()
2674 struct record *rec = &record; in cmd_record() local
2700 rec->opts.affinity = PERF_AFFINITY_SYS; in cmd_record()
2702 rec->evlist = evlist__new(); in cmd_record()
2703 if (rec->evlist == NULL) in cmd_record()
2706 err = perf_config(perf_record_config, rec); in cmd_record()
2720 if (!argc && target__none(&rec->opts.target)) in cmd_record()
2721 rec->opts.target.system_wide = true; in cmd_record()
2723 if (nr_cgroups && !rec->opts.target.system_wide) { in cmd_record()
2729 if (rec->buildid_mmap) { in cmd_record()
2739 rec->opts.build_id = true; in cmd_record()
2741 rec->no_buildid = true; in cmd_record()
2744 if (rec->opts.record_cgroup && !perf_can_record_cgroup()) { in cmd_record()
2750 if (rec->opts.kcore) in cmd_record()
2751 rec->data.is_dir = true; in cmd_record()
2753 if (rec->opts.comp_level != 0) { in cmd_record()
2755 rec->no_buildid = true; in cmd_record()
2758 if (rec->opts.record_switch_events && in cmd_record()
2766 if (switch_output_setup(rec)) { in cmd_record()
2772 if (rec->switch_output.time) { in cmd_record()
2774 alarm(rec->switch_output.time); in cmd_record()
2777 if (rec->switch_output.num_files) { in cmd_record()
2778 rec->switch_output.filenames = calloc(sizeof(char *), in cmd_record()
2779 rec->switch_output.num_files); in cmd_record()
2780 if (!rec->switch_output.filenames) { in cmd_record()
2794 if (rec->opts.affinity != PERF_AFFINITY_SYS) { in cmd_record()
2795 rec->affinity_mask.nbits = cpu__max_cpu(); in cmd_record()
2796 rec->affinity_mask.bits = bitmap_zalloc(rec->affinity_mask.nbits); in cmd_record()
2797 if (!rec->affinity_mask.bits) { in cmd_record()
2798 pr_err("Failed to allocate thread mask for %zd cpus\n", rec->affinity_mask.nbits); in cmd_record()
2802 pr_debug2("thread mask[%zd]: empty\n", rec->affinity_mask.nbits); in cmd_record()
2805 err = record__auxtrace_init(rec); in cmd_record()
2812 err = bpf__setup_stdout(rec->evlist); in cmd_record()
2814 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf)); in cmd_record()
2822 if (rec->no_buildid_cache || rec->no_buildid) { in cmd_record()
2824 } else if (rec->switch_output.enabled) { in cmd_record()
2842 if (rec->no_buildid_set && !rec->no_buildid) in cmd_record()
2844 if (rec->no_buildid_cache_set && !rec->no_buildid_cache) in cmd_record()
2847 rec->no_buildid = true; in cmd_record()
2848 rec->no_buildid_cache = true; in cmd_record()
2856 if (rec->evlist->core.nr_entries == 0) { in cmd_record()
2858 err = evlist__add_default_hybrid(rec->evlist, in cmd_record()
2861 err = __evlist__add_default(rec->evlist, in cmd_record()
2871 if (rec->opts.target.tid && !rec->opts.no_inherit_set) in cmd_record()
2872 rec->opts.no_inherit = true; in cmd_record()
2874 err = target__validate(&rec->opts.target); in cmd_record()
2876 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ); in cmd_record()
2880 err = target__parse_uid(&rec->opts.target); in cmd_record()
2884 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ); in cmd_record()
2892 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid; in cmd_record()
2894 if (evlist__fix_hybrid_cpus(rec->evlist, rec->opts.target.cpu_list)) { in cmd_record()
2896 rec->opts.target.cpu_list); in cmd_record()
2900 rec->opts.target.hybrid = perf_pmu__has_hybrid(); in cmd_record()
2902 if (evlist__create_maps(rec->evlist, &rec->opts.target) < 0) in cmd_record()
2905 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts); in cmd_record()
2914 if (rec->opts.full_auxtrace) in cmd_record()
2915 rec->buildid_all = true; in cmd_record()
2917 if (rec->opts.text_poke) { in cmd_record()
2918 err = record__config_text_poke(rec->evlist); in cmd_record()
2925 if (record_opts__config(&rec->opts)) { in cmd_record()
2930 if (rec->opts.nr_cblocks > nr_cblocks_max) in cmd_record()
2931 rec->opts.nr_cblocks = nr_cblocks_max; in cmd_record()
2932 pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks); in cmd_record()
2934 pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]); in cmd_record()
2935 pr_debug("mmap flush: %d\n", rec->opts.mmap_flush); in cmd_record()
2937 if (rec->opts.comp_level > comp_level_max) in cmd_record()
2938 rec->opts.comp_level = comp_level_max; in cmd_record()
2939 pr_debug("comp level: %d\n", rec->opts.comp_level); in cmd_record()
2943 bitmap_free(rec->affinity_mask.bits); in cmd_record()
2944 evlist__delete(rec->evlist); in cmd_record()
2946 auxtrace_record__free(rec->itr); in cmd_record()
2948 evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close); in cmd_record()
2954 struct record *rec = &record; in snapshot_sig_handler() local
2956 hit_auxtrace_snapshot_trigger(rec); in snapshot_sig_handler()
2958 if (switch_output_signal(rec)) in snapshot_sig_handler()
2964 struct record *rec = &record; in alarm_sig_handler() local
2966 if (switch_output_time(rec)) in alarm_sig_handler()
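alarm_sig_handler() above runs when the alarm(rec->switch_output.time) armed earlier (see the __cmd_record() and cmd_record() references) expires, and switch_output_time() then lets the main loop rotate the output. A standalone sketch of that SIGALRM pattern; the handler body and one-second interval are made up:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t alarm_fired;

static void alarm_sig_handler_sketch(int sig)
{
	(void)sig;
	alarm_fired = 1;	/* the real handler asks the main loop to switch output */
}

int main(void)
{
	signal(SIGALRM, alarm_sig_handler_sketch);
	alarm(1);		/* made-up interval: fire after one second */

	pause();		/* returns once the signal has been handled */
	if (alarm_fired)
		printf("timer fired: rotate output, then re-arm alarm()\n");
	return 0;
}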