Lines matching refs:evsel_list in tools/perf/builtin-stat.c

168 static struct evlist	*evsel_list;  variable
295 evlist__reset_stats(evsel_list); in perf_stat__reset_stats()
375 int nthreads = perf_thread_map__nr(evsel_list->core.threads); in read_counter_cpu()
435 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { in read_affinity_counters()
457 evlist__for_each_entry(evsel_list, counter) { in read_bpf_map_counters()
482 evlist__for_each_entry(evsel_list, counter) { in process_counters()
490 perf_stat_merge_counters(&stat_config, evsel_list); in process_counters()
491 perf_stat_process_percore(&stat_config, evsel_list); in process_counters()
492 perf_stat_process_shadow_stats(&stat_config, evsel_list); in process_counters()
503 evlist__reset_aggr_stats(evsel_list); in process_interval()
533 evlist__for_each_entry(evsel_list, evsel) { in enable_counters()
544 evlist__enable(evsel_list); in enable_counters()
559 evlist__for_each_entry(evsel_list, counter) in disable_counters()
562 evlist__disable(evsel_list); in disable_counters()
666 child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0; in dispatch_events()
672 if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */ in dispatch_events()
677 process_evlist(evsel_list, interval); in dispatch_events()
720 evsel_list->core.threads && in stat_handle_error()
721 evsel_list->core.threads->err_thread != -1) { in stat_handle_error()
726 if (!thread_map__remove(evsel_list->core.threads, in stat_handle_error()
727 evsel_list->core.threads->err_thread)) { in stat_handle_error()
728 evsel_list->core.threads->err_thread = -1; in stat_handle_error()
759 …if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0)… in __run_perf_stat()
763 child_pid = evsel_list->workload.pid; in __run_perf_stat()
766 if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) { in __run_perf_stat()
772 evlist__for_each_entry(evsel_list, counter) { in __run_perf_stat()
780 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { in __run_perf_stat()
808 evlist__reset_weak_group(evsel_list, counter, false); in __run_perf_stat()
836 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { in __run_perf_stat()
845 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { in __run_perf_stat()
871 evlist__for_each_entry(evsel_list, counter) { in __run_perf_stat()
882 evsel__store_ids(counter, evsel_list)) in __run_perf_stat()
886 if (evlist__apply_filters(evsel_list, &counter)) { in __run_perf_stat()
899 err = perf_session__write_header(perf_stat.session, evsel_list, in __run_perf_stat()
906 err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list, in __run_perf_stat()
922 evlist__start_workload(evsel_list); in __run_perf_stat()
937 if (interval || timeout || evlist__ctlfd_initialized(evsel_list)) in __run_perf_stat()
970 evlist__copy_prev_raw_counts(evsel_list); in __run_perf_stat()
971 evlist__reset_prev_raw_counts(evsel_list); in __run_perf_stat()
972 evlist__reset_aggr_stats(evsel_list); in __run_perf_stat()
993 evlist__close(evsel_list); in __run_perf_stat()
1032 evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv); in print_counters()
1166 OPT_CALLBACK('e', "event", &evsel_list, "event",
1169 OPT_CALLBACK(0, "filter", &evsel_list, "filter",
1215 OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
1260 OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
1279 OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type",
1284 OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
1293 OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default",
1451 stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus, in perf_stat_init_aggr_mode()
1461 nr = perf_thread_map__nr(evsel_list->core.threads); in perf_stat_init_aggr_mode()
1480 if (evsel_list->core.user_requested_cpus) in perf_stat_init_aggr_mode()
1481 nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu; in perf_stat_init_aggr_mode()
1685 int nr = perf_thread_map__nr(evsel_list->core.threads); in perf_stat_init_aggr_mode_file()
1703 stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus, in perf_stat_init_aggr_mode_file()
1847 return metricgroup__parse_groups(evsel_list, "transaction", in add_default_attributes()
1858 err = parse_events(evsel_list, transaction_attrs, in add_default_attributes()
1861 err = parse_events(evsel_list, in add_default_attributes()
1899 err = parse_events(evsel_list, smi_cost_attrs, &errinfo); in add_default_attributes()
1913 const char *pmu_name = arch_get_topdown_pmu_name(evsel_list, true); in add_default_attributes()
1969 err = parse_events(evsel_list, str, &errinfo); in add_default_attributes()
1990 if (!evsel_list->core.nr_entries) { in add_default_attributes()
1994 if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0) in add_default_attributes()
1997 if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0) in add_default_attributes()
2001 if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0) in add_default_attributes()
2004 if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0) in add_default_attributes()
2007 if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0) in add_default_attributes()
2017 if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0) in add_default_attributes()
2024 if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0) in add_default_attributes()
2031 return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); in add_default_attributes()
2077 session->evlist = evsel_list; in __cmd_record()
2146 perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads); in set_maps()
2148 if (evlist__alloc_stats(&stat_config, evsel_list, /*alloc_raw=*/true)) in set_maps()
2253 evsel_list = session->evlist; in __cmd_report()
2282 evlist__for_each_entry(evsel_list, counter) { in setup_system_wide()
2289 if (evsel_list->core.nr_entries) in setup_system_wide()
2309 evsel_list = evlist__new(); in cmd_stat()
2310 if (evsel_list == NULL) in cmd_stat()
2485 status = iostat_prepare(evsel_list, &stat_config); in cmd_stat()
2489 iostat_list(evsel_list, &stat_config); in cmd_stat()
2492 iostat_list(evsel_list, &stat_config); in cmd_stat()
2514 metricgroup__parse_groups(evsel_list, metrics, in cmd_stat()
2535 if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list, in cmd_stat()
2543 if (evlist__fix_hybrid_cpus(evsel_list, target.cpu_list)) { in cmd_stat()
2549 if (evlist__create_maps(evsel_list, &target) < 0) { in cmd_stat()
2562 evlist__check_cpu_maps(evsel_list); in cmd_stat()
2569 thread_map__read_comms(evsel_list->core.threads); in cmd_stat()
2605 if (evlist__alloc_stats(&stat_config, evsel_list, interval)) in cmd_stat()
2632 if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack)) in cmd_stat()
2636 evlist__first(evsel_list)->ignore_missing_thread = target.pid; in cmd_stat()
2644 evlist__reset_prev_raw_counts(evsel_list); in cmd_stat()
2656 evlist__finalize_ctlfd(evsel_list); in cmd_stat()
2688 perf_session__write_header(perf_stat.session, evsel_list, fd, true); in cmd_stat()
2691 evlist__close(evsel_list); in cmd_stat()
2696 evlist__free_stats(evsel_list); in cmd_stat()
2699 iostat_release(evsel_list); in cmd_stat()
2707 evlist__delete(evsel_list); in cmd_stat()
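
Taken together, these references trace the lifecycle of evsel_list: it is allocated in cmd_stat() (line 2309), populated by the -e/-M/--cgroup option callbacks and add_default_attributes(), driven through __run_perf_stat() (lines 759-993), and finally freed (lines 2696-2707). The sketch below is a minimal illustration of that allocate/use/free pattern, not the real builtin-stat.c: it only strings together evlist calls visible in the listing, assumes the in-tree perf headers (util/evlist.h, util/stat.h, util/target.h), and elides option parsing, aggregation setup, the counter loop itself, and error reporting.

/*
 * Illustrative sketch of the evsel_list lifecycle seen above.
 * Builds only inside the perf source tree; stat_sketch() is a
 * hypothetical stand-in for the relevant parts of cmd_stat().
 */
#include <errno.h>
#include "util/evlist.h"
#include "util/stat.h"
#include "util/target.h"

static struct evlist *evsel_list;		/* cf. line 168 */
static struct perf_stat_config stat_config;
static struct target target;

static int stat_sketch(void)
{
	int status = -ENOMEM;

	evsel_list = evlist__new();		/* cf. line 2309 */
	if (evsel_list == NULL)
		return -ENOMEM;

	/* events are normally added here via option callbacks or default attrs */

	if (evlist__create_maps(evsel_list, &target) < 0)	/* cf. line 2549 */
		goto out;

	if (evlist__alloc_stats(&stat_config, evsel_list, /*alloc_raw=*/false))	/* cf. line 2605 */
		goto out;

	/* __run_perf_stat() (lines 759-993) opens, enables, reads and closes the counters */
	status = 0;

	evlist__free_stats(evsel_list);		/* cf. line 2696 */
out:
	evlist__delete(evsel_list);		/* cf. line 2707 */
	return status;
}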