Lines matching refs:evsel in tools/perf/util/evsel.c (each hit: file line number, matched line, enclosing function)

101 static int evsel__no_extra_init(struct evsel *evsel __maybe_unused)  in evsel__no_extra_init()
108 static void evsel__no_extra_fini(struct evsel *evsel __maybe_unused) in evsel__no_extra_fini()
114 int (*init)(struct evsel *evsel);
115 void (*fini)(struct evsel *evsel);
117 .size = sizeof(struct evsel),
122 int evsel__object_config(size_t object_size, int (*init)(struct evsel *evsel), in evsel__object_config() argument
123 void (*fini)(struct evsel *evsel)) in evsel__object_config() argument
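
The block above (the evsel__no_extra_init()/evsel__no_extra_fini() defaults, the perf_evsel__object method table and evsel__object_config()) lets a tool embed extra per-event state in the same allocation: it registers a larger object size plus init/fini hooks before any evsel is created; the init hook runs from evsel__init() and the fini hook from evsel__exit(). A minimal sketch, assuming perf's internal headers; struct my_evsel, my_evsel_init()/my_evsel_fini() and my_tool_setup() are hypothetical names used only for illustration:

    #include "util/evsel.h"
    #include <stdlib.h>

    /* Hypothetical wrapper object; the embedded evsel must stay the first member. */
    struct my_evsel {
            struct evsel  evsel;
            void         *priv;              /* extra per-event state for this tool */
    };

    static int my_evsel_init(struct evsel *evsel)
    {
            ((struct my_evsel *)evsel)->priv = NULL;
            return 0;
    }

    static void my_evsel_fini(struct evsel *evsel)
    {
            free(((struct my_evsel *)evsel)->priv);
    }

    static int my_tool_setup(void)
    {
            /* From now on every evsel__new*() zallocs sizeof(struct my_evsel) and
             * runs my_evsel_init()/my_evsel_fini() at the evsel's creation/teardown. */
            return evsel__object_config(sizeof(struct my_evsel), my_evsel_init, my_evsel_fini);
    }
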
222 void evsel__calc_id_pos(struct evsel *evsel) in evsel__calc_id_pos() argument
224 evsel->id_pos = __perf_evsel__calc_id_pos(evsel->core.attr.sample_type); in evsel__calc_id_pos()
225 evsel->is_pos = __perf_evsel__calc_is_pos(evsel->core.attr.sample_type); in evsel__calc_id_pos()
228 void __evsel__set_sample_bit(struct evsel *evsel, in __evsel__set_sample_bit() argument
231 if (!(evsel->core.attr.sample_type & bit)) { in __evsel__set_sample_bit()
232 evsel->core.attr.sample_type |= bit; in __evsel__set_sample_bit()
233 evsel->sample_size += sizeof(u64); in __evsel__set_sample_bit()
234 evsel__calc_id_pos(evsel); in __evsel__set_sample_bit()
238 void __evsel__reset_sample_bit(struct evsel *evsel, in __evsel__reset_sample_bit() argument
241 if (evsel->core.attr.sample_type & bit) { in __evsel__reset_sample_bit()
242 evsel->core.attr.sample_type &= ~bit; in __evsel__reset_sample_bit()
243 evsel->sample_size -= sizeof(u64); in __evsel__reset_sample_bit()
244 evsel__calc_id_pos(evsel); in __evsel__reset_sample_bit()
248 void evsel__set_sample_id(struct evsel *evsel, in evsel__set_sample_id() argument
252 evsel__reset_sample_bit(evsel, ID); in evsel__set_sample_id()
253 evsel__set_sample_bit(evsel, IDENTIFIER); in evsel__set_sample_id()
255 evsel__set_sample_bit(evsel, ID); in evsel__set_sample_id()
257 evsel->core.attr.read_format |= PERF_FORMAT_ID; in evsel__set_sample_id()
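
__evsel__set_sample_bit() and __evsel__reset_sample_bit() keep attr.sample_type, evsel->sample_size and the id/is sample positions in sync; callers normally go through the evsel__set_sample_bit(evsel, BIT) / evsel__reset_sample_bit(evsel, BIT) macros, which paste the PERF_SAMPLE_ prefix. A minimal sketch, assuming perf's internal headers and the evsel__new()/evsel__delete() wrappers; sample_bits_demo() is a hypothetical helper:

    #include "util/evsel.h"              /* evsel__new(), sample-bit helpers */
    #include <linux/perf_event.h>

    static void sample_bits_demo(void)
    {
            struct perf_event_attr attr = {
                    .type   = PERF_TYPE_SOFTWARE,
                    .config = PERF_COUNT_SW_CPU_CLOCK,
            };
            struct evsel *evsel = evsel__new(&attr);

            if (evsel == NULL)
                    return;

            /* Each new bit adds sizeof(u64) to sample_size and recomputes id_pos/is_pos. */
            evsel__set_sample_bit(evsel, TIME);
            evsel__set_sample_bit(evsel, CPU);

            /* Clearing a bit undoes both adjustments. */
            evsel__reset_sample_bit(evsel, CPU);

            evsel__delete(evsel);
    }
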
268 bool evsel__is_function_event(struct evsel *evsel) in evsel__is_function_event() argument
272 return evsel->name && in evsel__is_function_event()
273 !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT)); in evsel__is_function_event()
278 void evsel__init(struct evsel *evsel, in evsel__init() argument
281 perf_evsel__init(&evsel->core, attr, idx); in evsel__init()
282 evsel->tracking = !idx; in evsel__init()
283 evsel->unit = strdup(""); in evsel__init()
284 evsel->scale = 1.0; in evsel__init()
285 evsel->max_events = ULONG_MAX; in evsel__init()
286 evsel->evlist = NULL; in evsel__init()
287 evsel->bpf_obj = NULL; in evsel__init()
288 evsel->bpf_fd = -1; in evsel__init()
289 INIT_LIST_HEAD(&evsel->config_terms); in evsel__init()
290 INIT_LIST_HEAD(&evsel->bpf_counter_list); in evsel__init()
291 INIT_LIST_HEAD(&evsel->bpf_filters); in evsel__init()
292 perf_evsel__object.init(evsel); in evsel__init()
293 evsel->sample_size = __evsel__sample_size(attr->sample_type); in evsel__init()
294 evsel__calc_id_pos(evsel); in evsel__init()
295 evsel->cmdline_group_boundary = false; in evsel__init()
296 evsel->metric_events = NULL; in evsel__init()
297 evsel->per_pkg_mask = NULL; in evsel__init()
298 evsel->collect_stat = false; in evsel__init()
299 evsel->pmu_name = NULL; in evsel__init()
300 evsel->group_pmu_name = NULL; in evsel__init()
301 evsel->skippable = false; in evsel__init()
304 struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx) in evsel__new_idx()
306 struct evsel *evsel = zalloc(perf_evsel__object.size); in evsel__new_idx() local
308 if (!evsel) in evsel__new_idx()
310 evsel__init(evsel, attr, idx); in evsel__new_idx()
312 if (evsel__is_bpf_output(evsel) && !attr->sample_type) { in evsel__new_idx()
313 evsel->core.attr.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | in evsel__new_idx()
315 evsel->core.attr.sample_period = 1; in evsel__new_idx()
318 if (evsel__is_clock(evsel)) { in evsel__new_idx()
319 free((char *)evsel->unit); in evsel__new_idx()
320 evsel->unit = strdup("msec"); in evsel__new_idx()
321 evsel->scale = 1e-6; in evsel__new_idx()
324 return evsel; in evsel__new_idx()
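
Beyond the plain allocation, evsel__new_idx() applies two special cases visible above: bpf-output events get a default sample_type/period, and clock events have their unit and scale switched to msec/1e-6. A short sketch, assuming perf's internal headers; report_clock_unit() is a hypothetical helper:

    #include "util/evsel.h"
    #include "util/debug.h"              /* pr_debug() */

    static void report_clock_unit(void)
    {
            struct perf_event_attr attr = {
                    .type   = PERF_TYPE_SOFTWARE,
                    .config = PERF_COUNT_SW_TASK_CLOCK,
            };
            struct evsel *evsel = evsel__new(&attr);   /* evsel__new_idx(&attr, 0) */

            if (evsel == NULL)
                    return;

            /* For cpu-clock/task-clock this reports unit "msec" and scale 1e-06. */
            pr_debug("%s: unit=%s scale=%g\n", evsel__name(evsel), evsel->unit, evsel->scale);
            evsel__delete(evsel);
    }
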
349 static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src) in evsel__copy_config_terms()
361 struct evsel *evsel__clone(struct evsel *orig) in evsel__clone()
363 struct evsel *evsel; in evsel__clone() local
374 evsel = evsel__new(&orig->core.attr); in evsel__clone()
375 if (evsel == NULL) in evsel__clone()
378 evsel->core.cpus = perf_cpu_map__get(orig->core.cpus); in evsel__clone()
379 evsel->core.own_cpus = perf_cpu_map__get(orig->core.own_cpus); in evsel__clone()
380 evsel->core.threads = perf_thread_map__get(orig->core.threads); in evsel__clone()
381 evsel->core.nr_members = orig->core.nr_members; in evsel__clone()
382 evsel->core.system_wide = orig->core.system_wide; in evsel__clone()
383 evsel->core.requires_cpu = orig->core.requires_cpu; in evsel__clone()
384 evsel->core.is_pmu_core = orig->core.is_pmu_core; in evsel__clone()
387 evsel->name = strdup(orig->name); in evsel__clone()
388 if (evsel->name == NULL) in evsel__clone()
392 evsel->group_name = strdup(orig->group_name); in evsel__clone()
393 if (evsel->group_name == NULL) in evsel__clone()
397 evsel->pmu_name = strdup(orig->pmu_name); in evsel__clone()
398 if (evsel->pmu_name == NULL) in evsel__clone()
402 evsel->group_pmu_name = strdup(orig->group_pmu_name); in evsel__clone()
403 if (evsel->group_pmu_name == NULL) in evsel__clone()
407 evsel->filter = strdup(orig->filter); in evsel__clone()
408 if (evsel->filter == NULL) in evsel__clone()
412 evsel->metric_id = strdup(orig->metric_id); in evsel__clone()
413 if (evsel->metric_id == NULL) in evsel__clone()
416 evsel->cgrp = cgroup__get(orig->cgrp); in evsel__clone()
418 evsel->tp_format = orig->tp_format; in evsel__clone()
420 evsel->handler = orig->handler; in evsel__clone()
421 evsel->core.leader = orig->core.leader; in evsel__clone()
423 evsel->max_events = orig->max_events; in evsel__clone()
424 evsel->tool_event = orig->tool_event; in evsel__clone()
425 free((char *)evsel->unit); in evsel__clone()
426 evsel->unit = strdup(orig->unit); in evsel__clone()
427 if (evsel->unit == NULL) in evsel__clone()
430 evsel->scale = orig->scale; in evsel__clone()
431 evsel->snapshot = orig->snapshot; in evsel__clone()
432 evsel->per_pkg = orig->per_pkg; in evsel__clone()
433 evsel->percore = orig->percore; in evsel__clone()
434 evsel->precise_max = orig->precise_max; in evsel__clone()
435 evsel->is_libpfm_event = orig->is_libpfm_event; in evsel__clone()
437 evsel->exclude_GH = orig->exclude_GH; in evsel__clone()
438 evsel->sample_read = orig->sample_read; in evsel__clone()
439 evsel->auto_merge_stats = orig->auto_merge_stats; in evsel__clone()
440 evsel->collect_stat = orig->collect_stat; in evsel__clone()
441 evsel->weak_group = orig->weak_group; in evsel__clone()
442 evsel->use_config_name = orig->use_config_name; in evsel__clone()
443 evsel->pmu = orig->pmu; in evsel__clone()
445 if (evsel__copy_config_terms(evsel, orig) < 0) in evsel__clone()
448 return evsel; in evsel__clone()
451 evsel__delete(evsel); in evsel__clone()
459 struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx, bool format) in evsel__newtp_idx()
461 struct evsel *evsel = zalloc(perf_evsel__object.size); in evsel__newtp_idx() local
464 if (evsel == NULL) { in evsel__newtp_idx()
473 if (asprintf(&evsel->name, "%s:%s", sys, name) < 0) in evsel__newtp_idx()
479 evsel->tp_format = trace_event__tp_format(sys, name); in evsel__newtp_idx()
480 if (IS_ERR(evsel->tp_format)) { in evsel__newtp_idx()
481 err = PTR_ERR(evsel->tp_format); in evsel__newtp_idx()
484 attr.config = evsel->tp_format->id; in evsel__newtp_idx()
491 evsel__init(evsel, &attr, idx); in evsel__newtp_idx()
494 return evsel; in evsel__newtp_idx()
497 zfree(&evsel->name); in evsel__newtp_idx()
498 free(evsel); in evsel__newtp_idx()
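
Unlike evsel__new_idx(), evsel__newtp_idx() reports failure through ERR_PTR() (for example when the tracepoint format cannot be read from tracefs), so callers test the result with IS_ERR(). A sketch, assuming perf's internal headers; open_sched_switch_evsel() is a hypothetical helper and sched:sched_switch is just an example tracepoint:

    #include "util/evsel.h"
    #include <linux/err.h>               /* IS_ERR(), PTR_ERR() */

    static int open_sched_switch_evsel(struct evsel **out)
    {
            struct evsel *evsel = evsel__newtp_idx("sched", "sched_switch", 0, /*format=*/true);

            if (IS_ERR(evsel))
                    return PTR_ERR(evsel);   /* e.g. tracefs not mounted or no permission */

            /* attr.type is PERF_TYPE_TRACEPOINT and attr.config the tracepoint id. */
            *out = evsel;
            return 0;
    }
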
547 static int evsel__add_modifiers(struct evsel *evsel, char *bf, size_t size) in evsel__add_modifiers() argument
550 struct perf_event_attr *attr = &evsel->core.attr; in evsel__add_modifiers()
583 int __weak arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size) in arch_evsel__hw_name() argument
585 return scnprintf(bf, size, "%s", __evsel__hw_name(evsel->core.attr.config)); in arch_evsel__hw_name()
588 static int evsel__hw_name(struct evsel *evsel, char *bf, size_t size) in evsel__hw_name() argument
590 int r = arch_evsel__hw_name(evsel, bf, size); in evsel__hw_name()
591 return r + evsel__add_modifiers(evsel, bf + r, size - r); in evsel__hw_name()
614 static int evsel__sw_name(struct evsel *evsel, char *bf, size_t size) in evsel__sw_name() argument
616 int r = scnprintf(bf, size, "%s", __evsel__sw_name(evsel->core.attr.config)); in evsel__sw_name()
617 return r + evsel__add_modifiers(evsel, bf + r, size - r); in evsel__sw_name()
643 static int evsel__bp_name(struct evsel *evsel, char *bf, size_t size) in evsel__bp_name() argument
645 struct perf_event_attr *attr = &evsel->core.attr; in evsel__bp_name()
647 return r + evsel__add_modifiers(evsel, bf + r, size - r); in evsel__bp_name()
739 static int evsel__hw_cache_name(struct evsel *evsel, char *bf, size_t size) in evsel__hw_cache_name() argument
741 int ret = __evsel__hw_cache_name(evsel->core.attr.config, bf, size); in evsel__hw_cache_name()
742 return ret + evsel__add_modifiers(evsel, bf + ret, size - ret); in evsel__hw_cache_name()
745 static int evsel__raw_name(struct evsel *evsel, char *bf, size_t size) in evsel__raw_name() argument
747 int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->core.attr.config); in evsel__raw_name()
748 return ret + evsel__add_modifiers(evsel, bf + ret, size - ret); in evsel__raw_name()
751 const char *evsel__name(struct evsel *evsel) in evsel__name() argument
755 if (!evsel) in evsel__name()
758 if (evsel->name) in evsel__name()
759 return evsel->name; in evsel__name()
761 switch (evsel->core.attr.type) { in evsel__name()
763 evsel__raw_name(evsel, bf, sizeof(bf)); in evsel__name()
767 evsel__hw_name(evsel, bf, sizeof(bf)); in evsel__name()
771 evsel__hw_cache_name(evsel, bf, sizeof(bf)); in evsel__name()
775 if (evsel__is_tool(evsel)) in evsel__name()
776 evsel__tool_name(evsel__tool_event(evsel), bf, sizeof(bf)); in evsel__name()
778 evsel__sw_name(evsel, bf, sizeof(bf)); in evsel__name()
786 evsel__bp_name(evsel, bf, sizeof(bf)); in evsel__name()
791 evsel->core.attr.type); in evsel__name()
795 evsel->name = strdup(bf); in evsel__name()
797 if (evsel->name) in evsel__name()
798 return evsel->name; in evsel__name()
803 bool evsel__name_is(struct evsel *evsel, const char *name) in evsel__name_is() argument
805 return !strcmp(evsel__name(evsel), name); in evsel__name_is()
808 const char *evsel__metric_id(const struct evsel *evsel) in evsel__metric_id() argument
810 if (evsel->metric_id) in evsel__metric_id()
811 return evsel->metric_id; in evsel__metric_id()
813 if (evsel__is_tool(evsel)) in evsel__metric_id()
814 return perf_tool_event__to_str(evsel__tool_event(evsel)); in evsel__metric_id()
819 const char *evsel__group_name(struct evsel *evsel) in evsel__group_name() argument
821 return evsel->group_name ?: "anon group"; in evsel__group_name()
834 int evsel__group_desc(struct evsel *evsel, char *buf, size_t size) in evsel__group_desc() argument
838 struct evsel *pos; in evsel__group_desc()
839 const char *group_name = evsel__group_name(evsel); in evsel__group_desc()
841 if (!evsel->forced_leader) in evsel__group_desc()
844 for_each_group_evsel(pos, evsel) { in evsel__group_desc()
854 if (!evsel->forced_leader) in evsel__group_desc()
860 static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *opts, in __evsel__config_callchain() argument
863 bool function = evsel__is_function_event(evsel); in __evsel__config_callchain()
864 struct perf_event_attr *attr = &evsel->core.attr; in __evsel__config_callchain()
865 const char *arch = perf_env__arch(evsel__env(evsel)); in __evsel__config_callchain()
867 evsel__set_sample_bit(evsel, CALLCHAIN); in __evsel__config_callchain()
882 evsel__set_sample_bit(evsel, BRANCH_STACK); in __evsel__config_callchain()
896 evsel__set_sample_bit(evsel, REGS_USER); in __evsel__config_callchain()
897 evsel__set_sample_bit(evsel, STACK_USER); in __evsel__config_callchain()
921 void evsel__config_callchain(struct evsel *evsel, struct record_opts *opts, in evsel__config_callchain() argument
925 return __evsel__config_callchain(evsel, opts, param); in evsel__config_callchain()
928 static void evsel__reset_callgraph(struct evsel *evsel, struct callchain_param *param) in evsel__reset_callgraph() argument
930 struct perf_event_attr *attr = &evsel->core.attr; in evsel__reset_callgraph()
932 evsel__reset_sample_bit(evsel, CALLCHAIN); in evsel__reset_callgraph()
934 evsel__reset_sample_bit(evsel, BRANCH_STACK); in evsel__reset_callgraph()
940 evsel__reset_sample_bit(evsel, REGS_USER); in evsel__reset_callgraph()
941 evsel__reset_sample_bit(evsel, STACK_USER); in evsel__reset_callgraph()
945 static void evsel__apply_config_terms(struct evsel *evsel, in evsel__apply_config_terms() argument
949 struct list_head *config_terms = &evsel->config_terms; in evsel__apply_config_terms()
950 struct perf_event_attr *attr = &evsel->core.attr; in evsel__apply_config_terms()
965 evsel__reset_sample_bit(evsel, PERIOD); in evsel__apply_config_terms()
972 evsel__set_sample_bit(evsel, PERIOD); in evsel__apply_config_terms()
977 evsel__set_sample_bit(evsel, TIME); in evsel__apply_config_terms()
979 evsel__reset_sample_bit(evsel, TIME); in evsel__apply_config_terms()
986 evsel__set_sample_bit(evsel, BRANCH_STACK); in evsel__apply_config_terms()
990 evsel__reset_sample_bit(evsel, BRANCH_STACK); in evsel__apply_config_terms()
999 evsel->max_events = term->val.max_events; in evsel__apply_config_terms()
1050 evsel->name); in evsel__apply_config_terms()
1064 evsel__reset_callgraph(evsel, &callchain_param); in evsel__apply_config_terms()
1069 evsel__set_sample_bit(evsel, ADDR); in evsel__apply_config_terms()
1070 evsel__set_sample_bit(evsel, DATA_SRC); in evsel__apply_config_terms()
1071 evsel->core.attr.mmap_data = track; in evsel__apply_config_terms()
1073 evsel__config_callchain(evsel, opts, &param); in evsel__apply_config_terms()
1078 struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evsel_term_type type) in __evsel__get_config_term() argument
1082 list_for_each_entry(term, &evsel->config_terms, list) { in __evsel__get_config_term()
1090 void __weak arch_evsel__set_sample_weight(struct evsel *evsel) in arch_evsel__set_sample_weight() argument
1092 evsel__set_sample_bit(evsel, WEIGHT); in arch_evsel__set_sample_weight()
1095 void __weak arch__post_evsel_config(struct evsel *evsel __maybe_unused, in arch__post_evsel_config()
1111 static bool evsel__is_offcpu_event(struct evsel *evsel) in evsel__is_offcpu_event() argument
1113 return evsel__is_bpf_output(evsel) && evsel__name_is(evsel, OFFCPU_EVENT); in evsel__is_offcpu_event()
1144 void evsel__config(struct evsel *evsel, struct record_opts *opts, in evsel__config() argument
1147 struct evsel *leader = evsel__leader(evsel); in evsel__config()
1148 struct perf_event_attr *attr = &evsel->core.attr; in evsel__config()
1149 int track = evsel->tracking; in evsel__config()
1157 evsel__set_sample_bit(evsel, IP); in evsel__config()
1158 evsel__set_sample_bit(evsel, TID); in evsel__config()
1160 if (evsel->sample_read) { in evsel__config()
1161 evsel__set_sample_bit(evsel, READ); in evsel__config()
1167 evsel__set_sample_id(evsel, false); in evsel__config()
1183 if ((evsel->is_libpfm_event && !attr->sample_period) || in evsel__config()
1184 (!evsel->is_libpfm_event && (!attr->sample_period || in evsel__config()
1194 evsel__set_sample_bit(evsel, PERIOD); in evsel__config()
1200 evsel->core.attr.read_format |= in evsel__config()
1208 evsel__set_sample_bit(evsel, ADDR); in evsel__config()
1217 if (evsel__is_function_event(evsel)) in evsel__config()
1218 evsel->core.attr.exclude_callchain_user = 1; in evsel__config()
1220 if (callchain && callchain->enabled && !evsel->no_aux_samples) in evsel__config()
1221 evsel__config_callchain(evsel, opts, callchain); in evsel__config()
1223 if (opts->sample_intr_regs && !evsel->no_aux_samples && in evsel__config()
1224 !evsel__is_dummy_event(evsel)) { in evsel__config()
1226 evsel__set_sample_bit(evsel, REGS_INTR); in evsel__config()
1229 if (opts->sample_user_regs && !evsel->no_aux_samples && in evsel__config()
1230 !evsel__is_dummy_event(evsel)) { in evsel__config()
1232 evsel__set_sample_bit(evsel, REGS_USER); in evsel__config()
1236 evsel__set_sample_bit(evsel, CPU); in evsel__config()
1245 evsel__set_sample_bit(evsel, TIME); in evsel__config()
1247 if (opts->raw_samples && !evsel->no_aux_samples) { in evsel__config()
1248 evsel__set_sample_bit(evsel, TIME); in evsel__config()
1249 evsel__set_sample_bit(evsel, RAW); in evsel__config()
1250 evsel__set_sample_bit(evsel, CPU); in evsel__config()
1254 evsel__set_sample_bit(evsel, DATA_SRC); in evsel__config()
1257 evsel__set_sample_bit(evsel, PHYS_ADDR); in evsel__config()
1263 if (opts->branch_stack && !evsel->no_aux_samples) { in evsel__config()
1264 evsel__set_sample_bit(evsel, BRANCH_STACK); in evsel__config()
1269 arch_evsel__set_sample_weight(evsel); in evsel__config()
1290 evsel__set_sample_bit(evsel, CGROUP); in evsel__config()
1294 evsel__set_sample_bit(evsel, DATA_PAGE_SIZE); in evsel__config()
1297 evsel__set_sample_bit(evsel, CODE_PAGE_SIZE); in evsel__config()
1303 evsel__set_sample_bit(evsel, TRANSACTION); in evsel__config()
1306 evsel->core.attr.read_format |= in evsel__config()
1317 if (evsel__is_group_leader(evsel)) in evsel__config()
1324 if (target__none(&opts->target) && evsel__is_group_leader(evsel) && in evsel__config()
1328 if (evsel->immediate) { in evsel__config()
1339 if (evsel->precise_max) in evsel__config()
1352 if (evsel->core.own_cpus || evsel->unit) in evsel__config()
1353 evsel->core.attr.read_format |= PERF_FORMAT_ID; in evsel__config()
1359 evsel__apply_config_terms(evsel, opts, track); in evsel__config()
1361 evsel->ignore_missing_thread = opts->ignore_missing_thread; in evsel__config()
1366 evsel__set_sample_bit(evsel, PERIOD); in evsel__config()
1368 evsel__reset_sample_bit(evsel, PERIOD); in evsel__config()
1379 if (evsel__is_dummy_event(evsel)) in evsel__config()
1380 evsel__reset_sample_bit(evsel, BRANCH_STACK); in evsel__config()
1382 if (evsel__is_offcpu_event(evsel)) in evsel__config()
1383 evsel->core.attr.sample_type &= OFFCPU_SAMPLE_TYPES; in evsel__config()
1385 arch__post_evsel_config(evsel, attr); in evsel__config()
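
evsel__config() is where record_opts, the callchain parameters and the per-event config terms are folded into perf_event_attr: IP|TID are always requested, a missing fixed period falls back to frequency-based sampling, and dummy or off-CPU events get their sample types trimmed before the arch hook runs. A sketch of driving it directly, assuming perf's internal headers; config_for_sampling() is a hypothetical helper and the record_opts fields used (.freq, .sample_time) are assumed from util/record.h, with arbitrary values:

    #include "util/evsel.h"
    #include "util/record.h"             /* struct record_opts */

    static void config_for_sampling(struct evsel *evsel)
    {
            struct record_opts opts = {
                    .freq        = 4000,         /* sample at ~4 kHz instead of a fixed period */
                    .sample_time = true,         /* request PERF_SAMPLE_TIME */
            };

            /* NULL callchain: skip the callgraph configuration path. */
            evsel__config(evsel, &opts, /*callchain=*/NULL);

            /* attr now asks for IP|TID and (typically) TIME, with freq-based sampling at 4000 Hz. */
    }
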
1388 int evsel__set_filter(struct evsel *evsel, const char *filter) in evsel__set_filter() argument
1393 free(evsel->filter); in evsel__set_filter()
1394 evsel->filter = new_filter; in evsel__set_filter()
1401 static int evsel__append_filter(struct evsel *evsel, const char *fmt, const char *filter) in evsel__append_filter() argument
1405 if (evsel->filter == NULL) in evsel__append_filter()
1406 return evsel__set_filter(evsel, filter); in evsel__append_filter()
1408 if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) { in evsel__append_filter()
1409 free(evsel->filter); in evsel__append_filter()
1410 evsel->filter = new_filter; in evsel__append_filter()
1417 int evsel__append_tp_filter(struct evsel *evsel, const char *filter) in evsel__append_tp_filter() argument
1419 return evsel__append_filter(evsel, "(%s) && (%s)", filter); in evsel__append_tp_filter()
1422 int evsel__append_addr_filter(struct evsel *evsel, const char *filter) in evsel__append_addr_filter() argument
1424 return evsel__append_filter(evsel, "%s,%s", filter); in evsel__append_addr_filter()
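
evsel__set_filter() replaces the current filter string, while evsel__append_tp_filter() and evsel__append_addr_filter() combine the old and new strings with "(%s) && (%s)" and "%s,%s" respectively. A sketch for a tracepoint event, assuming perf's internal headers; narrow_filter() is a hypothetical helper and the field names in the expressions are only examples:

    #include "util/evsel.h"

    static int narrow_filter(struct evsel *evsel)
    {
            /* Start with a fresh filter string... */
            if (evsel__set_filter(evsel, "common_pid != 0"))
                    return -1;

            /* ...then AND another condition onto it:
             * the stored filter becomes "(common_pid != 0) && (prev_state == 0)". */
            return evsel__append_tp_filter(evsel, "prev_state == 0");
    }

Note that this only updates the string held in evsel->filter; it typically reaches the kernel later, after the event is opened, when the filter is applied with the PERF_EVENT_IOC_SET_FILTER ioctl.
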
1428 int evsel__enable_cpu(struct evsel *evsel, int cpu_map_idx) in evsel__enable_cpu() argument
1430 return perf_evsel__enable_cpu(&evsel->core, cpu_map_idx); in evsel__enable_cpu()
1433 int evsel__enable(struct evsel *evsel) in evsel__enable() argument
1435 int err = perf_evsel__enable(&evsel->core); in evsel__enable()
1438 evsel->disabled = false; in evsel__enable()
1443 int evsel__disable_cpu(struct evsel *evsel, int cpu_map_idx) in evsel__disable_cpu() argument
1445 return perf_evsel__disable_cpu(&evsel->core, cpu_map_idx); in evsel__disable_cpu()
1448 int evsel__disable(struct evsel *evsel) in evsel__disable() argument
1450 int err = perf_evsel__disable(&evsel->core); in evsel__disable()
1458 evsel->disabled = true; in evsel__disable()
1475 static void evsel__free_config_terms(struct evsel *evsel) in evsel__free_config_terms() argument
1477 free_config_terms(&evsel->config_terms); in evsel__free_config_terms()
1480 void evsel__exit(struct evsel *evsel) in evsel__exit() argument
1482 assert(list_empty(&evsel->core.node)); in evsel__exit()
1483 assert(evsel->evlist == NULL); in evsel__exit()
1484 bpf_counter__destroy(evsel); in evsel__exit()
1485 perf_bpf_filter__destroy(evsel); in evsel__exit()
1486 evsel__free_counts(evsel); in evsel__exit()
1487 perf_evsel__free_fd(&evsel->core); in evsel__exit()
1488 perf_evsel__free_id(&evsel->core); in evsel__exit()
1489 evsel__free_config_terms(evsel); in evsel__exit()
1490 cgroup__put(evsel->cgrp); in evsel__exit()
1491 perf_cpu_map__put(evsel->core.cpus); in evsel__exit()
1492 perf_cpu_map__put(evsel->core.own_cpus); in evsel__exit()
1493 perf_thread_map__put(evsel->core.threads); in evsel__exit()
1494 zfree(&evsel->group_name); in evsel__exit()
1495 zfree(&evsel->name); in evsel__exit()
1496 zfree(&evsel->filter); in evsel__exit()
1497 zfree(&evsel->pmu_name); in evsel__exit()
1498 zfree(&evsel->group_pmu_name); in evsel__exit()
1499 zfree(&evsel->unit); in evsel__exit()
1500 zfree(&evsel->metric_id); in evsel__exit()
1501 evsel__zero_per_pkg(evsel); in evsel__exit()
1502 hashmap__free(evsel->per_pkg_mask); in evsel__exit()
1503 evsel->per_pkg_mask = NULL; in evsel__exit()
1504 zfree(&evsel->metric_events); in evsel__exit()
1505 perf_evsel__object.fini(evsel); in evsel__exit()
1506 if (evsel__tool_event(evsel) == PERF_TOOL_SYSTEM_TIME || in evsel__exit()
1507 evsel__tool_event(evsel) == PERF_TOOL_USER_TIME) in evsel__exit()
1508 xyarray__delete(evsel->start_times); in evsel__exit()
1511 void evsel__delete(struct evsel *evsel) in evsel__delete() argument
1513 if (!evsel) in evsel__delete()
1516 evsel__exit(evsel); in evsel__delete()
1517 free(evsel); in evsel__delete()
1520 void evsel__compute_deltas(struct evsel *evsel, int cpu_map_idx, int thread, in evsel__compute_deltas() argument
1525 if (!evsel->prev_raw_counts) in evsel__compute_deltas()
1528 tmp = *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread); in evsel__compute_deltas()
1529 *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread) = *count; in evsel__compute_deltas()
1536 static int evsel__read_one(struct evsel *evsel, int cpu_map_idx, int thread) in evsel__read_one() argument
1538 struct perf_counts_values *count = perf_counts(evsel->counts, cpu_map_idx, thread); in evsel__read_one()
1540 return perf_evsel__read(&evsel->core, cpu_map_idx, thread, count); in evsel__read_one()
1543 static int evsel__read_retire_lat(struct evsel *evsel, int cpu_map_idx, int thread) in evsel__read_retire_lat() argument
1545 return tpebs_set_evsel(evsel, cpu_map_idx, thread); in evsel__read_retire_lat()
1548 static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread, in evsel__set_count()
1569 static bool evsel__group_has_tpebs(struct evsel *leader) in evsel__group_has_tpebs()
1571 struct evsel *evsel; in evsel__group_has_tpebs() local
1573 for_each_group_evsel(evsel, leader) { in evsel__group_has_tpebs()
1574 if (evsel__is_retire_lat(evsel)) in evsel__group_has_tpebs()
1580 static u64 evsel__group_read_nr_members(struct evsel *leader) in evsel__group_read_nr_members()
1583 struct evsel *evsel; in evsel__group_read_nr_members() local
1585 for_each_group_evsel(evsel, leader) { in evsel__group_read_nr_members()
1586 if (evsel__is_retire_lat(evsel)) in evsel__group_read_nr_members()
1592 static u64 evsel__group_read_size(struct evsel *leader) in evsel__group_read_size()
1623 static int evsel__process_group_data(struct evsel *leader, int cpu_map_idx, int thread, u64 *data) in evsel__process_group_data()
1642 struct evsel *counter; in evsel__process_group_data()
1657 static int evsel__read_group(struct evsel *leader, int cpu_map_idx, int thread) in evsel__read_group()
1779 static int evsel__read_tool(struct evsel *evsel, int cpu_map_idx, int thread) in evsel__read_tool() argument
1786 count = perf_counts(evsel->counts, cpu_map_idx, thread); in evsel__read_tool()
1788 switch (evsel__tool_event(evsel)) { in evsel__read_tool()
1795 start_time = &evsel->start_time; in evsel__read_tool()
1803 bool system = evsel__tool_event(evsel) == PERF_TOOL_SYSTEM_TIME; in evsel__read_tool()
1805 start_time = xyarray__entry(evsel->start_times, cpu_map_idx, thread); in evsel__read_tool()
1806 fd = FD(evsel, cpu_map_idx, thread); in evsel__read_tool()
1808 if (evsel->pid_stat) { in evsel__read_tool()
1817 struct perf_cpu cpu = perf_cpu_map__cpu(evsel->core.cpus, in evsel__read_tool()
1848 int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread) in evsel__read_counter() argument
1850 if (evsel__is_tool(evsel)) in evsel__read_counter()
1851 return evsel__read_tool(evsel, cpu_map_idx, thread); in evsel__read_counter()
1853 if (evsel__is_retire_lat(evsel)) in evsel__read_counter()
1854 return evsel__read_retire_lat(evsel, cpu_map_idx, thread); in evsel__read_counter()
1856 if (evsel->core.attr.read_format & PERF_FORMAT_GROUP) in evsel__read_counter()
1857 return evsel__read_group(evsel, cpu_map_idx, thread); in evsel__read_counter()
1859 return evsel__read_one(evsel, cpu_map_idx, thread); in evsel__read_counter()
1862 int __evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread, bool scale) in __evsel__read_on_cpu() argument
1867 if (FD(evsel, cpu_map_idx, thread) < 0) in __evsel__read_on_cpu()
1870 if (evsel->counts == NULL && evsel__alloc_counts(evsel) < 0) in __evsel__read_on_cpu()
1873 if (readn(FD(evsel, cpu_map_idx, thread), &count, nv * sizeof(u64)) <= 0) in __evsel__read_on_cpu()
1876 evsel__compute_deltas(evsel, cpu_map_idx, thread, &count); in __evsel__read_on_cpu()
1878 *perf_counts(evsel->counts, cpu_map_idx, thread) = count; in __evsel__read_on_cpu()
1882 static int evsel__match_other_cpu(struct evsel *evsel, struct evsel *other, in evsel__match_other_cpu() argument
1887 cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx); in evsel__match_other_cpu()
1891 static int evsel__hybrid_group_cpu_map_idx(struct evsel *evsel, int cpu_map_idx) in evsel__hybrid_group_cpu_map_idx() argument
1893 struct evsel *leader = evsel__leader(evsel); in evsel__hybrid_group_cpu_map_idx()
1895 if ((evsel__is_hybrid(evsel) && !evsel__is_hybrid(leader)) || in evsel__hybrid_group_cpu_map_idx()
1896 (!evsel__is_hybrid(evsel) && evsel__is_hybrid(leader))) { in evsel__hybrid_group_cpu_map_idx()
1897 return evsel__match_other_cpu(evsel, leader, cpu_map_idx); in evsel__hybrid_group_cpu_map_idx()
1903 static int get_group_fd(struct evsel *evsel, int cpu_map_idx, int thread) in get_group_fd() argument
1905 struct evsel *leader = evsel__leader(evsel); in get_group_fd()
1908 if (evsel__is_group_leader(evsel)) in get_group_fd()
1917 cpu_map_idx = evsel__hybrid_group_cpu_map_idx(evsel, cpu_map_idx); in get_group_fd()
1931 static void evsel__remove_fd(struct evsel *pos, int nr_cpus, int nr_threads, int thread_idx) in evsel__remove_fd()
1938 static int update_fds(struct evsel *evsel, in update_fds() argument
1942 struct evsel *pos; in update_fds()
1947 evlist__for_each_entry(evsel->evlist, pos) { in update_fds()
1948 nr_cpus = pos != evsel ? nr_cpus : cpu_map_idx; in update_fds()
1956 if (pos == evsel) in update_fds()
1962 static bool evsel__ignore_missing_thread(struct evsel *evsel, in evsel__ignore_missing_thread() argument
1969 if (!evsel->ignore_missing_thread) in evsel__ignore_missing_thread()
1973 if (evsel->core.system_wide) in evsel__ignore_missing_thread()
1988 if (update_fds(evsel, nr_cpus, cpu_map_idx, threads->nr, thread)) in evsel__ignore_missing_thread()
2015 bool evsel__precise_ip_fallback(struct evsel *evsel) in evsel__precise_ip_fallback() argument
2018 if (!evsel->precise_max) in evsel__precise_ip_fallback()
2025 if (!evsel->core.attr.precise_ip) { in evsel__precise_ip_fallback()
2026 evsel->core.attr.precise_ip = evsel->precise_ip_original; in evsel__precise_ip_fallback()
2030 if (!evsel->precise_ip_original) in evsel__precise_ip_fallback()
2031 evsel->precise_ip_original = evsel->core.attr.precise_ip; in evsel__precise_ip_fallback()
2033 evsel->core.attr.precise_ip--; in evsel__precise_ip_fallback()
2034 pr_debug2_peo("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip); in evsel__precise_ip_fallback()
2035 display_attr(&evsel->core.attr); in evsel__precise_ip_fallback()
2042 static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus, in __evsel__prepare_open() argument
2047 if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) || in __evsel__prepare_open()
2048 (perf_missing_features.aux_output && evsel->core.attr.aux_output)) in __evsel__prepare_open()
2071 if (evsel->core.fd == NULL && in __evsel__prepare_open()
2072 perf_evsel__alloc_fd(&evsel->core, perf_cpu_map__nr(cpus), nthreads) < 0) in __evsel__prepare_open()
2075 if ((evsel__tool_event(evsel) == PERF_TOOL_SYSTEM_TIME || in __evsel__prepare_open()
2076 evsel__tool_event(evsel) == PERF_TOOL_USER_TIME) && in __evsel__prepare_open()
2077 !evsel->start_times) { in __evsel__prepare_open()
2078 evsel->start_times = xyarray__new(perf_cpu_map__nr(cpus), nthreads, sizeof(__u64)); in __evsel__prepare_open()
2079 if (!evsel->start_times) in __evsel__prepare_open()
2083 evsel->open_flags = PERF_FLAG_FD_CLOEXEC; in __evsel__prepare_open()
2084 if (evsel->cgrp) in __evsel__prepare_open()
2085 evsel->open_flags |= PERF_FLAG_PID_CGROUP; in __evsel__prepare_open()
2090 static void evsel__disable_missing_features(struct evsel *evsel) in evsel__disable_missing_features() argument
2093 evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_COUNTERS; in evsel__disable_missing_features()
2095 evsel->core.attr.read_format &= ~PERF_FORMAT_LOST; in evsel__disable_missing_features()
2097 evsel__set_sample_bit(evsel, WEIGHT); in evsel__disable_missing_features()
2098 evsel__reset_sample_bit(evsel, WEIGHT_STRUCT); in evsel__disable_missing_features()
2101 evsel->core.attr.clockid = CLOCK_MONOTONIC; /* should always work */ in evsel__disable_missing_features()
2103 evsel->core.attr.use_clockid = 0; in evsel__disable_missing_features()
2104 evsel->core.attr.clockid = 0; in evsel__disable_missing_features()
2107 evsel->open_flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC; in evsel__disable_missing_features()
2109 evsel->core.attr.mmap2 = 0; in evsel__disable_missing_features()
2110 if (evsel->pmu && evsel->pmu->missing_features.exclude_guest) in evsel__disable_missing_features()
2111 evsel->core.attr.exclude_guest = evsel->core.attr.exclude_host = 0; in evsel__disable_missing_features()
2113 evsel->core.attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS | in evsel__disable_missing_features()
2115 if (perf_missing_features.group_read && evsel->core.attr.inherit) in evsel__disable_missing_features()
2116 evsel->core.attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID); in evsel__disable_missing_features()
2118 evsel->core.attr.ksymbol = 0; in evsel__disable_missing_features()
2120 evsel->core.attr.bpf_event = 0; in evsel__disable_missing_features()
2122 evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_HW_INDEX; in evsel__disable_missing_features()
2124 evsel->core.attr.sample_id_all = 0; in evsel__disable_missing_features()
2127 int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus, in evsel__prepare_open() argument
2132 err = __evsel__prepare_open(evsel, cpus, threads); in evsel__prepare_open()
2136 evsel__disable_missing_features(evsel); in evsel__prepare_open()
2141 bool evsel__detect_missing_features(struct evsel *evsel) in evsel__detect_missing_features() argument
2148 (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS)) { in evsel__detect_missing_features()
2153 (evsel->core.attr.read_format & PERF_FORMAT_LOST)) { in evsel__detect_missing_features()
2158 (evsel->core.attr.sample_type & PERF_SAMPLE_WEIGHT_STRUCT)) { in evsel__detect_missing_features()
2163 (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)) { in evsel__detect_missing_features()
2168 (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)) { in evsel__detect_missing_features()
2172 } else if (!perf_missing_features.cgroup && evsel->core.attr.cgroup) { in evsel__detect_missing_features()
2177 (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX)) { in evsel__detect_missing_features()
2181 } else if (!perf_missing_features.aux_output && evsel->core.attr.aux_output) { in evsel__detect_missing_features()
2185 } else if (!perf_missing_features.bpf && evsel->core.attr.bpf_event) { in evsel__detect_missing_features()
2189 } else if (!perf_missing_features.ksymbol && evsel->core.attr.ksymbol) { in evsel__detect_missing_features()
2193 } else if (!perf_missing_features.write_backward && evsel->core.attr.write_backward) { in evsel__detect_missing_features()
2197 } else if (!perf_missing_features.clockid_wrong && evsel->core.attr.use_clockid) { in evsel__detect_missing_features()
2201 } else if (!perf_missing_features.clockid && evsel->core.attr.use_clockid) { in evsel__detect_missing_features()
2205 } else if (!perf_missing_features.cloexec && (evsel->open_flags & PERF_FLAG_FD_CLOEXEC)) { in evsel__detect_missing_features()
2209 } else if (!perf_missing_features.mmap2 && evsel->core.attr.mmap2) { in evsel__detect_missing_features()
2213 } else if (evsel->core.attr.exclude_guest || evsel->core.attr.exclude_host) { in evsel__detect_missing_features()
2214 if (evsel->pmu == NULL) in evsel__detect_missing_features()
2215 evsel->pmu = evsel__find_pmu(evsel); in evsel__detect_missing_features()
2217 if (evsel->pmu) in evsel__detect_missing_features()
2218 evsel->pmu->missing_features.exclude_guest = true; in evsel__detect_missing_features()
2221 evsel->core.attr.exclude_host = false; in evsel__detect_missing_features()
2222 evsel->core.attr.exclude_guest = false; in evsel__detect_missing_features()
2225 if (evsel->exclude_GH) { in evsel__detect_missing_features()
2239 (evsel->core.attr.branch_sample_type & in evsel__detect_missing_features()
2246 evsel->core.attr.inherit && in evsel__detect_missing_features()
2247 (evsel->core.attr.read_format & PERF_FORMAT_GROUP) && in evsel__detect_missing_features()
2248 evsel__is_group_leader(evsel)) { in evsel__detect_missing_features()
2257 static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, in evsel__open_cpu() argument
2265 if (evsel__tool_event(evsel) == PERF_TOOL_DURATION_TIME) { in evsel__open_cpu()
2266 if (evsel->core.attr.sample_period) /* no sampling */ in evsel__open_cpu()
2268 evsel->start_time = rdclock(); in evsel__open_cpu()
2272 if (evsel__is_retire_lat(evsel)) in evsel__open_cpu()
2273 return tpebs_start(evsel->evlist); in evsel__open_cpu()
2275 err = __evsel__prepare_open(evsel, cpus, threads); in evsel__open_cpu()
2287 if (evsel->cgrp) in evsel__open_cpu()
2288 pid = evsel->cgrp->fd; in evsel__open_cpu()
2291 evsel__disable_missing_features(evsel); in evsel__open_cpu()
2293 pr_debug3("Opening: %s\n", evsel__name(evsel)); in evsel__open_cpu()
2294 display_attr(&evsel->core.attr); in evsel__open_cpu()
2304 if (!evsel->cgrp && !evsel->core.system_wide) in evsel__open_cpu()
2307 if (evsel__tool_event(evsel) == PERF_TOOL_USER_TIME || in evsel__open_cpu()
2308 evsel__tool_event(evsel) == PERF_TOOL_SYSTEM_TIME) { in evsel__open_cpu()
2309 bool system = evsel__tool_event(evsel) == PERF_TOOL_SYSTEM_TIME; in evsel__open_cpu()
2312 if (evsel->core.attr.sample_period) { in evsel__open_cpu()
2322 evsel->pid_stat = true; in evsel__open_cpu()
2326 FD(evsel, idx, thread) = fd; in evsel__open_cpu()
2331 start_time = xyarray__entry(evsel->start_times, idx, thread); in evsel__open_cpu()
2338 cpu = perf_cpu_map__cpu(evsel->core.cpus, idx); in evsel__open_cpu()
2347 group_fd = get_group_fd(evsel, idx, thread); in evsel__open_cpu()
2350 pr_debug("broken group leader for %s\n", evsel->name); in evsel__open_cpu()
2359 pid, perf_cpu_map__cpu(cpus, idx).cpu, group_fd, evsel->open_flags); in evsel__open_cpu()
2361 fd = sys_perf_event_open(&evsel->core.attr, pid, in evsel__open_cpu()
2363 group_fd, evsel->open_flags); in evsel__open_cpu()
2365 FD(evsel, idx, thread) = fd; in evsel__open_cpu()
2375 bpf_counter__install_pe(evsel, idx, fd); in evsel__open_cpu()
2378 test_attr__open(&evsel->core.attr, pid, in evsel__open_cpu()
2380 fd, group_fd, evsel->open_flags); in evsel__open_cpu()
2386 if (evsel->bpf_fd >= 0) { in evsel__open_cpu()
2388 int bpf_fd = evsel->bpf_fd; in evsel__open_cpu()
2418 if (evsel__precise_ip_fallback(evsel)) in evsel__open_cpu()
2421 if (evsel__ignore_missing_thread(evsel, perf_cpu_map__nr(cpus), in evsel__open_cpu()
2440 if (evsel__detect_missing_features(evsel)) in evsel__open_cpu()
2449 if (FD(evsel, idx, thread) >= 0) in evsel__open_cpu()
2450 close(FD(evsel, idx, thread)); in evsel__open_cpu()
2451 FD(evsel, idx, thread) = -1; in evsel__open_cpu()
2459 int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus, in evsel__open() argument
2462 return evsel__open_cpu(evsel, cpus, threads, 0, perf_cpu_map__nr(cpus)); in evsel__open()
2465 void evsel__close(struct evsel *evsel) in evsel__close() argument
2467 if (evsel__is_retire_lat(evsel)) in evsel__close()
2469 perf_evsel__close(&evsel->core); in evsel__close()
2470 perf_evsel__free_id(&evsel->core); in evsel__close()
2473 int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx) in evsel__open_per_cpu() argument
2476 return evsel__open_cpu(evsel, cpus, NULL, 0, perf_cpu_map__nr(cpus)); in evsel__open_per_cpu()
2478 return evsel__open_cpu(evsel, cpus, NULL, cpu_map_idx, cpu_map_idx + 1); in evsel__open_per_cpu()
2481 int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads) in evsel__open_per_thread() argument
2483 return evsel__open(evsel, NULL, threads); in evsel__open_per_thread()
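
Putting the open/enable/read path together: evsel__open_per_thread() is evsel__open() with a NULL CPU map, evsel__enable()/evsel__disable() flip the ioctl state, and evsel__read_on_cpu() (the wrapper around __evsel__read_on_cpu() above) allocates evsel->counts on first use and reads one cpu/thread slot. A self-counting sketch, assuming perf's internal headers; count_own_task_clock() is a hypothetical helper:

    #include "util/evsel.h"
    #include "util/thread_map.h"         /* thread_map__new_by_tid() */
    #include "util/counts.h"             /* perf_counts() */
    #include "util/debug.h"              /* pr_debug() */
    #include <perf/threadmap.h>          /* perf_thread_map__put() */
    #include <inttypes.h>
    #include <unistd.h>

    static int count_own_task_clock(void)
    {
            struct perf_event_attr attr = {
                    .type     = PERF_TYPE_SOFTWARE,
                    .config   = PERF_COUNT_SW_TASK_CLOCK,
                    .disabled = 1,
            };
            struct perf_thread_map *threads = thread_map__new_by_tid(getpid());
            struct evsel *evsel = evsel__new(&attr);
            int err = -1;

            if (threads == NULL || evsel == NULL)
                    goto out;

            /* NULL cpus: count on any CPU, but only for the threads in this map. */
            if (evsel__open_per_thread(evsel, threads) < 0)
                    goto out;

            evsel__enable(evsel);
            usleep(1000);                        /* let the counter accumulate */
            evsel__disable(evsel);

            /* Reads fd[0][0] into evsel->counts (allocated on demand). */
            err = evsel__read_on_cpu(evsel, 0, 0);
            if (!err)
                    pr_debug("task-clock: %" PRIu64 " ns\n",
                             perf_counts(evsel->counts, 0, 0)->val);

            evsel__close(evsel);
    out:
            evsel__delete(evsel);
            perf_thread_map__put(threads);
            return err;
    }
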
2486 static int perf_evsel__parse_id_sample(const struct evsel *evsel, in perf_evsel__parse_id_sample() argument
2490 u64 type = evsel->core.attr.sample_type; in perf_evsel__parse_id_sample()
2492 bool swapped = evsel->needs_swap; in perf_evsel__parse_id_sample()
2637 static inline bool evsel__has_branch_counters(const struct evsel *evsel) in evsel__has_branch_counters() argument
2639 struct evsel *leader = evsel__leader(evsel); in evsel__has_branch_counters()
2642 if (!leader || !evsel->evlist) in evsel__has_branch_counters()
2645 if (evsel->evlist->nr_br_cntr < 0) in evsel__has_branch_counters()
2646 evlist__update_br_cntr(evsel->evlist); in evsel__has_branch_counters()
2654 int evsel__parse_sample(struct evsel *evsel, union perf_event *event, in evsel__parse_sample() argument
2657 u64 type = evsel->core.attr.sample_type; in evsel__parse_sample()
2658 bool swapped = evsel->needs_swap; in evsel__parse_sample()
2673 data->period = evsel->core.attr.sample_period; in evsel__parse_sample()
2680 if (!evsel->core.attr.sample_id_all) in evsel__parse_sample()
2682 return perf_evsel__parse_id_sample(evsel, event, data); in evsel__parse_sample()
2687 if (perf_event__check_size(event, evsel->sample_size)) in evsel__parse_sample()
2753 u64 read_format = evsel->core.attr.read_format; in evsel__parse_sample()
2857 if (evsel__has_branch_hw_idx(evsel)) { in evsel__parse_sample()
2887 if (evsel__has_branch_counters(evsel)) { in evsel__parse_sample()
2902 u64 mask = evsel->core.attr.sample_regs_user; in evsel__parse_sample()
2958 u64 mask = evsel->core.attr.sample_regs_intr; in evsel__parse_sample()
3008 int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event, in evsel__parse_sample_timestamp() argument
3011 u64 type = evsel->core.attr.sample_type; in evsel__parse_sample_timestamp()
3022 if (!evsel->core.attr.sample_id_all) in evsel__parse_sample_timestamp()
3024 if (perf_evsel__parse_id_sample(evsel, event, &data)) in evsel__parse_sample_timestamp()
3033 if (perf_event__check_size(event, evsel->sample_size)) in evsel__parse_sample_timestamp()
3051 u16 evsel__id_hdr_size(const struct evsel *evsel) in evsel__id_hdr_size() argument
3053 u64 sample_type = evsel->core.attr.sample_type; in evsel__id_hdr_size()
3078 struct tep_format_field *evsel__field(struct evsel *evsel, const char *name) in evsel__field() argument
3080 return tep_find_field(evsel->tp_format, name); in evsel__field()
3083 struct tep_format_field *evsel__common_field(struct evsel *evsel, const char *name) in evsel__common_field() argument
3085 return tep_find_common_field(evsel->tp_format, name); in evsel__common_field()
3088 void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name) in evsel__rawptr() argument
3090 struct tep_format_field *field = evsel__field(evsel, name); in evsel__rawptr()
3147 u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *name) in evsel__intval() argument
3149 struct tep_format_field *field = evsel__field(evsel, name); in evsel__intval()
3151 return field ? format_field__intval(field, sample, evsel->needs_swap) : 0; in evsel__intval()
3154 u64 evsel__intval_common(struct evsel *evsel, struct perf_sample *sample, const char *name) in evsel__intval_common() argument
3156 struct tep_format_field *field = evsel__common_field(evsel, name); in evsel__intval_common()
3158 return field ? format_field__intval(field, sample, evsel->needs_swap) : 0; in evsel__intval_common()
3161 char evsel__taskstate(struct evsel *evsel, struct perf_sample *sample, const char *name) in evsel__taskstate() argument
3170 field = evsel__field(evsel, name); in evsel__taskstate()
3189 val = evsel__intval(evsel, sample, name); in evsel__taskstate()
3196 bool evsel__fallback(struct evsel *evsel, struct target *target, int err, in evsel__fallback() argument
3202 evsel->core.attr.type == PERF_TYPE_HARDWARE && in evsel__fallback()
3203 evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) { in evsel__fallback()
3211 evsel->core.attr.type = PERF_TYPE_SOFTWARE; in evsel__fallback()
3212 evsel->core.attr.config = target__has_cpu(target) in evsel__fallback()
3219 zfree(&evsel->name); in evsel__fallback()
3221 } else if (err == EACCES && !evsel->core.attr.exclude_kernel && in evsel__fallback()
3223 const char *name = evsel__name(evsel); in evsel__fallback()
3228 if (evsel->core.attr.exclude_user) in evsel__fallback()
3233 (strchr(name, ':') && !evsel->is_libpfm_event)) in evsel__fallback()
3239 free(evsel->name); in evsel__fallback()
3240 evsel->name = new_name; in evsel__fallback()
3244 evsel->core.attr.exclude_kernel = 1; in evsel__fallback()
3245 evsel->core.attr.exclude_hv = 1; in evsel__fallback()
3289 int __weak arch_evsel__open_strerror(struct evsel *evsel __maybe_unused, in arch_evsel__open_strerror()
3296 int evsel__open_strerror(struct evsel *evsel, struct target *target, in evsel__open_strerror() argument
3320 "No permission to enable %s event.\n\n", evsel__name(evsel)); in evsel__open_strerror()
3338 return scnprintf(msg, size, "The %s event is not supported.", evsel__name(evsel)); in evsel__open_strerror()
3346 if (evsel__has_callchain(evsel) && in evsel__open_strerror()
3359 if (evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK) in evsel__open_strerror()
3362 evsel__name(evsel)); in evsel__open_strerror()
3363 if (evsel->core.attr.aux_output) in evsel__open_strerror()
3366 evsel__name(evsel)); in evsel__open_strerror()
3367 if (evsel->core.attr.sample_period != 0) in evsel__open_strerror()
3370 evsel__name(evsel)); in evsel__open_strerror()
3371 if (evsel->core.attr.precise_ip) in evsel__open_strerror()
3375 if (evsel->core.attr.type == PERF_TYPE_HARDWARE) in evsel__open_strerror()
3387 …if (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE && perf_missing_features.code_page_s… in evsel__open_strerror()
3389 …if (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE && perf_missing_features.data_page_s… in evsel__open_strerror()
3391 if (evsel->core.attr.write_backward && perf_missing_features.write_backward) in evsel__open_strerror()
3402 evsel__name(evsel)); in evsel__open_strerror()
3412 ret = arch_evsel__open_strerror(evsel, msg, size); in evsel__open_strerror()
3419 err, str_error_r(err, sbuf, sizeof(sbuf)), evsel__name(evsel)); in evsel__open_strerror()
3422 struct perf_env *evsel__env(struct evsel *evsel) in evsel__env() argument
3424 if (evsel && evsel->evlist && evsel->evlist->env) in evsel__env()
3425 return evsel->evlist->env; in evsel__env()
3429 static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist) in store_evsel_ids() argument
3433 if (evsel__is_retire_lat(evsel)) in store_evsel_ids()
3436 for (cpu_map_idx = 0; cpu_map_idx < xyarray__max_x(evsel->core.fd); cpu_map_idx++) { in store_evsel_ids()
3437 for (thread = 0; thread < xyarray__max_y(evsel->core.fd); in store_evsel_ids()
3439 int fd = FD(evsel, cpu_map_idx, thread); in store_evsel_ids()
3441 if (perf_evlist__id_add_fd(&evlist->core, &evsel->core, in store_evsel_ids()
3450 int evsel__store_ids(struct evsel *evsel, struct evlist *evlist) in evsel__store_ids() argument
3452 struct perf_cpu_map *cpus = evsel->core.cpus; in evsel__store_ids()
3453 struct perf_thread_map *threads = evsel->core.threads; in evsel__store_ids()
3455 if (perf_evsel__alloc_id(&evsel->core, perf_cpu_map__nr(cpus), threads->nr)) in evsel__store_ids()
3458 return store_evsel_ids(evsel, evlist); in evsel__store_ids()
3461 void evsel__zero_per_pkg(struct evsel *evsel) in evsel__zero_per_pkg() argument
3466 if (evsel->per_pkg_mask) { in evsel__zero_per_pkg()
3467 hashmap__for_each_entry(evsel->per_pkg_mask, cur, bkt) in evsel__zero_per_pkg()
3470 hashmap__clear(evsel->per_pkg_mask); in evsel__zero_per_pkg()
3479 bool evsel__is_hybrid(const struct evsel *evsel) in evsel__is_hybrid() argument
3484 return evsel->core.is_pmu_core; in evsel__is_hybrid()
3487 struct evsel *evsel__leader(const struct evsel *evsel) in evsel__leader() argument
3489 return container_of(evsel->core.leader, struct evsel, core); in evsel__leader()
3492 bool evsel__has_leader(struct evsel *evsel, struct evsel *leader) in evsel__has_leader() argument
3494 return evsel->core.leader == &leader->core; in evsel__has_leader()
3497 bool evsel__is_leader(struct evsel *evsel) in evsel__is_leader() argument
3499 return evsel__has_leader(evsel, evsel); in evsel__is_leader()
3502 void evsel__set_leader(struct evsel *evsel, struct evsel *leader) in evsel__set_leader() argument
3504 evsel->core.leader = &leader->core; in evsel__set_leader()
3507 int evsel__source_count(const struct evsel *evsel) in evsel__source_count() argument
3509 struct evsel *pos; in evsel__source_count()
3512 evlist__for_each_entry(evsel->evlist, pos) { in evsel__source_count()
3513 if (pos->metric_leader == evsel) in evsel__source_count()
3519 bool __weak arch_evsel__must_be_in_group(const struct evsel *evsel __maybe_unused) in arch_evsel__must_be_in_group()
3529 void evsel__remove_from_group(struct evsel *evsel, struct evsel *leader) in evsel__remove_from_group() argument
3531 if (!arch_evsel__must_be_in_group(evsel) && evsel != leader) { in evsel__remove_from_group()
3532 evsel__set_leader(evsel, evsel); in evsel__remove_from_group()
3533 evsel->core.nr_members = 0; in evsel__remove_from_group()
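
The leader helpers at the end operate purely on the libperf core fields: evsel__leader() maps core.leader back to the containing struct evsel, and evsel__set_leader()/evsel__remove_from_group() rewire it. Group wiring is normally done by parse_events()/evlist__set_leader(); the sketch below does it by hand only to show the helpers. It assumes perf's internal headers and two freshly created, not-yet-opened evsels; pair_up() is a hypothetical helper:

    #include "util/evsel.h"
    #include <assert.h>

    static void pair_up(struct evsel *leader, struct evsel *member)
    {
            evsel__set_leader(member, leader);
            leader->core.nr_members = 2;         /* the leader counts itself */

            assert(evsel__leader(member) == leader);
            assert(evsel__has_leader(member, leader));
            assert(evsel__is_leader(leader) && !evsel__is_leader(member));

            /* Undo: the member becomes its own leader again (unless the arch forbids it). */
            evsel__remove_from_group(member, leader);
    }
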