Lines matching refs:evsel (each entry gives the source line number, the matching line, and the enclosing function)

62 static int evsel__no_extra_init(struct evsel *evsel __maybe_unused)  in evsel__no_extra_init()
69 static void evsel__no_extra_fini(struct evsel *evsel __maybe_unused) in evsel__no_extra_fini()
75 int (*init)(struct evsel *evsel);
76 void (*fini)(struct evsel *evsel);
78 .size = sizeof(struct evsel),
83 int evsel__object_config(size_t object_size, int (*init)(struct evsel *evsel), in evsel__object_config() argument
84 void (*fini)(struct evsel *evsel)) in evsel__object_config() argument
183 void evsel__calc_id_pos(struct evsel *evsel) in evsel__calc_id_pos() argument
185 evsel->id_pos = __perf_evsel__calc_id_pos(evsel->core.attr.sample_type); in evsel__calc_id_pos()
186 evsel->is_pos = __perf_evsel__calc_is_pos(evsel->core.attr.sample_type); in evsel__calc_id_pos()
189 void __evsel__set_sample_bit(struct evsel *evsel, in __evsel__set_sample_bit() argument
192 if (!(evsel->core.attr.sample_type & bit)) { in __evsel__set_sample_bit()
193 evsel->core.attr.sample_type |= bit; in __evsel__set_sample_bit()
194 evsel->sample_size += sizeof(u64); in __evsel__set_sample_bit()
195 evsel__calc_id_pos(evsel); in __evsel__set_sample_bit()
199 void __evsel__reset_sample_bit(struct evsel *evsel, in __evsel__reset_sample_bit() argument
202 if (evsel->core.attr.sample_type & bit) { in __evsel__reset_sample_bit()
203 evsel->core.attr.sample_type &= ~bit; in __evsel__reset_sample_bit()
204 evsel->sample_size -= sizeof(u64); in __evsel__reset_sample_bit()
205 evsel__calc_id_pos(evsel); in __evsel__reset_sample_bit()
209 void evsel__set_sample_id(struct evsel *evsel, in evsel__set_sample_id() argument
213 evsel__reset_sample_bit(evsel, ID); in evsel__set_sample_id()
214 evsel__set_sample_bit(evsel, IDENTIFIER); in evsel__set_sample_id()
216 evsel__set_sample_bit(evsel, ID); in evsel__set_sample_id()
218 evsel->core.attr.read_format |= PERF_FORMAT_ID; in evsel__set_sample_id()
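The __evsel__set_sample_bit()/__evsel__reset_sample_bit() lines above keep one invariant: every PERF_SAMPLE_* bit toggled in attr.sample_type adds or removes exactly one u64 from the expected per-sample size, and id_pos/is_pos are recomputed afterwards. A minimal standalone sketch of that pattern, using a simplified toy_evsel stand-in rather than perf's real struct evsel:

    /* Minimal sketch of the set/reset-sample-bit pattern shown above.
     * "struct toy_evsel" is a simplified stand-in, not perf's struct evsel. */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <linux/perf_event.h>   /* PERF_SAMPLE_* bits */

    struct toy_evsel {
        uint64_t sample_type;   /* stands in for evsel->core.attr.sample_type */
        size_t   sample_size;   /* bytes of fixed per-sample payload */
    };

    static void toy_set_sample_bit(struct toy_evsel *e, uint64_t bit)
    {
        if (!(e->sample_type & bit)) {
            e->sample_type |= bit;
            e->sample_size += sizeof(uint64_t);
            /* the real helper also recomputes id_pos/is_pos here */
        }
    }

    static void toy_reset_sample_bit(struct toy_evsel *e, uint64_t bit)
    {
        if (e->sample_type & bit) {
            e->sample_type &= ~bit;
            e->sample_size -= sizeof(uint64_t);
        }
    }

    int main(void)
    {
        struct toy_evsel e = { .sample_type = 0, .sample_size = 0 };

        toy_set_sample_bit(&e, PERF_SAMPLE_IP);
        toy_set_sample_bit(&e, PERF_SAMPLE_TID);
        toy_set_sample_bit(&e, PERF_SAMPLE_TID);   /* no-op: bit already set */
        toy_reset_sample_bit(&e, PERF_SAMPLE_IP);

        printf("sample_type=%#llx sample_size=%zu\n",
               (unsigned long long)e.sample_type, e.sample_size);
        return 0;
    }

Because both directions keep sample_size consistent, evsel__set_sample_id() above can swap the ID bit for IDENTIFIER without tracking sizes by hand.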
229 bool evsel__is_function_event(struct evsel *evsel) in evsel__is_function_event() argument
233 return evsel->name && in evsel__is_function_event()
234 !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT)); in evsel__is_function_event()
239 void evsel__init(struct evsel *evsel, in evsel__init() argument
242 perf_evsel__init(&evsel->core, attr, idx); in evsel__init()
243 evsel->tracking = !idx; in evsel__init()
244 evsel->unit = strdup(""); in evsel__init()
245 evsel->scale = 1.0; in evsel__init()
246 evsel->max_events = ULONG_MAX; in evsel__init()
247 evsel->evlist = NULL; in evsel__init()
248 evsel->bpf_obj = NULL; in evsel__init()
249 evsel->bpf_fd = -1; in evsel__init()
250 INIT_LIST_HEAD(&evsel->config_terms); in evsel__init()
251 INIT_LIST_HEAD(&evsel->bpf_counter_list); in evsel__init()
252 perf_evsel__object.init(evsel); in evsel__init()
253 evsel->sample_size = __evsel__sample_size(attr->sample_type); in evsel__init()
254 evsel__calc_id_pos(evsel); in evsel__init()
255 evsel->cmdline_group_boundary = false; in evsel__init()
256 evsel->metric_expr = NULL; in evsel__init()
257 evsel->metric_name = NULL; in evsel__init()
258 evsel->metric_events = NULL; in evsel__init()
259 evsel->per_pkg_mask = NULL; in evsel__init()
260 evsel->collect_stat = false; in evsel__init()
261 evsel->pmu_name = NULL; in evsel__init()
264 struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx) in evsel__new_idx()
266 struct evsel *evsel = zalloc(perf_evsel__object.size); in evsel__new_idx() local
268 if (!evsel) in evsel__new_idx()
270 evsel__init(evsel, attr, idx); in evsel__new_idx()
272 if (evsel__is_bpf_output(evsel)) { in evsel__new_idx()
273 evsel->core.attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | in evsel__new_idx()
275 evsel->core.attr.sample_period = 1; in evsel__new_idx()
278 if (evsel__is_clock(evsel)) { in evsel__new_idx()
279 free((char *)evsel->unit); in evsel__new_idx()
280 evsel->unit = strdup("msec"); in evsel__new_idx()
281 evsel->scale = 1e-6; in evsel__new_idx()
284 return evsel; in evsel__new_idx()
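evsel__object_config() and evsel__new_idx() above show how tools can enlarge the evsel allocation and hook extra init/fini work: the allocator uses perf_evsel__object.size instead of sizeof(struct evsel). A simplified, self-contained sketch of that pattern (the toy_* names are stand-ins, not the perf API):

    #include <stdio.h>
    #include <stdlib.h>

    struct toy_evsel { int idx; };

    static int  toy_no_extra_init(struct toy_evsel *e) { (void)e; return 0; }
    static void toy_no_extra_fini(struct toy_evsel *e) { (void)e; }

    /* mirrors perf_evsel__object: a process-wide size plus init/fini hooks */
    static struct {
        size_t size;
        int  (*init)(struct toy_evsel *e);
        void (*fini)(struct toy_evsel *e);
    } toy_object = {
        .size = sizeof(struct toy_evsel),
        .init = toy_no_extra_init,
        .fini = toy_no_extra_fini,
    };

    static int toy_object_config(size_t size,
                                 int (*init)(struct toy_evsel *e),
                                 void (*fini)(struct toy_evsel *e))
    {
        if (size < sizeof(struct toy_evsel))
            return -1;
        toy_object.size = size;
        if (init)
            toy_object.init = init;
        if (fini)
            toy_object.fini = fini;
        return 0;
    }

    static struct toy_evsel *toy_new_idx(int idx)
    {
        /* calloc(toy_object.size) is the zalloc(perf_evsel__object.size) step */
        struct toy_evsel *e = calloc(1, toy_object.size);

        if (!e)
            return NULL;
        e->idx = idx;
        toy_object.init(e);     /* tool-specific extra initialization */
        return e;
    }

    /* a tool embedding extra per-event state; the evsel must come first
     * so that a toy_evsel pointer can be cast back to the outer struct */
    struct tool_evsel {
        struct toy_evsel evsel;
        int extra;
    };

    static int tool_init(struct toy_evsel *e)
    {
        ((struct tool_evsel *)e)->extra = 42;
        return 0;
    }

    int main(void)
    {
        struct toy_evsel *e;

        toy_object_config(sizeof(struct tool_evsel), tool_init, NULL);
        e = toy_new_idx(0);
        if (!e)
            return 1;
        printf("idx=%d extra=%d\n", e->idx, ((struct tool_evsel *)e)->extra);
        toy_object.fini(e);
        free(e);
        return 0;
    }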
292 struct evsel *evsel__new_cycles(bool precise __maybe_unused, __u32 type, __u64 config) in evsel__new_cycles()
299 struct evsel *evsel; in evsel__new_cycles() local
307 evsel = evsel__new(&attr); in evsel__new_cycles()
308 if (evsel == NULL) in evsel__new_cycles()
311 arch_evsel__fixup_new_cycles(&evsel->core.attr); in evsel__new_cycles()
313 evsel->precise_max = true; in evsel__new_cycles()
316 if (asprintf(&evsel->name, "cycles%s%s%.*s", in evsel__new_cycles()
322 return evsel; in evsel__new_cycles()
324 evsel__delete(evsel); in evsel__new_cycles()
325 evsel = NULL; in evsel__new_cycles()
351 static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src) in evsel__copy_config_terms()
363 struct evsel *evsel__clone(struct evsel *orig) in evsel__clone()
365 struct evsel *evsel; in evsel__clone() local
376 evsel = evsel__new(&orig->core.attr); in evsel__clone()
377 if (evsel == NULL) in evsel__clone()
380 evsel->core.cpus = perf_cpu_map__get(orig->core.cpus); in evsel__clone()
381 evsel->core.own_cpus = perf_cpu_map__get(orig->core.own_cpus); in evsel__clone()
382 evsel->core.threads = perf_thread_map__get(orig->core.threads); in evsel__clone()
383 evsel->core.nr_members = orig->core.nr_members; in evsel__clone()
384 evsel->core.system_wide = orig->core.system_wide; in evsel__clone()
387 evsel->name = strdup(orig->name); in evsel__clone()
388 if (evsel->name == NULL) in evsel__clone()
392 evsel->group_name = strdup(orig->group_name); in evsel__clone()
393 if (evsel->group_name == NULL) in evsel__clone()
397 evsel->pmu_name = strdup(orig->pmu_name); in evsel__clone()
398 if (evsel->pmu_name == NULL) in evsel__clone()
402 evsel->filter = strdup(orig->filter); in evsel__clone()
403 if (evsel->filter == NULL) in evsel__clone()
407 evsel->metric_id = strdup(orig->metric_id); in evsel__clone()
408 if (evsel->metric_id == NULL) in evsel__clone()
411 evsel->cgrp = cgroup__get(orig->cgrp); in evsel__clone()
412 evsel->tp_format = orig->tp_format; in evsel__clone()
413 evsel->handler = orig->handler; in evsel__clone()
414 evsel->core.leader = orig->core.leader; in evsel__clone()
416 evsel->max_events = orig->max_events; in evsel__clone()
417 evsel->tool_event = orig->tool_event; in evsel__clone()
418 free((char *)evsel->unit); in evsel__clone()
419 evsel->unit = strdup(orig->unit); in evsel__clone()
420 if (evsel->unit == NULL) in evsel__clone()
423 evsel->scale = orig->scale; in evsel__clone()
424 evsel->snapshot = orig->snapshot; in evsel__clone()
425 evsel->per_pkg = orig->per_pkg; in evsel__clone()
426 evsel->percore = orig->percore; in evsel__clone()
427 evsel->precise_max = orig->precise_max; in evsel__clone()
428 evsel->use_uncore_alias = orig->use_uncore_alias; in evsel__clone()
429 evsel->is_libpfm_event = orig->is_libpfm_event; in evsel__clone()
431 evsel->exclude_GH = orig->exclude_GH; in evsel__clone()
432 evsel->sample_read = orig->sample_read; in evsel__clone()
433 evsel->auto_merge_stats = orig->auto_merge_stats; in evsel__clone()
434 evsel->collect_stat = orig->collect_stat; in evsel__clone()
435 evsel->weak_group = orig->weak_group; in evsel__clone()
436 evsel->use_config_name = orig->use_config_name; in evsel__clone()
438 if (evsel__copy_config_terms(evsel, orig) < 0) in evsel__clone()
441 return evsel; in evsel__clone()
444 evsel__delete(evsel); in evsel__clone()
451 struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx) in evsel__newtp_idx()
453 struct evsel *evsel = zalloc(perf_evsel__object.size); in evsel__newtp_idx() local
456 if (evsel == NULL) { in evsel__newtp_idx()
465 if (asprintf(&evsel->name, "%s:%s", sys, name) < 0) in evsel__newtp_idx()
468 evsel->tp_format = trace_event__tp_format(sys, name); in evsel__newtp_idx()
469 if (IS_ERR(evsel->tp_format)) { in evsel__newtp_idx()
470 err = PTR_ERR(evsel->tp_format); in evsel__newtp_idx()
475 attr.config = evsel->tp_format->id; in evsel__newtp_idx()
477 evsel__init(evsel, &attr, idx); in evsel__newtp_idx()
480 return evsel; in evsel__newtp_idx()
483 zfree(&evsel->name); in evsel__newtp_idx()
484 free(evsel); in evsel__newtp_idx()
532 static int evsel__add_modifiers(struct evsel *evsel, char *bf, size_t size) in evsel__add_modifiers() argument
535 struct perf_event_attr *attr = &evsel->core.attr; in evsel__add_modifiers()
568 static int evsel__hw_name(struct evsel *evsel, char *bf, size_t size) in evsel__hw_name() argument
570 int r = scnprintf(bf, size, "%s", __evsel__hw_name(evsel->core.attr.config)); in evsel__hw_name()
571 return r + evsel__add_modifiers(evsel, bf + r, size - r); in evsel__hw_name()
594 static int evsel__sw_name(struct evsel *evsel, char *bf, size_t size) in evsel__sw_name() argument
596 int r = scnprintf(bf, size, "%s", __evsel__sw_name(evsel->core.attr.config)); in evsel__sw_name()
597 return r + evsel__add_modifiers(evsel, bf + r, size - r); in evsel__sw_name()
618 static int evsel__bp_name(struct evsel *evsel, char *bf, size_t size) in evsel__bp_name() argument
620 struct perf_event_attr *attr = &evsel->core.attr; in evsel__bp_name()
622 return r + evsel__add_modifiers(evsel, bf + r, size - r); in evsel__bp_name()
714 static int evsel__hw_cache_name(struct evsel *evsel, char *bf, size_t size) in evsel__hw_cache_name() argument
716 int ret = __evsel__hw_cache_name(evsel->core.attr.config, bf, size); in evsel__hw_cache_name()
717 return ret + evsel__add_modifiers(evsel, bf + ret, size - ret); in evsel__hw_cache_name()
720 static int evsel__raw_name(struct evsel *evsel, char *bf, size_t size) in evsel__raw_name() argument
722 int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->core.attr.config); in evsel__raw_name()
723 return ret + evsel__add_modifiers(evsel, bf + ret, size - ret); in evsel__raw_name()
732 const char *evsel__name(struct evsel *evsel) in evsel__name() argument
736 if (!evsel) in evsel__name()
739 if (evsel->name) in evsel__name()
740 return evsel->name; in evsel__name()
742 switch (evsel->core.attr.type) { in evsel__name()
744 evsel__raw_name(evsel, bf, sizeof(bf)); in evsel__name()
748 evsel__hw_name(evsel, bf, sizeof(bf)); in evsel__name()
752 evsel__hw_cache_name(evsel, bf, sizeof(bf)); in evsel__name()
756 if (evsel->tool_event) in evsel__name()
759 evsel__sw_name(evsel, bf, sizeof(bf)); in evsel__name()
767 evsel__bp_name(evsel, bf, sizeof(bf)); in evsel__name()
772 evsel->core.attr.type); in evsel__name()
776 evsel->name = strdup(bf); in evsel__name()
778 if (evsel->name) in evsel__name()
779 return evsel->name; in evsel__name()
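evsel__name() above lazily builds a human-readable name: it dispatches on attr.type to a per-type printer (raw/hw/sw/cache/breakpoint), renders into a stack buffer, and caches the strdup()ed result on the evsel. A much-simplified sketch of that dispatch-and-cache shape; the real printers map configs to names such as "cycles" and append modifiers, which is omitted here:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <linux/perf_event.h>

    struct toy_evsel {
        char *name;              /* cached, owned by the evsel */
        uint32_t type;           /* PERF_TYPE_* */
        uint64_t config;
    };

    static const char *toy_name(struct toy_evsel *e)
    {
        char bf[128];

        if (e->name)             /* already computed once */
            return e->name;

        switch (e->type) {
        case PERF_TYPE_RAW:
            snprintf(bf, sizeof(bf), "raw 0x%llx", (unsigned long long)e->config);
            break;
        case PERF_TYPE_HARDWARE:
            snprintf(bf, sizeof(bf), "hw config %llu", (unsigned long long)e->config);
            break;
        default:
            snprintf(bf, sizeof(bf), "unknown attr type %u", e->type);
            break;
        }

        e->name = strdup(bf);    /* cache; later calls return this */
        return e->name ? e->name : "unknown";
    }

    int main(void)
    {
        struct toy_evsel e = { .name = NULL, .type = PERF_TYPE_RAW, .config = 0x1234 };

        puts(toy_name(&e));
        puts(toy_name(&e));      /* second call hits the cache */
        free(e.name);
        return 0;
    }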
784 const char *evsel__metric_id(const struct evsel *evsel) in evsel__metric_id() argument
786 if (evsel->metric_id) in evsel__metric_id()
787 return evsel->metric_id; in evsel__metric_id()
789 if (evsel->core.attr.type == PERF_TYPE_SOFTWARE && evsel->tool_event) in evsel__metric_id()
795 const char *evsel__group_name(struct evsel *evsel) in evsel__group_name() argument
797 return evsel->group_name ?: "anon group"; in evsel__group_name()
810 int evsel__group_desc(struct evsel *evsel, char *buf, size_t size) in evsel__group_desc() argument
813 struct evsel *pos; in evsel__group_desc()
814 const char *group_name = evsel__group_name(evsel); in evsel__group_desc()
816 if (!evsel->forced_leader) in evsel__group_desc()
819 ret += scnprintf(buf + ret, size - ret, "%s", evsel__name(evsel)); in evsel__group_desc()
821 for_each_group_member(pos, evsel) in evsel__group_desc()
824 if (!evsel->forced_leader) in evsel__group_desc()
830 static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *opts, in __evsel__config_callchain() argument
833 bool function = evsel__is_function_event(evsel); in __evsel__config_callchain()
834 struct perf_event_attr *attr = &evsel->core.attr; in __evsel__config_callchain()
836 evsel__set_sample_bit(evsel, CALLCHAIN); in __evsel__config_callchain()
851 evsel__set_sample_bit(evsel, BRANCH_STACK); in __evsel__config_callchain()
865 evsel__set_sample_bit(evsel, REGS_USER); in __evsel__config_callchain()
866 evsel__set_sample_bit(evsel, STACK_USER); in __evsel__config_callchain()
889 void evsel__config_callchain(struct evsel *evsel, struct record_opts *opts, in evsel__config_callchain() argument
893 return __evsel__config_callchain(evsel, opts, param); in evsel__config_callchain()
896 static void evsel__reset_callgraph(struct evsel *evsel, struct callchain_param *param) in evsel__reset_callgraph() argument
898 struct perf_event_attr *attr = &evsel->core.attr; in evsel__reset_callgraph()
900 evsel__reset_sample_bit(evsel, CALLCHAIN); in evsel__reset_callgraph()
902 evsel__reset_sample_bit(evsel, BRANCH_STACK); in evsel__reset_callgraph()
908 evsel__reset_sample_bit(evsel, REGS_USER); in evsel__reset_callgraph()
909 evsel__reset_sample_bit(evsel, STACK_USER); in evsel__reset_callgraph()
913 static void evsel__apply_config_terms(struct evsel *evsel, in evsel__apply_config_terms() argument
917 struct list_head *config_terms = &evsel->config_terms; in evsel__apply_config_terms()
918 struct perf_event_attr *attr = &evsel->core.attr; in evsel__apply_config_terms()
933 evsel__reset_sample_bit(evsel, PERIOD); in evsel__apply_config_terms()
940 evsel__set_sample_bit(evsel, PERIOD); in evsel__apply_config_terms()
945 evsel__set_sample_bit(evsel, TIME); in evsel__apply_config_terms()
947 evsel__reset_sample_bit(evsel, TIME); in evsel__apply_config_terms()
954 evsel__set_sample_bit(evsel, BRANCH_STACK); in evsel__apply_config_terms()
958 evsel__reset_sample_bit(evsel, BRANCH_STACK); in evsel__apply_config_terms()
967 evsel->max_events = term->val.max_events; in evsel__apply_config_terms()
1018 evsel->name); in evsel__apply_config_terms()
1032 evsel__reset_callgraph(evsel, &callchain_param); in evsel__apply_config_terms()
1037 evsel__set_sample_bit(evsel, ADDR); in evsel__apply_config_terms()
1038 evsel__set_sample_bit(evsel, DATA_SRC); in evsel__apply_config_terms()
1039 evsel->core.attr.mmap_data = track; in evsel__apply_config_terms()
1041 evsel__config_callchain(evsel, opts, &param); in evsel__apply_config_terms()
1046 struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evsel_term_type type) in __evsel__get_config_term() argument
1050 list_for_each_entry(term, &evsel->config_terms, list) { in __evsel__get_config_term()
1058 void __weak arch_evsel__set_sample_weight(struct evsel *evsel) in arch_evsel__set_sample_weight() argument
1060 evsel__set_sample_bit(evsel, WEIGHT); in arch_evsel__set_sample_weight()
1095 void evsel__config(struct evsel *evsel, struct record_opts *opts, in evsel__config() argument
1098 struct evsel *leader = evsel__leader(evsel); in evsel__config()
1099 struct perf_event_attr *attr = &evsel->core.attr; in evsel__config()
1100 int track = evsel->tracking; in evsel__config()
1107 evsel__set_sample_bit(evsel, IP); in evsel__config()
1108 evsel__set_sample_bit(evsel, TID); in evsel__config()
1110 if (evsel->sample_read) { in evsel__config()
1111 evsel__set_sample_bit(evsel, READ); in evsel__config()
1117 evsel__set_sample_id(evsel, false); in evsel__config()
1146 evsel__set_sample_bit(evsel, PERIOD); in evsel__config()
1152 evsel->core.attr.read_format |= in evsel__config()
1160 evsel__set_sample_bit(evsel, ADDR); in evsel__config()
1169 if (evsel__is_function_event(evsel)) in evsel__config()
1170 evsel->core.attr.exclude_callchain_user = 1; in evsel__config()
1172 if (callchain && callchain->enabled && !evsel->no_aux_samples) in evsel__config()
1173 evsel__config_callchain(evsel, opts, callchain); in evsel__config()
1175 if (opts->sample_intr_regs && !evsel->no_aux_samples && in evsel__config()
1176 !evsel__is_dummy_event(evsel)) { in evsel__config()
1178 evsel__set_sample_bit(evsel, REGS_INTR); in evsel__config()
1181 if (opts->sample_user_regs && !evsel->no_aux_samples && in evsel__config()
1182 !evsel__is_dummy_event(evsel)) { in evsel__config()
1184 evsel__set_sample_bit(evsel, REGS_USER); in evsel__config()
1188 evsel__set_sample_bit(evsel, CPU); in evsel__config()
1197 evsel__set_sample_bit(evsel, TIME); in evsel__config()
1199 if (opts->raw_samples && !evsel->no_aux_samples) { in evsel__config()
1200 evsel__set_sample_bit(evsel, TIME); in evsel__config()
1201 evsel__set_sample_bit(evsel, RAW); in evsel__config()
1202 evsel__set_sample_bit(evsel, CPU); in evsel__config()
1206 evsel__set_sample_bit(evsel, DATA_SRC); in evsel__config()
1209 evsel__set_sample_bit(evsel, PHYS_ADDR); in evsel__config()
1215 if (opts->branch_stack && !evsel->no_aux_samples) { in evsel__config()
1216 evsel__set_sample_bit(evsel, BRANCH_STACK); in evsel__config()
1221 arch_evsel__set_sample_weight(evsel); in evsel__config()
1242 evsel__set_sample_bit(evsel, CGROUP); in evsel__config()
1246 evsel__set_sample_bit(evsel, DATA_PAGE_SIZE); in evsel__config()
1249 evsel__set_sample_bit(evsel, CODE_PAGE_SIZE); in evsel__config()
1255 evsel__set_sample_bit(evsel, TRANSACTION); in evsel__config()
1258 evsel->core.attr.read_format |= in evsel__config()
1269 if (evsel__is_group_leader(evsel)) in evsel__config()
1276 if (target__none(&opts->target) && evsel__is_group_leader(evsel) && in evsel__config()
1280 if (evsel->immediate) { in evsel__config()
1291 if (evsel->precise_max) in evsel__config()
1304 if (evsel->core.own_cpus || evsel->unit) in evsel__config()
1305 evsel->core.attr.read_format |= PERF_FORMAT_ID; in evsel__config()
1311 evsel__apply_config_terms(evsel, opts, track); in evsel__config()
1313 evsel->ignore_missing_thread = opts->ignore_missing_thread; in evsel__config()
1318 evsel__set_sample_bit(evsel, PERIOD); in evsel__config()
1320 evsel__reset_sample_bit(evsel, PERIOD); in evsel__config()
1331 if (evsel__is_dummy_event(evsel)) in evsel__config()
1332 evsel__reset_sample_bit(evsel, BRANCH_STACK); in evsel__config()
1335 int evsel__set_filter(struct evsel *evsel, const char *filter) in evsel__set_filter() argument
1340 free(evsel->filter); in evsel__set_filter()
1341 evsel->filter = new_filter; in evsel__set_filter()
1348 static int evsel__append_filter(struct evsel *evsel, const char *fmt, const char *filter) in evsel__append_filter() argument
1352 if (evsel->filter == NULL) in evsel__append_filter()
1353 return evsel__set_filter(evsel, filter); in evsel__append_filter()
1355 if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) { in evsel__append_filter()
1356 free(evsel->filter); in evsel__append_filter()
1357 evsel->filter = new_filter; in evsel__append_filter()
1364 int evsel__append_tp_filter(struct evsel *evsel, const char *filter) in evsel__append_tp_filter() argument
1366 return evsel__append_filter(evsel, "(%s) && (%s)", filter); in evsel__append_tp_filter()
1369 int evsel__append_addr_filter(struct evsel *evsel, const char *filter) in evsel__append_addr_filter() argument
1371 return evsel__append_filter(evsel, "%s,%s", filter); in evsel__append_addr_filter()
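evsel__set_filter()/evsel__append_filter() above store the first filter as-is and combine later ones with a caller-supplied format, e.g. "(%s) && (%s)" for tracepoint filters and "%s,%s" for address filters. A small self-contained sketch of that composition (toy_* names are stand-ins, not the perf code):

    #define _GNU_SOURCE              /* for asprintf() */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct toy_evsel { char *filter; };

    static int toy_set_filter(struct toy_evsel *e, const char *filter)
    {
        char *new_filter = strdup(filter);

        if (!new_filter)
            return -1;
        free(e->filter);
        e->filter = new_filter;
        return 0;
    }

    static int toy_append_filter(struct toy_evsel *e, const char *fmt, const char *filter)
    {
        char *new_filter;

        if (!e->filter)                      /* first filter: store as-is */
            return toy_set_filter(e, filter);

        if (asprintf(&new_filter, fmt, e->filter, filter) < 0)
            return -1;
        free(e->filter);
        e->filter = new_filter;
        return 0;
    }

    int main(void)
    {
        struct toy_evsel e = { .filter = NULL };

        toy_append_filter(&e, "(%s) && (%s)", "pid == 1");
        toy_append_filter(&e, "(%s) && (%s)", "comm != \"sshd\"");
        printf("%s\n", e.filter);    /* (pid == 1) && (comm != "sshd") */
        free(e.filter);
        return 0;
    }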
1375 int evsel__enable_cpu(struct evsel *evsel, int cpu) in evsel__enable_cpu() argument
1377 return perf_evsel__enable_cpu(&evsel->core, cpu); in evsel__enable_cpu()
1380 int evsel__enable(struct evsel *evsel) in evsel__enable() argument
1382 int err = perf_evsel__enable(&evsel->core); in evsel__enable()
1385 evsel->disabled = false; in evsel__enable()
1390 int evsel__disable_cpu(struct evsel *evsel, int cpu) in evsel__disable_cpu() argument
1392 return perf_evsel__disable_cpu(&evsel->core, cpu); in evsel__disable_cpu()
1395 int evsel__disable(struct evsel *evsel) in evsel__disable() argument
1397 int err = perf_evsel__disable(&evsel->core); in evsel__disable()
1405 evsel->disabled = true; in evsel__disable()
1422 static void evsel__free_config_terms(struct evsel *evsel) in evsel__free_config_terms() argument
1424 free_config_terms(&evsel->config_terms); in evsel__free_config_terms()
1427 void evsel__exit(struct evsel *evsel) in evsel__exit() argument
1429 assert(list_empty(&evsel->core.node)); in evsel__exit()
1430 assert(evsel->evlist == NULL); in evsel__exit()
1431 bpf_counter__destroy(evsel); in evsel__exit()
1432 evsel__free_counts(evsel); in evsel__exit()
1433 perf_evsel__free_fd(&evsel->core); in evsel__exit()
1434 perf_evsel__free_id(&evsel->core); in evsel__exit()
1435 evsel__free_config_terms(evsel); in evsel__exit()
1436 cgroup__put(evsel->cgrp); in evsel__exit()
1437 perf_cpu_map__put(evsel->core.cpus); in evsel__exit()
1438 perf_cpu_map__put(evsel->core.own_cpus); in evsel__exit()
1439 perf_thread_map__put(evsel->core.threads); in evsel__exit()
1440 zfree(&evsel->group_name); in evsel__exit()
1441 zfree(&evsel->name); in evsel__exit()
1442 zfree(&evsel->pmu_name); in evsel__exit()
1443 zfree(&evsel->unit); in evsel__exit()
1444 zfree(&evsel->metric_id); in evsel__exit()
1445 evsel__zero_per_pkg(evsel); in evsel__exit()
1446 hashmap__free(evsel->per_pkg_mask); in evsel__exit()
1447 evsel->per_pkg_mask = NULL; in evsel__exit()
1448 zfree(&evsel->metric_events); in evsel__exit()
1449 perf_evsel__object.fini(evsel); in evsel__exit()
1452 void evsel__delete(struct evsel *evsel) in evsel__delete() argument
1454 evsel__exit(evsel); in evsel__delete()
1455 free(evsel); in evsel__delete()
1458 void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread, in evsel__compute_deltas() argument
1463 if (!evsel->prev_raw_counts) in evsel__compute_deltas()
1467 tmp = evsel->prev_raw_counts->aggr; in evsel__compute_deltas()
1468 evsel->prev_raw_counts->aggr = *count; in evsel__compute_deltas()
1470 tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread); in evsel__compute_deltas()
1471 *perf_counts(evsel->prev_raw_counts, cpu, thread) = *count; in evsel__compute_deltas()
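evsel__compute_deltas() above is interval-mode bookkeeping: stash the previous raw reading, replace it with the new one, and turn the new reading into a val/ena/run delta. A minimal sketch with a simplified counts struct:

    #include <stdint.h>
    #include <stdio.h>

    struct toy_counts { uint64_t val, ena, run; };

    static void toy_compute_delta(struct toy_counts *prev, struct toy_counts *count)
    {
        struct toy_counts tmp = *prev;

        *prev = *count;          /* remember the raw reading for next time */
        count->val -= tmp.val;   /* and report only the increment */
        count->ena -= tmp.ena;
        count->run -= tmp.run;
    }

    int main(void)
    {
        struct toy_counts prev = { 0, 0, 0 };
        struct toy_counts reading = { 1000, 200, 200 };

        toy_compute_delta(&prev, &reading);      /* first interval: 1000 */
        printf("delta=%llu\n", (unsigned long long)reading.val);

        reading = (struct toy_counts){ 1800, 400, 400 };
        toy_compute_delta(&prev, &reading);      /* second interval: 800 */
        printf("delta=%llu\n", (unsigned long long)reading.val);
        return 0;
    }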
1498 static int evsel__read_one(struct evsel *evsel, int cpu, int thread) in evsel__read_one() argument
1500 struct perf_counts_values *count = perf_counts(evsel->counts, cpu, thread); in evsel__read_one()
1502 return perf_evsel__read(&evsel->core, cpu, thread, count); in evsel__read_one()
1505 static void evsel__set_count(struct evsel *counter, int cpu, int thread, u64 val, u64 ena, u64 run) in evsel__set_count()
1518 static int evsel__process_group_data(struct evsel *leader, int cpu, int thread, u64 *data) in evsel__process_group_data()
1540 struct evsel *counter; in evsel__process_group_data()
1552 static int evsel__read_group(struct evsel *leader, int cpu, int thread) in evsel__read_group()
1582 int evsel__read_counter(struct evsel *evsel, int cpu, int thread) in evsel__read_counter() argument
1584 u64 read_format = evsel->core.attr.read_format; in evsel__read_counter()
1587 return evsel__read_group(evsel, cpu, thread); in evsel__read_counter()
1589 return evsel__read_one(evsel, cpu, thread); in evsel__read_counter()
1592 int __evsel__read_on_cpu(struct evsel *evsel, int cpu, int thread, bool scale) in __evsel__read_on_cpu() argument
1597 if (FD(evsel, cpu, thread) < 0) in __evsel__read_on_cpu()
1600 if (evsel->counts == NULL && evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0) in __evsel__read_on_cpu()
1603 if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) <= 0) in __evsel__read_on_cpu()
1606 evsel__compute_deltas(evsel, cpu, thread, &count); in __evsel__read_on_cpu()
1608 *perf_counts(evsel->counts, cpu, thread) = count; in __evsel__read_on_cpu()
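__evsel__read_on_cpu() above reads one u64 from the event fd, or three (value, time_enabled, time_running) when scaling is requested, so a multiplexed count can be scaled by ena/run. A runnable sketch of that read format against the raw perf_event_open(2) syscall rather than the perf-tools wrappers; task-clock is used here only because it usually needs no special privileges:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
        struct perf_event_attr attr;
        unsigned long long count[3];   /* value, time_enabled, time_running */
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_TASK_CLOCK;
        attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
                           PERF_FORMAT_TOTAL_TIME_RUNNING;

        fd = perf_event_open(&attr, 0 /* this thread */, -1 /* any cpu */, -1, 0);
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }

        for (volatile int i = 0; i < 10000000; i++)
            ;   /* burn a little CPU so the counter moves */

        if (read(fd, count, sizeof(count)) != (ssize_t)sizeof(count)) {
            perror("read");
            close(fd);
            return 1;
        }

        printf("value=%llu enabled=%llu running=%llu\n", count[0], count[1], count[2]);
        if (count[2])
            printf("scaled=%llu\n", count[0] * count[1] / count[2]);
        close(fd);
        return 0;
    }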
1612 static int evsel__match_other_cpu(struct evsel *evsel, struct evsel *other, in evsel__match_other_cpu() argument
1617 cpuid = perf_cpu_map__cpu(evsel->core.cpus, cpu); in evsel__match_other_cpu()
1621 static int evsel__hybrid_group_cpu(struct evsel *evsel, int cpu) in evsel__hybrid_group_cpu() argument
1623 struct evsel *leader = evsel__leader(evsel); in evsel__hybrid_group_cpu()
1625 if ((evsel__is_hybrid(evsel) && !evsel__is_hybrid(leader)) || in evsel__hybrid_group_cpu()
1626 (!evsel__is_hybrid(evsel) && evsel__is_hybrid(leader))) { in evsel__hybrid_group_cpu()
1627 return evsel__match_other_cpu(evsel, leader, cpu); in evsel__hybrid_group_cpu()
1633 static int get_group_fd(struct evsel *evsel, int cpu, int thread) in get_group_fd() argument
1635 struct evsel *leader = evsel__leader(evsel); in get_group_fd()
1638 if (evsel__is_group_leader(evsel)) in get_group_fd()
1647 cpu = evsel__hybrid_group_cpu(evsel, cpu); in get_group_fd()
1657 static void evsel__remove_fd(struct evsel *pos, int nr_cpus, int nr_threads, int thread_idx) in evsel__remove_fd()
1664 static int update_fds(struct evsel *evsel, in update_fds() argument
1668 struct evsel *pos; in update_fds()
1673 evlist__for_each_entry(evsel->evlist, pos) { in update_fds()
1674 nr_cpus = pos != evsel ? nr_cpus : cpu_idx; in update_fds()
1682 if (pos == evsel) in update_fds()
1688 bool evsel__ignore_missing_thread(struct evsel *evsel, in evsel__ignore_missing_thread() argument
1695 if (!evsel->ignore_missing_thread) in evsel__ignore_missing_thread()
1699 if (evsel->core.system_wide) in evsel__ignore_missing_thread()
1714 if (update_fds(evsel, nr_cpus, cpu, threads->nr, thread)) in evsel__ignore_missing_thread()
1741 bool evsel__precise_ip_fallback(struct evsel *evsel) in evsel__precise_ip_fallback() argument
1744 if (!evsel->precise_max) in evsel__precise_ip_fallback()
1751 if (!evsel->core.attr.precise_ip) { in evsel__precise_ip_fallback()
1752 evsel->core.attr.precise_ip = evsel->precise_ip_original; in evsel__precise_ip_fallback()
1756 if (!evsel->precise_ip_original) in evsel__precise_ip_fallback()
1757 evsel->precise_ip_original = evsel->core.attr.precise_ip; in evsel__precise_ip_fallback()
1759 evsel->core.attr.precise_ip--; in evsel__precise_ip_fallback()
1760 pr_debug2_peo("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip); in evsel__precise_ip_fallback()
1761 display_attr(&evsel->core.attr); in evsel__precise_ip_fallback()
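evsel__precise_ip_fallback() above implements the "precise max" behaviour: remember the original precise_ip and keep lowering it after failed opens until the PMU accepts the event (the real helper is re-driven from the open loop in evsel__open_cpu() rather than looping locally). A hedged sketch of that retry idea using the raw syscall:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
        struct perf_event_attr attr;
        int fd = -1;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.exclude_kernel = 1;
        attr.precise_ip = 3;        /* ask for the most precise samples first */

        while (1) {
            fd = perf_event_open(&attr, 0, -1, -1, 0);
            if (fd >= 0 || attr.precise_ip == 0)
                break;
            attr.precise_ip--;      /* like the "decreasing precise_ip by one" path above */
            fprintf(stderr, "retrying with precise_ip=%d\n", (int)attr.precise_ip);
        }

        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }
        printf("opened cycles with precise_ip=%d\n", (int)attr.precise_ip);
        close(fd);
        return 0;
    }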
1768 static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus, in __evsel__prepare_open() argument
1773 if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) || in __evsel__prepare_open()
1774 (perf_missing_features.aux_output && evsel->core.attr.aux_output)) in __evsel__prepare_open()
1797 if (evsel->core.system_wide) in __evsel__prepare_open()
1802 if (evsel->core.fd == NULL && in __evsel__prepare_open()
1803 perf_evsel__alloc_fd(&evsel->core, cpus->nr, nthreads) < 0) in __evsel__prepare_open()
1806 evsel->open_flags = PERF_FLAG_FD_CLOEXEC; in __evsel__prepare_open()
1807 if (evsel->cgrp) in __evsel__prepare_open()
1808 evsel->open_flags |= PERF_FLAG_PID_CGROUP; in __evsel__prepare_open()
1813 static void evsel__disable_missing_features(struct evsel *evsel) in evsel__disable_missing_features() argument
1816 evsel__set_sample_bit(evsel, WEIGHT); in evsel__disable_missing_features()
1817 evsel__reset_sample_bit(evsel, WEIGHT_STRUCT); in evsel__disable_missing_features()
1820 evsel->core.attr.clockid = CLOCK_MONOTONIC; /* should always work */ in evsel__disable_missing_features()
1822 evsel->core.attr.use_clockid = 0; in evsel__disable_missing_features()
1823 evsel->core.attr.clockid = 0; in evsel__disable_missing_features()
1826 evsel->open_flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC; in evsel__disable_missing_features()
1828 evsel->core.attr.mmap2 = 0; in evsel__disable_missing_features()
1829 if (evsel->pmu && evsel->pmu->missing_features.exclude_guest) in evsel__disable_missing_features()
1830 evsel->core.attr.exclude_guest = evsel->core.attr.exclude_host = 0; in evsel__disable_missing_features()
1832 evsel->core.attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS | in evsel__disable_missing_features()
1834 if (perf_missing_features.group_read && evsel->core.attr.inherit) in evsel__disable_missing_features()
1835 evsel->core.attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID); in evsel__disable_missing_features()
1837 evsel->core.attr.ksymbol = 0; in evsel__disable_missing_features()
1839 evsel->core.attr.bpf_event = 0; in evsel__disable_missing_features()
1841 evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_HW_INDEX; in evsel__disable_missing_features()
1843 evsel->core.attr.sample_id_all = 0; in evsel__disable_missing_features()
1846 int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus, in evsel__prepare_open() argument
1851 err = __evsel__prepare_open(evsel, cpus, threads); in evsel__prepare_open()
1855 evsel__disable_missing_features(evsel); in evsel__prepare_open()
1860 bool evsel__detect_missing_features(struct evsel *evsel) in evsel__detect_missing_features() argument
1867 (evsel->core.attr.sample_type & PERF_SAMPLE_WEIGHT_STRUCT)) { in evsel__detect_missing_features()
1872 (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)) { in evsel__detect_missing_features()
1877 (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)) { in evsel__detect_missing_features()
1881 } else if (!perf_missing_features.cgroup && evsel->core.attr.cgroup) { in evsel__detect_missing_features()
1886 (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX)) { in evsel__detect_missing_features()
1890 } else if (!perf_missing_features.aux_output && evsel->core.attr.aux_output) { in evsel__detect_missing_features()
1894 } else if (!perf_missing_features.bpf && evsel->core.attr.bpf_event) { in evsel__detect_missing_features()
1898 } else if (!perf_missing_features.ksymbol && evsel->core.attr.ksymbol) { in evsel__detect_missing_features()
1902 } else if (!perf_missing_features.write_backward && evsel->core.attr.write_backward) { in evsel__detect_missing_features()
1906 } else if (!perf_missing_features.clockid_wrong && evsel->core.attr.use_clockid) { in evsel__detect_missing_features()
1910 } else if (!perf_missing_features.clockid && evsel->core.attr.use_clockid) { in evsel__detect_missing_features()
1914 } else if (!perf_missing_features.cloexec && (evsel->open_flags & PERF_FLAG_FD_CLOEXEC)) { in evsel__detect_missing_features()
1918 } else if (!perf_missing_features.mmap2 && evsel->core.attr.mmap2) { in evsel__detect_missing_features()
1922 } else if ((evsel->core.attr.exclude_guest || evsel->core.attr.exclude_host) && in evsel__detect_missing_features()
1923 (evsel->pmu == NULL || evsel->pmu->missing_features.exclude_guest)) { in evsel__detect_missing_features()
1924 if (evsel->pmu == NULL) { in evsel__detect_missing_features()
1925 evsel->pmu = evsel__find_pmu(evsel); in evsel__detect_missing_features()
1926 if (evsel->pmu) in evsel__detect_missing_features()
1927 evsel->pmu->missing_features.exclude_guest = true; in evsel__detect_missing_features()
1930 evsel->core.attr.exclude_host = false; in evsel__detect_missing_features()
1931 evsel->core.attr.exclude_guest = false; in evsel__detect_missing_features()
1935 if (evsel->exclude_GH) { in evsel__detect_missing_features()
1949 (evsel->core.attr.branch_sample_type & in evsel__detect_missing_features()
1956 evsel->core.attr.inherit && in evsel__detect_missing_features()
1957 (evsel->core.attr.read_format & PERF_FORMAT_GROUP) && in evsel__detect_missing_features()
1958 evsel__is_group_leader(evsel)) { in evsel__detect_missing_features()
1994 static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, in evsel__open_cpu() argument
2002 err = __evsel__prepare_open(evsel, cpus, threads); in evsel__open_cpu()
2012 if (evsel->core.system_wide) in evsel__open_cpu()
2017 if (evsel->cgrp) in evsel__open_cpu()
2018 pid = evsel->cgrp->fd; in evsel__open_cpu()
2021 evsel__disable_missing_features(evsel); in evsel__open_cpu()
2023 display_attr(&evsel->core.attr); in evsel__open_cpu()
2033 if (!evsel->cgrp && !evsel->core.system_wide) in evsel__open_cpu()
2036 group_fd = get_group_fd(evsel, cpu, thread); in evsel__open_cpu()
2041 pid, cpus->map[cpu], group_fd, evsel->open_flags); in evsel__open_cpu()
2043 fd = sys_perf_event_open(&evsel->core.attr, pid, cpus->map[cpu], in evsel__open_cpu()
2044 group_fd, evsel->open_flags); in evsel__open_cpu()
2046 FD(evsel, cpu, thread) = fd; in evsel__open_cpu()
2056 bpf_counter__install_pe(evsel, cpu, fd); in evsel__open_cpu()
2059 test_attr__open(&evsel->core.attr, pid, cpus->map[cpu], in evsel__open_cpu()
2060 fd, group_fd, evsel->open_flags); in evsel__open_cpu()
2065 if (evsel->bpf_fd >= 0) { in evsel__open_cpu()
2067 int bpf_fd = evsel->bpf_fd; in evsel__open_cpu()
2097 if (evsel__precise_ip_fallback(evsel)) in evsel__open_cpu()
2100 if (evsel__ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) { in evsel__open_cpu()
2118 if (evsel__detect_missing_features(evsel)) in evsel__open_cpu()
2127 if (FD(evsel, cpu, thread) >= 0) in evsel__open_cpu()
2128 close(FD(evsel, cpu, thread)); in evsel__open_cpu()
2129 FD(evsel, cpu, thread) = -1; in evsel__open_cpu()
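get_group_fd() and evsel__open_cpu() above show the group-open protocol: the leader is opened with group_fd = -1, every sibling passes the leader's fd, and PERF_FORMAT_GROUP lets one read() on the leader return all members. A runnable sketch with two software events, using raw syscalls rather than the perf-tools wrappers:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    static int open_one(unsigned int type, unsigned long long config, int group_fd)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = type;
        attr.config = config;
        attr.read_format = PERF_FORMAT_GROUP;   /* leader read returns the whole group */
        return perf_event_open(&attr, 0 /* this thread */, -1, group_fd, 0);
    }

    int main(void)
    {
        /* layout of a PERF_FORMAT_GROUP read: nr, then one value per member */
        unsigned long long buf[1 + 2];
        int leader, member;

        leader = open_one(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_TASK_CLOCK, -1);
        if (leader < 0) {
            perror("open leader");
            return 1;
        }
        member = open_one(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CONTEXT_SWITCHES, leader);
        if (member < 0) {
            perror("open member");
            close(leader);
            return 1;
        }

        for (volatile int i = 0; i < 10000000; i++)
            ;   /* do some work */

        if (read(leader, buf, sizeof(buf)) > 0)
            printf("nr=%llu task-clock=%llu context-switches=%llu\n",
                   buf[0], buf[1], buf[2]);
        close(member);
        close(leader);
        return 0;
    }

perf additionally remaps the cpu index for hybrid groups (evsel__hybrid_group_cpu() above) before asking the leader for its fd; the sketch skips that.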
2137 int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus, in evsel__open() argument
2140 return evsel__open_cpu(evsel, cpus, threads, 0, cpus ? cpus->nr : 1); in evsel__open()
2143 void evsel__close(struct evsel *evsel) in evsel__close() argument
2145 perf_evsel__close(&evsel->core); in evsel__close()
2146 perf_evsel__free_id(&evsel->core); in evsel__close()
2149 int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu) in evsel__open_per_cpu() argument
2152 return evsel__open_cpu(evsel, cpus, NULL, 0, in evsel__open_per_cpu()
2155 return evsel__open_cpu(evsel, cpus, NULL, cpu, cpu + 1); in evsel__open_per_cpu()
2158 int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads) in evsel__open_per_thread() argument
2160 return evsel__open(evsel, NULL, threads); in evsel__open_per_thread()
2163 static int perf_evsel__parse_id_sample(const struct evsel *evsel, in perf_evsel__parse_id_sample() argument
2167 u64 type = evsel->core.attr.sample_type; in perf_evsel__parse_id_sample()
2169 bool swapped = evsel->needs_swap; in perf_evsel__parse_id_sample()
2308 int evsel__parse_sample(struct evsel *evsel, union perf_event *event, in evsel__parse_sample() argument
2311 u64 type = evsel->core.attr.sample_type; in evsel__parse_sample()
2312 bool swapped = evsel->needs_swap; in evsel__parse_sample()
2327 data->period = evsel->core.attr.sample_period; in evsel__parse_sample()
2334 if (!evsel->core.attr.sample_id_all) in evsel__parse_sample()
2336 return perf_evsel__parse_id_sample(evsel, event, data); in evsel__parse_sample()
2341 if (perf_event__check_size(event, evsel->sample_size)) in evsel__parse_sample()
2407 u64 read_format = evsel->core.attr.read_format; in evsel__parse_sample()
2505 if (evsel__has_branch_hw_idx(evsel)) { in evsel__parse_sample()
2542 u64 mask = evsel->core.attr.sample_regs_user; in evsel__parse_sample()
2598 u64 mask = evsel->core.attr.sample_regs_intr; in evsel__parse_sample()
2648 int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event, in evsel__parse_sample_timestamp() argument
2651 u64 type = evsel->core.attr.sample_type; in evsel__parse_sample_timestamp()
2662 if (!evsel->core.attr.sample_id_all) in evsel__parse_sample_timestamp()
2664 if (perf_evsel__parse_id_sample(evsel, event, &data)) in evsel__parse_sample_timestamp()
2673 if (perf_event__check_size(event, evsel->sample_size)) in evsel__parse_sample_timestamp()
2691 struct tep_format_field *evsel__field(struct evsel *evsel, const char *name) in evsel__field() argument
2693 return tep_find_field(evsel->tp_format, name); in evsel__field()
2696 void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name) in evsel__rawptr() argument
2698 struct tep_format_field *field = evsel__field(evsel, name); in evsel__rawptr()
2753 u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *name) in evsel__intval() argument
2755 struct tep_format_field *field = evsel__field(evsel, name); in evsel__intval()
2760 return field ? format_field__intval(field, sample, evsel->needs_swap) : 0; in evsel__intval()
2763 bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize) in evsel__fallback() argument
2768 evsel->core.attr.type == PERF_TYPE_HARDWARE && in evsel__fallback()
2769 evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) { in evsel__fallback()
2781 evsel->core.attr.type = PERF_TYPE_SOFTWARE; in evsel__fallback()
2782 evsel->core.attr.config = PERF_COUNT_SW_CPU_CLOCK; in evsel__fallback()
2784 zfree(&evsel->name); in evsel__fallback()
2786 } else if (err == EACCES && !evsel->core.attr.exclude_kernel && in evsel__fallback()
2788 const char *name = evsel__name(evsel); in evsel__fallback()
2793 if (evsel->core.attr.exclude_user) in evsel__fallback()
2798 (strchr(name, ':') && !evsel->is_libpfm_event)) in evsel__fallback()
2804 if (evsel->name) in evsel__fallback()
2805 free(evsel->name); in evsel__fallback()
2806 evsel->name = new_name; in evsel__fallback()
2810 evsel->core.attr.exclude_kernel = 1; in evsel__fallback()
2811 evsel->core.attr.exclude_hv = 1; in evsel__fallback()
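evsel__fallback() above downgrades a hardware "cycles" event to the software cpu-clock event when no hardware PMU is usable (typical in guests and containers) and clears the cached name so it is regenerated. A minimal sketch of the same idea at the syscall level; the exact errno set perf checks is broader than shown here, so treat the condition as illustrative:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;

        fd = perf_event_open(&attr, 0, -1, -1, 0);
        if (fd < 0 && (errno == ENOENT || errno == ENXIO || errno == ENODEV)) {
            /* "no such counter/PMU" errors: switch to cpu-clock, as perf does */
            attr.type = PERF_TYPE_SOFTWARE;
            attr.config = PERF_COUNT_SW_CPU_CLOCK;
            fd = perf_event_open(&attr, 0, -1, -1, 0);
            if (fd >= 0)
                fprintf(stderr, "fell back to the cpu-clock software event\n");
        }
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }
        close(fd);
        return 0;
    }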
2855 int evsel__open_strerror(struct evsel *evsel, struct target *target, in evsel__open_strerror() argument
2878 "No permission to enable %s event.\n\n", evsel__name(evsel)); in evsel__open_strerror()
2896 return scnprintf(msg, size, "The %s event is not supported.", evsel__name(evsel)); in evsel__open_strerror()
2904 if (evsel__has_callchain(evsel) && in evsel__open_strerror()
2917 if (evsel->core.attr.aux_output) in evsel__open_strerror()
2920 evsel__name(evsel)); in evsel__open_strerror()
2921 if (evsel->core.attr.sample_period != 0) in evsel__open_strerror()
2924 evsel__name(evsel)); in evsel__open_strerror()
2925 if (evsel->core.attr.precise_ip) in evsel__open_strerror()
2929 if (evsel->core.attr.type == PERF_TYPE_HARDWARE) in evsel__open_strerror()
2941 …if (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE && perf_missing_features.code_page_s… in evsel__open_strerror()
2943 …if (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE && perf_missing_features.data_page_s… in evsel__open_strerror()
2945 if (evsel->core.attr.write_backward && perf_missing_features.write_backward) in evsel__open_strerror()
2964 err, str_error_r(err, sbuf, sizeof(sbuf)), evsel__name(evsel)); in evsel__open_strerror()
2967 struct perf_env *evsel__env(struct evsel *evsel) in evsel__env() argument
2969 if (evsel && evsel->evlist) in evsel__env()
2970 return evsel->evlist->env; in evsel__env()
2974 static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist) in store_evsel_ids() argument
2978 for (cpu = 0; cpu < xyarray__max_x(evsel->core.fd); cpu++) { in store_evsel_ids()
2979 for (thread = 0; thread < xyarray__max_y(evsel->core.fd); in store_evsel_ids()
2981 int fd = FD(evsel, cpu, thread); in store_evsel_ids()
2983 if (perf_evlist__id_add_fd(&evlist->core, &evsel->core, in store_evsel_ids()
2992 int evsel__store_ids(struct evsel *evsel, struct evlist *evlist) in evsel__store_ids() argument
2994 struct perf_cpu_map *cpus = evsel->core.cpus; in evsel__store_ids()
2995 struct perf_thread_map *threads = evsel->core.threads; in evsel__store_ids()
2997 if (perf_evsel__alloc_id(&evsel->core, cpus->nr, threads->nr)) in evsel__store_ids()
3000 return store_evsel_ids(evsel, evlist); in evsel__store_ids()
3003 void evsel__zero_per_pkg(struct evsel *evsel) in evsel__zero_per_pkg() argument
3008 if (evsel->per_pkg_mask) { in evsel__zero_per_pkg()
3009 hashmap__for_each_entry(evsel->per_pkg_mask, cur, bkt) in evsel__zero_per_pkg()
3012 hashmap__clear(evsel->per_pkg_mask); in evsel__zero_per_pkg()
3016 bool evsel__is_hybrid(struct evsel *evsel) in evsel__is_hybrid() argument
3018 return evsel->pmu_name && perf_pmu__is_hybrid(evsel->pmu_name); in evsel__is_hybrid()
3021 struct evsel *evsel__leader(struct evsel *evsel) in evsel__leader() argument
3023 return container_of(evsel->core.leader, struct evsel, core); in evsel__leader()
3026 bool evsel__has_leader(struct evsel *evsel, struct evsel *leader) in evsel__has_leader() argument
3028 return evsel->core.leader == &leader->core; in evsel__has_leader()
3031 bool evsel__is_leader(struct evsel *evsel) in evsel__is_leader() argument
3033 return evsel__has_leader(evsel, evsel); in evsel__is_leader()
3036 void evsel__set_leader(struct evsel *evsel, struct evsel *leader) in evsel__set_leader() argument
3038 evsel->core.leader = &leader->core; in evsel__set_leader()
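evsel__leader()/evsel__set_leader() above rely on struct evsel embedding its libperf perf_evsel as the "core" member: leadership is stored as a pointer to the embedded core, and container_of() recovers the tool-side evsel. A standalone sketch of that embedding pattern with toy stand-in types:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct toy_core  { struct toy_core *leader; };
    struct toy_evsel { const char *name; struct toy_core core; };

    static struct toy_evsel *toy_leader(struct toy_evsel *e)
    {
        return container_of(e->core.leader, struct toy_evsel, core);
    }

    static void toy_set_leader(struct toy_evsel *e, struct toy_evsel *leader)
    {
        e->core.leader = &leader->core;
    }

    int main(void)
    {
        struct toy_evsel a = { .name = "cycles" };
        struct toy_evsel b = { .name = "instructions" };

        toy_set_leader(&a, &a);     /* a leads itself ... */
        toy_set_leader(&b, &a);     /* ... and b joins a's group */

        printf("leader of %s is %s\n", b.name, toy_leader(&b)->name);
        printf("%s is its own leader: %s\n", a.name,
               toy_leader(&a) == &a ? "yes" : "no");
        return 0;
    }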
3041 int evsel__source_count(const struct evsel *evsel) in evsel__source_count() argument
3043 struct evsel *pos; in evsel__source_count()
3046 evlist__for_each_entry(evsel->evlist, pos) { in evsel__source_count()
3047 if (pos->metric_leader == evsel) in evsel__source_count()