Lines Matching refs:core (in tools/perf/util/evlist.c)

73 #define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
74 #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
79 perf_evlist__init(&evlist->core); in evlist__init()
80 perf_evlist__set_maps(&evlist->core, cpus, threads); in evlist__init()
116 if (evlist->core.nr_entries > 1) { in evlist__new_default()
168 list_del_init(&pos->core.node); in evlist__purge()
173 evlist->core.nr_entries = 0; in evlist__purge()
182 perf_evlist__exit(&evlist->core); in evlist__exit()
200 perf_evlist__add(&evlist->core, &entry->core); in evlist__add()
202 entry->tracking = !entry->core.idx; in evlist__add()
204 if (evlist->core.nr_entries == 1) in evlist__add()
211 perf_evlist__remove(&evlist->core, &evsel->core); in evlist__remove()
220 list_del_init(&evsel->core.node); in evlist__splice_list_tail()
228 list_del_init(&evsel->core.node); in evlist__splice_list_tail()
260 perf_evlist__set_leader(&evlist->core); in evlist__set_leader()
274 return evsel__new_idx(&attr, evlist->core.nr_entries); in evlist__dummy_event()
295 evsel->core.attr.exclude_kernel = 1; in evlist__add_aux_dummy()
296 evsel->core.attr.exclude_guest = 1; in evlist__add_aux_dummy()
297 evsel->core.attr.exclude_hv = 1; in evlist__add_aux_dummy()
298 evsel->core.system_wide = system_wide; in evlist__add_aux_dummy()
318 evsel->core.system_wide = system_wide; in evlist__add_sched_switch()
331 if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) && in evlist__find_tracepoint_by_name()
360 .evlist_cpu_map_nr = perf_cpu_map__nr(evlist->core.all_cpus), in evlist__cpu_begin()
371 itr.cpu = perf_cpu_map__cpu(evlist->core.all_cpus, 0); in evlist__cpu_begin()
373 itr.cpu_map_idx = perf_cpu_map__idx(itr.evsel->core.cpus, itr.cpu); in evlist__cpu_begin()
390 perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus, in evlist_cpu_iterator__next()
399 perf_cpu_map__cpu(evlist_cpu_itr->container->core.all_cpus, in evlist_cpu_iterator__next()
404 perf_cpu_map__idx(evlist_cpu_itr->evsel->core.cpus, in evlist_cpu_iterator__next()
434 if (!evsel__is_group_leader(pos) || !pos->core.fd) in evlist__is_enabled()
451 if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) { in __evlist__disable()
463 if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd) in __evlist__disable()
481 if (!evsel__is_group_leader(pos) || !pos->core.fd) in __evlist__disable()
520 if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) { in __evlist__enable()
530 if (!evsel__is_group_leader(pos) || !pos->core.fd) in __evlist__enable()
540 if (!evsel__is_group_leader(pos) || !pos->core.fd) in __evlist__enable()
577 return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default); in evlist__add_pollfd()
582 return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask); in evlist__filter_pollfd()
588 return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, in evlist__add_wakeup_eventfd()
596 return perf_evlist__poll(&evlist->core, timeout); in evlist__poll()
606 head = &evlist->core.heads[hash]; in evlist__id2sid()
619 if (evlist->core.nr_entries == 1 || !id) in evlist__id2evsel()
624 return container_of(sid->evsel, struct evsel, core); in evlist__id2evsel()
641 return container_of(sid->evsel, struct evsel, core); in evlist__id2evsel_strict()
674 if (evlist->core.nr_entries == 1) in evlist__event2evsel()
677 if (!first->core.attr.sample_id_all && in evlist__event2evsel()
689 head = &evlist->core.heads[hash]; in evlist__event2evsel()
693 return container_of(sid->evsel, struct evsel, core); in evlist__event2evsel()
705 for (i = 0; i < evlist->core.nr_mmaps; i++) { in evlist__set_paused()
706 int fd = evlist->overwrite_mmap[i].core.fd; in evlist__set_paused()
733 for (i = 0; i < evlist->core.nr_mmaps; i++) in evlist__munmap_nofree()
734 perf_mmap__munmap(&evlist->mmap[i].core); in evlist__munmap_nofree()
737 for (i = 0; i < evlist->core.nr_mmaps; i++) in evlist__munmap_nofree()
738 perf_mmap__munmap(&evlist->overwrite_mmap[i].core); in evlist__munmap_nofree()
750 struct mmap *m = container_of(map, struct mmap, core); in perf_mmap__unmap_cb()
761 map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap)); in evlist__alloc_mmap()
765 for (i = 0; i < evlist->core.nr_mmaps; i++) { in evlist__alloc_mmap()
766 struct perf_mmap *prev = i ? &map[i - 1].core : NULL; in evlist__alloc_mmap()
777 perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb); in evlist__alloc_mmap()
789 struct evlist *evlist = container_of(_evlist, struct evlist, core); in perf_evlist__mmap_cb_idx()
790 struct mmap_params *mp = container_of(_mp, struct mmap_params, core); in perf_evlist__mmap_cb_idx()
791 struct evsel *evsel = container_of(_evsel, struct evsel, core); in perf_evlist__mmap_cb_idx()
799 struct evlist *evlist = container_of(_evlist, struct evlist, core); in perf_evlist__mmap_cb_get()
818 return &maps[idx].core; in perf_evlist__mmap_cb_get()
825 struct mmap *map = container_of(_map, struct mmap, core); in perf_evlist__mmap_cb_mmap()
826 struct mmap_params *mp = container_of(_mp, struct mmap_params, core); in perf_evlist__mmap_cb_mmap()
974 evlist->core.mmap_len = evlist__mmap_size(pages); in evlist__mmap_ex()
975 pr_debug("mmap size %zuB\n", evlist->core.mmap_len); in evlist__mmap_ex()
977 auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len, in evlist__mmap_ex()
980 return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core); in evlist__mmap_ex()
1025 evlist->core.has_user_cpus = !!target->cpu_list; in evlist__create_maps()
1027 perf_evlist__set_maps(&evlist->core, cpus, threads); in evlist__create_maps()
1052 err = perf_evsel__apply_filter(&evsel->core, evsel->filter); in evlist__apply_filters()
1083 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) in evlist__set_tp_filter()
1103 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) in evlist__append_tp_filter()
1167 if (evlist->core.nr_entries == 1) in evlist__valid_sample_type()
1190 evlist->combined_sample_type |= evsel->core.attr.sample_type; in __evlist__combined_sample_type()
1207 branch_type |= evsel->core.attr.branch_sample_type; in evlist__combined_branch_type()
1219 if ((pos->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) && in evlist__find_dup_event_from_prev()
1263 if (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) { in evlist__update_br_cntr()
1280 u64 read_format = first->core.attr.read_format; in evlist__valid_read_format()
1281 u64 sample_type = first->core.attr.sample_type; in evlist__valid_read_format()
1284 if (read_format != pos->core.attr.read_format) { in evlist__valid_read_format()
1286 read_format, (u64)pos->core.attr.read_format); in evlist__valid_read_format()
1303 return first->core.attr.sample_id_all ? evsel__id_hdr_size(first) : 0; in evlist__id_hdr_size()
1311 if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all) in evlist__valid_sample_id_all()
1321 return first->core.attr.sample_id_all; in evlist__sample_id_all()
1339 if (!evlist->core.user_requested_cpus || in evlist__close()
1340 cpu_map__is_dummy(evlist->core.user_requested_cpus)) { in evlist__close()
1350 perf_evsel__close_cpu(&evlist_cpu_itr.evsel->core, in evlist__close()
1356 perf_evsel__free_fd(&evsel->core); in evlist__close()
1357 perf_evsel__free_id(&evsel->core); in evlist__close()
1359 perf_evlist__reset_id_hash(&evlist->core); in evlist__close()
1386 perf_evlist__set_maps(&evlist->core, cpus, threads); in evlist__create_syswide_maps()
1401 if (evlist->core.threads == NULL && evlist->core.user_requested_cpus == NULL) { in evlist__open()
1410 err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads); in evlist__open()
1512 if (evlist->core.threads == NULL) { in evlist__prepare_workload()
1517 perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid); in evlist__prepare_workload()
1639 if (first->core.attr.sample_freq < (u64)max_freq) in evlist__strerror_open()
1646 emsg, max_freq, first->core.attr.sample_freq); in evlist__strerror_open()
1661 int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0; in evlist__strerror_mmap()
1699 list_move_tail(&evsel->core.node, &move); in evlist__to_front()
1702 list_splice(&move, &evlist->core.entries); in evlist__to_front()
1744 perf_evlist__go_system_wide(&evlist->core, &evsel->core); in evlist__findnew_tracking_event()
1826 if (!evsel->core.attr.exclude_kernel) in evlist__exclude_kernel()
1856 leader->name, leader->core.nr_members); in evlist__reset_weak_group()
1867 perf_evsel__close(&c2->core); in evlist__reset_weak_group()
1883 if (leader->core.nr_members == 1) in evlist__reset_weak_group()
1884 leader->core.nr_members = 0; in evlist__reset_weak_group()
1980 evlist->ctl_fd.pos = perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, in evlist__initialize_ctlfd()
2002 struct pollfd *entries = evlist->core.pollfd.entries; in evlist__finalize_ctlfd()
2190 struct pollfd *entries = evlist->core.pollfd.entries; in evlist__ctlfd_process()
2361 eet->pollfd_pos = perf_evlist__add_pollfd(&evlist->core, eet->timerfd, NULL, POLLIN, flags); in evlist__parse_event_enable_time()
2418 entries = eet->evlist->core.pollfd.entries; in event_enable_timer__process()
2467 if (evsel->core.idx == idx) in evlist__find_evsel()
2484 if (leader != new_leader && leader && leader->core.nr_members > 1) in evlist__format_evsels()
2494 if (leader != new_leader && new_leader->core.nr_members > 1) in evlist__format_evsels()
2501 if (leader && leader->core.nr_members > 1) in evlist__format_evsels()
2524 pos->core.nr_members = 0; in evlist__check_mem_load_aux()
2629 if (!evsel->core.attr.exclude_kernel) in evlist__needs_bpf_sb_event()
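
Nearly every hit above reflects the same embedded-struct convention: the tool-side types (struct evsel, struct evlist, struct mmap) embed their generic libperf counterparts (struct perf_evsel, struct perf_evlist, struct perf_mmap) as a member named core, and container_of() recovers the outer tool-side struct from a core pointer handed back through libperf callbacks. Below is a minimal stand-alone sketch of that pattern, using simplified stand-in types rather than the real libperf definitions; the demo main() is purely illustrative:

    /* Hypothetical, self-contained demo of the ->core embedding pattern. */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct perf_evsel {             /* libperf's generic view (simplified) */
            int idx;
    };

    struct evsel {                  /* the perf tool wraps it ...          */
            struct perf_evsel core; /* ... by embedding it as ->core       */
            const char *name;
    };

    int main(void)
    {
            struct evsel ev = { .core = { .idx = 0 }, .name = "cycles" };
            struct perf_evsel *c = &ev.core;

            /* Given only the core pointer, recover the outer struct. */
            struct evsel *back = container_of(c, struct evsel, core);

            printf("%s\n", back->name); /* prints "cycles" */
            return 0;
    }

This layout is why a reference search for core touches nearly every function in the file: generic operations are delegated to libperf via &evlist->core / &evsel->core, while callbacks coming back from libperf (see the container_of() hits in perf_evlist__mmap_cb_idx() and friends) convert the core pointer back into the tool-side wrapper.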