Lines Matching refs:evsel
23 void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr, in perf_evsel__init() argument
26 INIT_LIST_HEAD(&evsel->node); in perf_evsel__init()
27 INIT_LIST_HEAD(&evsel->per_stream_periods); in perf_evsel__init()
28 evsel->attr = *attr; in perf_evsel__init()
29 evsel->idx = idx; in perf_evsel__init()
30 evsel->leader = evsel; in perf_evsel__init()
35 struct perf_evsel *evsel = zalloc(sizeof(*evsel)); in perf_evsel__new() local
37 if (evsel != NULL) in perf_evsel__new()
38 perf_evsel__init(evsel, attr, 0); in perf_evsel__new()
40 return evsel; in perf_evsel__new()
43 void perf_evsel__exit(struct perf_evsel *evsel) in perf_evsel__exit() argument
45 	assert(evsel->fd == NULL); /* If not, fds were not closed. */ in perf_evsel__exit()
46 	assert(evsel->mmap == NULL); /* If not, munmap wasn't called. */ in perf_evsel__exit()
47 	assert(evsel->sample_id == NULL); /* If not, free_id wasn't called. */ in perf_evsel__exit()
48 perf_cpu_map__put(evsel->cpus); in perf_evsel__exit()
49 perf_cpu_map__put(evsel->pmu_cpus); in perf_evsel__exit()
50 perf_thread_map__put(evsel->threads); in perf_evsel__exit()
53 void perf_evsel__delete(struct perf_evsel *evsel) in perf_evsel__delete() argument
55 perf_evsel__exit(evsel); in perf_evsel__delete()
56 free(evsel); in perf_evsel__delete()
65 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) in perf_evsel__alloc_fd() argument
67 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int)); in perf_evsel__alloc_fd()
69 if (evsel->fd) { in perf_evsel__alloc_fd()
74 int *fd = FD(evsel, idx, thread); in perf_evsel__alloc_fd()
82 return evsel->fd != NULL ? 0 : -ENOMEM; in perf_evsel__alloc_fd()
85 static int perf_evsel__alloc_mmap(struct perf_evsel *evsel, int ncpus, int nthreads) in perf_evsel__alloc_mmap() argument
87 evsel->mmap = xyarray__new(ncpus, nthreads, sizeof(struct perf_mmap)); in perf_evsel__alloc_mmap()
89 return evsel->mmap != NULL ? 0 : -ENOMEM; in perf_evsel__alloc_mmap()
100 static int get_group_fd(struct perf_evsel *evsel, int cpu_map_idx, int thread, int *group_fd) in get_group_fd() argument
102 struct perf_evsel *leader = evsel->leader; in get_group_fd()
105 if (evsel == leader) { in get_group_fd()
126 int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, in perf_evsel__open() argument
156 if (evsel->fd == NULL && in perf_evsel__open()
157 perf_evsel__alloc_fd(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0) in perf_evsel__open()
164 evsel_fd = FD(evsel, idx, thread); in perf_evsel__open()
170 err = get_group_fd(evsel, idx, thread, &group_fd); in perf_evsel__open()
174 fd = sys_perf_event_open(&evsel->attr, in perf_evsel__open()
188 perf_evsel__close(evsel); in perf_evsel__open()
193 static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu_map_idx) in perf_evsel__close_fd_cpu() argument
197 for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) { in perf_evsel__close_fd_cpu()
198 int *fd = FD(evsel, cpu_map_idx, thread); in perf_evsel__close_fd_cpu()
207 void perf_evsel__close_fd(struct perf_evsel *evsel) in perf_evsel__close_fd() argument
209 for (int idx = 0; idx < xyarray__max_x(evsel->fd); idx++) in perf_evsel__close_fd()
210 perf_evsel__close_fd_cpu(evsel, idx); in perf_evsel__close_fd()
213 void perf_evsel__free_fd(struct perf_evsel *evsel) in perf_evsel__free_fd() argument
215 xyarray__delete(evsel->fd); in perf_evsel__free_fd()
216 evsel->fd = NULL; in perf_evsel__free_fd()
219 void perf_evsel__close(struct perf_evsel *evsel) in perf_evsel__close() argument
221 if (evsel->fd == NULL) in perf_evsel__close()
224 perf_evsel__close_fd(evsel); in perf_evsel__close()
225 perf_evsel__free_fd(evsel); in perf_evsel__close()
228 void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu_map_idx) in perf_evsel__close_cpu() argument
230 if (evsel->fd == NULL) in perf_evsel__close_cpu()
233 perf_evsel__close_fd_cpu(evsel, cpu_map_idx); in perf_evsel__close_cpu()
236 void perf_evsel__munmap(struct perf_evsel *evsel) in perf_evsel__munmap() argument
240 if (evsel->fd == NULL || evsel->mmap == NULL) in perf_evsel__munmap()
243 for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) { in perf_evsel__munmap()
244 for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { in perf_evsel__munmap()
245 int *fd = FD(evsel, idx, thread); in perf_evsel__munmap()
250 perf_mmap__munmap(MMAP(evsel, idx, thread)); in perf_evsel__munmap()
254 xyarray__delete(evsel->mmap); in perf_evsel__munmap()
255 evsel->mmap = NULL; in perf_evsel__munmap()
258 int perf_evsel__mmap(struct perf_evsel *evsel, int pages) in perf_evsel__mmap() argument
266 if (evsel->fd == NULL || evsel->mmap) in perf_evsel__mmap()
269 if (perf_evsel__alloc_mmap(evsel, xyarray__max_x(evsel->fd), xyarray__max_y(evsel->fd)) < 0) in perf_evsel__mmap()
272 for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) { in perf_evsel__mmap()
273 for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { in perf_evsel__mmap()
274 int *fd = FD(evsel, idx, thread); in perf_evsel__mmap()
276 struct perf_cpu cpu = perf_cpu_map__cpu(evsel->cpus, idx); in perf_evsel__mmap()
281 map = MMAP(evsel, idx, thread); in perf_evsel__mmap()
286 perf_evsel__munmap(evsel); in perf_evsel__mmap()
295 void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu_map_idx, int thread) in perf_evsel__mmap_base() argument
297 int *fd = FD(evsel, cpu_map_idx, thread); in perf_evsel__mmap_base()
299 if (fd == NULL || *fd < 0 || MMAP(evsel, cpu_map_idx, thread) == NULL) in perf_evsel__mmap_base()
302 return MMAP(evsel, cpu_map_idx, thread)->base; in perf_evsel__mmap_base()
305 int perf_evsel__read_size(struct perf_evsel *evsel) in perf_evsel__read_size() argument
307 u64 read_format = evsel->attr.read_format; in perf_evsel__read_size()
325 nr = evsel->nr_members; in perf_evsel__read_size()
334 static int perf_evsel__read_group(struct perf_evsel *evsel, int cpu_map_idx, in perf_evsel__read_group() argument
337 size_t size = perf_evsel__read_size(evsel); in perf_evsel__read_group()
338 int *fd = FD(evsel, cpu_map_idx, thread); in perf_evsel__read_group()
339 u64 read_format = evsel->attr.read_format; in perf_evsel__read_group()
379 static void perf_evsel__adjust_values(struct perf_evsel *evsel, u64 *buf, in perf_evsel__adjust_values() argument
382 u64 read_format = evsel->attr.read_format; in perf_evsel__adjust_values()
400 int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread, in perf_evsel__read() argument
403 size_t size = perf_evsel__read_size(evsel); in perf_evsel__read()
404 int *fd = FD(evsel, cpu_map_idx, thread); in perf_evsel__read()
405 u64 read_format = evsel->attr.read_format; in perf_evsel__read()
414 return perf_evsel__read_group(evsel, cpu_map_idx, thread, count); in perf_evsel__read()
416 if (MMAP(evsel, cpu_map_idx, thread) && in perf_evsel__read()
418 !perf_mmap__read_self(MMAP(evsel, cpu_map_idx, thread), count)) in perf_evsel__read()
424 perf_evsel__adjust_values(evsel, buf.values, count); in perf_evsel__read()
428 static int perf_evsel__ioctl(struct perf_evsel *evsel, int ioc, void *arg, in perf_evsel__ioctl() argument
431 int *fd = FD(evsel, cpu_map_idx, thread); in perf_evsel__ioctl()
439 static int perf_evsel__run_ioctl(struct perf_evsel *evsel, in perf_evsel__run_ioctl() argument
445 for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) { in perf_evsel__run_ioctl()
446 int err = perf_evsel__ioctl(evsel, ioc, arg, cpu_map_idx, thread); in perf_evsel__run_ioctl()
455 int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx) in perf_evsel__enable_cpu() argument
457 return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu_map_idx); in perf_evsel__enable_cpu()
460 int perf_evsel__enable_thread(struct perf_evsel *evsel, int thread) in perf_evsel__enable_thread() argument
466 perf_cpu_map__for_each_cpu(cpu, idx, evsel->cpus) { in perf_evsel__enable_thread()
467 err = perf_evsel__ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, idx, thread); in perf_evsel__enable_thread()
475 int perf_evsel__enable(struct perf_evsel *evsel) in perf_evsel__enable() argument
480 for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++) in perf_evsel__enable()
481 err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, i); in perf_evsel__enable()
485 int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu_map_idx) in perf_evsel__disable_cpu() argument
487 return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu_map_idx); in perf_evsel__disable_cpu()
490 int perf_evsel__disable(struct perf_evsel *evsel) in perf_evsel__disable() argument
495 for (i = 0; i < xyarray__max_x(evsel->fd) && !err; i++) in perf_evsel__disable()
496 err = perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, i); in perf_evsel__disable()
500 int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter) in perf_evsel__apply_filter() argument
504 for (i = 0; i < perf_cpu_map__nr(evsel->cpus) && !err; i++) in perf_evsel__apply_filter()
505 err = perf_evsel__run_ioctl(evsel, in perf_evsel__apply_filter()
511 struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel) in perf_evsel__cpus() argument
513 return evsel->cpus; in perf_evsel__cpus()
516 struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel) in perf_evsel__threads() argument
518 return evsel->threads; in perf_evsel__threads()
521 struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel) in perf_evsel__attr() argument
523 return &evsel->attr; in perf_evsel__attr()
526 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) in perf_evsel__alloc_id() argument
531 evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id)); in perf_evsel__alloc_id()
532 if (evsel->sample_id == NULL) in perf_evsel__alloc_id()
535 evsel->id = zalloc(ncpus * nthreads * sizeof(u64)); in perf_evsel__alloc_id()
536 if (evsel->id == NULL) { in perf_evsel__alloc_id()
537 xyarray__delete(evsel->sample_id); in perf_evsel__alloc_id()
538 evsel->sample_id = NULL; in perf_evsel__alloc_id()
545 void perf_evsel__free_id(struct perf_evsel *evsel) in perf_evsel__free_id() argument
549 xyarray__delete(evsel->sample_id); in perf_evsel__free_id()
550 evsel->sample_id = NULL; in perf_evsel__free_id()
551 zfree(&evsel->id); in perf_evsel__free_id()
552 evsel->ids = 0; in perf_evsel__free_id()
554 perf_evsel_for_each_per_thread_period_safe(evsel, n, pos) { in perf_evsel__free_id()
560 bool perf_evsel__attr_has_per_thread_sample_period(struct perf_evsel *evsel) in perf_evsel__attr_has_per_thread_sample_period() argument
562 return (evsel->attr.sample_type & PERF_SAMPLE_READ) && in perf_evsel__attr_has_per_thread_sample_period()
563 (evsel->attr.sample_type & PERF_SAMPLE_TID) && in perf_evsel__attr_has_per_thread_sample_period()
564 evsel->attr.inherit; in perf_evsel__attr_has_per_thread_sample_period()
583 if (sid->evsel == NULL) in perf_sample_id__get_period_storage()
593 list_add_tail(&res->node, &sid->evsel->per_stream_periods); in perf_sample_id__get_period_storage()