Lines matching refs:events

Cross-reference hits for the identifier 'events'; judging by the functions, all are from the perf core (kernel/events/core.c). Each entry shows the source line number, the matching code, the enclosing function, and, for declaration sites, a tag: [local] for a local variable, [argument] for a function parameter.

6124   __poll_t events = EPOLLHUP;           in perf_poll()  [local]
6135   return events;                        in perf_poll()
6148   events = atomic_xchg(&rb->poll, 0);   in perf_poll()
6150   return events;                        in perf_poll()
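
The perf_poll() hits form a consume-and-clear hand-off: readiness bits accumulate in rb->poll, and poll() swaps the whole word for 0 with atomic_xchg(), so every pending bit is reported exactly once; EPOLLHUP is the fallback when there is no buffer to consume from. Below is a minimal userspace sketch of that pattern, with a C11 atomic standing in for the kernel's atomic_t; mark_ready(), do_poll(), and the *_BIT flags are illustrative names, not the kernel API, and the writer-side OR is a sketch choice rather than a claim about the kernel's exact write.

    /* Consume-and-clear poll flags: a writer publishes readiness bits,
     * the poller takes them all and resets the word in one atomic step. */
    #include <stdatomic.h>
    #include <stdio.h>

    #define POLLIN_BIT  0x001
    #define POLLHUP_BIT 0x010

    static _Atomic unsigned int pending;        /* plays the role of rb->poll */

    /* Writer side: publish readiness bits (hypothetical helper). */
    static void mark_ready(unsigned int bits)
    {
        atomic_fetch_or(&pending, bits);
    }

    /* Poll side: default to hang-up, then consume-and-clear in one swap. */
    static unsigned int do_poll(int have_buffer)
    {
        unsigned int events = POLLHUP_BIT;      /* cf. events = EPOLLHUP */

        if (have_buffer)
            events = atomic_exchange(&pending, 0);
        return events;
    }

    int main(void)
    {
        mark_ready(POLLIN_BIT);
        printf("poll -> %#x\n", do_poll(1));    /* 0x1: pending bit consumed */
        printf("poll -> %#x\n", do_poll(1));    /* 0: nothing pending now */
        printf("poll -> %#x\n", do_poll(0));    /* 0x10: no buffer, report HUP */
        return 0;
    }
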
8047   int events = local_inc_return(&rb->events);   in perf_output_sample()  [local]
8049   if (events >= wakeup_events) {                in perf_output_sample()
8050   local_sub(wakeup_events, &rb->events);        in perf_output_sample()
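
The perf_output_sample() hits implement a wakeup threshold: each emitted sample bumps rb->events with local_inc_return(), and once the count reaches the configured wakeup_events the code subtracts the threshold instead of zeroing the counter, so samples that arrive past the threshold still count toward the next wakeup. A sketch of the same counting scheme, assuming a plain C11 atomic in place of the kernel's local_t; output_sample() and sample_count are made-up names.

    /* Wake up every wakeup_events samples; subtracting (not zeroing) keeps
     * any overshoot credited toward the next wakeup. */
    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic int sample_count;            /* plays the role of rb->events */

    static void output_sample(int wakeup_events)
    {
        /* local_inc_return() in the kernel: increment and read back. */
        int events = atomic_fetch_add(&sample_count, 1) + 1;

        if (wakeup_events && events >= wakeup_events) {
            atomic_fetch_sub(&sample_count, wakeup_events);
            printf("wakeup (count hit %d)\n", events);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 10; i++)
            output_sample(4);                   /* fires on the 4th and 8th samples */
        return 0;
    }
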
10298  int events = atomic_read(&event->event_limit);              in __perf_event_overflow()  [local]
10323  if (events && atomic_dec_and_test(&event->event_limit)) {   in __perf_event_overflow()
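
The __perf_event_overflow() hits enforce an optional overflow budget: event->event_limit is read first as a cheap "is a limit configured?" check, and atomic_dec_and_test() then detects the transition to zero, so exactly one overflow gets to disable the event and notify its owner. A userspace sketch of the same decrement-to-zero idiom, assuming C11 atomics; handle_overflow() and the initial limit of 3 are illustrative.

    /* Overflow budget: the path that decrements the counter from 1 to 0
     * wins the right to disable the event, exactly once. */
    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic int event_limit = 3;         /* 0 means "no limit set" */

    static void handle_overflow(void)
    {
        int events = atomic_load(&event_limit);

        /* atomic_fetch_sub() == 1 is the 1 -> 0 transition, i.e. the
         * kernel's atomic_dec_and_test() returning true. */
        if (events && atomic_fetch_sub(&event_limit, 1) == 1)
            printf("limit reached, disabling event\n");
    }

    int main(void)
    {
        for (int i = 0; i < 5; i++)
            handle_overflow();                  /* fires once, on the 3rd overflow */
        return 0;
    }
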
12394  INIT_LIST_HEAD(&pmu->events);                          in DEFINE_FREE()
12467  list_for_each_entry(event, &pmu->events, pmu_list) {   in pmu_get_event()
12478  return list_empty(&pmu->events);                       in pmu_empty()
13048  list_add(&event->pmu_list, &pmu->events);              in perf_event_alloc()
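
This cluster is plain list bookkeeping: each pmu owns an intrusive list head (pmu->events), every event allocated against it is threaded on via the event's embedded pmu_list node, pmu_get_event() walks the list with list_for_each_entry(), and pmu_empty() reduces to list_empty(). A self-contained sketch of the intrusive-list idiom, re-implementing a minimal doubly linked list and container_of() in place of <linux/list.h>; the struct layouts are simplified stand-ins.

    /* Intrusive list: the node lives inside the object, and container_of()
     * recovers the object from the node during a walk. */
    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void list_init(struct list_head *h)         { h->next = h->prev = h; }
    static int  list_empty(const struct list_head *h)  { return h->next == h; }
    static void list_add(struct list_head *n, struct list_head *h)
    {
        n->next = h->next; n->prev = h;
        h->next->prev = n; h->next = n;
    }

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct pmu   { struct list_head events; };
    struct event { int id; struct list_head pmu_list; };

    int main(void)
    {
        struct pmu pmu;
        struct event a = { .id = 1 }, b = { .id = 2 };

        list_init(&pmu.events);                 /* cf. INIT_LIST_HEAD(&pmu->events) */
        list_add(&a.pmu_list, &pmu.events);     /* cf. the add in perf_event_alloc() */
        list_add(&b.pmu_list, &pmu.events);

        /* cf. list_for_each_entry(event, &pmu->events, pmu_list) */
        for (struct list_head *p = pmu.events.next; p != &pmu.events; p = p->next) {
            struct event *e = container_of(p, struct event, pmu_list);
            printf("event %d\n", e->id);
        }
        printf("empty: %d\n", list_empty(&pmu.events));   /* 0 */
        return 0;
    }
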
13867  struct list_head *events)                                      in __perf_pmu_remove()  [argument]
13874  list_add(&event->migrate_entry, events);                       in __perf_pmu_remove()
13879  list_add(&sibling->migrate_entry, events);                     in __perf_pmu_remove()
13908  int cpu, struct pmu *pmu, struct list_head *events)            in __perf_pmu_install()  [argument]
13920  list_for_each_entry_safe(event, tmp, events, migrate_entry) {  in __perf_pmu_install()
13932  list_for_each_entry_safe(event, tmp, events, migrate_entry) {  in __perf_pmu_install()
13941  LIST_HEAD(events);                                             in perf_pmu_migrate_context()
13956  __perf_pmu_remove(src_ctx, src_cpu, pmu, &src_ctx->pinned_groups, &events);   in perf_pmu_migrate_context()
13957  __perf_pmu_remove(src_ctx, src_cpu, pmu, &src_ctx->flexible_groups, &events);   in perf_pmu_migrate_context()
13959  if (!list_empty(&events)) {                                    in perf_pmu_migrate_context()
13965  __perf_pmu_install(dst_ctx, dst_cpu, pmu, &events);            in perf_pmu_migrate_context()
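
perf_pmu_migrate_context() moves a pmu's events between CPU contexts in two phases over an on-stack list (LIST_HEAD(events)): __perf_pmu_remove() detaches each matching event, and its group siblings, from the source context's pinned and flexible groups and stages it on the list through the event's migrate_entry node; __perf_pmu_install() then drains the list into the destination, using the _safe iterator because each entry is unlinked mid-walk (the two loops at 13920 and 13932 apparently handle group leaders first, then siblings). A compact sketch of the stage-and-drain pattern follows, with the list helpers re-implemented locally; pmu_remove(), pmu_install(), and the struct layout are illustrative.

    /* Two-phase migration: stage objects on a temporary list through a
     * dedicated node, then drain that list into the destination.  The
     * drain caches ->next before unlinking, like list_for_each_entry_safe(). */
    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void list_init(struct list_head *h) { h->next = h->prev = h; }
    static void list_del(struct list_head *n)
    {
        n->prev->next = n->next; n->next->prev = n->prev;
    }
    static void list_add(struct list_head *n, struct list_head *h)
    {
        n->next = h->next; n->prev = h;
        h->next->prev = n; h->next = n;
    }

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* An event sits on its context through group_entry and is staged for
     * migration through a separate node, migrate_entry. */
    struct event { int id; struct list_head group_entry, migrate_entry; };

    /* Phase 1: unlink from the source and stage on 'events'. */
    static void pmu_remove(struct event *e, struct list_head *events)
    {
        list_del(&e->group_entry);
        list_add(&e->migrate_entry, events);
    }

    /* Phase 2: drain the staging list into the destination; fetch the next
     * pointer before unlinking the current entry. */
    static void pmu_install(struct list_head *events, struct list_head *dst)
    {
        for (struct list_head *p = events->next, *n = p->next;
             p != events; p = n, n = p->next) {
            struct event *e = container_of(p, struct event, migrate_entry);

            list_del(&e->migrate_entry);
            list_add(&e->group_entry, dst);
            printf("installed event %d\n", e->id);
        }
    }

    int main(void)
    {
        struct list_head src, dst, events;
        struct event a = { .id = 1 }, b = { .id = 2 };

        list_init(&src); list_init(&dst); list_init(&events);
        list_add(&a.group_entry, &src);
        list_add(&b.group_entry, &src);

        pmu_remove(&a, &events);                /* stage both events */
        pmu_remove(&b, &events);
        pmu_install(&events, &dst);             /* drain into the destination */
        return 0;
    }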