Lines Matching refs: event
61 struct perf_event *event[XCHAL_NUM_PERF_COUNTERS]; member
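The member at line 61 is one slot per hardware counter. A minimal sketch of the per-CPU bookkeeping structure it could live in, assuming the used_mask bitmap referenced at line 321 below sits alongside it (the struct name and exact layout are assumptions, not the verbatim kernel definition):

    /* Sketch: per-CPU PMU state; layout assumed from the references below. */
    struct xtensa_pmu_events {
            /* One in-flight perf_event per hardware counter slot (line 61). */
            struct perf_event *event[XCHAL_NUM_PERF_COUNTERS];
            /* Bitmap of counter slots claimed by xtensa_pmu_add() (line 321). */
            unsigned long used_mask[BITS_TO_LONGS(XCHAL_NUM_PERF_COUNTERS)];
    };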
145 static void xtensa_perf_event_update(struct perf_event *event, in xtensa_perf_event_update() argument
153 new_raw_count = xtensa_pmu_read_counter(event->hw.idx); in xtensa_perf_event_update()
159 local64_add(delta, &event->count); in xtensa_perf_event_update()
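Lines 145-159 show the usual read-and-accumulate pattern. A hedged reconstruction of that update path, assuming the standard prev_count cmpxchg retry loop and a 32-bit counter width (both assumptions; only the read and local64_add lines are verbatim from the listing):

    static void xtensa_perf_event_update(struct perf_event *event,
                                         struct hw_perf_event *hwc, int idx)
    {
            u64 prev_raw_count, new_raw_count;
            s64 delta;

            do {
                    /* Re-read until no concurrent updater raced with us. */
                    prev_raw_count = local64_read(&hwc->prev_count);
                    new_raw_count = xtensa_pmu_read_counter(event->hw.idx);
            } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                                     new_raw_count) != prev_raw_count);

            /* Counter width assumed to be 32 bits. */
            delta = (new_raw_count - prev_raw_count) & 0xffffffffULL;
            local64_add(delta, &event->count);
    }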
163 static bool xtensa_perf_event_set_period(struct perf_event *event, in xtensa_perf_event_set_period() argument
169 if (!is_sampling_event(event)) { in xtensa_perf_event_set_period()
192 perf_event_update_userpage(event); in xtensa_perf_event_set_period()
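Lines 163-192 bracket the period setup. A sketch of what plausibly sits between those fragments: non-sampling events get the counter's full range, sampling events get -sample_period loaded so the counter overflows after the requested number of events. The XTENSA_PMU_COUNTER_MAX constant and the xtensa_pmu_write_counter() helper are assumptions used for illustration:

    static bool xtensa_perf_event_set_period(struct perf_event *event,
                                             struct hw_perf_event *hwc, int idx)
    {
            bool rc = false;
            s64 left;

            if (!is_sampling_event(event)) {
                    /* Free-running count: give the counter its whole range. */
                    left = XTENSA_PMU_COUNTER_MAX;
            } else {
                    s64 period = hwc->sample_period;

                    left = local64_read(&hwc->period_left);
                    if (left <= 0) {
                            /* Previous period expired; start the next one. */
                            left += period;
                            local64_set(&hwc->period_left, left);
                            hwc->last_period = period;
                            rc = true;
                    }
                    if (left > XTENSA_PMU_COUNTER_MAX)
                            left = XTENSA_PMU_COUNTER_MAX;
            }

            /* Program the counter so it overflows after 'left' events. */
            local64_set(&hwc->prev_count, -left);
            xtensa_pmu_write_counter(idx, -left);
            perf_event_update_userpage(event);

            return rc;
    }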
207 static int xtensa_pmu_event_init(struct perf_event *event) in xtensa_pmu_event_init() argument
211 switch (event->attr.type) { in xtensa_pmu_event_init()
213 if (event->attr.config >= ARRAY_SIZE(xtensa_hw_ctl) || in xtensa_pmu_event_init()
214 xtensa_hw_ctl[event->attr.config] == 0) in xtensa_pmu_event_init()
216 event->hw.config = xtensa_hw_ctl[event->attr.config]; in xtensa_pmu_event_init()
220 ret = xtensa_pmu_cache_event(event->attr.config); in xtensa_pmu_event_init()
223 event->hw.config = ret; in xtensa_pmu_event_init()
228 if ((event->attr.config & XTENSA_PMU_PMCTRL_SELECT) == in xtensa_pmu_event_init()
231 event->hw.config = (event->attr.config & in xtensa_pmu_event_init()
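Lines 207-231 cover the three event types the init path recognizes: generic hardware events map through the xtensa_hw_ctl table, cache events resolve via xtensa_pmu_cache_event(), and raw events are validated against the select field. A hedged sketch of that switch; the exact raw-config check and masking at lines 228-231 are simplified assumptions:

    static int xtensa_pmu_event_init(struct perf_event *event)
    {
            int ret;

            switch (event->attr.type) {
            case PERF_TYPE_HARDWARE:
                    /* Table lookup; a zero entry means "not supported". */
                    if (event->attr.config >= ARRAY_SIZE(xtensa_hw_ctl) ||
                        xtensa_hw_ctl[event->attr.config] == 0)
                            return -EINVAL;
                    event->hw.config = xtensa_hw_ctl[event->attr.config];
                    return 0;

            case PERF_TYPE_HW_CACHE:
                    /* Cache events resolve through a helper (line 220). */
                    ret = xtensa_pmu_cache_event(event->attr.config);
                    if (ret < 0)
                            return ret;
                    event->hw.config = ret;
                    return 0;

            case PERF_TYPE_RAW:
                    /* Reject one reserved select value, then keep only the
                     * validated fields; both details are assumptions here. */
                    if ((event->attr.config & XTENSA_PMU_PMCTRL_SELECT) ==
                        XTENSA_PMU_PMCTRL_SELECT)
                            return -EINVAL;
                    event->hw.config = event->attr.config & XTENSA_PMU_PMCTRL_SELECT;
                    return 0;

            default:
                    return -ENOENT;
            }
    }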
249 static void xtensa_pmu_start(struct perf_event *event, int flags) in xtensa_pmu_start() argument
251 struct hw_perf_event *hwc = &event->hw; in xtensa_pmu_start()
258 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); in xtensa_pmu_start()
259 xtensa_perf_event_set_period(event, hwc, idx); in xtensa_pmu_start()
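Lines 249-259 show the start path reloading the period only when the PERF_EF_RELOAD flag is set, and only if the count is already up to date. A sketch of how that could fit together; the final hardware-enable step is omitted rather than guessed:

    static void xtensa_pmu_start(struct perf_event *event, int flags)
    {
            struct hw_perf_event *hwc = &event->hw;
            int idx = hwc->idx;

            if (WARN_ON_ONCE(idx == -1))
                    return;

            if (flags & PERF_EF_RELOAD) {
                    /* Reloading only makes sense from an up-to-date count (line 258). */
                    WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
                    xtensa_perf_event_set_period(event, hwc, idx);
            }

            hwc->state = 0;
            /* ...enable counter 'idx' in the PMU control register
             * (hardware register write omitted)... */
    }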
267 static void xtensa_pmu_stop(struct perf_event *event, int flags) in xtensa_pmu_stop() argument
269 struct hw_perf_event *hwc = &event->hw; in xtensa_pmu_stop()
280 !(event->hw.state & PERF_HES_UPTODATE)) { in xtensa_pmu_stop()
281 xtensa_perf_event_update(event, &event->hw, idx); in xtensa_pmu_stop()
282 event->hw.state |= PERF_HES_UPTODATE; in xtensa_pmu_stop()
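Lines 267-282 show the stop path folding the final hardware value into event->count exactly once, guarded by PERF_HES_UPTODATE. A sketch under that reading, with the hardware disable step omitted:

    static void xtensa_pmu_stop(struct perf_event *event, int flags)
    {
            struct hw_perf_event *hwc = &event->hw;
            int idx = hwc->idx;

            if (!(hwc->state & PERF_HES_STOPPED)) {
                    /* ...disable counter 'idx' and clear its overflow status
                     * (register writes omitted)... */
                    hwc->state |= PERF_HES_STOPPED;
            }

            if ((flags & PERF_EF_UPDATE) &&
                !(event->hw.state & PERF_HES_UPTODATE)) {
                    /* Fold the last hardware value into event->count once. */
                    xtensa_perf_event_update(event, &event->hw, idx);
                    event->hw.state |= PERF_HES_UPTODATE;
            }
    }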
290 static int xtensa_pmu_add(struct perf_event *event, int flags) in xtensa_pmu_add() argument
293 struct hw_perf_event *hwc = &event->hw; in xtensa_pmu_add()
305 ev->event[idx] = event; in xtensa_pmu_add()
310 xtensa_pmu_start(event, PERF_EF_RELOAD); in xtensa_pmu_add()
312 perf_event_update_userpage(event); in xtensa_pmu_add()
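Lines 290-312 show add claiming a counter slot, recording the event in the per-CPU array, and optionally starting it. A hedged sketch; the per-CPU variable name xtensa_pmu_events is an assumption tied to the struct sketch near the top:

    static int xtensa_pmu_add(struct perf_event *event, int flags)
    {
            /* Per-CPU variable name assumed for illustration. */
            struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
            struct hw_perf_event *hwc = &event->hw;
            int idx = hwc->idx;

            /* Claim a free counter slot if the cached one is taken. */
            if (idx < 0 || __test_and_set_bit(idx, ev->used_mask)) {
                    idx = find_first_zero_bit(ev->used_mask,
                                              XCHAL_NUM_PERF_COUNTERS);
                    if (idx == XCHAL_NUM_PERF_COUNTERS)
                            return -EAGAIN;
                    __set_bit(idx, ev->used_mask);
                    hwc->idx = idx;
            }

            ev->event[idx] = event;                         /* line 305 */
            hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;

            if (flags & PERF_EF_START)
                    xtensa_pmu_start(event, PERF_EF_RELOAD); /* line 310 */

            perf_event_update_userpage(event);               /* line 312 */
            return 0;
    }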
316 static void xtensa_pmu_del(struct perf_event *event, int flags) in xtensa_pmu_del() argument
320 xtensa_pmu_stop(event, PERF_EF_UPDATE); in xtensa_pmu_del()
321 __clear_bit(event->hw.idx, ev->used_mask); in xtensa_pmu_del()
322 perf_event_update_userpage(event); in xtensa_pmu_del()
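Lines 316-322 read as the inverse of add: flush the final count, release the slot in used_mask, and refresh the userpage. A short sketch, again assuming the xtensa_pmu_events per-CPU name:

    static void xtensa_pmu_del(struct perf_event *event, int flags)
    {
            struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);

            xtensa_pmu_stop(event, PERF_EF_UPDATE);          /* line 320 */
            __clear_bit(event->hw.idx, ev->used_mask);       /* line 321 */
            perf_event_update_userpage(event);               /* line 322 */
    }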
325 static void xtensa_pmu_read(struct perf_event *event) in xtensa_pmu_read() argument
327 xtensa_perf_event_update(event, &event->hw, event->hw.idx); in xtensa_pmu_read()
376 struct perf_event *event = ev->event[i]; in xtensa_pmu_irq_handler() local
377 struct hw_perf_event *hwc = &event->hw; in xtensa_pmu_irq_handler()
384 xtensa_perf_event_update(event, hwc, i); in xtensa_pmu_irq_handler()
386 if (xtensa_perf_event_set_period(event, hwc, i)) { in xtensa_pmu_irq_handler()
391 perf_event_overflow(event, &data, regs); in xtensa_pmu_irq_handler()
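Lines 376-391 show the overflow interrupt path: for each in-use counter, fold in the current value, re-arm the period, and deliver a sample via perf_event_overflow(). A sketch of that loop; the irqreturn_t handler signature and the overflow-status check are assumptions, and the status register access is omitted rather than guessed:

    irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id)
    {
            irqreturn_t rc = IRQ_NONE;
            struct xtensa_pmu_events *ev = this_cpu_ptr(&xtensa_pmu_events);
            unsigned int i;

            for_each_set_bit(i, ev->used_mask, XCHAL_NUM_PERF_COUNTERS) {
                    struct perf_event *event = ev->event[i];      /* line 376 */
                    struct hw_perf_event *hwc = &event->hw;       /* line 377 */

                    /* ...skip counters whose overflow flag is not set, and
                     * acknowledge the overflow (register access omitted)... */

                    xtensa_perf_event_update(event, hwc, i);      /* line 384 */
                    if (xtensa_perf_event_set_period(event, hwc, i)) {
                            struct perf_sample_data data;
                            struct pt_regs *regs = get_irq_regs();

                            perf_sample_data_init(&data, 0, hwc->last_period);
                            perf_event_overflow(event, &data, regs); /* line 391 */
                    }
                    rc = IRQ_HANDLED;
            }
            return rc;
    }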