Lines matching refs: event
39 struct perf_event *event[MAX_HWEVENTS]; member
109 static inline void perf_get_data_addr(struct perf_event *event, struct pt_regs *regs, u64 *addrp) {… in perf_get_data_addr() argument
124 static bool is_ebb_event(struct perf_event *event) { return false; } in is_ebb_event() argument
125 static int ebb_event_check(struct perf_event *event) { return 0; } in ebb_event_check() argument
126 static void ebb_event_add(struct perf_event *event) { } in ebb_event_add() argument
133 static inline void power_pmu_bhrb_enable(struct perf_event *event) {} in power_pmu_bhrb_enable() argument
134 static inline void power_pmu_bhrb_disable(struct perf_event *event) {} in power_pmu_bhrb_disable() argument
136 static inline void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw) {} in power_pmu_bhrb_read() argument
202 static inline void perf_get_data_addr(struct perf_event *event, struct pt_regs *regs, u64 *addrp) in perf_get_data_addr() argument
227 if (is_kernel_addr(mfspr(SPRN_SDAR)) && event->attr.exclude_kernel) in perf_get_data_addr()
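
The no-op stubs earlier in the listing (lines 109–136) pair with the full definitions that follow; the real perf_get_data_addr() at line 202 reads the sampled address from the SDAR SPR and, per line 227, drops it when the event excludes kernel addresses. A minimal userspace sketch of that filter, with mock types standing in for the kernel's (the address-range predicate below is a simplified assumption, not the kernel's is_kernel_addr()):

    #include <stdint.h>
    #include <stdbool.h>

    struct mock_event { bool exclude_kernel; };

    /* Stand-in: 64-bit powerpc kernel addresses live in the 0xc... range. */
    static bool addr_is_kernel(uint64_t addr)
    {
        return addr >= 0xc000000000000000ULL;
    }

    /* Publish the sampled data address only if the event may see it. */
    static void get_data_addr(const struct mock_event *ev, uint64_t sdar,
                              uint64_t *addrp)
    {
        if (addr_is_kernel(sdar) && ev->exclude_kernel)
            return;     /* suppress kernel addresses for excluded events */
        *addrp = sdar;
    }
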
407 static void power_pmu_bhrb_enable(struct perf_event *event) in power_pmu_bhrb_enable() argument
415 if (event->ctx->task && cpuhw->bhrb_context != event->ctx) { in power_pmu_bhrb_enable()
417 cpuhw->bhrb_context = event->ctx; in power_pmu_bhrb_enable()
420 perf_sched_cb_inc(event->pmu); in power_pmu_bhrb_enable()
423 static void power_pmu_bhrb_disable(struct perf_event *event) in power_pmu_bhrb_disable() argument
432 perf_sched_cb_dec(event->pmu); in power_pmu_bhrb_disable()
483 static void power_pmu_bhrb_read(struct perf_event *event, struct cpu_hw_events *cpuhw) in power_pmu_bhrb_read() argument
514 is_kernel_addr(addr) && event->attr.exclude_kernel) in power_pmu_bhrb_read()
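
power_pmu_bhrb_enable()/disable() (lines 407–432) track which task context owns the per-CPU branch history buffer and keep a scheduler-callback refcount via perf_sched_cb_inc/dec(); power_pmu_bhrb_read() applies the same exclude_kernel filtering as the data-address path (line 514). A sketch of the ownership/refcount pattern only, with mock state rather than the kernel API:

    #include <stddef.h>

    /* Mock per-CPU state: who owns the branch buffer, how many users. */
    struct mock_cpuhw {
        const void *bhrb_context;   /* owning perf context, or NULL */
        int bhrb_users;
    };

    /* On add: a buffer filled for another task is stale, so re-home it. */
    static void bhrb_enable(struct mock_cpuhw *cpuhw, const void *task_ctx)
    {
        cpuhw->bhrb_users++;
        if (task_ctx && cpuhw->bhrb_context != task_ctx) {
            /* the real code discards stale entries before switching owner */
            cpuhw->bhrb_context = task_ctx;
        }
    }

    /* On delete: drop the reference; the last user releases the buffer. */
    static void bhrb_disable(struct mock_cpuhw *cpuhw)
    {
        if (--cpuhw->bhrb_users == 0)
            cpuhw->bhrb_context = NULL;
    }
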
571 static bool is_ebb_event(struct perf_event *event) in is_ebb_event() argument
579 ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1); in is_ebb_event()
582 static int ebb_event_check(struct perf_event *event) in ebb_event_check() argument
584 struct perf_event *leader = event->group_leader; in ebb_event_check()
587 if (is_ebb_event(leader) != is_ebb_event(event)) in ebb_event_check()
590 if (is_ebb_event(event)) { in ebb_event_check()
591 if (!(event->attach_state & PERF_ATTACH_TASK)) in ebb_event_check()
597 if (event->attr.freq || in ebb_event_check()
598 event->attr.inherit || in ebb_event_check()
599 event->attr.sample_type || in ebb_event_check()
600 event->attr.sample_period || in ebb_event_check()
601 event->attr.enable_on_exec) in ebb_event_check()
608 static void ebb_event_add(struct perf_event *event) in ebb_event_add() argument
610 if (!is_ebb_event(event) || current->thread.used_ebb) in ebb_event_add()
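
is_ebb_event() (line 571) tests a single flag bit in attr.config, and ebb_event_check() (line 582) enforces that EBB is all-or-nothing within a group and incompatible with sampling, inheritance, and frequency mode. A condensed sketch of those rules, assuming bit 63 as the flag position (PERF_EVENT_CONFIG_EBB_SHIFT in the real headers):

    #include <stdint.h>
    #include <stdbool.h>
    #include <errno.h>

    #define EBB_SHIFT 63    /* assumed flag position in attr.config */

    struct mock_attr {
        uint64_t config;
        bool freq, inherit, enable_on_exec;
        uint64_t sample_type, sample_period;
    };
    struct mock_event {
        struct mock_attr attr;
        struct mock_event *group_leader;
        bool attached_to_task;
    };

    static bool is_ebb(const struct mock_event *e)
    {
        return (e->attr.config >> EBB_SHIFT) & 1;
    }

    /* EBB events must be per-task and unsampled, and a group is EBB
     * all-or-nothing: mixing would fight over the same EBB registers. */
    static int ebb_check(const struct mock_event *e)
    {
        if (is_ebb(e->group_leader) != is_ebb(e))
            return -EINVAL;
        if (!is_ebb(e))
            return 0;
        if (!e->attached_to_task)
            return -EINVAL;
        if (e->attr.freq || e->attr.inherit || e->attr.sample_type ||
            e->attr.sample_period || e->attr.enable_on_exec)
            return -EINVAL;
        return 0;
    }
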
887 idx = cpuhw->event[i]->hw.idx; in any_pmc_overflown()
964 int n_ev, struct perf_event **event) in power_check_constraints() argument
987 &cpuhw->avalues[i][0], event[i]->attr.config1)) in power_check_constraints()
1023 event[i]->attr.config1); in power_check_constraints()
1090 struct perf_event *event; in check_excludes() local
1110 event = ctrs[i]; in check_excludes()
1112 eu = event->attr.exclude_user; in check_excludes()
1113 ek = event->attr.exclude_kernel; in check_excludes()
1114 eh = event->attr.exclude_hv; in check_excludes()
1116 } else if (event->attr.exclude_user != eu || in check_excludes()
1117 event->attr.exclude_kernel != ek || in check_excludes()
1118 event->attr.exclude_hv != eh) { in check_excludes()
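
check_excludes() (lines 1090–1118) exists because one MMCR0 privilege mask covers all counters: the first counted event fixes eu/ek/eh, and any later event that disagrees sinks the whole group. A standalone sketch of that capture-then-compare loop (mock attrs; -EAGAIN chosen to match the scheduler-retry convention):

    #include <stdbool.h>
    #include <errno.h>

    struct mock_attr { bool exclude_user, exclude_kernel, exclude_hv; };

    /* All events share one privilege mask, so their exclude bits must agree. */
    static int check_excludes(const struct mock_attr *attrs, int n)
    {
        bool eu = false, ek = false, eh = false;
        bool first = true;

        for (int i = 0; i < n; i++) {
            if (first) {
                eu = attrs[i].exclude_user;
                ek = attrs[i].exclude_kernel;
                eh = attrs[i].exclude_hv;
                first = false;
            } else if (attrs[i].exclude_user != eu ||
                       attrs[i].exclude_kernel != ek ||
                       attrs[i].exclude_hv != eh) {
                return -EAGAIN;
            }
        }
        return 0;
    }
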
1150 static void power_pmu_read(struct perf_event *event) in power_pmu_read() argument
1154 if (event->hw.state & PERF_HES_STOPPED) in power_pmu_read()
1157 if (!event->hw.idx) in power_pmu_read()
1160 if (is_ebb_event(event)) { in power_pmu_read()
1161 val = read_pmc(event->hw.idx); in power_pmu_read()
1162 local64_set(&event->hw.prev_count, val); in power_pmu_read()
1172 prev = local64_read(&event->hw.prev_count); in power_pmu_read()
1174 val = read_pmc(event->hw.idx); in power_pmu_read()
1178 } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); in power_pmu_read()
1180 local64_add(delta, &event->count); in power_pmu_read()
1192 prev = local64_read(&event->hw.period_left); in power_pmu_read()
1196 } while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev); in power_pmu_read()
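
power_pmu_read() (lines 1150–1196) accumulates a 32-bit PMC into the 64-bit event count without locks: read prev, read the PMC, compute the wrap-safe delta, and retry if a concurrent update raced past the cmpxchg. A userspace sketch using C11 atomics in place of local64_t:

    #include <stdint.h>
    #include <stdatomic.h>

    /* Fold a 32-bit hardware counter into a 64-bit count, lock-free. */
    static void pmu_read(_Atomic uint64_t *prev_count,
                         _Atomic uint64_t *count,
                         uint32_t (*read_pmc)(void))
    {
        uint64_t prev, val, delta;

        do {
            prev = atomic_load(prev_count);
            val = read_pmc();
            /* masking to 32 bits makes the subtraction wrap-safe */
            delta = (val - prev) & 0xfffffffful;
        } while (!atomic_compare_exchange_weak(prev_count, &prev, val));

        atomic_fetch_add(count, delta);
    }

Note the EBB branch at lines 1160–1162: for EBB events the kernel only snapshots prev_count, since the count itself belongs to userspace.
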
1213 struct perf_event *event; in freeze_limited_counters() local
1218 event = cpuhw->limited_counter[i]; in freeze_limited_counters()
1219 if (!event->hw.idx) in freeze_limited_counters()
1221 val = (event->hw.idx == 5) ? pmc5 : pmc6; in freeze_limited_counters()
1222 prev = local64_read(&event->hw.prev_count); in freeze_limited_counters()
1223 event->hw.idx = 0; in freeze_limited_counters()
1226 local64_add(delta, &event->count); in freeze_limited_counters()
1233 struct perf_event *event; in thaw_limited_counters() local
1238 event = cpuhw->limited_counter[i]; in thaw_limited_counters()
1239 event->hw.idx = cpuhw->limited_hwidx[i]; in thaw_limited_counters()
1240 val = (event->hw.idx == 5) ? pmc5 : pmc6; in thaw_limited_counters()
1241 prev = local64_read(&event->hw.prev_count); in thaw_limited_counters()
1243 local64_set(&event->hw.prev_count, val); in thaw_limited_counters()
1244 perf_event_update_userpage(event); in thaw_limited_counters()
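
freeze_limited_counters()/thaw_limited_counters() (lines 1213–1244) handle PMC5/PMC6, which on these PMUs do not respect the usual freeze controls: freezing folds the current reading into the count and parks the event (idx = 0), thawing rebinds it and restarts delta tracking from the new reading. Sketched with mock state:

    #include <stdint.h>

    struct mock_limited { int idx; uint64_t prev_count, count; };

    /* Freeze: fold the current reading into the count, then park the event. */
    static void freeze_one(struct mock_limited *e, uint64_t pmc_val)
    {
        uint64_t delta = (pmc_val - e->prev_count) & 0xfffffffful;

        e->count += delta;
        e->idx = 0;                 /* mark as not currently on a PMC */
    }

    /* Thaw: rebind to its hardware counter and restart delta tracking. */
    static void thaw_one(struct mock_limited *e, int hwidx, uint64_t pmc_val)
    {
        e->idx = hwidx;
        e->prev_count = pmc_val;    /* future deltas are relative to now */
    }
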
1412 struct perf_event *event; in power_pmu_enable() local
1443 ebb = is_ebb_event(cpuhw->event[0]); in power_pmu_enable()
1474 &cpuhw->mmcr, cpuhw->event, ppmu->flags)) { in power_pmu_enable()
1486 event = cpuhw->event[0]; in power_pmu_enable()
1487 if (event->attr.exclude_user) in power_pmu_enable()
1489 if (event->attr.exclude_kernel) in power_pmu_enable()
1491 if (event->attr.exclude_hv) in power_pmu_enable()
1516 event = cpuhw->event[i]; in power_pmu_enable()
1517 if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) { in power_pmu_enable()
1518 power_pmu_read(event); in power_pmu_enable()
1519 write_pmc(event->hw.idx, 0); in power_pmu_enable()
1520 event->hw.idx = 0; in power_pmu_enable()
1529 event = cpuhw->event[i]; in power_pmu_enable()
1530 if (event->hw.idx) in power_pmu_enable()
1534 cpuhw->limited_counter[n_lim] = event; in power_pmu_enable()
1541 val = local64_read(&event->hw.prev_count); in power_pmu_enable()
1544 if (event->hw.sample_period) { in power_pmu_enable()
1545 left = local64_read(&event->hw.period_left); in power_pmu_enable()
1549 local64_set(&event->hw.prev_count, val); in power_pmu_enable()
1552 event->hw.idx = idx; in power_pmu_enable()
1553 if (event->hw.state & PERF_HES_STOPPED) in power_pmu_enable()
1557 perf_event_update_userpage(event); in power_pmu_enable()
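
Within power_pmu_enable() (lines 1412–1557), the key arithmetic is the preload at lines 1544–1549: a PMC raises its overflow condition when bit 31 sets, so to get an interrupt after `left` more events the counter is seeded with 0x80000000 - left. Just that conversion, in isolation:

    #include <stdint.h>

    /* A PMC "overflows" when it goes negative (bit 31 set); preload it so
     * that happens after `left` more events. left >= 2^31 means no preload:
     * the counter cannot reach the threshold in one run. */
    static uint32_t pmc_preload(int64_t left)
    {
        uint32_t val = 0;

        if (left < 0x80000000LL)
            val = (uint32_t)(0x80000000LL - left);
        return val;
    }
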
1591 struct perf_event *event; in collect_events() local
1600 for_each_sibling_event(event, group) { in collect_events()
1601 if (event->pmu->task_ctx_nr == perf_hw_context && in collect_events()
1602 event->state != PERF_EVENT_STATE_OFF) { in collect_events()
1605 ctrs[n] = event; in collect_events()
1606 flags[n] = event->hw.event_base; in collect_events()
1607 events[n++] = event->hw.config; in collect_events()
1619 static int power_pmu_add(struct perf_event *event, int ef_flags) in power_pmu_add() argument
1627 perf_pmu_disable(event->pmu); in power_pmu_add()
1637 cpuhw->event[n0] = event; in power_pmu_add()
1638 cpuhw->events[n0] = event->hw.config; in power_pmu_add()
1639 cpuhw->flags[n0] = event->hw.event_base; in power_pmu_add()
1648 event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; in power_pmu_add()
1650 event->hw.state = 0; in power_pmu_add()
1660 if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) in power_pmu_add()
1662 if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1, cpuhw->event)) in power_pmu_add()
1664 event->hw.config = cpuhw->events[n0]; in power_pmu_add()
1667 ebb_event_add(event); in power_pmu_add()
1674 if (has_branch_stack(event)) { in power_pmu_add()
1679 event->attr.branch_sample_type); in power_pmu_add()
1683 power_pmu_bhrb_enable(event); in power_pmu_add()
1687 perf_pmu_enable(event->pmu); in power_pmu_add()
1695 static void power_pmu_del(struct perf_event *event, int ef_flags) in power_pmu_del() argument
1702 perf_pmu_disable(event->pmu); in power_pmu_del()
1704 power_pmu_read(event); in power_pmu_del()
1708 if (event == cpuhw->event[i]) { in power_pmu_del()
1710 cpuhw->event[i-1] = cpuhw->event[i]; in power_pmu_del()
1715 ppmu->disable_pmc(event->hw.idx - 1, &cpuhw->mmcr); in power_pmu_del()
1716 if (event->hw.idx) { in power_pmu_del()
1717 write_pmc(event->hw.idx, 0); in power_pmu_del()
1718 event->hw.idx = 0; in power_pmu_del()
1720 perf_event_update_userpage(event); in power_pmu_del()
1725 if (event == cpuhw->limited_counter[i]) in power_pmu_del()
1739 if (has_branch_stack(event)) in power_pmu_del()
1740 power_pmu_bhrb_disable(event); in power_pmu_del()
1742 perf_pmu_enable(event->pmu); in power_pmu_del()
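
power_pmu_del() (lines 1695–1742) removes an event by shifting every later entry down one slot (line 1710), keeping cpuhw->event[] dense so the constraint and scheduling code can treat it as a simple array. The same compaction in isolation:

    /* Remove one entry and shift the tail down, keeping the array dense. */
    static void remove_event(void **events, int *n_events, const void *victim)
    {
        for (int i = 0; i < *n_events; i++) {
            if (events[i] == victim) {
                for (int j = i + 1; j < *n_events; j++)
                    events[j - 1] = events[j];
                (*n_events)--;
                break;
            }
        }
    }
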
1751 static void power_pmu_start(struct perf_event *event, int ef_flags) in power_pmu_start() argument
1757 if (!event->hw.idx || !event->hw.sample_period) in power_pmu_start()
1760 if (!(event->hw.state & PERF_HES_STOPPED)) in power_pmu_start()
1764 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); in power_pmu_start()
1767 perf_pmu_disable(event->pmu); in power_pmu_start()
1769 event->hw.state = 0; in power_pmu_start()
1770 left = local64_read(&event->hw.period_left); in power_pmu_start()
1776 write_pmc(event->hw.idx, val); in power_pmu_start()
1778 perf_event_update_userpage(event); in power_pmu_start()
1779 perf_pmu_enable(event->pmu); in power_pmu_start()
1783 static void power_pmu_stop(struct perf_event *event, int ef_flags) in power_pmu_stop() argument
1787 if (!event->hw.idx || !event->hw.sample_period) in power_pmu_stop()
1790 if (event->hw.state & PERF_HES_STOPPED) in power_pmu_stop()
1794 perf_pmu_disable(event->pmu); in power_pmu_stop()
1796 power_pmu_read(event); in power_pmu_stop()
1797 event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; in power_pmu_stop()
1798 write_pmc(event->hw.idx, 0); in power_pmu_stop()
1800 perf_event_update_userpage(event); in power_pmu_stop()
1801 perf_pmu_enable(event->pmu); in power_pmu_stop()
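
power_pmu_start()/stop() (lines 1751–1801) form a small state machine over the PERF_HES_* flags: stop folds the count in, marks STOPPED|UPTODATE, and zeroes the PMC; start requires STOPPED, clears the flags, and reprograms the PMC from period_left. A stripped-down model of the flag discipline (local macros, not the perf constants):

    #include <stdint.h>

    #define HES_STOPPED  0x1u
    #define HES_UPTODATE 0x2u   /* count folded in; safe to restart */

    struct mock_hw { int idx; uint64_t sample_period; unsigned state; };

    static void pmu_stop(struct mock_hw *hw)
    {
        if (!hw->idx || !hw->sample_period || (hw->state & HES_STOPPED))
            return;
        /* the real code reads the PMC into event->count here */
        hw->state |= HES_STOPPED | HES_UPTODATE;
        /* ...and writes 0 to the PMC so it stops ticking toward overflow */
    }

    static void pmu_start(struct mock_hw *hw)
    {
        if (!hw->idx || !hw->sample_period || !(hw->state & HES_STOPPED))
            return;
        hw->state = 0;
        /* the real code reprograms the PMC from period_left here */
    }
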
1870 if (check_excludes(cpuhw->event, cpuhw->flags, 0, n)) in power_pmu_commit_txn()
1872 i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n, cpuhw->event); in power_pmu_commit_txn()
1877 cpuhw->event[i]->hw.config = cpuhw->events[i]; in power_pmu_commit_txn()
1891 static int can_go_on_limited_pmc(struct perf_event *event, u64 ev, in can_go_on_limited_pmc() argument
1897 if (event->attr.exclude_user in can_go_on_limited_pmc()
1898 || event->attr.exclude_kernel in can_go_on_limited_pmc()
1899 || event->attr.exclude_hv in can_go_on_limited_pmc()
1900 || event->attr.sample_period) in can_go_on_limited_pmc()
1944 static void hw_perf_event_destroy(struct perf_event *event) in hw_perf_event_destroy() argument
1996 static int power_pmu_event_init(struct perf_event *event) in power_pmu_event_init() argument
2010 if (has_branch_stack(event)) { in power_pmu_event_init()
2016 switch (event->attr.type) { in power_pmu_event_init()
2018 ev = event->attr.config; in power_pmu_event_init()
2027 err = hw_perf_cache_event(event->attr.config, &ev); in power_pmu_event_init()
2035 ev = event->attr.config; in power_pmu_event_init()
2052 ppmu->check_attr_config(event)) in power_pmu_event_init()
2055 event->hw.config_base = ev; in power_pmu_event_init()
2056 event->hw.idx = 0; in power_pmu_event_init()
2064 event->attr.exclude_hv = 0; in power_pmu_event_init()
2073 if (event->attach_state & PERF_ATTACH_TASK) in power_pmu_event_init()
2081 if (can_go_on_limited_pmc(event, ev, flags)) { in power_pmu_event_init()
2096 err = ebb_event_check(event); in power_pmu_event_init()
2106 if (event->group_leader != event) { in power_pmu_event_init()
2107 n = collect_events(event->group_leader, ppmu->n_counter - 1, in power_pmu_event_init()
2113 ctrs[n] = event; in power_pmu_event_init()
2123 if (has_branch_stack(event)) { in power_pmu_event_init()
2138 if (hweight64(event->attr.branch_sample_type & ~PERF_SAMPLE_BRANCH_PLM_ALL) > 1) { in power_pmu_event_init()
2145 event->attr.branch_sample_type); in power_pmu_event_init()
2158 event->hw.config = events[n]; in power_pmu_event_init()
2159 event->hw.event_base = cflags[n]; in power_pmu_event_init()
2160 event->hw.last_period = event->hw.sample_period; in power_pmu_event_init()
2161 local64_set(&event->hw.period_left, event->hw.last_period); in power_pmu_event_init()
2167 if (is_ebb_event(event)) in power_pmu_event_init()
2168 local64_set(&event->hw.prev_count, 0); in power_pmu_event_init()
2186 event->destroy = hw_perf_event_destroy; in power_pmu_event_init()
2191 static int power_pmu_event_idx(struct perf_event *event) in power_pmu_event_idx() argument
2193 return event->hw.idx; in power_pmu_event_idx()
2230 static void record_and_restart(struct perf_event *event, unsigned long val, in record_and_restart() argument
2233 u64 period = event->hw.sample_period; in record_and_restart()
2237 if (event->hw.state & PERF_HES_STOPPED) { in record_and_restart()
2238 write_pmc(event->hw.idx, 0); in record_and_restart()
2243 prev = local64_read(&event->hw.prev_count); in record_and_restart()
2245 local64_add(delta, &event->count); in record_and_restart()
2252 left = local64_read(&event->hw.period_left) - delta; in record_and_restart()
2266 if (event->attr.sample_type & PERF_SAMPLE_IP) in record_and_restart()
2271 event->hw.last_period = event->hw.sample_period; in record_and_restart()
2277 write_pmc(event->hw.idx, val); in record_and_restart()
2278 local64_set(&event->hw.prev_count, val); in record_and_restart()
2279 local64_set(&event->hw.period_left, left); in record_and_restart()
2280 perf_event_update_userpage(event); in record_and_restart()
2288 if (event->attr.exclude_kernel && in record_and_restart()
2289 (event->attr.sample_type & PERF_SAMPLE_IP) && in record_and_restart()
2299 perf_sample_data_init(&data, ~0ULL, event->hw.last_period); in record_and_restart()
2301 if (event->attr.sample_type & PERF_SAMPLE_ADDR_TYPE) in record_and_restart()
2302 perf_get_data_addr(event, regs, &data.addr); in record_and_restart()
2304 if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) { in record_and_restart()
2307 power_pmu_bhrb_read(event, cpuhw); in record_and_restart()
2308 perf_sample_save_brstack(&data, event, &cpuhw->bhrb_stack, NULL); in record_and_restart()
2311 if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC && in record_and_restart()
2317 if (event->attr.sample_type & PERF_SAMPLE_WEIGHT_TYPE && in record_and_restart()
2319 ppmu->get_mem_weight(&data.weight.full, event->attr.sample_type); in record_and_restart()
2322 if (perf_event_overflow(event, &data, regs)) in record_and_restart()
2323 power_pmu_stop(event, 0); in record_and_restart()
2326 if (perf_event_account_interrupt(event)) in record_and_restart()
2327 power_pmu_stop(event, 0); in record_and_restart()
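
record_and_restart() (lines 2230–2327) is the overflow path: fold in the wrap-safe delta, advance period_left by whole periods (clamping if the counter fell more than one period behind), and only then build and emit the sample. The period arithmetic on its own:

    #include <stdint.h>
    #include <stdbool.h>

    /* On counter overflow: fold in the delta, advance period_left, and
     * report whether a sample should be recorded for this interrupt. */
    static bool on_overflow(uint64_t prev, uint64_t val, uint64_t period,
                            int64_t *period_left, uint64_t *count)
    {
        uint64_t delta = (val - prev) & 0xfffffffful;
        int64_t left = *period_left - (int64_t)delta;
        bool record = false;

        *count += delta;
        if (period && left <= 0) {
            record = true;
            left += period;
            if (left <= 0)
                left = period;  /* fell more than one period behind */
        }
        *period_left = left;
        return record;
    }

Lines 2322–2327 show the two stop paths: an overflow that perf_event_overflow() rejects, or an interrupt that trips the accounting/throttle check, stops the event.
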
2393 struct perf_event *event; in __perf_event_interrupt() local
2421 event = cpuhw->event[j]; in __perf_event_interrupt()
2422 if (event->hw.idx == (i + 1)) { in __perf_event_interrupt()
2424 record_and_restart(event, cpuhw->pmcs[i], regs); in __perf_event_interrupt()
2443 event = cpuhw->event[i]; in __perf_event_interrupt()
2444 if (!event->hw.idx || is_limited_pmc(event->hw.idx)) in __perf_event_interrupt()
2446 if (pmc_overflow_power7(cpuhw->pmcs[event->hw.idx - 1])) { in __perf_event_interrupt()
2449 record_and_restart(event, in __perf_event_interrupt()
2450 cpuhw->pmcs[event->hw.idx - 1], in __perf_event_interrupt()
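
__perf_event_interrupt() (lines 2393–2450) matches overflowed PMCs to their owning events by index: event->hw.idx is 1-based, hence the i + 1 comparison at line 2422, and a fallback pass (lines 2443–2450) re-examines the remaining non-limited PMCs with the pmc_overflow_power7() heuristic. The matching loop, modeled standalone:

    #include <stdint.h>
    #include <stdbool.h>

    struct mock_event { int idx; };   /* 1-based PMC number, 0 = unassigned */

    /* A PMC has overflowed when bit 31 is set (it went "negative"). */
    static bool pmc_overflown(uint32_t val)
    {
        return (int32_t)val < 0;
    }

    /* Walk the saved PMC values and hand each overflow to its owner. */
    static void handle_interrupt(struct mock_event **events, int n_events,
                                 const uint32_t *pmcs, int n_pmcs,
                                 void (*record)(struct mock_event *, uint32_t))
    {
        for (int i = 0; i < n_pmcs; i++) {
            if (!pmc_overflown(pmcs[i]))
                continue;
            for (int j = 0; j < n_events; j++) {
                if (events[j]->idx == i + 1) {  /* PMC numbers are 1-based */
                    record(events[j], pmcs[i]);
                    break;
                }
            }
        }
    }
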