/linux-6.3-rc2/arch/alpha/kernel/

perf_event.c
    422  cpuc->idx_mask |= (1<<cpuc->current_idx[j]);  in maybe_change_configuration()
    424  cpuc->config = cpuc->event[0]->hw.config_base;  in maybe_change_configuration()
    462  if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) {  in alpha_pmu_add()
    464  cpuc->n_added++;  in alpha_pmu_add()
    503  cpuc->event[j - 1] = cpuc->event[j];  in alpha_pmu_del()
    504  cpuc->evtype[j - 1] = cpuc->evtype[j];  in alpha_pmu_del()
    547  if (cpuc->enabled)  in alpha_pmu_stop()
    568  if (cpuc->enabled)  in alpha_pmu_start()
    720  if (cpuc->enabled)  in alpha_pmu_enable()
    723  cpuc->enabled = 1;  in alpha_pmu_enable()
    [all …]
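A note on the recurring name: in every hit in this listing, cpuc points at the per-CPU hardware-event bookkeeping structure (cpu_hw_events on x86, alpha, and sparc; pmu_hw_events on arm and arm64), usually fetched with this_cpu_ptr(). A minimal userspace sketch of that shape, with an array indexed by CPU id standing in for the kernel's per-CPU machinery; the field set is abbreviated and the names are illustrative, not the kernel's actual definitions:

    #include <stdbool.h>

    #define NR_CPUS    8
    #define MAX_EVENTS 6

    struct cpu_hw_events {
        bool          enabled;            /* PMU currently enabled on this CPU */
        int           n_events;           /* events scheduled on counters */
        int           n_added;            /* of those, added since last enable */
        void         *events[MAX_EVENTS]; /* counter index -> owning event */
        unsigned long used_mask;          /* bitmap of claimed counters */
    };

    static struct cpu_hw_events cpu_hw_events[NR_CPUS];

    /* The kernel writes this_cpu_ptr(&cpu_hw_events); userspace has no
     * per-CPU sections, so this stand-in indexes by an explicit cpu id. */
    static struct cpu_hw_events *get_cpu_events(int cpu)
    {
        return &cpu_hw_events[cpu];
    }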
/linux-6.3-rc2/arch/x86/events/amd/

lbr.c
    144  cpuc->lbr_entries[j - 1] = cpuc->lbr_entries[j];  in amd_pmu_lbr_filter()
    145  cpuc->lbr_stack.nr--;  in amd_pmu_lbr_filter()
    167  if (!cpuc->lbr_users)  in amd_pmu_lbr_read()
    209  cpuc->lbr_stack.nr = out;  in amd_pmu_lbr_read()
    215  cpuc->lbr_stack.hw_idx = 0;  in amd_pmu_lbr_read()
    337  cpuc->last_log_id = 0;  in amd_pmu_lbr_reset()
    350  cpuc->lbr_select = 1;  in amd_pmu_lbr_add()
    352  cpuc->br_sel = reg->reg;  in amd_pmu_lbr_add()
    369  cpuc->lbr_select = 0;  in amd_pmu_lbr_del()
    371  cpuc->lbr_users--;  in amd_pmu_lbr_del()
    [all …]
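Lines 144-145 here, like alpha_pmu_del() at 503-504 earlier and x86_pmu_del() at 1647-1648 further down, are all one idiom: delete entry i from a dense array by copying the tail down one slot and shrinking the count. A standalone sketch, assuming a plain int array for brevity:

    #include <assert.h>

    /* Remove arr[i] from a dense array of *n entries by shifting the tail
     * down one slot, as amd_pmu_lbr_filter() does for lbr_entries[]. */
    static void remove_entry(int *arr, int *n, int i)
    {
        for (int j = i + 1; j < *n; j++)
            arr[j - 1] = arr[j];
        (*n)--;
    }

    int main(void)
    {
        int a[] = { 10, 20, 30, 40 };
        int n = 4;

        remove_entry(a, &n, 1);        /* drop the "20" entry */
        assert(n == 3 && a[1] == 30);
        return 0;
    }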
core.c
    460  if (cpuc->is_fake)  in __amd_get_nb_event_constraints()
    547  if (!cpuc->lbr_sel)  in amd_pmu_cpu_prepare()
    556  if (cpuc->amd_nb)  in amd_pmu_cpu_prepare()
    559  kfree(cpuc->lbr_sel);  in amd_pmu_cpu_prepare()
    560  cpuc->lbr_sel = NULL;  in amd_pmu_cpu_prepare()
    586  *onln = cpuc->amd_nb;  in amd_pmu_cpu_starting()
    587  cpuc->amd_nb = nb;  in amd_pmu_cpu_starting()
    862  cpuc->enabled = 0;  in amd_pmu_handle_irq()
    867  if (cpuc->lbr_users)  in amd_pmu_handle_irq()
    895  cpuc->enabled = 0;  in amd_pmu_v2_handle_irq()
    [all …]
brs.c
    205  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in amd_brs_enable() (local)
    209  if (++cpuc->brs_active > 1)  in amd_brs_enable()
    222  if (cpuc->lbr_users)  in amd_brs_enable_all()
    232  if (!cpuc->brs_active)  in amd_brs_disable()
    236  if (--cpuc->brs_active)  in amd_brs_disable()
    258  if (cpuc->lbr_users)  in amd_brs_disable_all()
    284  struct perf_event *event = cpuc->events[0];  in amd_brs_drain()
    285  struct perf_branch_entry *br = cpuc->lbr_entries;  in amd_brs_drain()
    355  cpuc->lbr_stack.nr = nr;  in amd_brs_drain()
    389  if (!cpuc->lbr_users)  in amd_pmu_brs_sched_task()
    [all …]
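amd_brs_enable()/amd_brs_disable() (lines 209, 232, 236) use a reference count so that only the 0->1 and 1->0 transitions program the hardware; nested users just adjust brs_active. A sketch of that control flow, with a hypothetical hw_set_enabled() in place of the real MSR writes and a plain global in place of the per-CPU field:

    static int brs_active;  /* per-CPU in the kernel; a plain global here */

    static void hw_set_enabled(int on)
    {
        (void)on;           /* stands in for the real register writes */
    }

    static void brs_enable(void)
    {
        if (++brs_active > 1)   /* already on: just count the new user */
            return;
        hw_set_enabled(1);      /* first user: 0 -> 1 transition */
    }

    static void brs_disable(void)
    {
        if (!brs_active)        /* nothing to do */
            return;
        if (--brs_active)       /* still has users after the decrement */
            return;
        hw_set_enabled(0);      /* last user gone: 1 -> 0 transition */
    }

    int main(void)
    {
        brs_enable();
        brs_enable();   /* nested user: no second hardware write */
        brs_disable();
        brs_disable();  /* last user: hardware turned off here */
        return 0;
    }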
/linux-6.3-rc2/arch/x86/events/intel/

lbr.c
    136  if (cpuc->lbr_sel)  in __intel_pmu_lbr_enable()
    199  cpuc->last_log_id = 0;  in intel_pmu_lbr_reset()
    384  if (cpuc->lbr_select)  in intel_pmu_lbr_restore()
    470  if (cpuc->lbr_select)  in intel_pmu_lbr_save()
    546  if (!cpuc->lbr_users)  in intel_pmu_lbr_sched_task()
    586  cpuc->lbr_select = 1;  in intel_pmu_lbr_add()
    675  cpuc->lbr_users--;  in intel_pmu_lbr_del()
    753  if (cpuc->lbr_sel) {  in intel_pmu_lbr_read_64()
    935  cpuc->lbr_users == cpuc->lbr_pebs_users)  in intel_pmu_lbr_read()
    1177  cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];  in intel_pmu_lbr_filter()
    [all …]
ds.c
    738  if (!cpuc->ds)  in intel_pmu_disable_bts()
    1117  if (cpuc->n_pebs == cpuc->n_pebs_via_pt)  in pebs_needs_sched_cb()
    1120  return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);  in pebs_needs_sched_cb()
    1147  if (cpuc->n_pebs == cpuc->n_large_pebs) {  in pebs_update_threshold()
    1281  cpuc->n_pebs++;  in intel_pmu_pebs_add()
    1347  if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {  in intel_pmu_pebs_enable()
    1349  cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;  in intel_pmu_pebs_enable()
    1380  cpuc->n_pebs--;  in intel_pmu_pebs_del()
    1394  if (cpuc->n_pebs == cpuc->n_large_pebs &&  in intel_pmu_pebs_disable()
    1395  cpuc->n_pebs != cpuc->n_pebs_via_pt)  in intel_pmu_pebs_disable()
    [all …]
core.c
    2213  if (cpuc->fixed_ctrl_val != cpuc->active_fixed_ctrl_val) {  in __intel_pmu_enable_all()
    2215  cpuc->active_fixed_ctrl_val = cpuc->fixed_ctrl_val;  in __intel_pmu_enable_all()
    2913  u64 guest_pebs_idxs = cpuc->pebs_enabled & ~cpuc->intel_ctrl_host_mask;  in x86_pmu_handle_guest_pebs()
    3079  cpuc->enabled = 0;  in intel_pmu_handle_irq()
    3532  if (!cpuc->n_excl++)  in intel_get_excl_constraints()
    3619  if (cpuc->is_fake)  in intel_put_excl_constraints()
    4085  .host = cpuc->pebs_enabled & ~cpuc->intel_ctrl_guest_mask,  in intel_guest_get_msrs()
    4625  cpuc->pmu = NULL;  in init_hybrid_pmu()
    4710  cpuc->kfree_on_online[0] = cpuc->shared_regs;  in intel_pmu_cpu_starting()
    4720  cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];  in intel_pmu_cpu_starting()
    [all …]
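Lines 2213-2215 here, and intel_pmu_pebs_enable() at 1347-1349 in ds.c above, share one optimization: remember the value last written to a control register and skip the write when the desired value has not changed. A sketch, with a hypothetical hw_write() standing in for the actual MSR write:

    #include <stdint.h>

    static uint64_t fixed_ctrl_val;        /* value we want in the register */
    static uint64_t active_fixed_ctrl_val; /* value last written */

    static void hw_write(uint64_t val)
    {
        (void)val;  /* stands in for the expensive register write */
    }

    /* Only touch the hardware when the desired value actually changed. */
    static void sync_fixed_ctrl(void)
    {
        if (fixed_ctrl_val != active_fixed_ctrl_val) {
            hw_write(fixed_ctrl_val);
            active_fixed_ctrl_val = fixed_ctrl_val;
        }
    }

    int main(void)
    {
        fixed_ctrl_val = 0x3;
        sync_fixed_ctrl();  /* writes */
        sync_fixed_ctrl();  /* skipped: value unchanged */
        return 0;
    }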
bts.c
    262  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in bts_event_start() (local)
    273  bts->ds_back.bts_buffer_base = cpuc->ds->bts_buffer_base;  in bts_event_start()
    274  bts->ds_back.bts_absolute_maximum = cpuc->ds->bts_absolute_maximum;  in bts_event_start()
    275  bts->ds_back.bts_interrupt_threshold = cpuc->ds->bts_interrupt_threshold;  in bts_event_start()
    307  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in bts_event_stop() (local)
    332  cpuc->ds->bts_index = bts->ds_back.bts_buffer_base;  in bts_event_stop()
    333  cpuc->ds->bts_buffer_base = bts->ds_back.bts_buffer_base;  in bts_event_stop()
    334  cpuc->ds->bts_absolute_maximum = bts->ds_back.bts_absolute_maximum;  in bts_event_stop()
    335  cpuc->ds->bts_interrupt_threshold = bts->ds_back.bts_interrupt_threshold;  in bts_event_stop()
    522  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in bts_event_add() (local)
    [all …]
knc.c
    216  struct cpu_hw_events *cpuc;  in knc_pmu_handle_irq() (local)
    221  cpuc = this_cpu_ptr(&cpu_hw_events);  in knc_pmu_handle_irq()
    243  struct perf_event *event = cpuc->events[bit];  in knc_pmu_handle_irq()
    247  if (!test_bit(bit, cpuc->active_mask))  in knc_pmu_handle_irq()
    268  if (cpuc->enabled)  in knc_pmu_handle_irq()
p4.c
    919  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in p4_pmu_disable_all() (local)
    923  struct perf_event *event = cpuc->events[idx];  in p4_pmu_disable_all()
    924  if (!test_bit(idx, cpuc->active_mask))  in p4_pmu_disable_all()
    998  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in p4_pmu_enable_all() (local)
    1002  struct perf_event *event = cpuc->events[idx];  in p4_pmu_enable_all()
    1003  if (!test_bit(idx, cpuc->active_mask))  in p4_pmu_enable_all()
    1035  struct cpu_hw_events *cpuc;  in p4_pmu_handle_irq() (local)
    1041  cpuc = this_cpu_ptr(&cpu_hw_events);  in p4_pmu_handle_irq()
    1046  if (!test_bit(idx, cpuc->active_mask)) {  in p4_pmu_handle_irq()
    1053  event = cpuc->events[idx];  in p4_pmu_handle_irq()
    [all …]
/linux-6.3-rc2/arch/x86/events/

core.c
    777  return cpuc->pmu;  in x86_get_pmu()
    1149  cpuc->n_pair++;  in collect_event()
    1299  int n_running = cpuc->n_events - cpuc->n_added;  in x86_pmu_enable()
    1640  if (i >= cpuc->n_events - cpuc->n_added)  in x86_pmu_del()
    1647  cpuc->event_list[i-1] = cpuc->event_list[i];  in x86_pmu_del()
    1648  cpuc->event_constraint[i-1] = cpuc->event_constraint[i];  in x86_pmu_del()
    2315  kfree(cpuc);  in free_fake_cpuc()
    2323  cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);  in allocate_fake_cpuc()
    2324  if (!cpuc)  in allocate_fake_cpuc()
    2342  return cpuc;  in allocate_fake_cpuc()
    [all …]
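The allocate_fake_cpuc()/free_fake_cpuc() hits at 2315-2342 support event validation: scheduling constraints are exercised against a throwaway cpu_hw_events marked is_fake (tested at amd/core.c:460 and intel/core.c:3619 above) so constraint callbacks can skip real side effects. A rough userspace shape of the same idea, with calloc/free in place of kzalloc/kfree and the struct abbreviated:

    #include <stdlib.h>
    #include <stdbool.h>

    struct cpu_hw_events {
        bool is_fake;   /* tells constraint code this is validation only */
        /* ... the real struct carries much more state ... */
    };

    static struct cpu_hw_events *allocate_fake_cpuc(void)
    {
        struct cpu_hw_events *cpuc = calloc(1, sizeof(*cpuc));

        if (!cpuc)
            return NULL;
        cpuc->is_fake = true;
        return cpuc;
    }

    static void free_fake_cpuc(struct cpu_hw_events *cpuc)
    {
        free(cpuc);
    }

    int main(void)
    {
        struct cpu_hw_events *cpuc = allocate_fake_cpuc();

        if (!cpuc)
            return 1;
        /* ... run constraint checks against cpuc here ... */
        free_fake_cpuc(cpuc);
        return 0;
    }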
perf_event.h
    776  (*get_event_constraints)(struct cpu_hw_events *cpuc,
    780  void (*put_event_constraints)(struct cpu_hw_events *cpuc,
    783  void (*start_scheduling)(struct cpu_hw_events *cpuc);
    787  void (*stop_scheduling)(struct cpu_hw_events *cpuc);
    886  void (*lbr_read)(struct cpu_hw_events *cpuc);
    1336  cpuc->lbr_users++;  in amd_pmu_brs_add()
    1347  cpuc->lbr_users--;  in amd_pmu_brs_del()
    1348  WARN_ON_ONCE(cpuc->lbr_users < 0);  in amd_pmu_brs_del()
    1467  extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
    1571  void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc);
    [all …]
/linux-6.3-rc2/arch/sparc/kernel/

perf_event.c
    977  cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;  in calculate_single_pcr()
    1019  if (cpuc->n_added)  in update_pcrs_for_enable()
    1034  if (cpuc->enabled)  in sparc_pmu_enable()
    1037  cpuc->enabled = 1;  in sparc_pmu_enable()
    1055  cpuc->enabled = 0;  in sparc_pmu_disable()
    1133  cpuc->event[i - 1] = cpuc->event[i];  in sparc_pmu_del()
    1134  cpuc->events[i - 1] = cpuc->events[i];  in sparc_pmu_del()
    1402  if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))  in sparc_pmu_add()
    1406  cpuc->n_events++;  in sparc_pmu_add()
    1407  cpuc->n_added++;  in sparc_pmu_add()
    [all …]
/linux-6.3-rc2/arch/loongarch/kernel/

perf_event.c
    258  if (!test_and_set_bit(i, cpuc->used_mask))  in loongarch_pmu_alloc_counter()
    269  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in loongarch_pmu_enable_event() (local)
    293  cpuc->saved_ctrl[idx] = loongarch_pmu_read_control(idx) &  in loongarch_pmu_disable_event()
    295  loongarch_pmu_write_control(idx, cpuc->saved_ctrl[idx]);  in loongarch_pmu_disable_event()
    394  idx = loongarch_pmu_alloc_counter(cpuc, hwc);  in loongarch_pmu_add()
    406  cpuc->events[idx] = event;  in loongarch_pmu_add()
    429  cpuc->events[idx] = NULL;  in loongarch_pmu_del()
    430  clear_bit(idx, cpuc->used_mask);  in loongarch_pmu_del()
    484  struct perf_event *event = cpuc->events[idx];  in handle_associated_event()
    519  if (test_bit(n, cpuc->used_mask)) {  in pmu_handle_irq()
    [all …]
/linux-6.3-rc2/arch/sh/kernel/

perf_event.c
    201  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in sh_pmu_stop() (local)
    207  cpuc->events[idx] = NULL;  in sh_pmu_stop()
    219  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in sh_pmu_start() (local)
    229  cpuc->events[idx] = event;  in sh_pmu_start()
    236  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in sh_pmu_del() (local)
    239  __clear_bit(event->hw.idx, cpuc->used_mask);  in sh_pmu_del()
    246  struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);  in sh_pmu_add() (local)
    253  if (__test_and_set_bit(idx, cpuc->used_mask)) {  in sh_pmu_add()
    254  idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);  in sh_pmu_add()
    258  __set_bit(idx, cpuc->used_mask);  in sh_pmu_add()
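sh_pmu_add() (lines 253-258) is the smallest version of a counter-allocation pattern that the loongarch hits above and the mips, arm, and riscv hits below repeat: try to re-claim the event's previous counter with a test-and-set on used_mask, and fall back to the first free bit. A self-contained sketch using a plain unsigned long as the bitmap; sizes are illustrative:

    #include <assert.h>

    #define NUM_COUNTERS 4

    /* Claim hint if free, else the first free counter; -1 if all are busy
     * (the kernel drivers return -EAGAIN there). */
    static int alloc_counter(unsigned long *used_mask, int hint)
    {
        if (!(*used_mask & (1UL << hint))) {
            *used_mask |= 1UL << hint;
            return hint;
        }
        for (int idx = 0; idx < NUM_COUNTERS; idx++) {
            if (!(*used_mask & (1UL << idx))) {
                *used_mask |= 1UL << idx;
                return idx;
            }
        }
        return -1;
    }

    int main(void)
    {
        unsigned long used_mask = 0;

        assert(alloc_counter(&used_mask, 2) == 2);  /* hint was free */
        assert(alloc_counter(&used_mask, 2) == 0);  /* fell back to bit 0 */
        return 0;
    }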
/linux-6.3-rc2/drivers/perf/

riscv_pmu.c
    200  struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);  in riscv_pmu_add() (local)
    209  cpuc->events[idx] = event;  in riscv_pmu_add()
    210  cpuc->n_events++;  in riscv_pmu_add()
    224  struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);  in riscv_pmu_del() (local)
    228  cpuc->events[hwc->idx] = NULL;  in riscv_pmu_del()
    232  cpuc->n_events--;  in riscv_pmu_del()
    290  struct cpu_hw_events *cpuc;  in riscv_pmu_alloc() (local)
    303  cpuc = per_cpu_ptr(pmu->hw_events, cpuid);  in riscv_pmu_alloc()
    304  cpuc->n_events = 0;  in riscv_pmu_alloc()
    306  cpuc->events[i] = NULL;  in riscv_pmu_alloc()
apple_m1_cpu_pmu.c
    384  struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);  in m1_pmu_handle_irq() (local)
    404  struct perf_event *event = cpuc->events[idx];  in m1_pmu_handle_irq()
    435  static int m1_pmu_get_event_idx(struct pmu_hw_events *cpuc,  in m1_pmu_get_event_idx() (argument)
    451  if (!test_and_set_bit(idx, cpuc->used_mask))  in m1_pmu_get_event_idx()
    458  static void m1_pmu_clear_event_idx(struct pmu_hw_events *cpuc,  in m1_pmu_clear_event_idx() (argument)
    461  clear_bit(event->hw.idx, cpuc->used_mask);  in m1_pmu_clear_event_idx()
riscv_pmu_sbi.c
    329  struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);  in pmu_sbi_ctr_get_idx() (local)
    357  if (!test_and_set_bit(idx, cpuc->used_fw_ctrs))  in pmu_sbi_ctr_get_idx()
    360  if (!test_and_set_bit(idx, cpuc->used_hw_ctrs))  in pmu_sbi_ctr_get_idx()
    372  struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);  in pmu_sbi_ctr_clear_idx() (local)
    376  clear_bit(idx, cpuc->used_fw_ctrs);  in pmu_sbi_ctr_clear_idx()
    378  clear_bit(idx, cpuc->used_hw_ctrs);  in pmu_sbi_ctr_clear_idx()
    798  struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events);  in riscv_pm_pmu_notify() (local)
    799  int enabled = bitmap_weight(cpuc->used_hw_ctrs, RISCV_MAX_COUNTERS);  in riscv_pm_pmu_notify()
    807  event = cpuc->events[idx];  in riscv_pm_pmu_notify()
/linux-6.3-rc2/arch/arm/kernel/

perf_event_xscale.c
    149  struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);  in xscale1pmu_handle_irq() (local)
    174  struct perf_event *event = cpuc->events[idx];  in xscale1pmu_handle_irq()
    275  xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,  in xscale1pmu_get_event_idx() (argument)
    280  if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))  in xscale1pmu_get_event_idx()
    285  if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask))  in xscale1pmu_get_event_idx()
    288  if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask))  in xscale1pmu_get_event_idx()
    298  clear_bit(event->hw.idx, cpuc->used_mask);  in xscalepmu_clear_event_idx()
    520  struct perf_event *event = cpuc->events[idx];  in xscale2pmu_handle_irq()
    649  xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,  in xscale2pmu_get_event_idx() (argument)
    652  int idx = xscale1pmu_get_event_idx(cpuc, event);  in xscale2pmu_get_event_idx()
    [all …]
perf_event_v6.c
    310  struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);  in armv6pmu_handle_irq() (local)
    327  struct perf_event *event = cpuc->events[idx];  in armv6pmu_handle_irq()
    388  armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,  in armv6pmu_get_event_idx() (argument)
    394  if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))  in armv6pmu_get_event_idx()
    403  if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask))  in armv6pmu_get_event_idx()
    406  if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask))  in armv6pmu_get_event_idx()
    414  static void armv6pmu_clear_event_idx(struct pmu_hw_events *cpuc,  in armv6pmu_clear_event_idx() (argument)
    417  clear_bit(event->hw.idx, cpuc->used_mask);  in armv6pmu_clear_event_idx()
perf_event_v7.c
    1064  clear_bit(event->hw.idx, cpuc->used_mask);  in armv7pmu_clear_event_idx()
    1626  if (test_and_set_bit(bit, cpuc->used_mask))  in krait_pmu_get_event_idx()
    1630  idx = armv7pmu_get_event_idx(cpuc, event);  in krait_pmu_get_event_idx()
    1632  clear_bit(bit, cpuc->used_mask);  in krait_pmu_get_event_idx()
    1647  armv7pmu_clear_event_idx(cpuc, event);  in krait_pmu_clear_event_idx()
    1650  clear_bit(bit, cpuc->used_mask);  in krait_pmu_clear_event_idx()
    1956  if (test_and_set_bit(bit, cpuc->used_mask))  in scorpion_pmu_get_event_idx()
    1960  idx = armv7pmu_get_event_idx(cpuc, event);  in scorpion_pmu_get_event_idx()
    1962  clear_bit(bit, cpuc->used_mask);  in scorpion_pmu_get_event_idx()
    1977  armv7pmu_clear_event_idx(cpuc, event);  in scorpion_pmu_clear_event_idx()
    [all …]
/linux-6.3-rc2/arch/mips/kernel/

perf_event_mipsxx.c
    341  !test_and_set_bit(i, cpuc->used_mask))  in mipsxx_pmu_alloc_counter()
    369  cpuc->saved_ctrl[idx] |=  in mipsxx_pmu_enable_event()
    374  cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;  in mipsxx_pmu_enable_event()
    387  cpuc->saved_ctrl[idx] |= ctrl;  in mipsxx_pmu_enable_event()
    509  idx = mipsxx_pmu_alloc_counter(cpuc, hwc);  in mipspmu_add()
    521  cpuc->events[idx] = event;  in mipspmu_add()
    544  cpuc->events[idx] = NULL;  in mipspmu_del()
    545  clear_bit(idx, cpuc->used_mask);  in mipspmu_del()
    786  struct perf_event *event = cpuc->events[idx];  in handle_associated_event()
    1601  if (!test_bit(n, cpuc->used_mask))  in mipsxx_pmu_handle_shared_irq()
    [all …]
/linux-6.3-rc2/arch/arm64/kernel/

perf_event.c
    861  struct perf_event *event = cpuc->events[idx];  in armv8pmu_handle_irq()
    900  if (!test_and_set_bit(idx, cpuc->used_mask))  in armv8pmu_get_single_idx()
    906  static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,  in armv8pmu_get_chain_idx() (argument)
    916  if (!test_and_set_bit(idx, cpuc->used_mask)) {  in armv8pmu_get_chain_idx()
    918  if (!test_and_set_bit(idx - 1, cpuc->used_mask))  in armv8pmu_get_chain_idx()
    921  clear_bit(idx, cpuc->used_mask);  in armv8pmu_get_chain_idx()
    927  static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,  in armv8pmu_get_event_idx() (argument)
    948  return armv8pmu_get_chain_idx(cpuc, cpu_pmu);  in armv8pmu_get_event_idx()
    950  return armv8pmu_get_single_idx(cpuc, cpu_pmu);  in armv8pmu_get_event_idx()
    958  clear_bit(idx, cpuc->used_mask);  in armv8pmu_clear_event_idx()
    [all …]
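armv8pmu_get_chain_idx() (lines 906-921) claims an adjacent counter pair for a 64-bit chained event: take idx, then idx - 1, and release idx again if the partner is busy. A sketch of that claim-and-roll-back over a plain bitmap; the counter layout and sizes here are illustrative:

    #include <assert.h>

    #define NUM_COUNTERS 6

    static int try_claim(unsigned long *mask, int idx)
    {
        if (*mask & (1UL << idx))
            return 0;
        *mask |= 1UL << idx;
        return 1;
    }

    /* Chained events need counters (idx - 1, idx); scan odd indices and
     * release idx again if its even partner is already in use. */
    static int get_chain_idx(unsigned long *used_mask)
    {
        for (int idx = 1; idx < NUM_COUNTERS; idx += 2) {
            if (try_claim(used_mask, idx)) {
                if (try_claim(used_mask, idx - 1))
                    return idx;
                *used_mask &= ~(1UL << idx);  /* partner busy: roll back */
            }
        }
        return -1;  /* the kernel returns -EAGAIN here */
    }

    int main(void)
    {
        unsigned long used_mask = 1UL << 0;      /* counter 0 already busy */

        assert(get_chain_idx(&used_mask) == 3);  /* pair (2,3) is free */
        return 0;
    }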
/linux-6.3-rc2/arch/x86/events/zhaoxin/

core.c
    357  struct cpu_hw_events *cpuc;  in zhaoxin_pmu_handle_irq() (local)
    362  cpuc = this_cpu_ptr(&cpu_hw_events);  in zhaoxin_pmu_handle_irq()
    387  struct perf_event *event = cpuc->events[bit];  in zhaoxin_pmu_handle_irq()
    391  if (!test_bit(bit, cpuc->active_mask))  in zhaoxin_pmu_handle_irq()
    422  zhaoxin_get_event_constraints(struct cpu_hw_events *cpuc, int idx,  in zhaoxin_get_event_constraints() (argument)
/linux-6.3-rc2/kernel/rcu/

srcutree.c
    418  struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);  in srcu_readers_lock_idx() (local)
    420  sum += atomic_long_read(&cpuc->srcu_lock_count[idx]);  in srcu_readers_lock_idx()
    436  struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);  in srcu_readers_unlock_idx() (local)
    438  sum += atomic_long_read(&cpuc->srcu_unlock_count[idx]);  in srcu_readers_unlock_idx()
    440  mask = mask | READ_ONCE(cpuc->srcu_nmi_safety);  in srcu_readers_unlock_idx()
    544  struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);  in srcu_readers_active() (local)
    546  sum += atomic_long_read(&cpuc->srcu_lock_count[0]);  in srcu_readers_active()
    547  sum += atomic_long_read(&cpuc->srcu_lock_count[1]);  in srcu_readers_active()
    548  sum -= atomic_long_read(&cpuc->srcu_unlock_count[0]);  in srcu_readers_active()
    549  sum -= atomic_long_read(&cpuc->srcu_unlock_count[1]);  in srcu_readers_active()
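The srcutree.c hits show the grace-period bookkeeping behind SRCU: per-CPU lock and unlock counts are summed across CPUs, and srcu_readers_active() (lines 544-549) reports readers present while the totals differ. A userspace sketch with plain longs in place of the per-CPU atomics the kernel reads:

    #include <stdbool.h>

    #define NR_CPUS 8

    struct srcu_data {
        long srcu_lock_count[2];    /* readers that entered, per index flavor */
        long srcu_unlock_count[2];  /* readers that exited, per index flavor */
    };

    static struct srcu_data sda[NR_CPUS];  /* kernel: per_cpu_ptr(ssp->sda, cpu) */

    /* Readers are active iff total locks differ from total unlocks across
     * all CPUs, summed over both index flavors. */
    static bool srcu_readers_active(void)
    {
        long sum = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            sum += sda[cpu].srcu_lock_count[0];
            sum += sda[cpu].srcu_lock_count[1];
            sum -= sda[cpu].srcu_unlock_count[0];
            sum -= sda[cpu].srcu_unlock_count[1];
        }
        return sum != 0;
    }

    int main(void)
    {
        sda[0].srcu_lock_count[0] = 2;         /* two readers entered on CPU 0 */
        sda[3].srcu_unlock_count[0] = 1;       /* one exited on CPU 3 */
        return srcu_readers_active() ? 0 : 1;  /* one reader remains: active */
    }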