| /drivers/irqchip/ |
| irq-armada-370-xp.c |
    172  void __iomem *per_cpu;  member
    314  reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);  in mpic_msi_reenable_percpu()
    316  writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK);  in mpic_msi_reenable_percpu()
    319  writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK);  in mpic_msi_reenable_percpu()
    369  writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK);  in mpic_msi_init()
    406  reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);  in mpic_ipi_mask()
    408  writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK);  in mpic_ipi_mask()
    532  writel(i, mpic->per_cpu + MPIC_INT_SET_MASK);  in mpic_smp_cpu_init()
    538  writel(0, mpic->per_cpu + MPIC_IN_DRBEL_MASK);  in mpic_smp_cpu_init()
    541  writel(0, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);  in mpic_smp_cpu_init()
    [all …]
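Note that `per_cpu` in this file is not the percpu macro but a struct member: a `void __iomem *` pointing at the banked per-CPU register window. A minimal sketch of the read-modify-write idiom the mask paths above use; the struct layout and register offset are assumptions for illustration, not the driver's definitions:

```c
#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

#define MPIC_IN_DRBEL_MASK	0x10	/* hypothetical offset */

struct mpic {
	void __iomem *per_cpu;		/* banked per-CPU register window */
};

/* Clear one doorbell bit in the per-CPU mask register. */
static void mpic_doorbell_mask_one(struct mpic *mpic, unsigned int bit)
{
	u32 reg;

	reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
	reg &= ~BIT(bit);
	writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
}
```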
|
| /drivers/cpufreq/ |
| speedstep-centrino.c |
    261  per_cpu(centrino_model, policy->cpu) = model;  in centrino_cpu_init_table()
    296  if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) ||  in extract_clock()
    303  if ((!per_cpu(centrino_model, cpu)) ||  in extract_clock()
    304  (!per_cpu(centrino_model, cpu)->op_points))  in extract_clock()
    309  per_cpu(centrino_model, cpu)->op_points[i].frequency  in extract_clock()
    313  return per_cpu(centrino_model, cpu)->  in extract_clock()
    367  per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i];  in centrino_cpu_init()
    369  if (!per_cpu(centrino_cpu, policy->cpu)) {  in centrino_cpu_init()
    407  if (per_cpu(centrino_model, cpu))  in centrino_cpu_exit()
    408  per_cpu(centrino_model, cpu) = NULL;  in centrino_cpu_exit()
    [all …]
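The centrino driver keeps a model pointer per CPU; `per_cpu(var, cpu)` is an lvalue, so it can be assigned as well as read. A minimal sketch of that bookkeeping, assuming a hypothetical `struct my_model` (the names prefixed `my_` are stand-ins for `centrino_model` and friends):

```c
#include <linux/percpu.h>

struct my_model {
	unsigned int num_op_points;	/* hypothetical field */
};

static DEFINE_PER_CPU(struct my_model *, my_model_ptr);

static void my_model_set(unsigned int cpu, struct my_model *model)
{
	per_cpu(my_model_ptr, cpu) = model;	/* per_cpu() is an lvalue */
}

static void my_model_clear(unsigned int cpu)
{
	if (per_cpu(my_model_ptr, cpu))
		per_cpu(my_model_ptr, cpu) = NULL;
}
```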
|
| vexpress-spc-cpufreq.c |
    77   cpu_freq = per_cpu(cpu_last_req_freq, j);  in find_cluster_maxfreq()
    79   if (cluster == per_cpu(physical_cluster, j) &&  in find_cluster_maxfreq()
    89   u32 cur_cluster = per_cpu(physical_cluster, cpu);  in clk_get_cpu_rate()
    102  return per_cpu(cpu_last_req_freq, cpu);  in ve_spc_cpufreq_get_rate()
    117  prev_rate = per_cpu(cpu_last_req_freq, cpu);  in ve_spc_cpufreq_set_rate()
    118  per_cpu(cpu_last_req_freq, cpu) = rate;  in ve_spc_cpufreq_set_rate()
    119  per_cpu(physical_cluster, cpu) = new_cluster;  in ve_spc_cpufreq_set_rate()
    143  per_cpu(cpu_last_req_freq, cpu) = prev_rate;  in ve_spc_cpufreq_set_rate()
    144  per_cpu(physical_cluster, cpu) = old_cluster;  in ve_spc_cpufreq_set_rate()
    429  per_cpu(physical_cluster, cpu) = cur_cluster;  in ve_spc_cpufreq_init()
    [all …]
|
| loongson3_cpufreq.c |
    245  if (per_cpu(freq_data, cpu))  in configure_freq_table()
    279  per_cpu(freq_data, cpu) = data;  in configure_freq_table()
    293  policy->freq_table = per_cpu(freq_data, cpu)->table;  in loongson3_cpufreq_cpu_init()
    294  policy->suspend_freq = policy->freq_table[per_cpu(freq_data, cpu)->def_freq_level].frequency;  in loongson3_cpufreq_cpu_init()
    299  per_cpu(freq_data, i) = per_cpu(freq_data, cpu);  in loongson3_cpufreq_cpu_init()
    309  loongson3_cpufreq_target(policy, per_cpu(freq_data, cpu)->def_freq_level);  in loongson3_cpufreq_cpu_exit()
|
| sh-cpufreq.c |
    39   return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;  in sh_cpufreq_get()
    47   struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);  in __sh_cpufreq_target()
    91   struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);  in sh_cpufreq_verify()
    110  struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);  in sh_cpufreq_cpu_init()
    141  struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);  in sh_cpufreq_cpu_exit()
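Unlike the pointer-per-CPU pattern above, `sh_cpuclk` is a full struct instance per CPU, so the driver takes its address with `&per_cpu(...)`. A sketch of the same idiom with a hypothetical struct, including the rounded Hz-to-kHz conversion from the line-39 hit:

```c
#include <linux/percpu.h>

struct my_clk {
	unsigned long rate;	/* Hz */
};

static DEFINE_PER_CPU(struct my_clk, my_cpuclk);	/* one instance per CPU */

static unsigned int my_get_khz(unsigned int cpu)
{
	struct my_clk *cpuclk = &per_cpu(my_cpuclk, cpu);

	return (cpuclk->rate + 500) / 1000;	/* Hz -> kHz, rounded */
}
```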
|
| cpufreq_governor.c |
    103  struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);  in gov_update_cpu_data()
    138  struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);  in dbs_update()
    333  struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);  in gov_set_update_util()
    369  struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);  in alloc_policy_dbs_info()
    384  struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);  in free_policy_dbs_info()
    533  struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);  in cpufreq_dbs_governor_start()
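The governor keeps one `cpu_dbs_info` slot per CPU and walks `policy->cpus` with `for_each_cpu()` whenever per-policy state is (re)initialised. A hedged sketch of that iteration, with a simplified stand-in for the real `cpu_dbs_info`:

```c
#include <linux/cpufreq.h>
#include <linux/percpu.h>

struct my_cpu_dbs_info {
	u64 prev_load;		/* simplified cpu_dbs_info stand-in */
};

static DEFINE_PER_CPU(struct my_cpu_dbs_info, my_cpu_dbs);

/* Reset the cached load for every CPU governed by this policy. */
static void my_reset_policy_load(struct cpufreq_policy *policy)
{
	int j;

	for_each_cpu(j, policy->cpus) {
		struct my_cpu_dbs_info *j_cdbs = &per_cpu(my_cpu_dbs, j);

		j_cdbs->prev_load = 0;
	}
}
```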
|
| /drivers/perf/ |
| arm_pmu_acpi.c |
    220  per_cpu(pmu_irqs, cpu) = irq;  in arm_pmu_acpi_parse_irqs()
    230  irq = per_cpu(pmu_irqs, cpu);  in arm_pmu_acpi_parse_irqs()
    241  if (per_cpu(pmu_irqs, irq_cpu) == irq)  in arm_pmu_acpi_parse_irqs()
    242  per_cpu(pmu_irqs, irq_cpu) = 0;  in arm_pmu_acpi_parse_irqs()
    256  pmu = per_cpu(probed_pmus, cpu);  in arm_pmu_acpi_find_pmu()
    279  int other_irq = per_cpu(hw_events->irq, cpu);  in pmu_irq_matches()
    298  int irq = per_cpu(pmu_irqs, cpu);  in arm_pmu_acpi_associate_pmu_cpu()
    300  per_cpu(probed_pmus, cpu) = pmu;  in arm_pmu_acpi_associate_pmu_cpu()
    305  per_cpu(hw_events->irq, cpu) = irq;  in arm_pmu_acpi_associate_pmu_cpu()
    325  if (per_cpu(probed_pmus, cpu))  in arm_pmu_acpi_cpu_starting()
    [all …]
|
| arm_pmu.c |
    588  if (per_cpu(cpu_irq, cpu) == irq)  in armpmu_count_irq_users()
    601  if (per_cpu(cpu_irq, cpu) != irq)  in armpmu_find_irq_ops()
    604  ops = per_cpu(cpu_irq_ops, cpu);  in armpmu_find_irq_ops()
    614  if (per_cpu(cpu_irq, cpu) == 0)  in armpmu_free_irq()
    621  per_cpu(cpu_irq, cpu) = 0;  in armpmu_free_irq()
    622  per_cpu(cpu_irq_ops, cpu) = NULL;  in armpmu_free_irq()
    684  per_cpu(cpu_irq, cpu) = irq;  in armpmu_request_irq()
    685  per_cpu(cpu_irq_ops, cpu) = irq_ops;  in armpmu_request_irq()
    696  return per_cpu(hw_events->irq, cpu);  in armpmu_get_cpu_irq()
    720  per_cpu(cpu_armpmu, cpu) = pmu;  in arm_perf_starting_cpu()
    [all …]
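arm_pmu records which IRQ each CPU uses in a per-CPU slot (`cpu_irq`) so a shared PPI is only torn down once no CPU still references it. A sketch of the reference-count walk suggested by the `armpmu_count_irq_users()` hits above; names prefixed `my_` are stand-ins:

```c
#include <linux/cpumask.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, my_cpu_irq);

/* How many CPUs currently have this IRQ recorded in their slot? */
static int my_count_irq_users(int irq)
{
	int cpu, count = 0;

	for_each_possible_cpu(cpu) {
		if (per_cpu(my_cpu_irq, cpu) == irq)
			count++;
	}

	return count;
}
```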
|
| arm_pmu_platform.c |
    55   per_cpu(hw_events->irq, cpu) = irq;  in pmu_parse_percpu_irq()
    144  if (per_cpu(hw_events->irq, cpu)) {  in pmu_parse_irqs()
    149  per_cpu(hw_events->irq, cpu) = irq;  in pmu_parse_irqs()
    162  int irq = per_cpu(hw_events->irq, cpu);  in armpmu_request_irqs()
    180  int irq = per_cpu(hw_events->irq, cpu);  in armpmu_free_irqs()
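In the `hw_events->irq` hits above, `hw_events` is not a static per-CPU variable but a pointer to a dynamically allocated percpu struct; `per_cpu()` also works on a member of such a struct, expanding to `*per_cpu_ptr(&hw_events->irq, cpu)`. A hedged sketch under that assumption, with a simplified struct:

```c
#include <linux/percpu.h>

struct my_hw_events {
	int irq;		/* simplified pmu_hw_events stand-in */
};

/*
 * hw_events would come from a percpu allocation, e.g. in probe:
 *	hw_events = alloc_percpu(struct my_hw_events);
 */
static int my_get_cpu_irq(struct my_hw_events __percpu *hw_events, int cpu)
{
	return per_cpu(hw_events->irq, cpu);
}
```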
|
| /drivers/xen/events/ |
| events_2l.c |
    52   clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));  in evtchn_2l_remove()
    58   clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, old_cpu)));  in evtchn_2l_bind_to_cpu()
    59   set_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));  in evtchn_2l_bind_to_cpu()
    152  per_cpu(cpu_evtchn_mask, cpu)[idx] &  in active_evtchns()
    268  xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);  in xen_debug_interrupt()
    280  v = per_cpu(xen_vcpu, i);  in xen_debug_interrupt()
    289  v = per_cpu(xen_vcpu, cpu);  in xen_debug_interrupt()
    353  memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) *  in evtchn_2l_resume()
    359  memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) *  in evtchn_2l_percpu_deinit()
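The 2-level ABI keeps one event-channel bitmask per CPU; binding a channel to a CPU clears its bit in the old CPU's mask and sets it in the new one (the driver's `BM()` macro just casts for the bitops). A simplified sketch, with an assumed channel count:

```c
#include <linux/bitops.h>
#include <linux/percpu.h>

#define MY_NR_EVTCHN 4096	/* assumed channel count for the sketch */

static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(MY_NR_EVTCHN)],
		      my_evtchn_mask);

/* Move an event channel's bit from the old CPU's mask to the new one. */
static void my_bind_evtchn_to_cpu(unsigned int evtchn, unsigned int cpu,
				  unsigned int old_cpu)
{
	clear_bit(evtchn, per_cpu(my_evtchn_mask, old_cpu));
	set_bit(evtchn, per_cpu(my_evtchn_mask, cpu));
}
```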
|
| events_fifo.c |
    105  struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);  in init_control_block()
    277  struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);  in consume_one_event()
    324  control_block = per_cpu(cpu_control_block, cpu);  in __evtchn_fifo_handle_events()
    346  void *control_block = per_cpu(cpu_control_block, cpu);  in evtchn_fifo_resume()
    359  per_cpu(cpu_control_block, cpu) = NULL;  in evtchn_fifo_resume()
    388  per_cpu(cpu_control_block, cpu) = control_block;  in evtchn_fifo_alloc_control_block()
    399  if (!per_cpu(cpu_control_block, cpu))  in evtchn_fifo_percpu_init()
|
| /drivers/acpi/ |
| cppc_acpi.c |
    366   struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);  in send_pcc_cmd()
    464   cpc_ptr = per_cpu(cpc_desc_ptr, cpu);  in acpi_cpc_valid()
    480   cpc_ptr = per_cpu(cpc_desc_ptr, cpu);  in cppc_allow_fast_switch()
    509   cpc_ptr = per_cpu(cpc_desc_ptr, cpu);  in acpi_get_psd_map()
    531   match_cpc_ptr = per_cpu(cpc_desc_ptr, i);  in acpi_get_psd_map()
    898   per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;  in acpi_cppc_processor_probe()
    903   per_cpu(cpc_desc_ptr, pr->id) = NULL;  in acpi_cppc_processor_probe()
    951   cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);  in acpi_cppc_processor_exit()
    1124  cpc_desc = per_cpu(cpc_desc_ptr, cpu);  in cpc_write()
    1442  cpc_desc = per_cpu(cpc_desc_ptr, cpu);  in cppc_perf_ctrs_in_pcc()
    [all …]
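CPPC parses each CPU's `_CPC` object into a per-CPU descriptor pointer and treats a NULL slot as "not usable". A sketch of the validity walk implied by the `acpi_cpc_valid()` hits; whether the real check walks possible or present CPUs is not shown here, and the `my_` names are stand-ins:

```c
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/types.h>

struct my_cpc_desc;	/* opaque stand-in for struct cpc_desc */

static DEFINE_PER_CPU(struct my_cpc_desc *, my_cpc_desc_ptr);

/* CPPC is only usable if every CPU had its _CPC object parsed. */
static bool my_cpc_valid(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (!per_cpu(my_cpc_desc_ptr, cpu))
			return false;
	}

	return true;
}
```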
|
| processor_perflib.c |
    156  pr = per_cpu(processors, cpu);  in acpi_processor_get_bios_limit()
    180  struct acpi_processor *pr = per_cpu(processors, cpu);  in acpi_processor_ppc_init()
    215  struct acpi_processor *pr = per_cpu(processors, cpu);  in acpi_processor_ppc_exit()
    615  pr = per_cpu(processors, i);  in acpi_processor_preregister_performance()
    634  pr = per_cpu(processors, i);  in acpi_processor_preregister_performance()
    653  pr = per_cpu(processors, i);  in acpi_processor_preregister_performance()
    679  match_pr = per_cpu(processors, j);  in acpi_processor_preregister_performance()
    707  match_pr = per_cpu(processors, j);  in acpi_processor_preregister_performance()
    724  pr = per_cpu(processors, i);  in acpi_processor_preregister_performance()
    754  pr = per_cpu(processors, cpu);  in acpi_processor_register_performance()
    [all …]
|
| acpi_processor.c |
    42   pr = per_cpu(processors, cpu);  in acpi_get_processor_handle()
    209  if (per_cpu(processor_device_array, pr->id) != NULL &&  in acpi_processor_set_per_cpu()
    210  per_cpu(processor_device_array, pr->id) != device) {  in acpi_processor_set_per_cpu()
    220  per_cpu(processor_device_array, pr->id) = device;  in acpi_processor_set_per_cpu()
    221  per_cpu(processors, pr->id) = pr;  in acpi_processor_set_per_cpu()
    251  per_cpu(processors, pr->id) = NULL;  in acpi_processor_hotadd_init()
    467  per_cpu(processors, pr->id) = NULL;  in acpi_processor_add()
    508  per_cpu(processor_device_array, pr->id) = NULL;  in acpi_processor_post_eject()
    509  per_cpu(processors, pr->id) = NULL;  in acpi_processor_post_eject()
|
| processor_thermal.c |
    45   per_cpu(cpufreq_thermal_reduction_step, phys_package_first_cpu(cpu))
    118  pr = per_cpu(processors, i);  in cpufreq_set_cur_state()
    165  struct acpi_processor *pr = per_cpu(processors, cpu);  in acpi_thermal_cpufreq_init()
    189  struct acpi_processor *pr = per_cpu(processors, cpu);  in acpi_thermal_cpufreq_exit()
|
| processor_idle.c |
    583   struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);  in acpi_idle_play_dead()
    680   struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);  in acpi_idle_enter()
    694   cx = per_cpu(acpi_cstate[index], dev->cpu);  in acpi_idle_enter()
    752   per_cpu(acpi_cstate[count], dev->cpu) = cx;  in acpi_processor_setup_cpuidle_cx()
    1294  dev = per_cpu(acpi_cpuidle_device, pr->id);  in acpi_processor_hotplug()
    1333  _pr = per_cpu(processors, cpu);  in acpi_processor_power_state_has_changed()
    1336  dev = per_cpu(acpi_cpuidle_device, cpu);  in acpi_processor_power_state_has_changed()
    1346  _pr = per_cpu(processors, cpu);  in acpi_processor_power_state_has_changed()
    1351  dev = per_cpu(acpi_cpuidle_device, cpu);  in acpi_processor_power_state_has_changed()
    1397  per_cpu(acpi_cpuidle_device, pr->id) = dev;  in acpi_processor_power_init()
    [all …]
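`acpi_cstate` is a per-CPU *array* of C-state pointers, so the index goes inside the macro: `per_cpu(acpi_cstate[index], cpu)`. A minimal sketch of that declaration-and-access idiom, with an assumed array bound and simplified element type:

```c
#include <linux/percpu.h>
#include <linux/types.h>

#define MY_MAX_CSTATES 8	/* assumed bound for the sketch */

struct my_cx {
	u8 type;		/* simplified acpi_processor_cx stand-in */
};

/* A per-CPU array: the element is selected inside per_cpu() itself. */
static DEFINE_PER_CPU(struct my_cx *[MY_MAX_CSTATES], my_cstate);

static struct my_cx *my_get_cstate(int index, int cpu)
{
	return per_cpu(my_cstate[index], cpu);
}
```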
|
| /drivers/base/ |
| arch_topology.c |
    83   rcu_assign_pointer(per_cpu(sft_data, cpu), data);  in topology_set_scale_freq_source()
    106  rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);  in topology_clear_scale_freq_source()
    154  per_cpu(arch_freq_scale, i) = scale;  in topology_set_freq_scale()
    198  WRITE_ONCE(per_cpu(hw_pressure, cpu), pressure);  in topology_update_hw_pressure()
    246  (per_cpu(capacity_freq_ref, cpu) ?: 1);  in topology_normalize_cpu_scale()
    253  (per_cpu(capacity_freq_ref, cpu) ?: 1);  in topology_normalize_cpu_scale()
    296  per_cpu(capacity_freq_ref, cpu) =  in topology_parse_cpu_capacity()
    341  per_cpu(capacity_freq_ref, cpu) = cppc_perf_to_khz(&perf_caps, raw_capacity[cpu]);  in topology_init_cpu_capacity_cppc()
    355  per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ);  in topology_init_cpu_capacity_cppc()
    400  per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq;  in init_cpu_capacity_callback()
    [all …]
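`sft_data` is an RCU-protected per-CPU pointer: writers publish with `rcu_assign_pointer()` and readers dereference it under `rcu_read_lock()`. A hedged sketch of that pairing with a hypothetical payload struct (the real reader sits on the scheduler tick path):

```c
#include <linux/percpu.h>
#include <linux/rcupdate.h>

struct my_sft_data {
	void (*set_freq_scale)(void);	/* hypothetical callback */
};

static DEFINE_PER_CPU(struct my_sft_data __rcu *, my_sft_data_ptr);

/* Writer: publish (or clear, with NULL) the per-CPU pointer. */
static void my_publish(int cpu, struct my_sft_data *data)
{
	rcu_assign_pointer(per_cpu(my_sft_data_ptr, cpu), data);
}

/* Reader: dereference under the RCU read lock before use. */
static void my_tick_hook(int cpu)
{
	struct my_sft_data *sfd;

	rcu_read_lock();
	sfd = rcu_dereference(per_cpu(my_sft_data_ptr, cpu));
	if (sfd)
		sfd->set_freq_scale();
	rcu_read_unlock();
}
```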
|
| /drivers/powercap/ |
| dtpm_cpu.c |
    155  per_cpu(dtpm_per_cpu, dtpm_cpu->cpu) = NULL;  in pd_release()
    174  dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);  in cpuhp_dtpm_cpu_offline()
    185  dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);  in cpuhp_dtpm_cpu_online()
    201  dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);  in __dtpm_cpu_setup()
    225  per_cpu(dtpm_per_cpu, cpu) = dtpm_cpu;  in __dtpm_cpu_setup()
    251  per_cpu(dtpm_per_cpu, cpu) = NULL;  in __dtpm_cpu_setup()
|
| idle_inject.c |
    153  ii_dev = per_cpu(idle_inject_device, cpu);  in idle_inject_fn()
    348  if (per_cpu(idle_inject_device, cpu)) {  in idle_inject_register_full()
    353  per_cpu(idle_inject_device, cpu) = ii_dev;  in idle_inject_register_full()
    362  per_cpu(idle_inject_device, cpu_rb) = NULL;  in idle_inject_register_full()
    403  per_cpu(idle_inject_device, cpu) = NULL;  in idle_inject_unregister()
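The `idle_inject_register_full()` hits show a claim-or-unwind pattern: take a per-CPU slot for each CPU in the mask, and roll the assignments back if one is already occupied. A simplified sketch of that pattern; names prefixed `my_` are stand-ins:

```c
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/percpu.h>

struct my_ii_dev;	/* opaque stand-in for idle_inject_device */

static DEFINE_PER_CPU(struct my_ii_dev *, my_ii_dev_ptr);

/* Claim a slot on every CPU in the mask, unwinding on conflict. */
static int my_claim_cpus(const struct cpumask *mask, struct my_ii_dev *ii_dev)
{
	unsigned int cpu, cpu_rb;

	for_each_cpu(cpu, mask) {
		if (per_cpu(my_ii_dev_ptr, cpu))
			goto out_rollback;
		per_cpu(my_ii_dev_ptr, cpu) = ii_dev;
	}

	return 0;

out_rollback:
	/* Release only the slots claimed before the conflicting CPU. */
	for_each_cpu(cpu_rb, mask) {
		if (cpu_rb == cpu)
			break;
		per_cpu(my_ii_dev_ptr, cpu_rb) = NULL;
	}

	return -EBUSY;
}
```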
|
| /drivers/xen/ |
| time.c |
    82   res->time[i] += per_cpu(old_runstate_time, cpu)[i];  in xen_get_runstate_snapshot_cpu()
    123  per_cpu(old_runstate_time, cpu)[i] +=  in xen_manage_runstate_time()
    142  return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;  in xen_vcpu_stolen()
    157  area.addr.v = &per_cpu(xen_runstate, cpu);  in xen_setup_runstate_info()
|
| /drivers/cpuidle/ |
| driver.c |
    38   return per_cpu(cpuidle_drivers, cpu);  in __cpuidle_get_cpu_driver()
    58   per_cpu(cpuidle_drivers, cpu) = NULL;  in __cpuidle_unset_driver()
    82   per_cpu(cpuidle_drivers, cpu) = drv;  in __cpuidle_set_driver()
    377  struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);  in cpuidle_driver_state_disabled()
|
| coupled.c |
    334  call_single_data_t *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);  in cpuidle_coupled_poke()
    646  other_dev = per_cpu(cpuidle_devices, cpu);  in cpuidle_coupled_register_device()
    669  csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);  in cpuidle_coupled_register_device()
    742  dev = per_cpu(cpuidle_devices, cpu);  in coupled_cpu_online()
    758  dev = per_cpu(cpuidle_devices, cpu);  in coupled_cpu_up_prepare()
|
| /drivers/platform/x86/intel/ |
| tpmi_power_domains.c |
    123  return per_cpu(tpmi_cpu_info, cpu_no).punit_core_id;  in tpmi_get_punit_core_number()
    132  return per_cpu(tpmi_cpu_info, cpu_no).punit_domain_id;  in tpmi_get_power_domain_id()
    145  info = &per_cpu(tpmi_cpu_info, cpu_no);  in tpmi_get_power_domain_mask()
    189  struct tpmi_cpu_info *info = &per_cpu(tpmi_cpu_info, cpu);  in tpmi_cpu_online()
|
| /drivers/hwtracing/coresight/ |
| coresight-cpu-debug.c |
    396  drvdata = per_cpu(debug_drvdata, cpu);  in debug_notifier_call()
    428  drvdata = per_cpu(debug_drvdata, cpu);  in debug_enable_func()
    447  drvdata = per_cpu(debug_drvdata, cpu);  in debug_enable_func()
    465  drvdata = per_cpu(debug_drvdata, cpu);  in debug_disable_func()
    573  if (per_cpu(debug_drvdata, drvdata->cpu)) {  in __debug_probe()
    587  per_cpu(debug_drvdata, drvdata->cpu) = drvdata;  in __debug_probe()
    622  per_cpu(debug_drvdata, drvdata->cpu) = NULL;  in __debug_probe()
    642  per_cpu(debug_drvdata, drvdata->cpu) = NULL;  in __debug_remove()
|
| /drivers/thermal/intel/ |
| intel_hfi.c |
    182  index = per_cpu(hfi_cpu_info, cpu).index;  in get_hfi_caps()
    263  info = &per_cpu(hfi_cpu_info, cpu);  in intel_hfi_process_event()
    427  info = &per_cpu(hfi_cpu_info, cpu);  in intel_hfi_online()
    511  struct hfi_cpu_info *info = &per_cpu(hfi_cpu_info, cpu);  in intel_hfi_offline()
    598  struct hfi_cpu_info *info = &per_cpu(hfi_cpu_info, 0);  in hfi_syscore_resume()
|