Searched refs:per_cpu (Results 1 – 25 of 208) sorted by relevance
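
Every match below goes through the kernel's per_cpu() accessor, which names one CPU's instance of a variable declared with DEFINE_PER_CPU(). As a rough orientation only (a minimal sketch, not taken from any file in this listing; the variable demo_counter is hypothetical):

#include <linux/percpu.h>
#include <linux/cpumask.h>

/* One instance of this counter exists for each possible CPU. */
static DEFINE_PER_CPU(unsigned long, demo_counter);

static unsigned long demo_sum_all_cpus(void)
{
	unsigned long sum = 0;
	int cpu;

	/* per_cpu(var, cpu) selects the copy belonging to a specific CPU;
	 * this is the pattern that appears in every result below. */
	for_each_possible_cpu(cpu)
		sum += per_cpu(demo_counter, cpu);

	return sum;
}

This mirrors how several of the matches iterate over CPUs to read another CPU's copy, for example the smp_cpus_done() hit that sums loops_per_jiffy across CPUs.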

/arch/x86/xen/
smp.c
34 kfree(per_cpu(xen_resched_irq, cpu).name); in xen_smp_intr_free()
35 per_cpu(xen_resched_irq, cpu).name = NULL; in xen_smp_intr_free()
38 per_cpu(xen_resched_irq, cpu).irq = -1; in xen_smp_intr_free()
40 kfree(per_cpu(xen_callfunc_irq, cpu).name); in xen_smp_intr_free()
44 per_cpu(xen_callfunc_irq, cpu).irq = -1; in xen_smp_intr_free()
46 kfree(per_cpu(xen_debug_irq, cpu).name); in xen_smp_intr_free()
47 per_cpu(xen_debug_irq, cpu).name = NULL; in xen_smp_intr_free()
50 per_cpu(xen_debug_irq, cpu).irq = -1; in xen_smp_intr_free()
78 per_cpu(xen_resched_irq, cpu).irq = rc; in xen_smp_intr_init()
92 per_cpu(xen_callfunc_irq, cpu).irq = rc; in xen_smp_intr_init()
[all …]
spinlock.c
24 int irq = per_cpu(lock_kicker_irq, cpu); in xen_qlock_kick()
73 WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n", in xen_init_lock_cpu()
74 cpu, per_cpu(lock_kicker_irq, cpu)); in xen_init_lock_cpu()
77 per_cpu(irq_name, cpu) = name; in xen_init_lock_cpu()
87 per_cpu(lock_kicker_irq, cpu) = irq; in xen_init_lock_cpu()
100 kfree(per_cpu(irq_name, cpu)); in xen_uninit_lock_cpu()
101 per_cpu(irq_name, cpu) = NULL; in xen_uninit_lock_cpu()
106 irq = per_cpu(lock_kicker_irq, cpu); in xen_uninit_lock_cpu()
111 per_cpu(lock_kicker_irq, cpu) = -1; in xen_uninit_lock_cpu()
smp_pv.c
98 kfree(per_cpu(xen_irq_work, cpu).name); in xen_smp_intr_free_pv()
99 per_cpu(xen_irq_work, cpu).name = NULL; in xen_smp_intr_free_pv()
100 if (per_cpu(xen_irq_work, cpu).irq >= 0) { in xen_smp_intr_free_pv()
102 per_cpu(xen_irq_work, cpu).irq = -1; in xen_smp_intr_free_pv()
105 kfree(per_cpu(xen_pmu_irq, cpu).name); in xen_smp_intr_free_pv()
106 per_cpu(xen_pmu_irq, cpu).name = NULL; in xen_smp_intr_free_pv()
107 if (per_cpu(xen_pmu_irq, cpu).irq >= 0) { in xen_smp_intr_free_pv()
109 per_cpu(xen_pmu_irq, cpu).irq = -1; in xen_smp_intr_free_pv()
128 per_cpu(xen_irq_work, cpu).irq = rc; in xen_smp_intr_init_pv()
132 per_cpu(xen_pmu_irq, cpu).name = pmu_name; in xen_smp_intr_init_pv()
[all …]
/arch/powerpc/kernel/
irq.c
121 seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).pmu_irqs, 10); in arch_show_interrupts()
140 seq_put_decimal_ull_width(p, " ", per_cpu(irq_stat, j).sreset_irqs, 10); in arch_show_interrupts()
167 u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event; in arch_irq_stat_cpu()
169 sum += per_cpu(irq_stat, cpu).broadcast_irqs_event; in arch_irq_stat_cpu()
170 sum += per_cpu(irq_stat, cpu).pmu_irqs; in arch_irq_stat_cpu()
171 sum += per_cpu(irq_stat, cpu).mce_exceptions; in arch_irq_stat_cpu()
172 sum += per_cpu(irq_stat, cpu).spurious_irqs; in arch_irq_stat_cpu()
173 sum += per_cpu(irq_stat, cpu).timer_irqs_others; in arch_irq_stat_cpu()
177 sum += per_cpu(irq_stat, cpu).sreset_irqs; in arch_irq_stat_cpu()
179 sum += per_cpu(irq_stat, cpu).soft_nmi_irqs; in arch_irq_stat_cpu()
[all …]
watchdog.c
153 cpu, tb, per_cpu(wd_timer_tb, cpu), in wd_lockup_ipi()
154 tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000); in wd_lockup_ipi()
350 per_cpu(wd_timer_tb, cpu) = tb; in watchdog_timer_interrupt()
384 if (tb - per_cpu(wd_timer_tb, cpu) >= wd_panic_timeout_tb) { in DEFINE_INTERRUPT_HANDLER_NMI()
410 cpu, tb, per_cpu(wd_timer_tb, cpu), in DEFINE_INTERRUPT_HANDLER_NMI()
411 tb_to_ns(tb - per_cpu(wd_timer_tb, cpu)) / 1000000); in DEFINE_INTERRUPT_HANDLER_NMI()
464 if (tb - per_cpu(wd_timer_tb, cpu) >= ticks) { in arch_touch_nmi_watchdog()
465 per_cpu(wd_timer_tb, cpu) = tb; in arch_touch_nmi_watchdog()
/arch/arm/mm/
context.c
67 asid = per_cpu(active_asids, cpu).counter; in a15_erratum_get_cpumask()
69 asid = per_cpu(reserved_asids, cpu); in a15_erratum_get_cpumask()
144 asid = atomic64_xchg(&per_cpu(active_asids, i), 0); in flush_context()
153 asid = per_cpu(reserved_asids, i); in flush_context()
155 per_cpu(reserved_asids, i) = asid; in flush_context()
180 if (per_cpu(reserved_asids, cpu) == asid) { in check_update_reserved_asid()
182 per_cpu(reserved_asids, cpu) = newasid; in check_update_reserved_asid()
254 && atomic64_xchg(&per_cpu(active_asids, cpu), asid)) in check_and_switch_context()
270 atomic64_set(&per_cpu(active_asids, cpu), asid); in check_and_switch_context()
proc-v7-bugs.c
71 if (per_cpu(harden_branch_predictor_fn, cpu)) in spectre_v2_install_workaround()
76 per_cpu(harden_branch_predictor_fn, cpu) = in spectre_v2_install_workaround()
82 per_cpu(harden_branch_predictor_fn, cpu) = in spectre_v2_install_workaround()
88 per_cpu(harden_branch_predictor_fn, cpu) = in spectre_v2_install_workaround()
95 per_cpu(harden_branch_predictor_fn, cpu) = in spectre_v2_install_workaround()
/arch/mips/kernel/
mips-cpc.c
77 spin_lock_init(&per_cpu(cpc_core_lock, cpu)); in mips_cpc_probe()
100 spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core), in mips_cpc_lock_other()
101 per_cpu(cpc_core_lock_flags, curr_core)); in mips_cpc_lock_other()
120 spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core), in mips_cpc_unlock_other()
121 per_cpu(cpc_core_lock_flags, curr_core)); in mips_cpc_unlock_other()
time.c
57 per_cpu(pcp_lpj_ref, cpu) = in cpufreq_callback()
59 per_cpu(pcp_lpj_ref_freq, cpu) = freq->old; in cpufreq_callback()
74 lpj = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu), in cpufreq_callback()
75 per_cpu(pcp_lpj_ref_freq, cpu), in cpufreq_callback()
/arch/x86/kernel/apic/
x2apic_cluster.c
58 struct cpumask *cmsk = per_cpu(cluster_masks, cpu); in __x2apic_send_IPI_mask()
110 struct cpumask **cpu_cmsk = &per_cpu(cluster_masks, cpu_i); in prefill_clustermask()
135 if (per_cpu(cluster_masks, cpu)) in alloc_clustermask()
150 cmsk = per_cpu(cluster_masks, cpu_i); in alloc_clustermask()
156 per_cpu(cluster_masks, cpu) = cmsk; in alloc_clustermask()
170 per_cpu(cluster_masks, cpu) = cmsk; in alloc_clustermask()
188 if (!zalloc_cpumask_var_node(&per_cpu(ipi_mask, cpu), GFP_KERNEL, node)) in x2apic_prepare_cpu()
196 struct cpumask *cmsk = per_cpu(cluster_masks, dead_cpu); in x2apic_dead_cpu()
200 free_cpumask_var(per_cpu(ipi_mask, dead_cpu)); in x2apic_dead_cpu()
/arch/x86/kvm/vmx/
posted_intr.c
93 raw_spinlock_t *spinlock = &per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu); in vmx_vcpu_pi_load()
182 raw_spin_lock_nested(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu), in pi_enable_wakeup_handler()
185 &per_cpu(wakeup_vcpus_on_cpu, vcpu->cpu)); in pi_enable_wakeup_handler()
186 raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu)); in pi_enable_wakeup_handler()
256 struct list_head *wakeup_list = &per_cpu(wakeup_vcpus_on_cpu, cpu); in pi_wakeup_handler()
257 raw_spinlock_t *spinlock = &per_cpu(wakeup_vcpus_on_cpu_lock, cpu); in pi_wakeup_handler()
271 INIT_LIST_HEAD(&per_cpu(wakeup_vcpus_on_cpu, cpu)); in pi_init_cpu()
272 raw_spin_lock_init(&per_cpu(wakeup_vcpus_on_cpu_lock, cpu)); in pi_init_cpu()
/arch/x86/mm/
cpu_entry_area.c
25 return per_cpu(_cea_offset, cpu); in cea_offset()
35 per_cpu(_cea_offset, i) = i; in init_cea_offsets()
56 per_cpu(_cea_offset, i) = cea; in init_cea_offsets()
117 cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages, in percpu_setup_debug_store()
147 per_cpu(cea_exception_stacks, cpu) = &cea->estacks; in percpu_setup_exception_stacks()
172 &per_cpu(doublefault_stack, cpu), 1, PAGE_KERNEL); in percpu_setup_exception_stacks()
233 cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu), in setup_cpu_entry_area()
237 per_cpu(cpu_entry_area, cpu) = cea; in setup_cpu_entry_area()
/arch/riscv/mm/
context.c
51 if (per_cpu(reserved_context, cpu) == cntx) { in check_update_reserved_context()
53 per_cpu(reserved_context, cpu) = newcntx; in check_update_reserved_context()
73 cntx = atomic_long_xchg_relaxed(&per_cpu(active_context, i), 0); in __flush_context()
81 cntx = per_cpu(reserved_context, i); in __flush_context()
84 per_cpu(reserved_context, i) = cntx; in __flush_context()
168 old_active_cntx = atomic_long_read(&per_cpu(active_context, cpu)); in set_mm_asid()
171 atomic_long_cmpxchg_relaxed(&per_cpu(active_context, cpu), in set_mm_asid()
187 atomic_long_set(&per_cpu(active_context, cpu), cntx); in set_mm_asid()
/arch/parisc/kernel/
irq.c
75 per_cpu(local_ack_eiem, cpu) &= ~mask; in cpu_ack_irq()
78 set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu)); in cpu_ack_irq()
90 per_cpu(local_ack_eiem, cpu) |= mask; in cpu_eoi_irq()
93 set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu)); in cpu_eoi_irq()
126 #define irq_stats(x) (&per_cpu(irq_stat, x))
318 return per_cpu(cpu_data, cpu).txn_addr; in txn_affinity_addr()
330 (!per_cpu(cpu_data, next_cpu).txn_addr || in txn_alloc_addr()
406 last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu); in stack_overflow_check()
422 last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu); in stack_overflow_check()
523 per_cpu(cpu_data, cpu).hpa); in do_cpu_irq_mask()
[all …]
topology.c
40 per_cpu(cpu_devices, cpuid).hotpluggable = 1; in store_cpu_topology()
42 if (register_cpu(&per_cpu(cpu_devices, cpuid), cpuid)) in store_cpu_topology()
49 p = &per_cpu(cpu_data, cpuid); in store_cpu_topology()
51 const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); in store_cpu_topology()
smp.c
123 struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu); in ipi_interrupt()
128 spinlock_t *lock = &per_cpu(ipi_lock, this_cpu); in ipi_interrupt()
199 struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu); in ipi_send()
200 spinlock_t *lock = &per_cpu(ipi_lock, cpu); in ipi_send()
335 const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid); in smp_boot_one_cpu()
342 memset(&per_cpu(irq_stat, cpuid), 0, sizeof(irq_cpustat_t)); in smp_boot_one_cpu()
421 spin_lock_init(&per_cpu(ipi_lock, cpu)); in smp_prepare_cpus()
/arch/arm64/kernel/
sdei.c
55 p = per_cpu(*ptr, cpu); in _free_sdei_stack()
57 per_cpu(*ptr, cpu) = NULL; in _free_sdei_stack()
81 per_cpu(*ptr, cpu) = p; in _init_sdei_stack()
112 s = per_cpu(*ptr, cpu); in _free_sdei_scs()
114 per_cpu(*ptr, cpu) = NULL; in _free_sdei_scs()
136 per_cpu(*ptr, cpu) = s; in _init_sdei_scs()
/arch/riscv/kernel/
irq.c
86 per_cpu(irq_shadow_call_stack_ptr, cpu) = in init_irq_scs()
100 per_cpu(irq_stack_ptr, cpu) = p; in init_irq_stacks()
112 per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu); in init_irq_stacks()
unaligned_access_speed.c
46 if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN) in check_unaligned_access()
114 per_cpu(misaligned_access_speed, cpu) = speed; in check_unaligned_access()
245 if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN) { in riscv_online_cpu()
248 per_cpu(misaligned_access_speed, cpu) = unaligned_scalar_speed_param; in riscv_online_cpu()
294 if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) in check_vector_unaligned_access()
372 per_cpu(vector_misaligned_access, cpu) = speed; in check_vector_unaligned_access()
395 per_cpu(vector_misaligned_access, cpu) = unaligned_vector_speed_param; in riscv_online_cpu_vec()
400 if (per_cpu(vector_misaligned_access, cpu) != RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN) in riscv_online_cpu_vec()
452 per_cpu(misaligned_access_speed, cpu) = unaligned_scalar_speed_param; in check_unaligned_access_all_cpus()
473 per_cpu(vector_misaligned_access, cpu) = unaligned_vector_speed_param; in check_unaligned_access_all_cpus()
/arch/x86/kernel/
setup_percpu.c
166 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); in setup_per_cpu_areas()
167 per_cpu(cpu_number, cpu) = cpu; in setup_per_cpu_areas()
177 per_cpu(x86_cpu_to_apicid, cpu) = in setup_per_cpu_areas()
179 per_cpu(x86_cpu_to_acpiid, cpu) = in setup_per_cpu_areas()
183 per_cpu(x86_cpu_to_node_map, cpu) = in setup_per_cpu_areas()
irq_64.c
54 per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8; in map_irq_stack()
67 per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8; in map_irq_stack()
74 if (per_cpu(hardirq_stack_ptr, cpu)) in irq_init_percpu_irqstack()
/arch/powerpc/include/asm/
smp.h
116 return per_cpu(cpu_sibling_map, cpu); in cpu_sibling_mask()
121 return per_cpu(cpu_core_map, cpu); in cpu_core_mask()
126 return per_cpu(cpu_l2_cache_map, cpu); in cpu_l2_cache_mask()
131 return per_cpu(cpu_smallcore_map, cpu); in cpu_smallcore_mask()
145 return per_cpu(cpu_smallcore_map, cpu); in cpu_smt_mask()
147 return per_cpu(cpu_sibling_map, cpu); in cpu_smt_mask()
/arch/arm64/kvm/
vmid.c
53 vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0); in flush_context()
57 vmid = per_cpu(reserved_vmids, cpu); in flush_context()
59 per_cpu(reserved_vmids, cpu) = vmid; in flush_context()
83 if (per_cpu(reserved_vmids, cpu) == vmid) { in check_update_reserved_vmid()
85 per_cpu(reserved_vmids, cpu) = newvmid; in check_update_reserved_vmid()
/arch/powerpc/platforms/powernv/
subcore.c
155 while(per_cpu(split_state, i).step < step) in wait_for_sync_step()
196 per_cpu(split_state, cpu).step = SYNC_STEP_UNSPLIT; in unsplit_core()
230 split_core_secondary_loop(&per_cpu(split_state, cpu).step); in split_core()
262 per_cpu(split_state, smp_processor_id()).step = SYNC_STEP_FINISHED; in cpu_do_split()
320 while(per_cpu(split_state, cpu).step < SYNC_STEP_FINISHED) in cpu_update_split_mode()
355 state = &per_cpu(split_state, cpu); in set_subcores_per_core()
/arch/arm/kernel/
smp.c
391 struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); in smp_store_cpu_info()
487 bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy; in smp_cpus_done()
622 per_cpu(cpu_completion, cpu) = completion; in register_ipi_completion()
628 complete(per_cpu(cpu_completion, cpu)); in ipi_complete()
805 if (!per_cpu(l_p_j_ref, first)) { in cpufreq_callback()
807 per_cpu(l_p_j_ref, cpu) = in cpufreq_callback()
808 per_cpu(cpu_data, cpu).loops_per_jiffy; in cpufreq_callback()
809 per_cpu(l_p_j_ref_freq, cpu) = freq->old; in cpufreq_callback()
824 lpj = cpufreq_scale(per_cpu(l_p_j_ref, first), in cpufreq_callback()
825 per_cpu(l_p_j_ref_freq, first), freq->new); in cpufreq_callback()
[all …]
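
A pattern that recurs throughout these results is per_cpu(var, cpu) being used from init, hotplug, or diagnostic paths to reach another CPU's copy of a variable, while the owning CPU normally touches its own copy through the this_cpu_* helpers. A minimal sketch of that split, again with a hypothetical variable (demo_state) rather than anything from the files above:

#include <linux/percpu.h>

static DEFINE_PER_CPU(int, demo_state);

static void demo_local_and_remote(int remote_cpu)
{
	/* Local CPU: preemption-safe accessor, no explicit CPU number. */
	this_cpu_write(demo_state, 1);

	/* Another CPU: per_cpu() takes an explicit CPU number, as in the
	 * matches above; the caller provides whatever locking it needs. */
	per_cpu(demo_state, remote_cpu) = 0;
}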

Completed in 43 milliseconds
