Search results for the per_cpu() accessor in linux-6.3-rc2, grouped by directory. Each match shows the source line number, the matched line, and the enclosing function; "[all …]" marks files with further matches not listed here.

/linux-6.3-rc2/arch/x86/xen/

smp.c
   35  kfree(per_cpu(xen_resched_irq, cpu).name);  in xen_smp_intr_free()
   36  per_cpu(xen_resched_irq, cpu).name = NULL;  in xen_smp_intr_free()
   39  per_cpu(xen_resched_irq, cpu).irq = -1;  in xen_smp_intr_free()
   41  kfree(per_cpu(xen_callfunc_irq, cpu).name);  in xen_smp_intr_free()
   45  per_cpu(xen_callfunc_irq, cpu).irq = -1;  in xen_smp_intr_free()
   47  kfree(per_cpu(xen_debug_irq, cpu).name);  in xen_smp_intr_free()
   48  per_cpu(xen_debug_irq, cpu).name = NULL;  in xen_smp_intr_free()
   51  per_cpu(xen_debug_irq, cpu).irq = -1;  in xen_smp_intr_free()
   77  per_cpu(xen_resched_irq, cpu).irq = rc;  in xen_smp_intr_init()
   89  per_cpu(xen_callfunc_irq, cpu).irq = rc;  in xen_smp_intr_init()
  [all …]

spinlock.c
   25  int irq = per_cpu(lock_kicker_irq, cpu);  in xen_qlock_kick()
   74  WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",  in xen_init_lock_cpu()
   75       cpu, per_cpu(lock_kicker_irq, cpu));  in xen_init_lock_cpu()
   78  per_cpu(irq_name, cpu) = name;  in xen_init_lock_cpu()
   88  per_cpu(lock_kicker_irq, cpu) = irq;  in xen_init_lock_cpu()
  101  kfree(per_cpu(irq_name, cpu));  in xen_uninit_lock_cpu()
  102  per_cpu(irq_name, cpu) = NULL;  in xen_uninit_lock_cpu()
  107  irq = per_cpu(lock_kicker_irq, cpu);  in xen_uninit_lock_cpu()
  112  per_cpu(lock_kicker_irq, cpu) = -1;  in xen_uninit_lock_cpu()

smp_pv.c
  100  kfree(per_cpu(xen_irq_work, cpu).name);  in xen_smp_intr_free_pv()
  101  per_cpu(xen_irq_work, cpu).name = NULL;  in xen_smp_intr_free_pv()
  102  if (per_cpu(xen_irq_work, cpu).irq >= 0) {  in xen_smp_intr_free_pv()
  104  per_cpu(xen_irq_work, cpu).irq = -1;  in xen_smp_intr_free_pv()
  107  kfree(per_cpu(xen_pmu_irq, cpu).name);  in xen_smp_intr_free_pv()
  108  per_cpu(xen_pmu_irq, cpu).name = NULL;  in xen_smp_intr_free_pv()
  109  if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {  in xen_smp_intr_free_pv()
  111  per_cpu(xen_pmu_irq, cpu).irq = -1;  in xen_smp_intr_free_pv()
  130  per_cpu(xen_irq_work, cpu).irq = rc;  in xen_smp_intr_init_pv()
  134  per_cpu(xen_pmu_irq, cpu).name = pmu_name;  in xen_smp_intr_init_pv()
  [all …]
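Every Xen hit above follows the same bookkeeping idiom: a per-CPU struct pairing an IRQ number with a heap-allocated name, filled in on init and reset to -1/NULL on teardown. A minimal sketch of that shape follows; demo_common_irq, demo_irq and demo_intr_free() are illustrative names, not kernel symbols, and the real code also unbinds the interrupt handler.

#include <linux/percpu.h>
#include <linux/slab.h>

struct demo_common_irq {
        int irq;
        char *name;
};

/* -1 marks an unused slot, matching the init/free pattern above. */
static DEFINE_PER_CPU(struct demo_common_irq, demo_irq) = { .irq = -1 };

static void demo_intr_free(unsigned int cpu)
{
        /* Drop the allocated name first, then mark the IRQ slot unused. */
        kfree(per_cpu(demo_irq, cpu).name);
        per_cpu(demo_irq, cpu).name = NULL;
        if (per_cpu(demo_irq, cpu).irq >= 0)
                per_cpu(demo_irq, cpu).irq = -1; /* real code unbinds here */
}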
/linux-6.3-rc2/arch/powerpc/kernel/

irq.c
  121  seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);  in arch_show_interrupts()
  140  seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);  in arch_show_interrupts()
  167  u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;  in arch_irq_stat_cpu()
  169  sum += per_cpu(irq_stat, cpu).broadcast_irqs_event;  in arch_irq_stat_cpu()
  170  sum += per_cpu(irq_stat, cpu).pmu_irqs;  in arch_irq_stat_cpu()
  171  sum += per_cpu(irq_stat, cpu).mce_exceptions;  in arch_irq_stat_cpu()
  172  sum += per_cpu(irq_stat, cpu).spurious_irqs;  in arch_irq_stat_cpu()
  173  sum += per_cpu(irq_stat, cpu).timer_irqs_others;  in arch_irq_stat_cpu()
  177  sum += per_cpu(irq_stat, cpu).sreset_irqs;  in arch_irq_stat_cpu()
  179  sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;  in arch_irq_stat_cpu()
  [all …]
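arch_irq_stat_cpu() shows the canonical read side of per-CPU statistics: another CPU's stats block is reached by index with per_cpu(irq_stat, cpu) and its counters summed. A sketch under the assumption of a cut-down stats struct; the demo_* names are hypothetical.

#include <linux/percpu.h>
#include <linux/types.h>

struct demo_irq_stat {
        unsigned int timer_irqs_event;
        unsigned int pmu_irqs;
        unsigned int spurious_irqs;
};

static DEFINE_PER_CPU(struct demo_irq_stat, demo_irq_stat);

/* Sum one CPU's counters, mirroring arch_irq_stat_cpu() above. */
static u64 demo_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = per_cpu(demo_irq_stat, cpu).timer_irqs_event;

        sum += per_cpu(demo_irq_stat, cpu).pmu_irqs;
        sum += per_cpu(demo_irq_stat, cpu).spurious_irqs;
        return sum;
}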
/linux-6.3-rc2/kernel/

smpboot.c
   32  struct task_struct *tsk = per_cpu(idle_threads, cpu);  in idle_thread_get()
   41  per_cpu(idle_threads, smp_processor_id()) = current;  in idle_thread_set_boot_cpu()
   52  struct task_struct *tsk = per_cpu(idle_threads, cpu);  in idle_init()
   59  per_cpu(idle_threads, cpu) = tsk;  in idle_init()
  337  return atomic_read(&per_cpu(cpu_hotplug_state, cpu));  in cpu_report_state()
  355  atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);  in cpu_check_up_prepare()
  359  switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) {  in cpu_check_up_prepare()
  448  oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));  in cpu_wait_death()
  453  atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD);  in cpu_wait_death()
  456  if (!atomic_try_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),  in cpu_wait_death()
  [all …]

profile.c
  175  per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);  in __profile_flip_buffers()
  183  j = per_cpu(cpu_profile_flip, get_cpu());  in profile_flip_buffers()
  187  struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];  in profile_flip_buffers()
  206  i = per_cpu(cpu_profile_flip, get_cpu());  in profile_discard_flip_buffers()
  226  hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];  in do_profile_hits()
  274  if (per_cpu(cpu_profile_hits, cpu)[i]) {  in profile_dead_cpu()
  275  page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);  in profile_dead_cpu()
  276  per_cpu(cpu_profile_hits, cpu)[i] = NULL;  in profile_dead_cpu()
  288  per_cpu(cpu_profile_flip, cpu) = 0;  in profile_prepare_cpu()
  291  if (per_cpu(cpu_profile_hits, cpu)[i])  in profile_prepare_cpu()
  [all …]

softirq.c
  911  per_cpu(tasklet_vec, cpu).tail =  in softirq_init()
  912          &per_cpu(tasklet_vec, cpu).head;  in softirq_init()
  913  per_cpu(tasklet_hi_vec, cpu).tail =  in softirq_init()
  914          &per_cpu(tasklet_hi_vec, cpu).head;  in softirq_init()
  949  if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {  in takeover_tasklets()
  951  __this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);  in takeover_tasklets()
  952  per_cpu(tasklet_vec, cpu).head = NULL;  in takeover_tasklets()
  953  per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;  in takeover_tasklets()
  957  if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {  in takeover_tasklets()
  960  per_cpu(tasklet_hi_vec, cpu).head = NULL;  in takeover_tasklets()
  [all …]
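The smpboot.c hits store one task pointer per CPU: the boot CPU records current for itself, and the hotplug path later fetches another CPU's entry by index. A sketch of just that bookkeeping; the demo_* names are illustrative, not the kernel's.

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct task_struct *, demo_idle_threads);

static struct task_struct *demo_idle_thread_get(unsigned int cpu)
{
        return per_cpu(demo_idle_threads, cpu);
}

/* Called once during early boot, while only the boot CPU is running. */
static void demo_idle_thread_set_boot_cpu(void)
{
        per_cpu(demo_idle_threads, smp_processor_id()) = current;
}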
/linux-6.3-rc2/drivers/perf/

arm_pmu_acpi.c
  162  per_cpu(pmu_irqs, cpu) = irq;  in arm_pmu_acpi_parse_irqs()
  172  irq = per_cpu(pmu_irqs, cpu);  in arm_pmu_acpi_parse_irqs()
  183  if (per_cpu(pmu_irqs, irq_cpu) == irq)  in arm_pmu_acpi_parse_irqs()
  184  per_cpu(pmu_irqs, irq_cpu) = 0;  in arm_pmu_acpi_parse_irqs()
  198  pmu = per_cpu(probed_pmus, cpu);  in arm_pmu_acpi_find_pmu()
  221  int other_irq = per_cpu(hw_events->irq, cpu);  in pmu_irq_matches()
  240  int irq = per_cpu(pmu_irqs, cpu);  in arm_pmu_acpi_associate_pmu_cpu()
  242  per_cpu(probed_pmus, cpu) = pmu;  in arm_pmu_acpi_associate_pmu_cpu()
  247  per_cpu(hw_events->irq, cpu) = irq;  in arm_pmu_acpi_associate_pmu_cpu()
  267  if (per_cpu(probed_pmus, cpu))  in arm_pmu_acpi_cpu_starting()
  [all …]
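Lines 183-184 above scan the whole per-CPU IRQ table and drop every slot holding a just-released IRQ, since one interrupt can be recorded for several CPUs. A hedged sketch of that walk; the demo_* names are hypothetical.

#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(int, demo_pmu_irqs);

static void demo_forget_irq(int irq)
{
        int cpu;

        /* Clear every per-CPU slot that still references this IRQ. */
        for_each_possible_cpu(cpu)
                if (per_cpu(demo_pmu_irqs, cpu) == irq)
                        per_cpu(demo_pmu_irqs, cpu) = 0;
}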
/linux-6.3-rc2/drivers/cpufreq/

speedstep-centrino.c
  261  per_cpu(centrino_model, policy->cpu) = model;  in centrino_cpu_init_table()
  296  if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) ||  in extract_clock()
  303  if ((!per_cpu(centrino_model, cpu)) ||  in extract_clock()
  304  (!per_cpu(centrino_model, cpu)->op_points))  in extract_clock()
  309  per_cpu(centrino_model, cpu)->op_points[i].frequency  in extract_clock()
  313  return per_cpu(centrino_model, cpu)->  in extract_clock()
  367  per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i];  in centrino_cpu_init()
  369  if (!per_cpu(centrino_cpu, policy->cpu)) {  in centrino_cpu_init()
  407  if (!per_cpu(centrino_model, cpu))  in centrino_cpu_exit()
  410  per_cpu(centrino_model, cpu) = NULL;  in centrino_cpu_exit()
  [all …]

vexpress-spc-cpufreq.c
   78  cpu_freq = per_cpu(cpu_last_req_freq, j);  in find_cluster_maxfreq()
   80  if (cluster == per_cpu(physical_cluster, j) &&  in find_cluster_maxfreq()
   90  u32 cur_cluster = per_cpu(physical_cluster, cpu);  in clk_get_cpu_rate()
  103  return per_cpu(cpu_last_req_freq, cpu);  in ve_spc_cpufreq_get_rate()
  118  prev_rate = per_cpu(cpu_last_req_freq, cpu);  in ve_spc_cpufreq_set_rate()
  119  per_cpu(cpu_last_req_freq, cpu) = rate;  in ve_spc_cpufreq_set_rate()
  120  per_cpu(physical_cluster, cpu) = new_cluster;  in ve_spc_cpufreq_set_rate()
  144  per_cpu(cpu_last_req_freq, cpu) = prev_rate;  in ve_spc_cpufreq_set_rate()
  145  per_cpu(physical_cluster, cpu) = old_cluster;  in ve_spc_cpufreq_set_rate()
  430  per_cpu(physical_cluster, cpu) = cur_cluster;  in ve_spc_cpufreq_init()
  [all …]
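find_cluster_maxfreq() pairs two per-CPU variables, the cluster a CPU sits on and the last frequency it requested, and takes the maximum across one cluster. A sketch of that scan; the demo_* names are illustrative.

#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(unsigned int, demo_last_req_freq);
static DEFINE_PER_CPU(unsigned int, demo_physical_cluster);

static unsigned int demo_cluster_maxfreq(unsigned int cluster)
{
        unsigned int j, max_freq = 0;

        /* Consider only CPUs that belong to the requested cluster. */
        for_each_online_cpu(j) {
                unsigned int cpu_freq = per_cpu(demo_last_req_freq, j);

                if (cluster == per_cpu(demo_physical_cluster, j) &&
                    max_freq < cpu_freq)
                        max_freq = cpu_freq;
        }
        return max_freq;
}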
/linux-6.3-rc2/arch/arm/mm/

context.c
   67  asid = per_cpu(active_asids, cpu).counter;  in a15_erratum_get_cpumask()
   69  asid = per_cpu(reserved_asids, cpu);  in a15_erratum_get_cpumask()
  144  asid = atomic64_xchg(&per_cpu(active_asids, i), 0);  in flush_context()
  153  asid = per_cpu(reserved_asids, i);  in flush_context()
  155  per_cpu(reserved_asids, i) = asid;  in flush_context()
  180  if (per_cpu(reserved_asids, cpu) == asid) {  in check_update_reserved_asid()
  182  per_cpu(reserved_asids, cpu) = newasid;  in check_update_reserved_asid()
  254  && atomic64_xchg(&per_cpu(active_asids, cpu), asid))  in check_and_switch_context()
  270  atomic64_set(&per_cpu(active_asids, cpu), asid);  in check_and_switch_context()
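check_update_reserved_asid() handles ASID-generation rollover: when a rebuilt ASID matches any CPU's reserved copy, every matching copy is rewritten so all CPUs stay in agreement. A sketch under the same assumptions; the demo_* names are hypothetical.

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/types.h>

static DEFINE_PER_CPU(u64, demo_reserved_asids);

static bool demo_update_reserved_asid(u64 asid, u64 newasid)
{
        int cpu;
        bool hit = false;

        /* Update every CPU's reserved copy, not just the first match. */
        for_each_possible_cpu(cpu) {
                if (per_cpu(demo_reserved_asids, cpu) == asid) {
                        hit = true;
                        per_cpu(demo_reserved_asids, cpu) = newasid;
                }
        }
        return hit;
}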
/linux-6.3-rc2/arch/ia64/mm/

tlb.c
  101  per_cpu(ia64_need_tlb_flush, i) = 1;  in wrap_mmu_context()
  397  per_cpu(ia64_tr_num, cpu) = 8;  in ia64_tlb_init()
  401  if (per_cpu(ia64_tr_num, cpu) >  in ia64_tlb_init()
  403  per_cpu(ia64_tr_num, cpu) =  in ia64_tlb_init()
  405  if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) {  in ia64_tlb_init()
  407  per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX;  in ia64_tlb_init()
  472  for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);  in ia64_itr_entry()
  516  if (i >= per_cpu(ia64_tr_num, cpu))  in ia64_itr_entry()
  520  if (i > per_cpu(ia64_tr_used, cpu))  in ia64_itr_entry()
  521  per_cpu(ia64_tr_used, cpu) = i;  in ia64_itr_entry()
  [all …]
/linux-6.3-rc2/arch/x86/kernel/

setup_percpu.c
  171  per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);  in setup_per_cpu_areas()
  172  per_cpu(pcpu_hot.cpu_number, cpu) = cpu;  in setup_per_cpu_areas()
  182  per_cpu(x86_cpu_to_apicid, cpu) =  in setup_per_cpu_areas()
  184  per_cpu(x86_bios_cpu_apicid, cpu) =  in setup_per_cpu_areas()
  186  per_cpu(x86_cpu_to_acpiid, cpu) =  in setup_per_cpu_areas()
  190  per_cpu(x86_cpu_to_logical_apicid, cpu) =  in setup_per_cpu_areas()
  194  per_cpu(x86_cpu_to_node_map, cpu) =  in setup_per_cpu_areas()

topology.c
  134  per_cpu(cpu_devices, num).cpu.hotpluggable = 1;  in arch_register_cpu()
  136  return register_cpu(&per_cpu(cpu_devices, num).cpu, num);  in arch_register_cpu()
  142  unregister_cpu(&per_cpu(cpu_devices, num).cpu);  in arch_unregister_cpu()
  149  return register_cpu(&per_cpu(cpu_devices, num).cpu, num);  in arch_register_cpu()
/linux-6.3-rc2/arch/parisc/kernel/

irq.c
   78  per_cpu(local_ack_eiem, cpu) &= ~mask;  in cpu_ack_irq()
   81  set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));  in cpu_ack_irq()
   93  per_cpu(local_ack_eiem, cpu) |= mask;  in cpu_eoi_irq()
   96  set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));  in cpu_eoi_irq()
  129  #define irq_stats(x) (&per_cpu(irq_stat, x))
  321  return per_cpu(cpu_data, cpu).txn_addr;  in txn_affinity_addr()
  333  (!per_cpu(cpu_data, next_cpu).txn_addr ||  in txn_alloc_addr()
  409  last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);  in stack_overflow_check()
  425  last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);  in stack_overflow_check()
  526  per_cpu(cpu_data, cpu).hpa);  in do_cpu_irq_mask()
  [all …]

topology.c
   40  per_cpu(cpu_devices, cpuid).hotpluggable = 1;  in store_cpu_topology()
   42  if (register_cpu(&per_cpu(cpu_devices, cpuid), cpuid))  in store_cpu_topology()
   49  p = &per_cpu(cpu_data, cpuid);  in store_cpu_topology()
   51  const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu);  in store_cpu_topology()

smp.c
  123  struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu);  in ipi_interrupt()
  128  spinlock_t *lock = &per_cpu(ipi_lock, this_cpu);  in ipi_interrupt()
  199  struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu);  in ipi_send()
  200  spinlock_t *lock = &per_cpu(ipi_lock, cpu);  in ipi_send()
  338  const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);  in smp_boot_one_cpu()
  345  memset(&per_cpu(irq_stat, cpuid), 0, sizeof(irq_cpustat_t));  in smp_boot_one_cpu()
  410  int bootstrap_processor = per_cpu(cpu_data, 0).cpuid;  in smp_prepare_boot_cpu()
  430  spin_lock_init(&per_cpu(ipi_lock, cpu));  in smp_prepare_cpus()
/linux-6.3-rc2/arch/mips/kernel/

mips-cpc.c
   77  spin_lock_init(&per_cpu(cpc_core_lock, cpu));  in mips_cpc_probe()
  100  spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),  in mips_cpc_lock_other()
  101  per_cpu(cpc_core_lock_flags, curr_core));  in mips_cpc_lock_other()
  120  spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),  in mips_cpc_unlock_other()
  121  per_cpu(cpc_core_lock_flags, curr_core));  in mips_cpc_unlock_other()
/linux-6.3-rc2/arch/x86/kvm/vmx/

posted_intr.c
   92  raw_spin_lock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));  in vmx_vcpu_pi_load()
   94  raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));  in vmx_vcpu_pi_load()
  155  raw_spin_lock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));  in pi_enable_wakeup_handler()
  157  &per_cpu(wakeup_vcpus_on_cpu, vcpu->cpu));  in pi_enable_wakeup_handler()
  158  raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));  in pi_enable_wakeup_handler()
  221  struct list_head *wakeup_list = &per_cpu(wakeup_vcpus_on_cpu, cpu);  in pi_wakeup_handler()
  222  raw_spinlock_t *spinlock = &per_cpu(wakeup_vcpus_on_cpu_lock, cpu);  in pi_wakeup_handler()
  236  INIT_LIST_HEAD(&per_cpu(wakeup_vcpus_on_cpu, cpu));  in pi_init_cpu()
  237  raw_spin_lock_init(&per_cpu(wakeup_vcpus_on_cpu_lock, cpu));  in pi_init_cpu()
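pi_init_cpu() is the standard recipe for per-CPU data that remote CPUs may touch: give each CPU its own list plus its own raw spinlock, and initialize both by index before any handler can run. A minimal sketch; the demo_* names are illustrative.

#include <linux/percpu.h>
#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_PER_CPU(struct list_head, demo_wakeup_list);
static DEFINE_PER_CPU(raw_spinlock_t, demo_wakeup_lock);

static void demo_pi_init_cpu(int cpu)
{
        /* Per-CPU list and lock, set up once during CPU bring-up. */
        INIT_LIST_HEAD(&per_cpu(demo_wakeup_list, cpu));
        raw_spin_lock_init(&per_cpu(demo_wakeup_lock, cpu));
}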
/linux-6.3-rc2/arch/riscv/mm/

context.c
   52  if (per_cpu(reserved_context, cpu) == cntx) {  in check_update_reserved_context()
   54  per_cpu(reserved_context, cpu) = newcntx;  in check_update_reserved_context()
   74  cntx = atomic_long_xchg_relaxed(&per_cpu(active_context, i), 0);  in __flush_context()
   82  cntx = per_cpu(reserved_context, i);  in __flush_context()
   85  per_cpu(reserved_context, i) = cntx;  in __flush_context()
  169  old_active_cntx = atomic_long_read(&per_cpu(active_context, cpu));  in set_mm_asid()
  172  atomic_long_cmpxchg_relaxed(&per_cpu(active_context, cpu),  in set_mm_asid()
  188  atomic_long_set(&per_cpu(active_context, cpu), cntx);  in set_mm_asid()
/linux-6.3-rc2/arch/arm64/kernel/

sdei.c
   54  p = per_cpu(*ptr, cpu);  in _free_sdei_stack()
   56  per_cpu(*ptr, cpu) = NULL;  in _free_sdei_stack()
   81  per_cpu(*ptr, cpu) = p;  in _init_sdei_stack()
  113  s = per_cpu(*ptr, cpu);  in _free_sdei_scs()
  115  per_cpu(*ptr, cpu) = NULL;  in _free_sdei_scs()
  137  per_cpu(*ptr, cpu) = s;  in _init_sdei_scs()

irq.c
   49  per_cpu(irq_shadow_call_stack_ptr, cpu) =  in init_irq_scs()
   61  per_cpu(irq_stack_ptr, cpu) = p;  in init_irq_stacks()
   73  per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu);  in init_irq_stacks()
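init_irq_stacks() allocates one IRQ stack per CPU and parks the pointer in a per-CPU slot; the variant at line 73 instead points at a statically allocated per-CPU stack. A sketch of the dynamic path, with alignment and error handling omitted; the demo_* names and the stack size are assumptions.

#include <linux/percpu.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>

#define DEMO_IRQ_STACK_SIZE (16 * 1024)

static DEFINE_PER_CPU(void *, demo_irq_stack_ptr);

static void demo_init_irq_stacks(void)
{
        int cpu;

        /* One stack per possible CPU, found later via per_cpu(). */
        for_each_possible_cpu(cpu)
                per_cpu(demo_irq_stack_ptr, cpu) =
                        vmalloc(DEMO_IRQ_STACK_SIZE);
}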
/linux-6.3-rc2/arch/x86/mm/

cpu_entry_area.c
   24  return per_cpu(_cea_offset, cpu);  in cea_offset()
   49  per_cpu(_cea_offset, i) = cea;  in init_cea_offsets()
  110  cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,  in percpu_setup_debug_store()
  140  per_cpu(cea_exception_stacks, cpu) = &cea->estacks;  in percpu_setup_exception_stacks()
  165  &per_cpu(doublefault_stack, cpu), 1, PAGE_KERNEL);  in percpu_setup_exception_stacks()
  226  cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),  in setup_cpu_entry_area()
  230  per_cpu(cpu_entry_area, cpu) = cea;  in setup_cpu_entry_area()
/linux-6.3-rc2/drivers/xen/events/

events_2l.c
   52  clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));  in evtchn_2l_remove()
   58  clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, old_cpu)));  in evtchn_2l_bind_to_cpu()
   59  set_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));  in evtchn_2l_bind_to_cpu()
  152  per_cpu(cpu_evtchn_mask, cpu)[idx] &  in active_evtchns()
  268  xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);  in xen_debug_interrupt()
  280  v = per_cpu(xen_vcpu, i);  in xen_debug_interrupt()
  289  v = per_cpu(xen_vcpu, cpu);  in xen_debug_interrupt()
  353  memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) *  in evtchn_2l_resume()
  359  memset(per_cpu(cpu_evtchn_mask, cpu), 0, sizeof(xen_ulong_t) *  in evtchn_2l_percpu_deinit()
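events_2l.c keeps a per-CPU bitmap of event channels; rebinding a channel clears its bit in the old CPU's mask and sets it in the new one's. A sketch of that two-sided update; the demo_* names and the channel count are assumptions, and the real code wraps the pointer in a BM() cast.

#include <linux/percpu.h>
#include <linux/bitops.h>

#define DEMO_NR_EVTCHN 4096

static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(DEMO_NR_EVTCHN)],
                      demo_evtchn_mask);

static void demo_bind_to_cpu(unsigned int evtchn, unsigned int cpu,
                             unsigned int old_cpu)
{
        /* Move ownership of one event channel between CPU bitmaps. */
        clear_bit(evtchn, per_cpu(demo_evtchn_mask, old_cpu));
        set_bit(evtchn, per_cpu(demo_evtchn_mask, cpu));
}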
/linux-6.3-rc2/arch/powerpc/include/asm/

smp.h
  115  return per_cpu(cpu_sibling_map, cpu);  in cpu_sibling_mask()
  120  return per_cpu(cpu_core_map, cpu);  in cpu_core_mask()
  125  return per_cpu(cpu_l2_cache_map, cpu);  in cpu_l2_cache_mask()
  130  return per_cpu(cpu_smallcore_map, cpu);  in cpu_smallcore_mask()
  144  return per_cpu(cpu_smallcore_map, cpu);  in cpu_smt_mask()
  146  return per_cpu(cpu_sibling_map, cpu);  in cpu_smt_mask()
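The smp.h hits are the read-only face of the same mechanism: tiny inline accessors that hand out a CPU's topology mask by index. A sketch of one such accessor; the demo_* names are illustrative, and the mask itself must be allocated with alloc_cpumask_var() during boot.

#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(cpumask_var_t, demo_sibling_map);

/* Return the sibling mask of @cpu, as cpu_sibling_mask() does above. */
static inline struct cpumask *demo_sibling_mask(int cpu)
{
        return per_cpu(demo_sibling_map, cpu);
}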