Lines Matching refs:cpu (arch/arm64/kernel/smp.c)

82 static void ipi_setup(int cpu);
85 static void ipi_teardown(int cpu);
86 static int op_cpu_kill(unsigned int cpu);
88 static inline int op_cpu_kill(unsigned int cpu) in op_cpu_kill() argument
99 static int boot_secondary(unsigned int cpu, struct task_struct *idle) in boot_secondary() argument
101 const struct cpu_operations *ops = get_cpu_ops(cpu); in boot_secondary()
104 return ops->cpu_boot(cpu); in boot_secondary()
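
boot_secondary() above is a thin dispatcher through the per-CPU cpu_operations
table: each enable method (PSCI, spin-table, ...) supplies the callbacks, and
a missing callback means the operation is unsupported. Below is a minimal,
compilable sketch of that ops-table pattern; the struct and callback names
mirror the kernel's cpu_operations, but everything else is illustrative:

    #include <errno.h>
    #include <stdio.h>

    /* Each boot method fills in a table of callbacks; generic code calls
     * through it and treats a missing callback as "unsupported". */
    struct cpu_operations {
            const char *name;
            int (*cpu_boot)(unsigned int cpu);
    };

    static int psci_cpu_boot(unsigned int cpu)
    {
            printf("booting CPU%u via PSCI\n", cpu);
            return 0;
    }

    static const struct cpu_operations psci_ops = {
            .name = "psci",
            .cpu_boot = psci_cpu_boot,
    };

    /* Stand-in for get_cpu_ops(); the kernel keeps one entry per CPU. */
    static const struct cpu_operations *get_ops(unsigned int cpu)
    {
            (void)cpu;
            return &psci_ops;
    }

    static int boot_secondary(unsigned int cpu)
    {
            const struct cpu_operations *ops = get_ops(cpu);

            if (!ops || !ops->cpu_boot)
                    return -EOPNOTSUPP;
            return ops->cpu_boot(cpu);
    }

    int main(void)
    {
            return boot_secondary(1);
    }
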
111 int __cpu_up(unsigned int cpu, struct task_struct *idle) in __cpu_up() argument
124 ret = boot_secondary(cpu, idle); in __cpu_up()
127 pr_err("CPU%u: failed to boot: %d\n", cpu, ret); in __cpu_up()
137 if (cpu_online(cpu)) in __cpu_up()
140 pr_crit("CPU%u: failed to come online\n", cpu); in __cpu_up()
149 cpu, status); in __cpu_up()
153 if (!op_cpu_kill(cpu)) { in __cpu_up()
154 pr_crit("CPU%u: died during early boot\n", cpu); in __cpu_up()
157 pr_crit("CPU%u: may not have shut down cleanly\n", cpu); in __cpu_up()
160 pr_crit("CPU%u: is stuck in kernel\n", cpu); in __cpu_up()
162 pr_crit("CPU%u: does not support 52-bit VAs\n", cpu); in __cpu_up()
165 cpu, PAGE_SIZE / SZ_1K); in __cpu_up()
170 panic("CPU%u detected unsupported configuration\n", cpu); in __cpu_up()
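
Past boot_secondary(), __cpu_up() blocks until the incoming CPU marks itself
online, and decodes a per-CPU status word when it never shows up (stuck in
kernel, 52-bit VA mismatch, unsupported granule). A hedged userspace model of
that handshake using C11 atomics and POSIX threads (compile with -pthread;
the timeout, polling interval, and messages are illustrative, not the
kernel's):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    /* The "secondary" flips an online flag; the boot CPU polls with a
     * timeout, roughly like __cpu_up() waiting on cpu_online(cpu). */
    static atomic_bool cpu_online_flag;

    static void *secondary_start(void *arg)
    {
            (void)arg;
            /* ... early per-CPU init would happen here ... */
            atomic_store(&cpu_online_flag, true);  /* set_cpu_online() */
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            if (pthread_create(&t, NULL, secondary_start, NULL))
                    return 1;

            for (int ms = 0; ms < 5000; ms += 10) {
                    if (atomic_load(&cpu_online_flag)) {
                            puts("CPU1: online");
                            pthread_join(t, NULL);
                            return 0;
                    }
                    usleep(10 * 1000);
            }
            fprintf(stderr, "CPU1: failed to come online\n");
            return 1;
    }
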
200 unsigned int cpu = smp_processor_id(); in secondary_start_kernel() local
218 rcutree_report_cpu_starting(cpu); in secondary_start_kernel()
228 ops = get_cpu_ops(cpu); in secondary_start_kernel()
236 store_cpu_topology(cpu); in secondary_start_kernel()
241 notify_cpu_starting(cpu); in secondary_start_kernel()
243 ipi_setup(cpu); in secondary_start_kernel()
245 numa_add_cpu(cpu); in secondary_start_kernel()
253 cpu, (unsigned long)mpidr, in secondary_start_kernel()
256 set_cpu_online(cpu, true); in secondary_start_kernel()
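
secondary_start_kernel() runs on the incoming CPU, and its ordering matters:
RCU is told about the CPU before the STARTING-section hotplug notifiers run,
IPIs are enabled before the CPU becomes visible, and set_cpu_online() comes
last because __cpu_up() on the boot CPU is checking exactly that bit. A
stub-based sketch of the sequence as these lines show it (the stub bodies are
no-ops; only the ordering is the point):

    #include <stdio.h>

    /* Stub names mirror the calls in the lines above. */
    static void rcutree_report_cpu_starting(unsigned int cpu) { (void)cpu; }
    static void store_cpu_topology(unsigned int cpu) { (void)cpu; }
    static void notify_cpu_starting(unsigned int cpu) { (void)cpu; }
    static void ipi_setup(int cpu) { (void)cpu; }
    static void numa_add_cpu(unsigned int cpu) { (void)cpu; }
    static void set_cpu_online(unsigned int cpu, int on) { (void)cpu; (void)on; }

    static void secondary_start_kernel_sketch(unsigned int cpu)
    {
            rcutree_report_cpu_starting(cpu); /* RCU first: notifiers may use it */
            store_cpu_topology(cpu);
            notify_cpu_starting(cpu);  /* STARTING-section hotplug callbacks */
            ipi_setup(cpu);            /* enable this CPU's IPI interrupts */
            numa_add_cpu(cpu);
            set_cpu_online(cpu, 1);    /* last: __cpu_up() checks this bit */
            printf("CPU%u: booted secondary processor\n", cpu);
    }

    int main(void)
    {
            secondary_start_kernel_sketch(1);
            return 0;
    }
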
275 static int op_cpu_disable(unsigned int cpu) in op_cpu_disable() argument
277 const struct cpu_operations *ops = get_cpu_ops(cpu); in op_cpu_disable()
291 return ops->cpu_disable(cpu); in op_cpu_disable()
301 unsigned int cpu = smp_processor_id(); in __cpu_disable() local
304 ret = op_cpu_disable(cpu); in __cpu_disable()
308 remove_cpu_topology(cpu); in __cpu_disable()
309 numa_remove_cpu(cpu); in __cpu_disable()
315 set_cpu_online(cpu, false); in __cpu_disable()
316 ipi_teardown(cpu); in __cpu_disable()
326 static int op_cpu_kill(unsigned int cpu) in op_cpu_kill() argument
328 const struct cpu_operations *ops = get_cpu_ops(cpu); in op_cpu_kill()
338 return ops->cpu_kill(cpu); in op_cpu_kill()
345 void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) in arch_cpuhp_cleanup_dead_cpu() argument
349 pr_debug("CPU%u: shutdown\n", cpu); in arch_cpuhp_cleanup_dead_cpu()
357 err = op_cpu_kill(cpu); in arch_cpuhp_cleanup_dead_cpu()
359 pr_warn("CPU%d may not have shut down cleanly: %d\n", cpu, err); in arch_cpuhp_cleanup_dead_cpu()
368 unsigned int cpu = smp_processor_id(); in cpu_die() local
369 const struct cpu_operations *ops = get_cpu_ops(cpu); in cpu_die()
383 ops->cpu_die(cpu); in cpu_die()
389 static void __cpu_try_die(int cpu) in __cpu_try_die() argument
392 const struct cpu_operations *ops = get_cpu_ops(cpu); in __cpu_try_die()
395 ops->cpu_die(cpu); in __cpu_try_die()
405 int cpu = smp_processor_id(); in cpu_die_early() local
407 pr_crit("CPU%d: will not boot\n", cpu); in cpu_die_early()
410 set_cpu_present(cpu, 0); in cpu_die_early()
415 __cpu_try_die(cpu); in cpu_die_early()
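
On the teardown side, __cpu_disable() runs on the dying CPU (drop out of
topology and NUMA, clear the online bit, tear down IPIs), cpu_die() parks the
CPU in the enable method's cpu_die callback, and arch_cpuhp_cleanup_dead_cpu()
later runs on a surviving CPU, using op_cpu_kill() to confirm the core really
stopped. Note the soft failure mode: with no way to verify the kill, the
kernel assumes the best and only warns. A compilable sketch of that
confirm-the-kill pattern (names mirror the kernel's; the callback body is
fake):

    #include <stdio.h>

    struct cpu_operations {
            int (*cpu_kill)(unsigned int cpu);
    };

    static int fake_cpu_kill(unsigned int cpu)
    {
            (void)cpu;
            return 0;  /* pretend firmware confirmed the core is off */
    }

    static const struct cpu_operations ops = { .cpu_kill = fake_cpu_kill };

    static int op_cpu_kill(unsigned int cpu)
    {
            /* No way to synchronise with the dying CPU: assume it died. */
            if (!ops.cpu_kill)
                    return 0;
            return ops.cpu_kill(cpu);
    }

    int main(void)
    {
            unsigned int cpu = 1;
            int err = op_cpu_kill(cpu);

            if (err)
                    fprintf(stderr,
                            "CPU%u may not have shut down cleanly: %d\n",
                            cpu, err);
            return 0;
    }
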
474 static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid) in is_mpidr_duplicate() argument
478 for (i = 1; (i < cpu) && (i < NR_CPUS); i++) in is_mpidr_duplicate()
488 static int __init smp_cpu_setup(int cpu) in smp_cpu_setup() argument
492 if (init_cpu_ops(cpu)) in smp_cpu_setup()
495 ops = get_cpu_ops(cpu); in smp_cpu_setup()
496 if (ops->cpu_init(cpu)) in smp_cpu_setup()
499 set_cpu_possible(cpu, true); in smp_cpu_setup()
507 int arch_register_cpu(int cpu) in arch_register_cpu() argument
509 acpi_handle acpi_handle = acpi_get_processor_handle(cpu); in arch_register_cpu()
510 struct cpu *c = &per_cpu(cpu_devices, cpu); in arch_register_cpu()
518 if (invalid_logical_cpuid(cpu) || !cpu_present(cpu)) { in arch_register_cpu()
528 c->hotpluggable = arch_cpu_is_hotpluggable(cpu); in arch_register_cpu()
530 return register_cpu(c, cpu); in arch_register_cpu()
534 void arch_unregister_cpu(int cpu) in arch_unregister_cpu() argument
536 acpi_handle acpi_handle = acpi_get_processor_handle(cpu); in arch_unregister_cpu()
537 struct cpu *c = &per_cpu(cpu_devices, cpu); in arch_unregister_cpu()
551 if (cpu_present(cpu) && !(sta & ACPI_STA_DEVICE_PRESENT)) { in arch_unregister_cpu()
563 struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu) in acpi_cpu_get_madt_gicc() argument
565 return &cpu_madt_gicc[cpu]; in acpi_cpu_get_madt_gicc()
776 unsigned int cpu; in smp_prepare_cpus() local
798 for_each_possible_cpu(cpu) { in smp_prepare_cpus()
800 if (cpu == smp_processor_id()) in smp_prepare_cpus()
803 ops = get_cpu_ops(cpu); in smp_prepare_cpus()
807 err = ops->cpu_prepare(cpu); in smp_prepare_cpus()
811 set_cpu_present(cpu, true); in smp_prepare_cpus()
812 numa_store_cpu_info(cpu); in smp_prepare_cpus()
833 unsigned int cpu, i; in arch_show_interrupts() local
838 for_each_online_cpu(cpu) in arch_show_interrupts()
839 seq_printf(p, "%10u ", irq_desc_kstat_cpu(get_ipi_desc(cpu, i), cpu)); in arch_show_interrupts()
852 void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi() argument
854 smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC); in arch_send_call_function_single_ipi()
864 static void __noreturn local_cpu_stop(unsigned int cpu) in local_cpu_stop() argument
866 set_cpu_online(cpu, false); in local_cpu_stop()
883 static void __noreturn ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs) in ipi_cpu_crash_stop() argument
896 crash_save_cpu(regs, cpu); in ipi_cpu_crash_stop()
898 set_cpu_online(cpu, false); in ipi_cpu_crash_stop()
903 __cpu_try_die(cpu); in ipi_cpu_crash_stop()
914 unsigned int cpu; in arm64_send_ipi() local
919 for_each_cpu(cpu, mask) in arm64_send_ipi()
920 __ipi_send_single(get_ipi_desc(cpu, nr), cpu); in arm64_send_ipi()
943 int cpu; in kgdb_roundup_cpus() local
945 for_each_online_cpu(cpu) { in kgdb_roundup_cpus()
947 if (cpu == this_cpu) in kgdb_roundup_cpus()
950 __ipi_send_single(get_ipi_desc(cpu, IPI_KGDB_ROUNDUP), cpu); in kgdb_roundup_cpus()
960 unsigned int cpu = smp_processor_id(); in do_handle_IPI() local
977 ipi_cpu_crash_stop(cpu, get_irq_regs()); in do_handle_IPI()
980 local_cpu_stop(cpu); in do_handle_IPI()
1005 kgdb_nmicallback(cpu, get_irq_regs()); in do_handle_IPI()
1009 pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr); in do_handle_IPI()
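
do_handle_IPI() is a switch over a small message enum, with the stop/crash
messages never returning and anything unrecognized logged loudly. A
self-contained sketch of that dispatch shape (the enum here is a stand-in,
not the kernel's actual set or numbering, which also covers timer and
irq_work messages):

    #include <stdio.h>

    enum ipi_msg_type {
            IPI_RESCHEDULE,
            IPI_CALL_FUNC,
            IPI_CPU_STOP,
            IPI_CPU_CRASH_STOP,
            IPI_KGDB_ROUNDUP,
            NR_IPI,
    };

    static void do_handle_ipi_sketch(unsigned int cpu, int ipinr)
    {
            switch (ipinr) {
            case IPI_RESCHEDULE:
                    break;  /* scheduler kick */
            case IPI_CALL_FUNC:
                    break;  /* run queued cross-CPU function calls */
            case IPI_CPU_STOP:
                    break;  /* local_cpu_stop(): mark offline, never return */
            case IPI_CPU_CRASH_STOP:
                    break;  /* save this CPU's regs for kdump, then stop */
            case IPI_KGDB_ROUNDUP:
                    break;  /* report in to the debugger */
            default:
                    fprintf(stderr, "CPU%u: Unknown IPI message 0x%x\n",
                            cpu, ipinr);
                    break;
            }
    }

    int main(void)
    {
            do_handle_ipi_sketch(0, 0x7f);  /* exercise the unknown path */
            return 0;
    }
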
1046 static void ipi_setup(int cpu) in ipi_setup() argument
1062 enable_irq(irq_desc_get_irq(get_ipi_desc(cpu, i))); in ipi_setup()
1068 static void ipi_teardown(int cpu) in ipi_teardown() argument
1084 disable_irq(irq_desc_get_irq(get_ipi_desc(cpu, i))); in ipi_teardown()
1092 int err, irq, cpu; in ipi_setup_sgi() local
1104 for_each_possible_cpu(cpu) in ipi_setup_sgi()
1105 get_ipi_desc(cpu, ipi) = irq_to_desc(irq); in ipi_setup_sgi()
1112 for (int cpu = 0; cpu < ncpus; cpu++) { in ipi_setup_lpi() local
1115 irq = ipi_irq_base + (cpu * nr_ipi) + ipi; in ipi_setup_lpi()
1117 err = irq_force_affinity(irq, cpumask_of(cpu)); in ipi_setup_lpi()
1126 get_ipi_desc(cpu, ipi) = irq_to_desc(irq); in ipi_setup_lpi()
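
ipi_setup_sgi() maps one shared IRQ descriptor per IPI across all CPUs, while
ipi_setup_lpi() gives each (cpu, ipi) pair its own Linux IRQ, laid out
linearly from ipi_irq_base and pinned to its CPU with irq_force_affinity().
The index math from line 1115, as a checkable sketch (ipi_irq_base, nr_ipi,
and the CPU count are made-up values; the real ones come from the irqchip
driver):

    #include <stdio.h>

    int main(void)
    {
            int ipi_irq_base = 64, nr_ipi = 4, ncpus = 2;

            for (int cpu = 0; cpu < ncpus; cpu++) {
                    for (int ipi = 0; ipi < nr_ipi; ipi++) {
                            /* One IRQ per (cpu, ipi) pair: rows are CPUs,
                             * columns are IPI types. */
                            int irq = ipi_irq_base + (cpu * nr_ipi) + ipi;
                            printf("cpu%d ipi%d -> irq %d\n", cpu, ipi, irq);
                    }
            }
            return 0;
    }
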
1151 void arch_smp_send_reschedule(int cpu) in arch_smp_send_reschedule() argument
1153 smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); in arch_smp_send_reschedule()
1157 void arch_send_wakeup_ipi(unsigned int cpu) in arch_send_wakeup_ipi() argument
1163 smp_send_reschedule(cpu); in arch_send_wakeup_ipi()