1 /*
2  * xen/arch/arm/smpboot.c
3  *
4  * Dummy smpboot support
5  *
6  * Copyright (c) 2011 Citrix Systems.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  */
18 
19 #include <xen/cpu.h>
20 #include <xen/cpumask.h>
21 #include <xen/delay.h>
22 #include <xen/domain_page.h>
23 #include <xen/errno.h>
24 #include <xen/init.h>
25 #include <xen/mm.h>
26 #include <xen/sched.h>
27 #include <xen/smp.h>
28 #include <xen/softirq.h>
29 #include <xen/timer.h>
30 #include <xen/irq.h>
31 #include <xen/console.h>
32 #include <asm/cpuerrata.h>
33 #include <asm/gic.h>
34 #include <asm/psci.h>
35 #include <asm/acpi.h>
36 
/* Mask of CPUs that have completed bring-up (set in start_secondary()). */
cpumask_t cpu_online_map;
/* Mask of CPUs present in the system (copied from possible map). */
cpumask_t cpu_present_map;
/* Mask of CPUs that may ever be brought online (built from DT/ACPI). */
cpumask_t cpu_possible_map;

/* Per-CPU identification data, filled by identify_cpu(). */
struct cpuinfo_arm cpu_data[NR_CPUS];

/* CPU logical map: map xen cpuid to an MPIDR */
register_t __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

/* Fake one node for now. See also include/asm-arm/numa.h */
nodemask_t __read_mostly node_online_map = { { [0] = 1UL } };

/* Xen stack for bringing up the first CPU. */
static unsigned char __initdata cpu0_boot_stack[STACK_SIZE]
       __attribute__((__aligned__(STACK_SIZE)));

/* Initial boot cpu data; secondaries read their stack/cpuid from here. */
struct init_info __initdata init_data =
{
    .stack = cpu0_boot_stack,
};

/* Shared state for coordinating CPU bringup (gate checked by secondaries). */
unsigned long smp_up_cpu = MPIDR_INVALID;
/* Shared state for coordinating CPU teardown (set by stop_cpu()). */
static bool cpu_is_dead;

/* ID of the PCPU we're running on */
DEFINE_PER_CPU(unsigned int, cpu_id);
/* XXX these seem awfully x86ish... */
/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_mask);
/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_mask);
71 
setup_cpu_sibling_map(int cpu)72 static void setup_cpu_sibling_map(int cpu)
73 {
74     if ( !zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, cpu)) ||
75          !zalloc_cpumask_var(&per_cpu(cpu_core_mask, cpu)) )
76         panic("No memory for CPU sibling/core maps");
77 
78     /* A CPU is a sibling with itself and is always on its own core. */
79     cpumask_set_cpu(cpu, per_cpu(cpu_sibling_mask, cpu));
80     cpumask_set_cpu(cpu, per_cpu(cpu_core_mask, cpu));
81 }
82 
83 void __init
smp_clear_cpu_maps(void)84 smp_clear_cpu_maps (void)
85 {
86     cpumask_clear(&cpu_possible_map);
87     cpumask_clear(&cpu_online_map);
88     cpumask_set_cpu(0, &cpu_online_map);
89     cpumask_set_cpu(0, &cpu_possible_map);
90     cpu_logical_map(0) = READ_SYSREG(MPIDR_EL1) & MPIDR_HWID_MASK;
91 }
92 
/*
 * Parse the device tree and build the logical map array containing
 * MPIDR values related to logical cpus.
 * Code based on Linux arch/arm/kernel/devtree.c.
 */
static void __init dt_smp_init_cpus(void)
{
    register_t mpidr;
    struct dt_device_node *cpus = dt_find_node_by_path("/cpus");
    struct dt_device_node *cpu;
    unsigned int i, j;
    unsigned int cpuidx = 1;
    /* Scratch map; entries start as MPIDR_INVALID so duplicate scans
     * can never match an unused slot. */
    static register_t tmp_map[NR_CPUS] __initdata =
    {
        [0 ... NR_CPUS - 1] = MPIDR_INVALID
    };
    bool bootcpu_valid = false;
    int rc;

    /* Affinity bits of the CPU we are currently booting on. */
    mpidr = boot_cpu_data.mpidr.bits & MPIDR_HWID_MASK;

    if ( !cpus )
    {
        printk(XENLOG_WARNING "WARNING: Can't find /cpus in the device tree.\n"
               "Using only 1 CPU\n");
        return;
    }

    dt_for_each_child_node( cpus, cpu )
    {
        const __be32 *prop;
        u64 addr;
        u32 reg_len;
        register_t hwid;

        if ( !dt_device_type_is_equal(cpu, "cpu") )
            continue;

        /* cpu nodes are expected to have #size-cells == 0; warn but
         * keep parsing. */
        if ( dt_n_size_cells(cpu) != 0 )
            printk(XENLOG_WARNING "cpu node `%s`: #size-cells %d\n",
                   dt_node_full_name(cpu), dt_n_size_cells(cpu));

        prop = dt_get_property(cpu, "reg", &reg_len);
        if ( !prop )
        {
            printk(XENLOG_WARNING "cpu node `%s`: has no reg property\n",
                   dt_node_full_name(cpu));
            continue;
        }

        if ( reg_len < dt_cells_to_size(dt_n_addr_cells(cpu)) )
        {
            printk(XENLOG_WARNING "cpu node `%s`: reg property too short\n",
                   dt_node_full_name(cpu));
            continue;
        }

        addr = dt_read_number(prop, dt_n_addr_cells(cpu));

        /* Reject a reg value that does not fit in register_t. */
        hwid = addr;
        if ( hwid != addr )
        {
            printk(XENLOG_WARNING "cpu node `%s`: hwid overflow %"PRIx64"\n",
                   dt_node_full_name(cpu), addr);
            continue;
        }

        /*
         * 8 MSBs must be set to 0 in the DT since the reg property
         * defines the MPIDR[23:0].
         */
        if ( hwid & ~MPIDR_HWID_MASK )
        {
            printk(XENLOG_WARNING "cpu node `%s`: invalid hwid value (0x%"PRIregister")\n",
                   dt_node_full_name(cpu), hwid);
            continue;
        }

        /*
         * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
         * entries and check for duplicates. If any found just skip the node.
         * tmp_map entries are initialized to MPIDR_INVALID to avoid
         * matching valid MPIDR[23:0] values.
         */
        for ( j = 0; j < cpuidx; j++ )
        {
            if ( tmp_map[j] == hwid )
            {
                printk(XENLOG_WARNING
                       "cpu node `%s`: duplicate /cpu reg properties %"PRIregister" in the DT\n",
                       dt_node_full_name(cpu), hwid);
                break;
            }
        }
        if ( j != cpuidx )
            continue;

        /*
         * Build a stashed array of MPIDR values. Numbering scheme requires
         * that if detected the boot CPU must be assigned logical id 0. Other
         * CPUs get sequential indexes starting from 1. If a CPU node
         * with a reg property matching the boot CPU MPIDR is detected,
         * this is recorded and so that the logical map build from DT is
         * validated and can be used to set the map.
         */
        if ( hwid == mpidr )
        {
            i = 0;
            bootcpu_valid = true;
        }
        else
            i = cpuidx++;

        /* Cap at NR_CPUS; any further cpu nodes are silently ignored. */
        if ( cpuidx > NR_CPUS )
        {
            printk(XENLOG_WARNING
                   "DT /cpu %u node greater than max cores %u, capping them\n",
                   cpuidx, NR_CPUS);
            cpuidx = NR_CPUS;
            break;
        }

        /* CPUs whose arch init fails keep MPIDR_INVALID and are never
         * marked possible below. */
        if ( (rc = arch_cpu_init(i, cpu)) < 0 )
        {
            printk("cpu%d init failed (hwid %"PRIregister"): %d\n", i, hwid, rc);
            tmp_map[i] = MPIDR_INVALID;
        }
        else
            tmp_map[i] = hwid;
    }

    /* Without a boot CPU entry the whole map is untrustworthy. */
    if ( !bootcpu_valid )
    {
        printk(XENLOG_WARNING "DT missing boot CPU MPIDR[23:0]\n"
               "Using only 1 CPU\n");
        return;
    }

    /* Commit validated entries to the possible map and logical map. */
    for ( i = 0; i < cpuidx; i++ )
    {
        if ( tmp_map[i] == MPIDR_INVALID )
            continue;
        cpumask_set_cpu(i, &cpu_possible_map);
        cpu_logical_map(i) = tmp_map[i];
    }
}
238 
smp_init_cpus(void)239 void __init smp_init_cpus(void)
240 {
241     int rc;
242 
243     /* initialize PSCI and set a global variable */
244     psci_init();
245 
246     if ( (rc = arch_smp_init()) < 0 )
247     {
248         printk(XENLOG_WARNING "SMP init failed (%d)\n"
249                "Using only 1 CPU\n", rc);
250         return;
251     }
252 
253     if ( acpi_disabled )
254         dt_smp_init_cpus();
255     else
256         acpi_smp_init_cpus();
257 
258 }
259 
260 int __init
smp_get_max_cpus(void)261 smp_get_max_cpus (void)
262 {
263     int i, max_cpus = 0;
264 
265     for ( i = 0; i < nr_cpu_ids; i++ )
266         if ( cpu_possible(i) )
267             max_cpus++;
268 
269     return max_cpus;
270 }
271 
272 void __init
smp_prepare_cpus(unsigned int max_cpus)273 smp_prepare_cpus (unsigned int max_cpus)
274 {
275     cpumask_copy(&cpu_present_map, &cpu_possible_map);
276 
277     setup_cpu_sibling_map(0);
278 }
279 
/* Boot the current CPU */
void start_secondary(unsigned long boot_phys_offset,
                     unsigned long fdt_paddr,
                     unsigned long hwid)
{
    /* Logical id handed to us by __cpu_up() via init_data. */
    unsigned int cpuid = init_data.cpuid;

    /* Start from pristine per-CPU bookkeeping. */
    memset(get_cpu_info(), 0, sizeof (struct cpu_info));

    set_processor_id(cpuid);

    identify_cpu(&current_cpu_data);

    /* Bring up this CPU's execution environment, in order:
     * traps, MMU, GIC, then interrupt plumbing. */
    init_traps();

    mmu_init_secondary_cpu();

    gic_init_secondary_cpu();

    init_secondary_IRQ();

    init_maintenance_interrupt();
    init_timer_interrupt();

    /* From here on we run in the context of this CPU's idle vcpu. */
    set_current(idle_vcpu[cpuid]);

    setup_cpu_sibling_map(cpuid);

    /* Run local notifiers */
    notify_cpu_starting(cpuid);
    /*
     * Ensure that previous writes are visible before marking the cpu as
     * online.
     */
    smp_wmb();

    /* Now report this CPU is up */
    cpumask_set_cpu(cpuid, &cpu_online_map);

    local_irq_enable();
    local_abort_enable();

    check_local_cpu_errata();

    printk(XENLOG_DEBUG "CPU %u booted.\n", smp_processor_id());

    /* Never returns: drop into the idle loop. */
    startup_cpu_idle_loop();
}
328 
/* Shut down the current CPU */
void __cpu_disable(void)
{
    unsigned int cpu = get_processor_id();

    local_irq_disable();
    gic_disable_cpu();
    /* Allow any queued timer interrupts to get serviced */
    local_irq_enable();
    mdelay(1);
    local_irq_disable();

    /* It's now safe to remove this processor from the online map */
    cpumask_clear_cpu(cpu, &cpu_online_map);

    /* Evacuate all vcpus from this CPU; there is no recovery on failure. */
    if ( cpu_disable_scheduler(cpu) )
        BUG();
    /* Make the offline state visible to other CPUs. */
    smp_mb();

    /* Return to caller; eventually the IPI mechanism will unwind and the
     * scheduler will drop to the idle loop, which will call stop_cpu(). */
}
351 
/* Park the current CPU: flag it dead and wait-for-interrupt forever. */
void stop_cpu(void)
{
    local_irq_disable();
    /* Signal __cpu_die() that teardown on this CPU is complete. */
    cpu_is_dead = true;
    /* Make sure the write happens before we sleep forever */
    dsb(sy);
    isb();
    while ( 1 )
        wfi();
}
362 
cpu_up_send_sgi(int cpu)363 int __init cpu_up_send_sgi(int cpu)
364 {
365     /* We don't know the GIC ID of the CPU until it has woken up, so just
366      * signal everyone and rely on our own smp_up_cpu gate to ensure only
367      * the one we want gets through. */
368     send_SGI_allbutself(GIC_SGI_EVENT_CHECK);
369 
370     return 0;
371 }
372 
/* Bring up a remote CPU */
int __cpu_up(unsigned int cpu)
{
    int rc;
    s_time_t deadline;

    printk("Bringing up CPU%d\n", cpu);

    /* The secondary needs pagetables of its own before it can run. */
    rc = init_secondary_pagetables(cpu);
    if ( rc < 0 )
        return rc;

    console_start_sync(); /* Secondary may use early_printk */

    /* Tell the remote CPU which stack to boot on. */
    init_data.stack = idle_vcpu[cpu]->arch.stack;

    /* Tell the remote CPU what its logical CPU ID is. */
    init_data.cpuid = cpu;

    /* Open the gate for this CPU */
    smp_up_cpu = cpu_logical_map(cpu);
    /* Push the gate value out of the cache; presumably the waking CPU
     * reads it before its caches are on - NOTE(review): confirm vs head.S. */
    clean_dcache(smp_up_cpu);

    rc = arch_cpu_up(cpu);

    console_end_sync();

    if ( rc < 0 )
    {
        printk("Failed to bring up CPU%d\n", cpu);
        return rc;
    }

    /* Give the secondary up to one second to mark itself online. */
    deadline = NOW() + MILLISECS(1000);

    while ( !cpu_online(cpu) && NOW() < deadline )
    {
        cpu_relax();
        process_pending_softirqs();
    }
    /*
     * Ensure that other cpus' initializations are visible before
     * proceeding. Corresponds to smp_wmb() in start_secondary.
     */
    smp_rmb();

    /*
     * Nuke start of day info before checking one last time if the CPU
     * actually came online. If it is not online it may still be
     * trying to come up and may show up later unexpectedly.
     *
     * This doesn't completely avoid the possibility of the supposedly
     * failed CPU trying to progress with another CPUs stack settings
     * etc, but better than nothing, hopefully.
     */
    init_data.stack = NULL;
    init_data.cpuid = ~0;
    smp_up_cpu = MPIDR_INVALID;
    clean_dcache(smp_up_cpu);

    if ( !cpu_online(cpu) )
    {
        printk("CPU%d never came online\n", cpu);
        return -EIO;
    }

    return 0;
}
442 
/* Wait for a remote CPU to die */
void __cpu_die(unsigned int cpu)
{
    unsigned int i = 0;

    /* Poll the flag that stop_cpu() sets on the dying CPU. */
    while ( !cpu_is_dead )
    {
        mdelay(100);
        cpu_relax();
        process_pending_softirqs();
        /* Complain once a second (10 x 100ms) while still waiting. */
        if ( (++i % 10) == 0 )
            printk(KERN_ERR "CPU %u still not dead...\n", cpu);
        smp_mb();
    }
    /* Re-arm the shared flag for the next CPU teardown. */
    cpu_is_dead = false;
    smp_mb();
}
460 
461 /*
462  * Local variables:
463  * mode: C
464  * c-file-style: "BSD"
465  * c-basic-offset: 4
466  * indent-tabs-mode: nil
467  * End:
468  */
469