/*
 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <config.h>
#include <arch/machine.h>
#include <arch/kernel/boot_sys.h>
#include <arch/kernel/smp_sys.h>
#include <smp/lock.h>

#ifdef ENABLE_SMP_SUPPORT

/* Index of the next AP to boot; the BSP has index zero */
BOOT_DATA VISIBLE
volatile word_t smp_aps_index = 1;
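
/* smp_aps_index doubles as the boot handshake: start_boot_aps() spins until
 * the booting AP increments it in boot_node(), which guarantees that at most
 * one AP is ever using the shared kernel boot stack. */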

#ifdef CONFIG_USE_LOGICAL_IDS
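/* Record this CPU's logical APIC ID and cross-link it with every CPU already
 * booted that sits in the same APIC cluster; the resulting sibling masks are
 * used when IPIs need to reach several cores of one cluster at once. */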
BOOT_CODE static void update_logical_id_mappings(void)
{
    cpu_mapping.index_to_logical_id[getCurrentCPUIndex()] = apic_get_logical_id();

    for (int i = 0; i < smp_aps_index; i++) {
        if (apic_get_cluster(cpu_mapping.index_to_logical_id[getCurrentCPUIndex()]) ==
            apic_get_cluster(cpu_mapping.index_to_logical_id[i])) {

            cpu_mapping.other_indexes_in_cluster[getCurrentCPUIndex()] |= BIT(i);
            cpu_mapping.other_indexes_in_cluster[i] |= BIT(getCurrentCPUIndex());
        }
    }
}
#endif /* CONFIG_USE_LOGICAL_IDS */

BOOT_CODE static void start_cpu(cpu_id_t cpu_id, paddr_t boot_fun_paddr)
{
    /* memory fence needed before starting the other CPU */
    x86_mfence();

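    /* Standard x86 MP startup sequence: the INIT IPI resets the target core,
     * then the STARTUP IPI makes it start executing in real mode at
     * boot_fun_paddr, which must be a 4K-aligned address below 1MiB (here the
     * stub that copy_boot_code_aps() placed at BOOT_NODE_PADDR). */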
    apic_send_init_ipi(cpu_id);
    apic_send_startup_ipi(cpu_id, boot_fun_paddr);
}

BOOT_CODE void start_boot_aps(void)
{
    /* update cpu mapping for the BSP; cpus[0] is always assumed to be the BSP */
    cpu_mapping.index_to_cpu_id[getCurrentCPUIndex()] = boot_state.cpus[0];
#ifdef CONFIG_USE_LOGICAL_IDS
    cpu_mapping.index_to_logical_id[getCurrentCPUIndex()] = apic_get_logical_id();
#endif /* CONFIG_USE_LOGICAL_IDS */

    /* start the APs one at a time, as they all boot on the shared kernel boot stack */
    while (smp_aps_index < boot_state.num_cpus) {
        word_t current_ap_index = smp_aps_index;

        printf("Starting node #%lu with APIC ID %lu\n",
               current_ap_index, boot_state.cpus[current_ap_index]);

        /* update cpu mapping for APs: APIC IDs are not necessarily contiguous
         * (e.g. 0,2,1,3 for 4 cores with hyperthreading), so we store a mapping
         * to translate each index to the real APIC ID of the booting AP */
        cpu_mapping.index_to_cpu_id[current_ap_index] = boot_state.cpus[current_ap_index];
        start_cpu(boot_state.cpus[current_ap_index], BOOT_NODE_PADDR);

        /* wait for the current AP to finish booting; boot_node() increments
         * smp_aps_index once it no longer needs the shared boot stack */
        while (smp_aps_index == current_ap_index);
    }
}

BOOT_CODE bool_t copy_boot_code_aps(uint32_t mem_lower)
{
    assert(boot_cpu_end - boot_cpu_start < 0x400);

    /* Ensure that our boot code fits in the memory hole we want to use, and
     * check that this region is free according to multiboot. As boot_cpu_end
     * and boot_cpu_start are link-time symbols (not compile-time constants),
     * this cannot be a compile-time check */
    word_t boot_size = (word_t)(boot_cpu_end - boot_cpu_start);
    word_t boot_node_top = BOOT_NODE_PADDR + boot_size;
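    /* multiboot's mem_lower is reported in KiB, hence the shift to bytes */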
    word_t mem_lower_bytes = mem_lower << 10;
    if (boot_node_top > BOOT_NODE_MAX_PADDR) {
        printf("AP boot code does not fit in chosen memory hole. Can be at most %lu, is %lu\n",
               (word_t)(BOOT_NODE_MAX_PADDR - BOOT_NODE_PADDR), boot_size);
        return false;
    }
    if (mem_lower_bytes < boot_node_top) {
        printf("Need lower physical memory up to %lu to be free. Multiboot reports only up to %lu\n",
               boot_node_top, mem_lower_bytes);
        return false;
    }

    /* copy CPU bootup code to lower memory */
    memcpy((void *)BOOT_NODE_PADDR, boot_cpu_start, boot_size);
    return true;
}

static BOOT_CODE bool_t try_boot_node(void)
{
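    /* Switch to the kernel's address space; up to this point the AP has been
     * running on whatever page tables the low-memory boot stub set up. */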
    setCurrentVSpaceRoot(kpptr_to_paddr(X86_KERNEL_VSPACE_ROOT), 0);
    /* Sync up the compiler's view of the world here to force the PD to
     * actually be set *right now* instead of delayed */
    asm volatile("" ::: "memory");

    /* initialise the CPU and make sure legacy interrupts are disabled */
    if (!init_cpu(1)) {
        return false;
    }

#ifdef CONFIG_USE_LOGICAL_IDS
    update_logical_id_mappings();
#endif /* CONFIG_USE_LOGICAL_IDS */
    return true;
}

/* This is the entry function for APs. However, it is not BOOT_CODE, as there
 * is a race between an AP exiting this function and the root task running on
 * node #0, which could cause this memory to be reallocated */
VISIBLE void boot_node(void)
{
    bool_t result;

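    /* Set up this core's TLS first: getCurrentCPUIndex() and the per-node
     * state accessors used below depend on it. */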
    mode_init_tls(smp_aps_index);
    result = try_boot_node();

    if (!result) {
        fail("boot_node failed for some reason :(\n");
    }

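    /* the other half of the handshake with start_boot_aps(): bumping the
     * index tells the BSP this AP is done with the shared boot stack, so it
     * may start the next AP */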
    smp_aps_index++;

    /* grab BKL before leaving the kernel */
    NODE_LOCK_SYS;

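    /* initialise this core's scheduler state, and record that no interrupt
     * is currently being serviced or pending before the first schedule() */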
    init_core_state(SchedulerAction_ChooseNewThread);
    ARCH_NODE_STATE(x86KScurInterrupt) = int_invalid;
    ARCH_NODE_STATE(x86KSPendingInterrupt) = int_invalid;

    schedule();
    activateThread();
}

#endif /* ENABLE_SMP_SUPPORT */