/*
 * Copyright (c) 2019 Elliot Berman
 * Copyright (c) 2020 Travis Geiselbrecht
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */

#include <lk/reg.h>
#include <lk/compiler.h>
#include <lk/debug.h>
#include <lk/trace.h>
#include <lk/err.h>
#include <lk/init.h>
#include <lk/main.h>

#include <arch/atomic.h>
#include <arch/ops.h>
#include <arch/mp.h>
#include <arch/riscv/clint.h>

#include "riscv_priv.h"

#if WITH_SMP

#define LOCAL_TRACE 0

// mapping of cpu -> hart
static int cpu_to_hart_map[SMP_MAX_CPUS];

// bitmap of pending IPIs per cpu
static volatile int ipi_data[SMP_MAX_CPUS];

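// lock that secondary cpus spin on until the boot cpu releases them, starts out held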
static spin_lock_t boot_cpu_lock = 1;
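// number of secondary cpus to bring up, defaults to all cpus other than the boot cpu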
volatile int secondaries_to_init = SMP_MAX_CPUS - 1;

// modified in start.S to save the physical address of _start as the first cpu boots
uintptr_t _start_physical;

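// send the given ipi to the set of cpus in target, translating cpu numbers to hart ids
// and signaling them through the CLINT (machine mode) or the SBI (supervisor mode)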
status_t arch_mp_send_ipi(mp_cpu_mask_t target, mp_ipi_t ipi) {
    LTRACEF("target 0x%x, ipi %u\n", target, ipi);

    mp_cpu_mask_t m = target;
    ulong hart_mask = 0;
    for (uint c = 0; c < SMP_MAX_CPUS && m; c++, m >>= 1) {
        if (m & 1) {
            int h = cpu_to_hart_map[c];
            LTRACEF("c %u h %d m %#x\n", c, h, m);

            // record a pending hart to notify
            hart_mask |= (1ul << h);

            // set the ipi_data based on the incoming ipi
            atomic_or(&ipi_data[h], (1u << ipi));
        }
    }

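    // make sure the ipi_data updates are visible before poking the target harts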
    mb();
#if RISCV_M_MODE
    clint_send_ipis(&hart_mask);
#else
    sbi_send_ipis(&hart_mask);
#endif

    return NO_ERROR;
}

// software triggered exceptions, used for cross-cpu calls
enum handler_return riscv_software_exception(void) {
    uint curr_cpu = arch_curr_cpu_num();

#if RISCV_M_MODE
    uint ch = riscv_current_hart();
    clint_ipi_clear(ch);
#else
    sbi_clear_ipi();
#endif

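    // atomically fetch and clear the pending ipi bits for this cpu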
    rmb();
    int reason = atomic_swap(&ipi_data[curr_cpu], 0);
    LTRACEF("cpu %u reason %#x\n", curr_cpu, reason);

    enum handler_return ret = INT_NO_RESCHEDULE;
    if (reason & (1u << MP_IPI_RESCHEDULE)) {
        ret = mp_mbx_reschedule_irq();
        reason &= ~(1u << MP_IPI_RESCHEDULE);
    }
    if (reason & (1u << MP_IPI_GENERIC)) {
        panic("unimplemented MP_IPI_GENERIC\n");
        reason &= ~(1u << MP_IPI_GENERIC);
    }

    if (unlikely(reason)) {
        TRACEF("unhandled ipi cause %#x, cpu %u\n", reason, curr_cpu);
        panic("stopping");
    }

    return ret;
}

// called in very early percpu bringup
void riscv_configure_percpu_mp_early(uint hart_id, uint cpu_num) {
    cpu_to_hart_map[cpu_num] = hart_id;
    wmb();
}

// called from assembly
void riscv_secondary_entry(uint hart_id, uint __unused, uint cpu_id);
void riscv_secondary_entry(uint hart_id, uint __unused, uint cpu_id) {
    // basic bootstrapping of this cpu
    riscv_early_init_percpu();

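    // cpus beyond the configured maximum simply park here forever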
    if (unlikely(arch_curr_cpu_num() >= SMP_MAX_CPUS)) {
        while (1) {
            arch_idle();
        }
    }

    // spin here waiting for the main cpu to release us
    spin_lock(&boot_cpu_lock);
    spin_unlock(&boot_cpu_lock);

#if RISCV_MMU
    // let the mmu code configure per cpu bits
    riscv_mmu_init_secondaries();
#endif

    // run early secondary cpu init routines up to the threading level
    lk_init_level(LK_INIT_FLAG_SECONDARY_CPUS, LK_INIT_LEVEL_EARLIEST, LK_INIT_LEVEL_THREADING - 1);

    // run threading level initialization on this cpu
    riscv_init_percpu();

    dprintf(INFO, "RISCV: secondary hart coming up: mvendorid %#lx marchid %#lx mimpid %#lx mhartid %#x\n",
            riscv_get_mvendorid(), riscv_get_marchid(),
            riscv_get_mimpid(), riscv_current_hart());

    // atomic_add(&secondaries_to_init, -1);
    // arch_mp_send_ipi(1 << 0, MP_IPI_GENERIC); // wake up hart0 to let it know this CPU has come up

    lk_secondary_cpu_entry();
}

// platform can detect and set the number of cores to boot (optional)
void riscv_set_secondary_count(int count) {
    if (count > SMP_MAX_CPUS - 1) {
        count = SMP_MAX_CPUS - 1;
    }
    secondaries_to_init = count;
}

// start any secondary cpus we are set to start. called on the boot processor
void riscv_boot_secondaries(void) {
    lk_init_secondary_cpus(secondaries_to_init);

#if RISCV_M_MODE
    dprintf(INFO, "RISCV: Releasing %d secondary harts from purgatory\n", secondaries_to_init);
#else
    uint boot_hart = riscv_current_hart();

    // use SBI HSM to boot the secondaries
    // TODO: handle the range of harts we should consider, since they
    // may not be zero based
    for (uint i = 0; i <= (uint)secondaries_to_init; i++) {
        // skip the boot hart
        if (i != boot_hart) {
            dprintf(INFO, "RISCV: using SBI to start hart %u at %#lx\n", i, _start_physical);
            sbi_boot_hart(i, _start_physical, 0);
        }
    }
#endif
    /* release the secondary cpus */
    spin_unlock(&boot_cpu_lock);
}

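// Usage sketch (assumes a hypothetical platform with four harts where hart 0 is the
// boot hart; the actual call sites and counts are platform specific):
//
//   riscv_set_secondary_count(3);   // from early platform init, once topology is known
//   ...
//   riscv_boot_secondaries();       // when ready to bring the other harts online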

#endif