/*
 * Copyright (c) 2015 Travis Geiselbrecht
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#include <assert.h>
#include <lk/trace.h>
#include <lk/debug.h>
#include <stdint.h>
#include <stdlib.h>
#include <arch/riscv.h>
#include <arch/ops.h>
#include <arch/mp.h>
#include <lk/init.h>
#include <lk/main.h>
#include <platform.h>
#include <arch.h>

#include "riscv_priv.h"

#define LOCAL_TRACE 0

// per cpu structure, pointed to by the tp register while in the kernel
struct riscv_percpu percpu[SMP_MAX_CPUS];
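
// A sketch of the fields this file relies on; the real definition lives in
// this port's headers and likely carries more state than shown here:
//
//   struct riscv_percpu {
//       uint cpu_num;   // logical cpu number, zero based
//       uint hart_id;   // hardware hart id, not necessarily contiguous
//       ...
//   };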

// called extremely early from start.S prior to getting into any other C code on
// both the boot cpu and the secondaries
void riscv_configure_percpu_early(uint hart_id, uint __unused, uint cpu_num);
void riscv_configure_percpu_early(uint hart_id, uint __unused, uint cpu_num) {
    // point tp reg at the current cpu structure
    riscv_set_percpu(&percpu[cpu_num]);

    // set up the cpu number and hart id for the per cpu structure
    percpu[cpu_num].cpu_num = cpu_num;
    percpu[cpu_num].hart_id = hart_id;
    wmb();

#if WITH_SMP
    // do any MP percpu config
    riscv_configure_percpu_mp_early(hart_id, cpu_num);
#endif
}
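
// riscv_set_percpu() used above is assumed to be a tiny helper that parks the
// pointer in the otherwise-unused tp register, roughly:
//
//   static inline void riscv_set_percpu(struct riscv_percpu *ptr) {
//       __asm__ volatile("mv tp, %0" :: "r"(ptr));
//   }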

// first C level code to initialize each cpu
void riscv_early_init_percpu(void) {
    // clear the scratch register in case we take an exception early
    riscv_csr_write(RISCV_CSR_XSCRATCH, 0);

    // set the top level exception handler
    riscv_csr_write(RISCV_CSR_XTVEC, (uintptr_t)&riscv_exception_entry);
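
    // Note: the low two bits of xtvec select the trap dispatch mode (0 = direct,
    // all traps enter at BASE; 1 = vectored). riscv_exception_entry is assumed
    // to be at least 4-byte aligned, so writing the raw address selects direct
    // mode with every trap funneled through the one entry point.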

    // mask all interrupts, just in case
    riscv_csr_clear(RISCV_CSR_XSTATUS, RISCV_CSR_XSTATUS_IE);
    riscv_csr_clear(RISCV_CSR_XIE, RISCV_CSR_XIE_SIE | RISCV_CSR_XIE_TIE | RISCV_CSR_XIE_EIE);

    // enable cycle counter (disabled for now, unimplemented on sifive-e)
    //riscv_csr_set(mcounteren, 1);
}

// called very early just after entering C code on boot processor
void arch_early_init(void) {
    riscv_early_init_percpu();

#if RISCV_S_MODE
    sbi_early_init();
#endif
#if RISCV_MMU
    riscv_early_mmu_init();
#endif
}
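
// For orientation, a rough sketch of where these hooks land in the generic lk
// boot flow (assuming the standard top/main.c sequence in this tree):
//
//   start.S -> lk_main()
//       arch_early_init()     // above
//       platform_early_init()
//       ... heap and threading come up ...
//   bootstrap2 thread:
//       arch_init()           // below
//       platform_init()
//       target_init()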

// later init per cpu
void riscv_init_percpu(void) {
    dprintf(INFO, "RISCV: percpu cpu num %#x hart id %#x\n", arch_curr_cpu_num(), riscv_current_hart());
#if WITH_SMP
    // enable software interrupts, used for inter-processor interrupts
    riscv_csr_set(RISCV_CSR_XIE, RISCV_CSR_XIE_SIE);
#endif

    // enable external interrupts
    riscv_csr_set(RISCV_CSR_XIE, RISCV_CSR_XIE_EIE);
}
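
// With XIE_SIE set above, other cpus can now interrupt this one with a software
// interrupt. How one is raised is platform code, but roughly (an assumption,
// not verified against this tree's mp implementation):
//
//   M mode: write 1 to the target hart's msip word in the CLINT
//   S mode: request it from the firmware via the SBI IPI extension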

// called later once the kernel is running, before platform and target init
void arch_init(void) {
    riscv_init_percpu();

    // print some arch info
#if RISCV_M_MODE
    dprintf(INFO, "RISCV: Machine mode\n");
    dprintf(INFO, "RISCV: mvendorid %#lx marchid %#lx mimpid %#lx mhartid %#x\n",
            riscv_get_mvendorid(), riscv_get_marchid(),
            riscv_get_mimpid(), riscv_current_hart());
    dprintf(INFO, "RISCV: misa %#lx\n", riscv_csr_read(RISCV_CSR_MISA));
#else
    dprintf(INFO, "RISCV: Supervisor mode\n");
#if RISCV_MMU
    dprintf(INFO, "RISCV: MMU enabled sv%u\n", RISCV_MMU);
    riscv_mmu_init();
#endif
    sbi_init();
#endif

#if WITH_SMP
    riscv_boot_secondaries();
#endif
}

void arch_idle(void) {
    // let the platform/target disable wfi
#if !RISCV_DISABLE_WFI
    __asm__ volatile("wfi");
#endif
}
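
// Note: the privileged spec allows wfi to be implemented as a plain nop, so
// arch_idle() may return immediately; the kernel's idle thread is assumed to
// call it in a loop, re-checking for runnable work each time around.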

void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3) {
    PANIC_UNIMPLEMENTED;
}

#if RISCV_S_MODE
/* switch to user mode, set the user stack pointer to user_stack_top, and get into user space */
void arch_enter_uspace(vaddr_t entry_point, vaddr_t user_stack_top) {
    DEBUG_ASSERT(IS_ALIGNED(user_stack_top, 8));

    thread_t *ct = get_current_thread();

    vaddr_t kernel_stack_top = (uintptr_t)ct->stack + ct->stack_size;
    kernel_stack_top = ROUNDDOWN(kernel_stack_top, 16); // keep sp 16-byte aligned per the RISC-V psABI

    printf("kernel sstatus %#lx\n", riscv_csr_read(sstatus));

    // build a user status register
    ulong status;
    status = RISCV_CSR_XSTATUS_PIE |
             RISCV_CSR_XSTATUS_SUM;

#if RISCV_FPU
    status |= (1ul << RISCV_CSR_XSTATUS_FS_SHIFT); // mark fpu state 'initial' (FS: 0=off 1=initial 2=clean 3=dirty)
#endif

    printf("user sstatus %#lx\n", status);

    arch_disable_ints();

    riscv_csr_write(sstatus, status);
    riscv_csr_write(sepc, entry_point);
    riscv_csr_write(sscratch, kernel_stack_top);
    // put the current tp (percpu pointer) just below the top of the stack;
    // the exception code will recover it when coming from user space
    ((uintptr_t *)kernel_stack_top)[-1] = (uintptr_t)riscv_get_percpu();
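
    // A hedged sketch of the matching trap-entry code (the real version lives
    // in this port's exception assembly, which this file does not show): on a
    // trap from user mode it is assumed to swap sp with sscratch and pull tp
    // back off the kernel stack:
    //
    //   csrrw sp, sscratch, sp   // sp <- kernel stack top, sscratch <- user sp
    //   ld    tp, -8(sp)         // recover the percpu pointer stashed above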
    asm volatile(
#if RISCV_FPU
        // zero out the fpu state
        "csrw fcsr, 0\n"
        // TODO: figure out how to do this more cleanly.
        // Without 'fd' in the march line the assembler won't let us emit a direct
        // fpu opcode, and attempts to use the .insn directive were unsuccessful.
        // Below is a series of fmv.d.x fN, zero instructions, hand assembled to
        // wipe out the complete fpu register state.
        ".word 0xf2000053\n" // fmv.d.x f0, zero
        ".word 0xf20000d3\n" // fmv.d.x f1, zero
        ".word 0xf2000153\n" // ...
        ".word 0xf20001d3\n"
        ".word 0xf2000253\n"
        ".word 0xf20002d3\n"
        ".word 0xf2000353\n"
        ".word 0xf20003d3\n"
        ".word 0xf2000453\n"
        ".word 0xf20004d3\n"
        ".word 0xf2000553\n"
        ".word 0xf20005d3\n"
        ".word 0xf2000653\n"
        ".word 0xf20006d3\n"
        ".word 0xf2000753\n"
        ".word 0xf20007d3\n"
        ".word 0xf2000853\n"
        ".word 0xf20008d3\n"
        ".word 0xf2000953\n"
        ".word 0xf20009d3\n"
        ".word 0xf2000a53\n"
        ".word 0xf2000ad3\n"
        ".word 0xf2000b53\n"
        ".word 0xf2000bd3\n"
        ".word 0xf2000c53\n"
        ".word 0xf2000cd3\n"
        ".word 0xf2000d53\n"
        ".word 0xf2000dd3\n"
        ".word 0xf2000e53\n"
        ".word 0xf2000ed3\n"
        ".word 0xf2000f53\n"
        ".word 0xf2000fd3\n" // fmv.d.x f31, zero
#endif
        // set the user stack pointer
        "mv sp, %0\n"
        // zero out the rest of the integer state
        "li a0, 0\n"
        "li a1, 0\n"
        "li a2, 0\n"
        "li a3, 0\n"
        "li a4, 0\n"
        "li a5, 0\n"
        "li a6, 0\n"
        "li a7, 0\n"
        "li t0, 0\n"
        "li t1, 0\n"
        "li t2, 0\n"
        "li t3, 0\n"
        "li t4, 0\n"
        "li t5, 0\n"
        "li t6, 0\n"
        "li s0, 0\n"
        "li s1, 0\n"
        "li s2, 0\n"
        "li s3, 0\n"
        "li s4, 0\n"
        "li s5, 0\n"
        "li s6, 0\n"
        "li s7, 0\n"
        "li s8, 0\n"
        "li s9, 0\n"
        "li s10, 0\n"
        "li s11, 0\n"
        "li ra, 0\n"
        "li gp, 0\n"
        "li tp, 0\n"
        // return to user mode: pc <- sepc, privilege <- sstatus.SPP (user),
        // interrupt enable restored from sstatus.SPIE
        "sret"
        :: "r" (user_stack_top)
    );

    __UNREACHABLE;
}
#endif

/* unimplemented cache operations */
#if RISCV_NO_CACHE_OPS
void arch_disable_cache(uint flags) { }
void arch_enable_cache(uint flags) { }

void arch_clean_cache_range(addr_t start, size_t len) { }
void arch_clean_invalidate_cache_range(addr_t start, size_t len) { }
void arch_invalidate_cache_range(addr_t start, size_t len) { }
void arch_sync_cache_range(addr_t start, size_t len) { }
#else
void arch_disable_cache(uint flags) { PANIC_UNIMPLEMENTED; }
void arch_enable_cache(uint flags) { PANIC_UNIMPLEMENTED; }

void arch_clean_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
void arch_clean_invalidate_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
void arch_invalidate_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
void arch_sync_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
#endif