/*
 * Copyright (c) 2015 Travis Geiselbrecht
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#include <assert.h>
#include <lk/trace.h>
#include <lk/debug.h>
#include <stdint.h>
#include <stdlib.h>
#include <arch/riscv.h>
#include <arch/ops.h>
#include <arch/mp.h>
#include <lk/init.h>
#include <lk/main.h>
#include <platform.h>
#include <arch.h>

#include "arch/riscv/feature.h"
#include "riscv_priv.h"

#define LOCAL_TRACE 0

// per cpu structure, pointed to by the tp register
struct riscv_percpu percpu[SMP_MAX_CPUS];

// called extremely early from start.S prior to getting into any other C code on
// both the boot cpu and the secondaries
void riscv_configure_percpu_early(uint hart_id, uint __unused, uint cpu_num);
void riscv_configure_percpu_early(uint hart_id, uint __unused, uint cpu_num) {
    // point tp reg at the current cpu structure
    riscv_set_percpu(&percpu[cpu_num]);

    // set up the cpu number and hart id for the per cpu structure
    percpu[cpu_num].cpu_num = cpu_num;
    percpu[cpu_num].hart_id = hart_id;
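    // make sure the cpu_num and hart_id stores are visible to other harts
    // before this cpu shows up anywhere else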
    wmb();

#if WITH_SMP
    // do any MP percpu config
    riscv_configure_percpu_mp_early(hart_id, cpu_num);
#endif
}

// first C level code to initialize each cpu
void riscv_early_init_percpu(void) {
    // clear the scratch register in case we take an exception early
    riscv_csr_write(RISCV_CSR_XSCRATCH, 0);
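    // (by convention a zero xscratch tells the trap vector the trap came
    // from kernel rather than user mode)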

    // set the top level exception handler
    riscv_csr_write(RISCV_CSR_XTVEC, (uintptr_t)&riscv_exception_entry);
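    // (the low bits of xtvec are left zero, i.e. direct mode: all traps
    // funnel through the single entry point above)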

    // mask all interrupts, just in case
    riscv_csr_clear(RISCV_CSR_XSTATUS, RISCV_CSR_XSTATUS_IE);
    riscv_csr_clear(RISCV_CSR_XIE, RISCV_CSR_XIE_SIE | RISCV_CSR_XIE_TIE | RISCV_CSR_XIE_EIE);

#if RISCV_FPU
    // enable the fpu and zero it out
    riscv_csr_clear(RISCV_CSR_XSTATUS, RISCV_CSR_XSTATUS_FS_MASK);
    riscv_csr_set(RISCV_CSR_XSTATUS, RISCV_CSR_XSTATUS_FS_INITIAL);
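    // (FS 'initial' marks the fpu state as architecturally clean, so context
    // switch code can skip saving registers that were never dirtied)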

    riscv_fpu_zero();
#endif

    // enable cycle counter (disabled for now, unimplemented on sifive-e)
    //riscv_csr_set(mcounteren, 1);
}

// called very early just after entering C code on boot processor
void arch_early_init(void) {
    riscv_early_init_percpu();

    riscv_feature_early_init();

#if RISCV_S_MODE
    sbi_early_init();
#endif
#if RISCV_MMU
    riscv_early_mmu_init();
#endif
}

// later init per cpu
void riscv_init_percpu(void) {
    dprintf(INFO, "RISCV: percpu cpu num %#x hart id %#x\n", arch_curr_cpu_num(), riscv_current_hart());
#if WITH_SMP
    // enable software interrupts, used for inter-processor interrupts
    riscv_csr_set(RISCV_CSR_XIE, RISCV_CSR_XIE_SIE);
#endif

    // enable external interrupts
    riscv_csr_set(RISCV_CSR_XIE, RISCV_CSR_XIE_EIE);
}

// called later once the kernel is running before platform and target init
void arch_init(void) {
    riscv_init_percpu();

    // print some arch info
    const char *mode_string;
#if RISCV_M_MODE
    mode_string = "Machine";
#elif RISCV_S_MODE
    mode_string = "Supervisor";
#else
#error need to define M or S mode
#endif

    dprintf(INFO, "RISCV: %s mode\n", mode_string);
    dprintf(INFO, "RISCV: mvendorid %#lx marchid %#lx mimpid %#lx mhartid %#x\n",
            riscv_get_mvendorid(), riscv_get_marchid(),
            riscv_get_mimpid(), riscv_current_hart());

    riscv_feature_init();

#if RISCV_M_MODE
    dprintf(INFO, "RISCV: misa %#lx\n", riscv_csr_read(RISCV_CSR_MISA));
#elif RISCV_S_MODE
    sbi_init();
#if RISCV_MMU
    dprintf(INFO, "RISCV: MMU enabled sv%u\n", RISCV_MMU);
    riscv_mmu_init();
#endif
#endif

#if WITH_SMP
    riscv_boot_secondaries();
#endif
}

void arch_idle(void) {
    // let the platform/target disable wfi
#if !RISCV_DISABLE_WFI
    __asm__ volatile("wfi");
#endif
}

void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3) {
    PANIC_UNIMPLEMENTED;
}

#if RISCV_S_MODE
/* switch to user mode: set the user stack pointer to user_stack_top and jump
 * to user space at entry_point */
void arch_enter_uspace(vaddr_t entry_point, vaddr_t user_stack_top) {
    DEBUG_ASSERT(IS_ALIGNED(user_stack_top, 8));

    thread_t *ct = get_current_thread();

    vaddr_t kernel_stack_top = (uintptr_t)ct->stack + ct->stack_size;
    kernel_stack_top = ROUNDDOWN(kernel_stack_top, 16);
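    // (the RISC-V psABI requires the stack pointer to stay 16 byte aligned)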

    printf("kernel sstatus %#lx\n", riscv_csr_read(sstatus));

    // build a user status register
    ulong status;
    status = RISCV_CSR_XSTATUS_PIE |
             RISCV_CSR_XSTATUS_SUM;
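    // note: SPP is left clear so the sret below drops to user mode, and PIE
    // (SPIE here in supervisor mode) re-enables interrupts once sret completes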

    printf("user sstatus %#lx\n", status);

#if RISCV_FPU
    // set the FS bits before sstatus is written below so they are not lost,
    // and zero the fpu while it is still enabled
    status |= RISCV_CSR_XSTATUS_FS_INITIAL; // mark fpu state 'initial'
    riscv_fpu_zero();
#endif

    arch_disable_ints();

    riscv_csr_write(sstatus, status);
    riscv_csr_write(sepc, entry_point);
    riscv_csr_write(sscratch, kernel_stack_top);
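    // sscratch now holds the kernel stack top: the trap vector swaps it with
    // sp on the way in, which is how a trap from user mode finds a kernel stack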

    // put the current tp (percpu pointer) just below the top of the stack
    // the exception code will recover it when coming from user space
    ((uintptr_t *)kernel_stack_top)[-1] = (uintptr_t)riscv_get_percpu();
    asm volatile(
        // set the user stack pointer
        "mv  sp, %0\n"
        // zero out the rest of the integer state
        "li  a0, 0\n"
        "li  a1, 0\n"
        "li  a2, 0\n"
        "li  a3, 0\n"
        "li  a4, 0\n"
        "li  a5, 0\n"
        "li  a6, 0\n"
        "li  a7, 0\n"
        "li  t0, 0\n"
        "li  t1, 0\n"
        "li  t2, 0\n"
        "li  t3, 0\n"
        "li  t4, 0\n"
        "li  t5, 0\n"
        "li  t6, 0\n"
        "li  s0, 0\n"
        "li  s1, 0\n"
        "li  s2, 0\n"
        "li  s3, 0\n"
        "li  s4, 0\n"
        "li  s5, 0\n"
        "li  s6, 0\n"
        "li  s7, 0\n"
        "li  s8, 0\n"
        "li  s9, 0\n"
        "li  s10, 0\n"
        "li  s11, 0\n"
        "li  ra, 0\n"
        "li  gp, 0\n"
        "li  tp, 0\n"
        "sret"
        :: "r" (user_stack_top)
    );

    __UNREACHABLE;
}
#endif

/* cache operations: stubbed out or unimplemented, depending on the platform */
#if RISCV_NO_CACHE_OPS
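// no-op stubs for platforms that declare no cache maintenance is needed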
void arch_disable_cache(uint flags) { }
void arch_enable_cache(uint flags) { }

void arch_clean_cache_range(addr_t start, size_t len) { }
void arch_clean_invalidate_cache_range(addr_t start, size_t len) { }
void arch_invalidate_cache_range(addr_t start, size_t len) { }
void arch_sync_cache_range(addr_t start, size_t len) { }
#else
void arch_disable_cache(uint flags) { PANIC_UNIMPLEMENTED; }
void arch_enable_cache(uint flags) { PANIC_UNIMPLEMENTED; }

void arch_clean_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
void arch_clean_invalidate_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
void arch_invalidate_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
void arch_sync_cache_range(addr_t start, size_t len) { PANIC_UNIMPLEMENTED; }
#endif