/*
 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
 * Copyright 2015, 2016 Hesham Almatary <heshamelmatary@gmail.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <config.h>
#include <model/statedata.h>
#include <arch/fastpath/fastpath.h>
#include <arch/kernel/traps.h>
#include <machine/debug.h>
#include <api/syscall.h>
#include <util.h>
#include <arch/machine/hardware.h>
#include <machine/fpu.h>

#include <benchmark/benchmark_track.h>
#include <benchmark/benchmark_utilisation.h>

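/*
 * Restore the current thread's register context from its TCB and return to
 * user level with sret. Never returns.
 */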
/** DONT_TRANSLATE */
void VISIBLE NORETURN restore_user_context(void)
{
    word_t cur_thread_reg = (word_t) NODE_STATE(ksCurThread)->tcbArch.tcbContext.registers;
    c_exit_hook();
    NODE_UNLOCK_IF_HELD;

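    /* Under SMP, sscratch holds this hart's kernel stack pointer while the
     * hart runs in user mode, and the trap entry code reloads the current
     * thread's register frame pointer from the word just below the top of
     * that stack, so stash cur_thread_reg there before leaving the kernel. */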
#ifdef ENABLE_SMP_SUPPORT
    word_t sp;
    asm volatile("csrr %0, sscratch" : "=r"(sp));
    sp -= sizeof(word_t);
    *((word_t *)sp) = cur_thread_reg;
#endif

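    /* Lazily restore the thread's FPU state (the FPU may be left disabled and
     * re-enabled on demand via the illegal instruction trap), then record the
     * resulting FPU enable state in the thread's saved sstatus FS field so
     * that the sstatus restored below matches the hardware state. */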
#ifdef CONFIG_HAVE_FPU
    lazyFPURestore(NODE_STATE(ksCurThread));
    set_tcb_fs_state(NODE_STATE(ksCurThread), isFpuEnable());
#endif

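    /* Reload every user-visible general-purpose register from the register
     * save area, restore sepc and sstatus, and return to user mode with sret.
     * t0 serves as the base pointer for all loads, so it is restored last. */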
    asm volatile(
        "mv t0, %[cur_thread]       \n"
        LOAD_S " ra, (0*%[REGSIZE])(t0)  \n"
        LOAD_S "  sp, (1*%[REGSIZE])(t0)  \n"
        LOAD_S "  gp, (2*%[REGSIZE])(t0)  \n"
        /* skip tp */
        /* skip x5/t0 */
        /* no-op store conditional to clear monitor state */
        /* this may succeed in implementations with very large reservations, but the saved ra is dead */
        "sc.w zero, zero, (t0)\n"
        LOAD_S "  t2, (6*%[REGSIZE])(t0)  \n"
        LOAD_S "  s0, (7*%[REGSIZE])(t0)  \n"
        LOAD_S "  s1, (8*%[REGSIZE])(t0)  \n"
        LOAD_S "  a0, (9*%[REGSIZE])(t0) \n"
        LOAD_S "  a1, (10*%[REGSIZE])(t0) \n"
        LOAD_S "  a2, (11*%[REGSIZE])(t0) \n"
        LOAD_S "  a3, (12*%[REGSIZE])(t0) \n"
        LOAD_S "  a4, (13*%[REGSIZE])(t0) \n"
        LOAD_S "  a5, (14*%[REGSIZE])(t0) \n"
        LOAD_S "  a6, (15*%[REGSIZE])(t0) \n"
        LOAD_S "  a7, (16*%[REGSIZE])(t0) \n"
        LOAD_S "  s2, (17*%[REGSIZE])(t0) \n"
        LOAD_S "  s3, (18*%[REGSIZE])(t0) \n"
        LOAD_S "  s4, (19*%[REGSIZE])(t0) \n"
        LOAD_S "  s5, (20*%[REGSIZE])(t0) \n"
        LOAD_S "  s6, (21*%[REGSIZE])(t0) \n"
        LOAD_S "  s7, (22*%[REGSIZE])(t0) \n"
        LOAD_S "  s8, (23*%[REGSIZE])(t0) \n"
        LOAD_S "  s9, (24*%[REGSIZE])(t0) \n"
        LOAD_S "  s10, (25*%[REGSIZE])(t0)\n"
        LOAD_S "  s11, (26*%[REGSIZE])(t0)\n"
        LOAD_S "  t3, (27*%[REGSIZE])(t0) \n"
        LOAD_S "  t4, (28*%[REGSIZE])(t0) \n"
        LOAD_S "  t5, (29*%[REGSIZE])(t0) \n"
        LOAD_S "  t6, (30*%[REGSIZE])(t0) \n"
        /* load the saved tp value into t1 ... */
        LOAD_S "  t1, (3*%[REGSIZE])(t0)  \n"
        /* ... and move it into tp */
        "add tp, t1, x0  \n"
        /* get sepc */
        LOAD_S "  t1, (34*%[REGSIZE])(t0)\n"
        "csrw sepc, t1  \n"
#ifndef ENABLE_SMP_SUPPORT
        /* Write back sscratch with cur_thread_reg to get it back on the next trap entry */
        "csrw sscratch, t0         \n"
#endif
        LOAD_S "  t1, (32*%[REGSIZE])(t0) \n"
        "csrw sstatus, t1\n"

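        /* t1 and then t0 are restored last, since t0 has been the base
         * pointer for every load above. */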
        LOAD_S "  t1, (5*%[REGSIZE])(t0) \n"
        LOAD_S "  t0, (4*%[REGSIZE])(t0) \n"
        "sret"
        : /* no output */
        : [REGSIZE] "i"(sizeof(word_t)),
        [cur_thread] "r"(cur_thread_reg)
        : "memory"
    );

    UNREACHABLE();
}

void VISIBLE NORETURN c_handle_interrupt(void)
{
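    /* Take the kernel lock for interrupt handling, except for remote call
     * IPIs: those are processed without the lock, since the initiating core
     * already holds it for the duration of the IPI handshake. */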
    NODE_LOCK_IRQ_IF(getActiveIRQ() != irq_remote_call_ipi);

    c_entry_hook();

    handleInterruptEntry();

    restore_user_context();
    UNREACHABLE();
}

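/*
 * C entry point for synchronous exceptions taken from user level: VM faults
 * are forwarded to handleVMFaultEvent(); any other cause is treated either as
 * an FPU trap (when the FPU is currently disabled) or as a user-level fault.
 */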
void VISIBLE NORETURN c_handle_exception(void)
{
    NODE_LOCK_SYS;

    c_entry_hook();

    word_t scause = read_scause();
    switch (scause) {
    case RISCVInstructionAccessFault:
    case RISCVLoadAccessFault:
    case RISCVStoreAccessFault:
    case RISCVLoadPageFault:
    case RISCVStorePageFault:
    case RISCVInstructionPageFault:
        handleVMFaultEvent(scause);
        break;
    default:
#ifdef CONFIG_HAVE_FPU
        if (!isFpuEnable()) {
            /* With the FPU disabled, assume the trap is an illegal instruction
             * caused by an FPU access: enable the FPU for this thread and
             * retry the instruction by resetting the next PC to the restart PC. */
            handleFPUFault();
            setNextPC(NODE_STATE(ksCurThread), getRestartPC(NODE_STATE(ksCurThread)));
            break;
        }
#endif
        handleUserLevelFault(scause, 0);
        break;
    }

    restore_user_context();
    UNREACHABLE();
}

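/*
 * Slowpath syscall dispatch: syscall numbers outside the standard range go to
 * handleUnknownSyscall(), valid ones to handleSyscall(); both paths return to
 * user level through restore_user_context().
 */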
void VISIBLE NORETURN slowpath(syscall_t syscall)
{
    if (unlikely(syscall < SYSCALL_MIN || syscall > SYSCALL_MAX)) {
#ifdef TRACK_KERNEL_ENTRIES
        ksKernelEntry.path = Entry_UnknownSyscall;
#endif /* TRACK_KERNEL_ENTRIES */
        /* Despite the name, this also handles the non-standard syscalls that
         * are only available in debug builds.
         */
        handleUnknownSyscall(syscall);
    } else {
#ifdef TRACK_KERNEL_ENTRIES
        ksKernelEntry.is_fastpath = 0;
#endif /* TRACK_KERNEL_ENTRIES */
        handleSyscall(syscall);
    }

    restore_user_context();
    UNREACHABLE();
}

#ifdef CONFIG_FASTPATH
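/* The fastpath entry points are aligned to an L1 cache line, presumably so
 * that the hot path begins at the start of a fresh instruction cache line. */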
ALIGN(L1_CACHE_LINE_SIZE)
#ifdef CONFIG_KERNEL_MCS
void VISIBLE c_handle_fastpath_reply_recv(word_t cptr, word_t msgInfo, word_t reply)
#else
void VISIBLE c_handle_fastpath_reply_recv(word_t cptr, word_t msgInfo)
#endif
{
    NODE_LOCK_SYS;

    c_entry_hook();
#ifdef TRACK_KERNEL_ENTRIES
    benchmark_debug_syscall_start(cptr, msgInfo, SysReplyRecv);
    ksKernelEntry.is_fastpath = 1;
#endif /* TRACK_KERNEL_ENTRIES */
#ifdef CONFIG_KERNEL_MCS
    fastpath_reply_recv(cptr, msgInfo, reply);
#else
    fastpath_reply_recv(cptr, msgInfo);
#endif
    UNREACHABLE();
}

ALIGN(L1_CACHE_LINE_SIZE)
void VISIBLE c_handle_fastpath_call(word_t cptr, word_t msgInfo)
{
    NODE_LOCK_SYS;

    c_entry_hook();
#ifdef TRACK_KERNEL_ENTRIES
    benchmark_debug_syscall_start(cptr, msgInfo, SysCall);
    ksKernelEntry.is_fastpath = 1;
#endif /* TRACK_KERNEL_ENTRIES */

    fastpath_call(cptr, msgInfo);

    UNREACHABLE();
}
#endif

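/*
 * C entry point for syscalls that did not take the assembly fastpath (or when
 * no fastpath is configured); all of them are dispatched through slowpath().
 */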
void VISIBLE NORETURN c_handle_syscall(word_t cptr, word_t msgInfo, syscall_t syscall)
{
    NODE_LOCK_SYS;

    c_entry_hook();
#ifdef TRACK_KERNEL_ENTRIES
    benchmark_debug_syscall_start(cptr, msgInfo, syscall);
    ksKernelEntry.is_fastpath = 0;
#endif /* TRACK_KERNEL_ENTRIES */
    slowpath(syscall);

    UNREACHABLE();
}