1 /*
2  * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
3  *
4  * SPDX-License-Identifier: GPL-2.0-only
5  */
6 
7 #include <config.h>
8 #include <arch/kernel/traps.h>
9 #include <arch/object/vcpu.h>
10 #include <arch/machine/registerset.h>
11 #include <api/syscall.h>
12 #include <machine/fpu.h>
13 
14 #include <sel4/benchmark_track_types.h>
15 #include <benchmark/benchmark_track.h>
16 #include <benchmark/benchmark_utilisation.h>
17 #include <arch/machine.h>
18 
/* C entry point for the undefined-instruction exception vector.
 * Never returns to its caller: control leaves the kernel via
 * restore_user_context(), which resumes the current thread.
 */
void VISIBLE NORETURN c_handle_undefined_instruction(void)
{
    NODE_LOCK_SYS;
    c_entry_hook();

#ifdef TRACK_KERNEL_ENTRIES
    /* Record the faulting PC for kernel-entry tracking. */
    ksKernelEntry.path = Entry_UserLevelFault;
    ksKernelEntry.word = getRegister(NODE_STATE(ksCurThread), NextIP);
#endif

#if defined(CONFIG_HAVE_FPU) && defined(CONFIG_ARCH_AARCH32)
    /* We assume the first fault is a FP exception and enable FPU, if not already enabled */
    if (!isFpuEnable()) {
        handleFPUFault();

        /* Restart the FP instruction that caused the fault */
        setNextPC(NODE_STATE(ksCurThread), getRestartPC(NODE_STATE(ksCurThread)));
    } else {
        /* FPU was already enabled, so this is a genuine user-level fault. */
        handleUserLevelFault(0, 0);
    }

    restore_user_context();
    UNREACHABLE();
#endif

    /* Only reached when the FPU/AArch32 block above is compiled out.
     * There's only one user-level fault on ARM, and the code is (0,0);
     * on AArch64 the exception syndrome is passed through instead. */
#ifdef CONFIG_ARCH_AARCH32
    handleUserLevelFault(0, 0);
#else
    handleUserLevelFault(getESR(), 0);
#endif
    restore_user_context();
    UNREACHABLE();
}
53 
#if defined(CONFIG_HAVE_FPU) && defined(CONFIG_ARCH_AARCH64)
/* C entry point for the AArch64 FPU-access trap: lazily enable the FPU
 * for the current thread and resume it.  Does not return. */
void VISIBLE NORETURN c_handle_enfp(void)
{
    c_entry_hook();

    handleFPUFault();
    restore_user_context();
    UNREACHABLE();
}
#endif /* CONFIG_HAVE_FPU && CONFIG_ARCH_AARCH64 */
64 
/* Common body for the data-abort and prefetch-abort entry points.
 *
 * @param type  which kind of VM fault occurred (seL4_DataFault or
 *              seL4_InstructionFault).
 *
 * Does not return; resumes a thread via restore_user_context().
 */
static inline void NORETURN c_handle_vm_fault(vm_fault_type_t type)
{
    NODE_LOCK_SYS;
    c_entry_hook();

#ifdef TRACK_KERNEL_ENTRIES
    /* Record the faulting PC for kernel-entry tracking. */
    ksKernelEntry.path = Entry_VMFault;
    ksKernelEntry.word = getRegister(NODE_STATE(ksCurThread), NextIP);
#endif

    handleVMFaultEvent(type);
    restore_user_context();
    UNREACHABLE();
}
79 
/* C entry point for the data-abort exception vector. */
void VISIBLE NORETURN c_handle_data_fault(void)
{
    c_handle_vm_fault(seL4_DataFault);
}
84 
/* C entry point for the prefetch-abort (instruction fault) exception vector. */
void VISIBLE NORETURN c_handle_instruction_fault(void)
{
    c_handle_vm_fault(seL4_InstructionFault);
}
89 
/* C entry point for the IRQ exception vector.  Does not return;
 * restore_user_context() resumes whichever thread is current after
 * the interrupt has been handled. */
void VISIBLE NORETURN c_handle_interrupt(void)
{
    /* NOTE(review): the big lock is deliberately not taken for the remote-call
     * IPI — presumably the sending core already holds it; confirm against the
     * SMP IPI protocol. */
    NODE_LOCK_IRQ_IF(IRQT_TO_IRQ(getActiveIRQ()) != irq_remote_call_ipi);
    c_entry_hook();

#ifdef TRACK_KERNEL_ENTRIES
    /* Record which IRQ arrived, and on which core, for entry tracking. */
    ksKernelEntry.path = Entry_Interrupt;
    ksKernelEntry.word = IRQT_TO_IRQ(getActiveIRQ());
    ksKernelEntry.core = CURRENT_CPU_INDEX();
#endif

    handleInterruptEntry();
    /* restore_user_context() does not return, satisfying NORETURN. */
    restore_user_context();
}
104 
/* Slow-path syscall dispatch: routes out-of-range syscall numbers to the
 * unknown-syscall handler and valid ones to the standard handler, then
 * resumes userspace.  Does not return. */
void NORETURN slowpath(syscall_t syscall)
{
    if (unlikely(syscall < SYSCALL_MIN || syscall > SYSCALL_MAX)) {
#ifdef TRACK_KERNEL_ENTRIES
        ksKernelEntry.path = Entry_UnknownSyscall;
        /* ksKernelEntry.word is already set to the syscall number */
#endif /* TRACK_KERNEL_ENTRIES */
        /* Contrary to the name, this handles all non-standard syscalls used in
         * debug builds also.
         */
        handleUnknownSyscall(syscall);
    } else {
#ifdef TRACK_KERNEL_ENTRIES
        ksKernelEntry.is_fastpath = 0;
#endif /* TRACK_KERNEL_ENTRIES */
        handleSyscall(syscall);
    }

    restore_user_context();
    UNREACHABLE();
}
126 
/* C entry point for the syscall trap when the fastpath is not taken.
 *
 * @param cptr     capability pointer supplied by the caller
 * @param msgInfo  message info register contents
 * @param syscall  syscall number to dispatch
 *
 * Never returns: slowpath() resumes userspace.
 */
void VISIBLE c_handle_syscall(word_t cptr, word_t msgInfo, syscall_t syscall)
{
    NODE_LOCK_SYS;

    c_entry_hook();
#ifdef TRACK_KERNEL_ENTRIES
    benchmark_debug_syscall_start(cptr, msgInfo, syscall);
    ksKernelEntry.is_fastpath = 0;
#endif /* TRACK_KERNEL_ENTRIES */

    slowpath(syscall);
    UNREACHABLE();
}
140 
141 #ifdef CONFIG_FASTPATH
/* Fastpath entry for seL4_Call.  Cache-line aligned to keep the hot path
 * tight.  Never returns: fastpath_call() resumes userspace (falling back to
 * the slowpath internally if the fastpath conditions are not met). */
ALIGN(L1_CACHE_LINE_SIZE)
void VISIBLE c_handle_fastpath_call(word_t cptr, word_t msgInfo)
{
    NODE_LOCK_SYS;

    c_entry_hook();
#ifdef TRACK_KERNEL_ENTRIES
    benchmark_debug_syscall_start(cptr, msgInfo, SysCall);
    ksKernelEntry.is_fastpath = 1;
#endif /* TRACK_KERNEL_ENTRIES */

    fastpath_call(cptr, msgInfo);
    UNREACHABLE();
}
156 
/* Fastpath entry for seL4_ReplyRecv.  Under MCS the reply object capability
 * is passed explicitly as a third argument; otherwise the two-argument form
 * is used.  Never returns: fastpath_reply_recv() resumes userspace. */
ALIGN(L1_CACHE_LINE_SIZE)
#ifdef CONFIG_KERNEL_MCS
void VISIBLE c_handle_fastpath_reply_recv(word_t cptr, word_t msgInfo, word_t reply)
#else
void VISIBLE c_handle_fastpath_reply_recv(word_t cptr, word_t msgInfo)
#endif
{
    NODE_LOCK_SYS;

    c_entry_hook();
#ifdef TRACK_KERNEL_ENTRIES
    benchmark_debug_syscall_start(cptr, msgInfo, SysReplyRecv);
    ksKernelEntry.is_fastpath = 1;
#endif /* TRACK_KERNEL_ENTRIES */

#ifdef CONFIG_KERNEL_MCS
    fastpath_reply_recv(cptr, msgInfo, reply);
#else
    fastpath_reply_recv(cptr, msgInfo);
#endif
    UNREACHABLE();
}
179 
180 #endif
181 
182 #ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
/* C entry point for hypervisor-mode traps taken from a guest VCPU.
 *
 * @param hsr  the Hyp Syndrome Register value describing the trap.
 *
 * Does not return; resumes a thread via restore_user_context().
 */
VISIBLE NORETURN void c_handle_vcpu_fault(word_t hsr)
{
    NODE_LOCK_SYS;

    c_entry_hook();

#ifdef TRACK_KERNEL_ENTRIES
    /* Record the trap syndrome for kernel-entry tracking. */
    ksKernelEntry.path = Entry_VCPUFault;
    ksKernelEntry.word = hsr;
#endif
    handleVCPUFault(hsr);
    restore_user_context();
    UNREACHABLE();
}
197 #endif /* CONFIG_ARM_HYPERVISOR_SUPPORT */
198