/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <config.h>
#include <model/statedata.h>
#include <kernel/stack.h>
#include <machine/fpu.h>
#include <arch/fastpath/fastpath.h>
#include <arch/machine/debug.h>
#include <benchmark/benchmark_track.h>
#include <mode/stack.h>
#include <arch/object/vcpu.h>
#include <arch/kernel/traps.h>

#include <api/syscall.h>
#include <util.h>

#ifdef CONFIG_VTX
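/* Landing pad for the inline assembly below: reached when vmlaunch/vmresume
 * falls through instead of entering the guest, after a kernel stack has been
 * re-established. Handles the VM entry failure and returns to the user. */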
USED static void NORETURN vmlaunch_failed(void)
{
    NODE_LOCK_SYS;

    c_entry_hook();

    handleVmEntryFail();
    restore_user_context();
}

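/* Resume execution of the current thread's VCPU: restore the VMCS and the
 * guest general-purpose registers, then enter the guest with vmresume (if it
 * has been launched before) or vmlaunch (on first entry). Does not return. */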
static void NORETURN restore_vmx(void)
{
    restoreVMCS();
#ifdef CONFIG_HARDWARE_DEBUG_API
    /* Breakpoints are not supported in VMs, so just disable them all */
    loadAllDisabledBreakpointState(ksCurThread);
#endif
#ifdef ENABLE_SMP_SUPPORT
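    /* Stash a pointer one word below the top of this CPU's kernel stack in the
     * VCPU's kernelSP field. The VM entry failure paths below reload %esp from
     * the word that follows the popped gp_registers, which is expected to be
     * this field, so that a valid kernel stack is available again. */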
    NODE_STATE(ksCurThread)->tcbArch.tcbVCPU->kernelSP =
        ((word_t)kernel_stack_alloc[getCurrentCPUIndex()]) + BIT(CONFIG_KERNEL_STACK_BITS) - 4;
#endif /* ENABLE_SMP_SUPPORT */
    if (NODE_STATE(ksCurThread)->tcbArch.tcbVCPU->launched) {
        /* attempt to do a vmresume */
        asm volatile(
            // Set our stack pointer to the top of the tcb so we can efficiently pop
            "movl %0, %%esp\n"
            "popl %%eax\n"
            "popl %%ebx\n"
            "popl %%ecx\n"
            "popl %%edx\n"
            "popl %%esi\n"
            "popl %%edi\n"
            "popl %%ebp\n"
            // Now do the vmresume
            "vmresume\n"
            // if we get here we failed
#ifdef ENABLE_SMP_SUPPORT
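            // recover this CPU's kernel stack pointer from the VCPU's kernelSP
            // field stashed above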
            "movl (%%esp), %%esp\n"
#else
            "leal kernel_stack_alloc + %c1, %%esp\n"
#endif
            "call vmlaunch_failed\n"
            :
            : "r"(&NODE_STATE(ksCurThread)->tcbArch.tcbVCPU->gp_registers[VCPU_EAX]),
            "i"(BIT(CONFIG_KERNEL_STACK_BITS) - sizeof(word_t))
            // Clobber memory so the compiler is forced to complete all stores
            // before running this assembler
            : "memory"
        );
    } else {
        /* attempt to do a vmlaunch */
        asm volatile(
            // Set our stack pointer to the top of the tcb so we can efficiently pop
            "movl %0, %%esp\n"
            "popl %%eax\n"
            "popl %%ebx\n"
            "popl %%ecx\n"
            "popl %%edx\n"
            "popl %%esi\n"
            "popl %%edi\n"
            "popl %%ebp\n"
            // Now do the vmlaunch
            "vmlaunch\n"
            // if we get here we failed
#ifdef ENABLE_SMP_SUPPORT
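            // as above, recover the kernel stack from the VCPU's kernelSP field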
            "movl (%%esp), %%esp\n"
#else
            "leal kernel_stack_alloc + %c1, %%esp\n"
#endif
            "call vmlaunch_failed\n"
            :
            : "r"(&NODE_STATE(ksCurThread)->tcbArch.tcbVCPU->gp_registers[VCPU_EAX]),
            "i"(BIT(CONFIG_KERNEL_STACK_BITS) - sizeof(word_t))
            // Clobber memory so the compiler is forced to complete all stores
            // before running this assembler
            : "memory"
        );
    }
    UNREACHABLE();
}
#endif

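/* Return from the kernel to the current thread. Depending on how the kernel
 * was entered and on the thread's state, this either re-enters the kernel to
 * service a pending interrupt, resumes a VM (CONFIG_VTX), or returns to user
 * level via sysexit (syscall entry) or iret (interrupt/exception entry). */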
void NORETURN VISIBLE restore_user_context(void);
void NORETURN VISIBLE restore_user_context(void)
{
    c_exit_hook();

    NODE_UNLOCK_IF_HELD;

    /* We have now 'exited' the kernel. If there is a pending interrupt,
     * 'enter' the kernel again to handle it. */
    if (ARCH_NODE_STATE(x86KSPendingInterrupt) != int_invalid) {
        /* put this in service */
        interrupt_t irq = servicePendingIRQ();
        /* reset our stack and jmp to the IRQ entry point */
        asm volatile(
            "mov %[stack_top], %%esp\n"
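            // arguments for c_handle_interrupt(irq, syscall) are pushed
            // right-to-left, following the cdecl calling convention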
            "push %[syscall] \n"
            "push %[irq]\n"
            "call c_handle_interrupt"
            :
            : [stack_top] "r"(&(kernel_stack_alloc[CURRENT_CPU_INDEX()][BIT(CONFIG_KERNEL_STACK_BITS)])),
            [syscall] "r"(0), /* syscall is unused for irq path */
            [irq] "r"(irq)
            : "memory");
        UNREACHABLE();
    }

#ifdef CONFIG_VTX
    if (thread_state_ptr_get_tsType(&NODE_STATE(ksCurThread)->tcbState) == ThreadState_RunningVM) {
        restore_vmx();
    }
#endif
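    /* Prepare the stack pointer for the next kernel entry and restore FPU
     * state as required by the lazy FPU switching scheme. */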
    setKernelEntryStackPointer(NODE_STATE(ksCurThread));
    lazyFPURestore(NODE_STATE(ksCurThread));

#ifdef CONFIG_HARDWARE_DEBUG_API
    restore_user_debug_context(NODE_STATE(ksCurThread));
#endif

    if (config_set(CONFIG_KERNEL_X86_IBRS_BASIC)) {
        x86_disable_ibrs();
    }

    /* See if we entered via syscall: the sysenter entry path marks the Error
     * pseudo-register as -1, while interrupt and exception entries do not. */
    if (likely(NODE_STATE(ksCurThread)->tcbArch.tcbContext.registers[Error] == -1)) {
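        /* Clear IF in the saved flags so the popfl below does not re-enable
         * interrupts early; IF is put back in the saved copy and interrupts
         * are re-enabled by the sti immediately before sysexit. */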
        NODE_STATE(ksCurThread)->tcbArch.tcbContext.registers[FLAGS] &= ~FLAGS_IF;
        asm volatile(
            // Set our stack pointer to the top of the tcb so we can efficiently pop
            "movl %0, %%esp\n"
            // restore syscall number
            "popl %%eax\n"
            // cap/badge register
            "popl %%ebx\n"
            // skip ecx and edx, these will contain esp and NextIP due to sysenter/sysexit convention
            "addl $8, %%esp\n"
            // message info register
            "popl %%esi\n"
            // message register
            "popl %%edi\n"
            // message register
            "popl %%ebp\n"
            // skip FaultIP and Error (these are fake registers)
            "addl $8, %%esp\n"
            // restore NextIP
            "popl %%edx\n"
            // skip cs
            "addl $4, %%esp\n"
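            // sysexit returns to user mode with %edx as the new instruction
            // pointer and %ecx as the new stack pointer, so load the saved user
            // ESP into %ecx, pop the (IF-cleared) flags, restore IF in the saved
            // copy kept in the TCB, and re-enable interrupts with sti so that
            // its one-instruction interrupt shadow covers the sysexit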
            "movl 4(%%esp), %%ecx\n"
            "popfl\n"
            "orl %[IFMASK], -4(%%esp)\n"
            "sti\n"
            "sysexit\n"
            :
            : "r"(&NODE_STATE(ksCurThread)->tcbArch.tcbContext.registers[EAX]),
            [IFMASK]"i"(FLAGS_IF)
            // Clobber memory so the compiler is forced to complete all stores
            // before running this assembler
            : "memory"
        );
    } else {
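        /* Interrupt or exception entry: pop the general-purpose registers and
         * let iret restore EIP, CS, FLAGS, ESP and SS from the remainder of
         * the saved context. */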
        asm volatile(
            // Set our stack pointer to the top of the tcb so we can efficiently pop
            "movl %0, %%esp\n"
            "popl %%eax\n"
            "popl %%ebx\n"
            "popl %%ecx\n"
            "popl %%edx\n"
            "popl %%esi\n"
            "popl %%edi\n"
            "popl %%ebp\n"
            // skip FaultIP and Error
            "addl $8, %%esp\n"
            "iret\n"
            :
            : "r"(&NODE_STATE(ksCurThread)->tcbArch.tcbContext.registers[EAX])
            // Clobber memory so the compiler is forced to complete all stores
            // before running this assembler
            : "memory"
        );
    }

    UNREACHABLE();
}