/*
 * Copyright (C) 2018-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <asm/guest/vmcs.h>
#include <asm/guest/vm.h>
#include <asm/security.h>

/* NOTE:
 *
 * MISRA C requires that all unsigned constants have the suffix 'U'
 * (e.g. 0xffU), but the assembler may not accept such C-style constants.
 * For example, binutils 2.26 fails to compile assembly in that case. To
 * work around this, all unsigned constants must be explicitly spelled out
 * in assembly with a comment tracking the original expression from which
 * the magic number is calculated. As an example:
 *
 *    /* 0x00000668 =
 *     *    (CR4_DE | CR4_PAE | CR4_MCE | CR4_OSFXSR | CR4_OSXMMEXCPT) *\/
 *    movl    $0x00000668, %eax
 *
 * Make sure that these numbers are updated accordingly if the definitions
 * of the macros involved are changed.
 */
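
/* For orientation, the CPU_CONTEXT_OFFSET_* values used throughout this
 * file imply a guest-context layout roughly like the sketch below. This is
 * an illustrative reconstruction from the offsets alone, not the
 * authoritative definition; the field names and the contents of the gaps
 * are assumptions, and the real layout is the C definition of struct
 * run_context:
 *
 *    struct run_context {          // offsets as used in this file
 *        uint64_t rax;             // 0x00
 *        uint64_t rcx;             // 0x08
 *        uint64_t rdx;             // 0x10
 *        uint64_t rbx;             // 0x18
 *        uint64_t unknown_0x20;    // 0x20 (not touched here)
 *        uint64_t rbp;             // 0x28
 *        uint64_t rsi;             // 0x30
 *        uint64_t rdi;             // 0x38
 *        uint64_t r8_r15[8];       // 0x40 - 0x78
 *        uint64_t unknown_0x80;    // 0x80 (not touched here)
 *        uint64_t cr2;             // 0x88
 *        uint64_t unknown[3];      // 0x90 - 0xa0 (not touched here)
 *        uint64_t ia32_spec_ctrl;  // 0xa8
 *    };
 */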

    .text

/* int vmx_vmrun(struct run_context *context, int32_t launch, int32_t ibrs_type) */
   .code64
   .align       8
   .global      vmx_vmrun
vmx_vmrun:
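
    /* System V AMD64 argument registers on entry:
     *    rdi = context   (struct run_context *)
     *    rsi = launch    (1 = launch, 0 = resume)
     *    rdx = ibrs_type
     */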

    /* Save the host GPRs that must be preserved across this call: the
       callee-saved registers per the System V ABI, plus rdx, whose value
       (ibrs_type) is needed again after VM exit */
    push        %rdx
    push        %rbx
    push        %rbp
    push        %r12
    push        %r13
    push        %r14
    push        %r15

    /* Save RDI (the context pointer) on top of the host stack for easy
       access on return from guest context */
    push        %rdi

    /* rdx = ibrs_type. If ibrs_type != IBRS_NONE, the IBRS feature is
     * supported; restore the guest's MSR SPEC_CTRL value.
     */
    cmp         $IBRS_NONE,%rdx
    je          next

    /* 0x00000048 = MSR_IA32_SPEC_CTRL */
    movl        $0x00000048,%ecx
    /* 0xa8 = 168U = CPU_CONTEXT_OFFSET_IA32_SPEC_CTRL */
    mov         0xa8(%rdi),%rax
    movl        $0,%edx
    wrmsr
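
    /* wrmsr loads edx:eax into the MSR indexed by ecx; edx is zeroed
     * above because only the low 32 bits of SPEC_CTRL are used. */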

next:

    /* Load the encoding of the VMCS HOST_RSP field
     *
     *     0x00006c14 = VMX_HOST_RSP
     */
    mov         $0x00006c14,%rdx

    /* Write the current stack pointer to the VMCS HOST_RSP field; the
     * CPU reloads RSP from this field on VM exit */
    vmwrite     %rsp,%rdx

    /* vmwrite failed (CF or ZF set) - handle the error */
    jbe         vm_eval_error

    /* Compare the launch flag to see if launching (1) or resuming (0) */
    cmp         $VM_LAUNCH, %rsi
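
    /* NOTE: the result of this cmp is consumed by the 'je vm_launch'
     * below; none of the intervening mov instructions modify RFLAGS. */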

    /* 136U=0x88=CPU_CONTEXT_OFFSET_CR2 */
    mov         0x88(%rdi),%rax
    mov         %rax,%cr2

    /*
     * 0U=0x0=CPU_CONTEXT_OFFSET_RAX
     * 8U=0x8=CPU_CONTEXT_OFFSET_RCX
     * 16U=0x10=CPU_CONTEXT_OFFSET_RDX
     * 24U=0x18=CPU_CONTEXT_OFFSET_RBX
     * 40U=0x28=CPU_CONTEXT_OFFSET_RBP
     * 48U=0x30=CPU_CONTEXT_OFFSET_RSI
     * 64U=0x40=CPU_CONTEXT_OFFSET_R8
     * 72U=0x48=CPU_CONTEXT_OFFSET_R9
     * 80U=0x50=CPU_CONTEXT_OFFSET_R10
     * 88U=0x58=CPU_CONTEXT_OFFSET_R11
     * 96U=0x60=CPU_CONTEXT_OFFSET_R12
     * 104U=0x68=CPU_CONTEXT_OFFSET_R13
     * 112U=0x70=CPU_CONTEXT_OFFSET_R14
     * 120U=0x78=CPU_CONTEXT_OFFSET_R15
     */
    mov         0x0(%rdi),%rax
    mov         0x8(%rdi),%rcx
    mov         0x10(%rdi),%rdx
    mov         0x18(%rdi),%rbx
    mov         0x28(%rdi),%rbp
    mov         0x30(%rdi),%rsi
    mov         0x40(%rdi),%r8
    mov         0x48(%rdi),%r9
    mov         0x50(%rdi),%r10
    mov         0x58(%rdi),%r11
    mov         0x60(%rdi),%r12
    mov         0x68(%rdi),%r13
    mov         0x70(%rdi),%r14
    mov         0x78(%rdi),%r15

    /* 56U=0x38=CPU_CONTEXT_OFFSET_RDI */
    mov         0x38(%rdi),%rdi
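
    /* rdi is loaded last since it is the base pointer for all the loads
     * above; after this point the context pointer lives only on the host
     * stack. */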

    /* Execute appropriate VMX instruction */
    je          vm_launch

    /* Execute a VM resume */
    vmresume

    /* vmresume falls through only on failure; jump directly to vm_exit
     * with the failure flags intact */
    jmp         vm_exit

vm_launch:

    /* Execute a VM launch */
    vmlaunch

    .global vm_exit
vm_exit:

    /* Get the saved context pointer from the top of the host stack and
       store guest RDI in its place */
    xchg        0(%rsp),%rdi
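
    /* The single xchg both recovers the context pointer and stores guest
     * %rdi, so no scratch register is clobbered before the guest state
     * has been saved. */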

    /* Save current GPRs to guest state area;
     * 0U=0x0=CPU_CONTEXT_OFFSET_RAX
     */
    mov         %rax,0x0(%rdi)

    mov         %cr2,%rax
    /* 136U=0x88=CPU_CONTEXT_OFFSET_CR2 */
    mov         %rax,0x88(%rdi)
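
    /* %cr2 cannot be stored to memory directly; %rax, already saved to
     * the context above, is reused as scratch for it. */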

    /*
     * 8U=0x8=CPU_CONTEXT_OFFSET_RCX
     * 16U=0x10=CPU_CONTEXT_OFFSET_RDX
     * 24U=0x18=CPU_CONTEXT_OFFSET_RBX
     * 40U=0x28=CPU_CONTEXT_OFFSET_RBP
     * 48U=0x30=CPU_CONTEXT_OFFSET_RSI
     * 64U=0x40=CPU_CONTEXT_OFFSET_R8
     * 72U=0x48=CPU_CONTEXT_OFFSET_R9
     * 80U=0x50=CPU_CONTEXT_OFFSET_R10
     * 88U=0x58=CPU_CONTEXT_OFFSET_R11
     * 96U=0x60=CPU_CONTEXT_OFFSET_R12
     * 104U=0x68=CPU_CONTEXT_OFFSET_R13
     * 112U=0x70=CPU_CONTEXT_OFFSET_R14
     * 120U=0x78=CPU_CONTEXT_OFFSET_R15
     */
    mov         %rcx,0x8(%rdi)
    mov         %rdx,0x10(%rdi)
    mov         %rbx,0x18(%rdi)
    mov         %rbp,0x28(%rdi)
    mov         %rsi,0x30(%rdi)
    mov         %r8,0x40(%rdi)
    mov         %r9,0x48(%rdi)
    mov         %r10,0x50(%rdi)
    mov         %r11,0x58(%rdi)
    mov         %r12,0x60(%rdi)
    mov         %r13,0x68(%rdi)
    mov         %r14,0x70(%rdi)
    mov         %r15,0x78(%rdi)

    /* Load guest RDI off the host stack into RDX */
    mov         0(%rsp),%rdx

    /* Save guest RDI to guest state area
     * 56U=0x38=CPU_CONTEXT_OFFSET_RDI */
    mov         %rdx,0x38(%rdi)

    /* Save RDI to RSI for the SPEC_CTRL save later */
    mov         %rdi,%rsi
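
    /* Guest %rsi was already saved above, and %rsi is not among the
     * registers popped below, so it can safely carry the context pointer
     * across the pops. */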

vm_eval_error:

    /* Restore the host registers saved at entry (the System V callee-saved
       registers, plus rdx = ibrs_type) */
    pop         %rdi
    pop         %r15
    pop         %r14
    pop         %r13
    pop         %r12
    pop         %rbp
    pop         %rbx
    pop         %rdx
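
    /* Neither pop nor mov modifies RFLAGS, so CF/ZF below still reflect
     * the outcome of the vmwrite/vmlaunch/vmresume above (this is also
     * why VM_FAIL is loaded with mov rather than a flag-clobbering xor). */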

    /* Check for VM-fail: CF=1 (VMfailInvalid) or ZF=1 (VMfailValid);
     * refer to Section 26.2, Intel SDM Volume 3 */
    mov         $VM_FAIL,%rax
    jc          vm_return
    jz          vm_return

    /* Clear host registers to prevent speculative use */
    xor         %rcx,%rcx
    xor         %r8,%r8
    xor         %r9,%r9
    xor         %r10,%r10
    xor         %r11,%r11
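
    /* rcx and r8-r11 still hold guest values at this point; zeroing them
     * keeps stale guest data out of speculative execution paths in the
     * host. */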

    /* rdx = ibrs_type */
    /* IBRS_NONE: no IBRS setting, just stuff the RSB
     * IBRS_RAW: set IBRS, then stuff the RSB
     * IBRS_OPT: set STIBP & IBPB, then stuff the RSB
     */
    cmp         $IBRS_NONE,%rdx
    je          stuff_rsb

    cmp         $IBRS_OPT,%rdx
    je          ibrs_opt

    /* IBRS_RAW: save guest MSR SPEC_CTRL; the low 32 bits are enough
     *
     *     0x00000048 = MSR_IA32_SPEC_CTRL
     */
    movl        $0x00000048,%ecx
    rdmsr
    /* 168U=0xa8=CPU_CONTEXT_OFFSET_IA32_SPEC_CTRL */
    mov         %rax,0xa8(%rsi)
    /* 0x1 = SPEC_ENABLE_IBRS */
    movl        $0x1,%eax
    movl        $0,%edx
    wrmsr

    jmp         stuff_rsb

ibrs_opt:

    /* 0x00000049 = MSR_IA32_PRED_CMD */
    movl        $0x00000049,%ecx
    /* 0x1 = PRED_SET_IBPB */
    movl        $0x1,%eax
    movl        $0,%edx
    wrmsr

    /* Save guest MSR SPEC_CTRL; the low 32 bits are enough
     *
     *     0x00000048 = MSR_IA32_SPEC_CTRL
     */
    movl        $0x00000048,%ecx
    rdmsr
    /* 168U=0xa8=CPU_CONTEXT_OFFSET_IA32_SPEC_CTRL */
    mov         %rax,0xa8(%rsi)
    /* 0x2 = SPEC_ENABLE_STIBP */
    movl        $0x2,%eax
    movl        $0,%edx
    wrmsr

    /* Stuff the RSB with 32 CALLs. Make sure no 'ret' is executed before
     * the RSB has been stuffed; be careful if inserting any code ahead of
     * this point in a future update.
     */
stuff_rsb:

    /* Stuff 32 RSB entries, two CALLs per loop iteration: rax = 32/2 */
    mov         $16,%rax
.align 16
3:
    call        4f
33:
    pause
    jmp         33b
.align 16
4:
    call        5f
44:
    pause
    jmp         44b
.align 16
5:  dec         %rax
    jnz         3b
    /* 32 CALLs were executed, so rebalance the stack: rsp += 8*32 */
    add         $(8*32),%rsp
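
    /* Each CALL above pushed a return address onto both the RSB and the
     * stack; the PAUSE/JMP pairs capture any speculative RET. Adjusting
     * rsp directly rebalances the stack without refilling the RSB. */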

    mov         $VM_SUCCESS,%rax

vm_return:
    /* Return to caller */
    ret