// Copyright 2017 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <asm.h>

#include <arch/x86/vmx_state.h>
#include <zircon/errors.h>

.text
/* zx_status_t vmx_enter(VmxState* vmx_state) */
FUNCTION(vmx_enter)
    // Store the return address.
    // We do this first so that the RSP we store below no longer includes it.
    popq HS_RIP(%rdi)

    // Store the callee save registers.
    mov %rbx, HS_RBX(%rdi)
    mov %rsp, HS_RSP(%rdi)
    mov %rbp, HS_RBP(%rdi)
    mov %r12, HS_R12(%rdi)
    mov %r13, HS_R13(%rdi)
    mov %r14, HS_R14(%rdi)
    mov %r15, HS_R15(%rdi)

    // Store the processor flags.
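    // RFLAGS is not preserved across VM entry and exit, so save the host's flags
    // here; vmx_exit_entry restores them.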
    pushfq
    popq HS_RFLAGS(%rdi)

    // We are about to trample RDI, so move the vmx_state pointer into RSP. This
    // also conveniently mirrors the exit path in vmx_exit_entry.
    mov %rdi, %rsp

    // Load the guest CR2.
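    // CR2 is not part of the VMCS guest-state area, so it is swapped by hand.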
    mov GS_CR2(%rsp), %rax
    mov %rax, %cr2

    // Load the guest registers not covered by the VMCS.
    mov GS_RAX(%rsp), %rax
    mov GS_RCX(%rsp), %rcx
    mov GS_RDX(%rsp), %rdx
    mov GS_RBX(%rsp), %rbx
    mov GS_RBP(%rsp), %rbp
    mov GS_RSI(%rsp), %rsi
    mov GS_RDI(%rsp), %rdi
    mov GS_R8(%rsp), %r8
    mov GS_R9(%rsp), %r9
    mov GS_R10(%rsp), %r10
    mov GS_R11(%rsp), %r11
    mov GS_R12(%rsp), %r12
    mov GS_R13(%rsp), %r13
    mov GS_R14(%rsp), %r14
    mov GS_R15(%rsp), %r15

    // If we are resuming, jump to resume.
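    // VMLAUNCH may only be used for the first entry on a VMCS; later entries
    // must use VMRESUME.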
    testb $1, VS_RESUME(%rsp)
    jnz resume

    // Launch the guest.
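    // On success, vmlaunch does not fall through: control transfers to the guest,
    // and the next VM exit re-enters the host at vmx_exit_entry below.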
    vmlaunch
    jmp failure

resume:
    // Resume the guest.
    vmresume

failure:
    // We will only be here if vmlaunch or vmresume failed.
    // Restore host RDI and RSP.
    mov %rsp, %rdi
    mov HS_RSP(%rdi), %rsp

    // Set up the return address.
    pushq HS_RIP(%rdi)

    // Return ZX_ERR_INTERNAL.
    mov $ZX_ERR_INTERNAL, %eax
    ret
END_FUNCTION(vmx_enter)

/* This is effectively the second half of vmx_enter. When a VM exit occurs, the
 * vmx_state argument is in RSP. We use this to restore the stack and registers
 * to the state they were in when vmx_enter was called.
 */
FUNCTION(vmx_exit_entry)
    // Store the guest registers not covered by the VMCS. At this point,
    // vmx_state is in RSP.
    mov %rax, GS_RAX(%rsp)
    mov %rcx, GS_RCX(%rsp)
    mov %rdx, GS_RDX(%rsp)
    mov %rbx, GS_RBX(%rsp)
    mov %rbp, GS_RBP(%rsp)
    mov %rsi, GS_RSI(%rsp)
    mov %rdi, GS_RDI(%rsp)
    mov %r8, GS_R8(%rsp)
    mov %r9, GS_R9(%rsp)
    mov %r10, GS_R10(%rsp)
    mov %r11, GS_R11(%rsp)
    mov %r12, GS_R12(%rsp)
    mov %r13, GS_R13(%rsp)
    mov %r14, GS_R14(%rsp)
    mov %r15, GS_R15(%rsp)

    // Store the guest CR2.
    mov %cr2, %rax
    mov %rax, GS_CR2(%rsp)

    // Move the vmx_state pointer from RSP into RDI.
    mov %rsp, %rdi

    // Load the host callee save registers.
    mov HS_RBX(%rdi), %rbx
    mov HS_RSP(%rdi), %rsp
    mov HS_RBP(%rdi), %rbp
    mov HS_R12(%rdi), %r12
    mov HS_R13(%rdi), %r13
    mov HS_R14(%rdi), %r14
    mov HS_R15(%rdi), %r15

    // Load the host processor flags.
    pushq HS_RFLAGS(%rdi)
    popfq

    // Set up the return address.
    pushq HS_RIP(%rdi)

    // Call vmx_exit(vmx_state).
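    // The push of HS_RIP above left RSP 8 bytes off 16-byte alignment, so adjust
    // it around the call to keep the stack aligned as the SysV AMD64 ABI expects.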
    sub $8, %rsp
    call vmx_exit
    add $8, %rsp

    // Return ZX_OK, using the return address of vmx_enter pushed above.
    mov $ZX_OK, %eax
    ret
END_FUNCTION(vmx_exit_entry)