/*
 * entry.S: VMX architecture-specific entry/exit handling.
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2008, Citrix Systems, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

        .file "vmx/entry.S"

#include <asm/asm_defns.h>
#include <asm/page.h>

FUNC(vmx_asm_vmexit_handler)
        SAVE_ALL

        mov  %cr2,%rax
        GET_CURRENT(bx)

        movb $1,VCPU_vmx_launched(%rbx)
        mov  %rax,VCPU_hvm_guest_cr2(%rbx)
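        /*
         * %cr2 still holds the guest's value at this point; stash it before
         * any Xen code can fault and overwrite it.  Setting vmx_launched
         * records that a VMLAUNCH has completed, so later entries below use
         * VMRESUME instead.
         */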

        /* SPEC_CTRL_ENTRY_FROM_VMX    Req: b=curr %rsp=regs/cpuinfo, Clob: acd */
        /*
         * RSB stuffing is to prevent RET predictions following guest
         * entries.  This is *not* sufficient to flush all RSB entries on
         * parts enumerating eIBRS, although the following restore_spec_ctrl
         * does cover us.
         */
        ALTERNATIVE "", DO_OVERWRITE_RSB, X86_FEATURE_SC_RSB_HVM
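        /*
         * Note: DO_OVERWRITE_RSB (defined alongside the other speculation
         * helpers) broadly works by issuing a run of benign CALLs so the RSB
         * no longer contains guest-influenced return targets.
         */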

        /*
         * Restore Xen's MSR_SPEC_CTRL setting.  The guest's value resides in
         * the MSR load/save list.  For legacy IBRS, this flushes/inhibits
         * indirect predictions and does not flush the RSB.  For eIBRS, this
         * prevents CALLs/JMPs using predictions learnt at a lower predictor
         * mode, and it flushes the RSB.  On eIBRS parts that also suffer from
         * PBRSB, the prior RSB stuffing suffices to make the RSB safe.
         */
        .macro restore_spec_ctrl
            mov    $MSR_SPEC_CTRL, %ecx
            mov    CPUINFO_xen_spec_ctrl(%rsp), %eax
            xor    %edx, %edx
            wrmsr
        .endm
        ALTERNATIVE "", restore_spec_ctrl, X86_FEATURE_SC_MSR_HVM

        /*
         * Clear the BHB to mitigate BHI.  Used on eIBRS parts, and uses RETs
         * itself so must be after we've performed all the RET-safety we can.
         */
        testb $SCF_entry_bhb, CPUINFO_scf(%rsp)
        jz .L_skip_bhb
        ALTERNATIVE_2 "",                                    \
            "call clear_bhb_loops", X86_SPEC_BHB_LOOPS,      \
            "call clear_bhb_tsx", X86_SPEC_BHB_TSX
.L_skip_bhb:
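        /*
         * clear_bhb_loops and clear_bhb_tsx above are the software sequences
         * for scrubbing branch history: broadly, a long run of taken
         * branches, and a TSX-abort based variant respectively.
         */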

        ALTERNATIVE "lfence", "", X86_SPEC_NO_LFENCE_ENTRY_VMX
        /* WARNING! `ret`, `call *`, `jmp *` not safe before this point. */

        /* Hardware clears MSR_DEBUGCTL on VMExit.  Reinstate it if debugging Xen. */
        .macro restore_lbr
            mov $IA32_DEBUGCTLMSR_LBR, %eax
            mov $MSR_IA32_DEBUGCTLMSR, %ecx
            xor %edx, %edx
            wrmsr
        .endm
        ALTERNATIVE "", restore_lbr, X86_FEATURE_XEN_LBR
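        /*
         * Note: only the DEBUGCTL.LBR bit is reinstated here; all other
         * DEBUGCTL bits are written as zero.
         */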

        mov  %rsp,%rdi
        call vmx_vmexit_handler

.Lvmx_do_vmentry:
        call vmx_intr_assist
        call nvmx_switch_guest
        ASSERT_NOT_IN_ATOMIC

        mov  VCPU_processor(%rbx),%eax
        lea  irq_stat+IRQSTAT_softirq_pending(%rip),%rdx
        xor  %ecx,%ecx
        shl  $IRQSTAT_shift,%eax
        cli
        cmp  %ecx,(%rdx,%rax,1)
        jnz  .Lvmx_process_softirqs
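        /*
         * The check above is, roughly (illustrative C only):
         *     local_irq_disable();
         *     if ( softirq_pending(v->processor) )
         *         goto process_softirqs;
         * Interrupts are disabled before the check so that a softirq raised
         * afterwards cannot be missed ahead of the VM entry.
         */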

        cmp  %cl,VCPU_vmx_emulate(%rbx)
        jne .Lvmx_goto_emulator
        cmp  %cl,VCPU_vmx_realmode(%rbx)
UNLIKELY_START(ne, realmode)
        cmp  %cx,VCPU_vm86_seg_mask(%rbx)
        jnz .Lvmx_goto_emulator
        mov  %rsp,%rdi
        call vmx_enter_realmode
UNLIKELY_END(realmode)

        mov  %rsp,%rdi
        call vmx_vmenter_helper
        test %al, %al
        jz .Lvmx_vmentry_restart
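        /*
         * A zero return from vmx_vmenter_helper means this entry attempt
         * must be abandoned; .Lvmx_vmentry_restart re-enables interrupts and
         * retries from .Lvmx_do_vmentry.
         */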

        /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
        /* SPEC_CTRL_EXIT_TO_VMX   Req: %rsp=regs/cpuinfo              Clob:    */
        /*
         * All speculation safety work happens to be elsewhere.  VERW is after
         * popping the GPRs, while restoring the guest MSR_SPEC_CTRL is left
         * to the MSR load list.
         */

        mov  VCPU_hvm_guest_cr2(%rbx),%rax
        mov  %rax, %cr2

        /*
         * We need to perform two conditional actions (VERW, and Resume vs
         * Launch) after popping GPRs.  With some cunning, we can encode both
         * of these in eflags together.
         *
         * Parity is only calculated over the bottom byte of the answer, while
         * Sign is simply the top bit.
         *
         * Therefore, the final OR instruction ends up producing:
         *   SF = VCPU_vmx_launched
         *   PF = !SCF_verw
         */
        BUILD_BUG_ON(SCF_verw & ~0xff)
        movzbl VCPU_vmx_launched(%rbx), %ecx
        shl  $31, %ecx
        movzbl CPUINFO_scf(%rsp), %eax
        and  $SCF_verw, %eax
        or   %eax, %ecx
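        /*
         * Illustrative C for the encoding above (not part of the build):
         *     ecx = (vmx_launched << 31) | (scf & SCF_verw);
         * The OR sets SF from bit 31 (vmx_launched), while PF reflects the
         * parity of the low byte: even (PF=1) exactly when SCF_verw is
         * clear, matching the jpe/jns tests below.
         */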

        pop  %r15
        pop  %r14
        pop  %r13
        pop  %r12
        pop  %rbp
        pop  %rbx
        pop  %r11
        pop  %r10
        pop  %r9
        pop  %r8
        pop  %rax
        pop  %rcx
        pop  %rdx
        pop  %rsi
        pop  %rdi

        jpe  .L_skip_verw
        /* VERW clobbers ZF, but preserves all others, including SF. */
        verw STK_REL(CPUINFO_verw_sel, CPUINFO_error_code)(%rsp)
.L_skip_verw:
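        /*
         * Note on the VERW operand above: with the GPR frame popped, %rsp
         * points at the error_code slot, so STK_REL() rebases the
         * cpuinfo-relative offset of verw_sel onto the current stack
         * pointer.
         */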

        jns  .Lvmx_launch

/*.Lvmx_resume:*/
        vmresume
        jmp  .Lvmx_vmentry_fail

.Lvmx_launch:
        vmlaunch
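        /*
         * On success, VMRESUME/VMLAUNCH do not return here.  Reaching the
         * following instruction means the VM entry instruction itself
         * failed, which is handled at .Lvmx_vmentry_fail.
         */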

.Lvmx_vmentry_fail:
        sti
        SAVE_ALL

        /*
         * SPEC_CTRL_ENTRY notes
         *
         * If we end up here, no guest code has executed.  The MSR lists have
         * not been processed, so we still have Xen's choice of MSR_SPEC_CTRL
         * in context, and the RSB is unchanged.
         */

        call vmx_vmentry_failure
        jmp  .Lvmx_process_softirqs

LABEL(vmx_asm_do_vmentry)
        GET_CURRENT(bx)
        jmp  .Lvmx_do_vmentry

.Lvmx_vmentry_restart:
        sti
        jmp  .Lvmx_do_vmentry

.Lvmx_goto_emulator:
        sti
        mov  %rsp,%rdi
        call vmx_realmode
        jmp  .Lvmx_do_vmentry

.Lvmx_process_softirqs:
        sti
        call do_softirq
        jmp  .Lvmx_do_vmentry
END(vmx_asm_vmexit_handler)