/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_INSN_H
#define __KVM_X86_VMX_INSN_H

#include <linux/nospec.h>

#include <asm/vmx.h>

#include "hyperv.h"
#include "vmcs.h"
#include "../x86.h"

void vmread_error(unsigned long field, bool fault);
void vmwrite_error(unsigned long field, unsigned long value);
void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
void invvpid_error(unsigned long ext, u16 vpid, gva_t gva);
void invept_error(unsigned long ext, u64 eptp, gpa_t gpa);

#ifndef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
/*
 * The VMREAD error trampoline _always_ uses the stack to pass parameters, even
 * for 64-bit targets.  Preserving all registers allows the VMREAD inline asm
 * blob to avoid clobbering GPRs, which in turn allows the compiler to better
 * optimize sequences of VMREADs.
 *
 * Declare the trampoline as an opaque label as it's not safe to call from C
 * code; there is no way to tell the compiler to pass params on the stack for
 * 64-bit targets.
 *
 * void vmread_error_trampoline(unsigned long field, bool fault);
 */
extern unsigned long vmread_error_trampoline;
#endif

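/*
 * Note: the compile-time checks below decode the VMCS field encoding defined
 * by the Intel SDM: bits 14:13 give the field width (0 = 16-bit, 1 = 64-bit,
 * 2 = 32-bit, 3 = natural width), and bit 0 set selects the "high" 32 bits of
 * a 64-bit field.  Hence the 0x6000/0x6001 masks and the 0x2000/0x2001/0x4000
 * comparisons in the BUILD_BUG_ON_MSG() checks.
 */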
static __always_inline void vmcs_check16(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "16-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "16-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "16-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "16-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check32(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "32-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "32-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "32-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "32-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check64(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "64-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "64-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "64-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "64-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_checkl(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "Natural width accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "Natural width accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "Natural width accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "Natural width accessor invalid for 32-bit field");
}

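/*
 * Example (illustrative): GUEST_ES_LIMIT is encoded as 0x4800, and
 * (0x4800 & 0x6000) == 0x4000 identifies it as a 32-bit field, so
 * vmcs_read32(GUEST_ES_LIMIT) compiles cleanly while a hypothetical
 * vmcs_read16(GUEST_ES_LIMIT) would trip the corresponding
 * BUILD_BUG_ON_MSG() at compile time.
 */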
static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
	unsigned long value;

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

	asm_volatile_goto("1: vmread %[field], %[output]\n\t"
			  "jna %l[do_fail]\n\t"

			  _ASM_EXTABLE(1b, %l[do_exception])

			  : [output] "=r" (value)
			  : [field] "r" (field)
			  : "cc"
			  : do_fail, do_exception);

	return value;

do_fail:
	instrumentation_begin();
	WARN_ONCE(1, KBUILD_MODNAME ": vmread failed: field=%lx\n", field);
	pr_warn_ratelimited(KBUILD_MODNAME ": vmread failed: field=%lx\n", field);
	instrumentation_end();
	return 0;

do_exception:
	kvm_spurious_fault();
	return 0;

#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

	asm volatile("1: vmread %2, %1\n\t"
		     ".byte 0x3e\n\t" /* branch taken hint */
		     "ja 3f\n\t"

		     /*
		      * VMREAD failed.  Push '0' for @fault, push the failing
		      * @field, and bounce through the trampoline to preserve
		      * volatile registers.
		      */
		     "xorl %k1, %k1\n\t"
		     "2:\n\t"
		     "push %1\n\t"
		     "push %2\n\t"
		     "call vmread_error_trampoline\n\t"

		     /*
		      * Unwind the stack.  Note, the trampoline zeros out the
		      * memory for @fault so that the result is '0' on error.
		      */
		     "pop %2\n\t"
		     "pop %1\n\t"
		     "3:\n\t"

		     /* VMREAD faulted.  As above, except push '1' for @fault. */
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %1)

		     : ASM_CALL_CONSTRAINT, "=&r"(value) : "r"(field) : "cc");
	return value;

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
}

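/*
 * Note: in both variants above, a failed or faulting VMREAD yields 0 (the
 * trampoline zeroes the @fault slot that is popped back into the output
 * register), so the typed wrappers below never propagate an error.
 */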
static __always_inline u16 vmcs_read16(unsigned long field)
{
	vmcs_check16(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read16(field);
	return __vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
	vmcs_check32(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read32(field);
	return __vmcs_readl(field);
}

static __always_inline u64 vmcs_read64(unsigned long field)
{
	vmcs_check64(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read64(field);
#ifdef CONFIG_X86_64
	return __vmcs_readl(field);
#else
	return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
#endif
}

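/*
 * vmcs_readl() deliberately falls back to evmcs_read64(): the enlightened
 * VMCS (see hyperv.h) stores natural-width fields as 64-bit values, so there
 * is no narrower eVMCS accessor to use.
 */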
static __always_inline unsigned long vmcs_readl(unsigned long field)
{
	vmcs_checkl(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read64(field);
	return __vmcs_readl(field);
}

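/*
 * The vmx_asm1()/vmx_asm2() macros below emit a one- or two-operand VMX
 * instruction and route the two distinct failure modes to separate labels:
 * "error" for VM-fail (the instruction executed but set CF or ZF, caught by
 * the JNA), and "fault" for an exception such as #UD or #GP (caught via the
 * exception table entry).
 */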
#define vmx_asm1(insn, op1, error_args...)				\
do {									\
	asm_volatile_goto("1: " __stringify(insn) " %0\n\t"		\
			  ".byte 0x2e\n\t" /* branch not taken hint */	\
			  "jna %l[error]\n\t"				\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  : : op1 : "cc" : error, fault);		\
	return;								\
error:									\
	instrumentation_begin();					\
	insn##_error(error_args);					\
	instrumentation_end();						\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

#define vmx_asm2(insn, op1, op2, error_args...)				\
do {									\
	asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t"		\
			  ".byte 0x2e\n\t" /* branch not taken hint */	\
			  "jna %l[error]\n\t"				\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  : : op1, op2 : "cc" : error, fault);		\
	return;								\
error:									\
	instrumentation_begin();					\
	insn##_error(error_args);					\
	instrumentation_end();						\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

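/*
 * Rough sketch of what __vmcs_writel() below expands to (illustrative only;
 * the branch hint and instrumentation markers are elided):
 *
 *	asm_volatile_goto("1: vmwrite %1, %0\n\t"
 *			  "jna %l[error]\n\t"
 *			  _ASM_EXTABLE(1b, %l[fault])
 *			  : : "r"(field), "rm"(value) : "cc" : error, fault);
 *	return;
 * error:
 *	vmwrite_error(field, value);
 *	return;
 * fault:
 *	kvm_spurious_fault();
 */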
static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
{
	vmx_asm2(vmwrite, "r"(field), "rm"(value), field, value);
}

static __always_inline void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_check16(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write16(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_check32(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, value);

	__vmcs_writel(field, value);
}

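/*
 * On 32-bit kernels a 64-bit field is accessed as two 32-bit halves; per the
 * VMCS field encoding, "field + 1" (access-type bit set) selects the high
 * 32 bits, mirroring the split read in vmcs_read64() above.
 */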
static __always_inline void vmcs_write64(unsigned long field, u64 value)
{
	vmcs_check64(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
#ifndef CONFIG_X86_64
	__vmcs_writel(field+1, value >> 32);
#endif
}

static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
{
	vmcs_checkl(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_clear_bits does not support 64-bit fields");
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, evmcs_read32(field) & ~mask);

	__vmcs_writel(field, __vmcs_readl(field) & ~mask);
}

static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_set_bits does not support 64-bit fields");
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, evmcs_read32(field) | mask);

	__vmcs_writel(field, __vmcs_readl(field) | mask);
}

static inline void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);

	vmx_asm1(vmclear, "m"(phys_addr), vmcs, phys_addr);
}

static inline void vmcs_load(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);

	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_load(phys_addr);

	vmx_asm1(vmptrld, "m"(phys_addr), vmcs, phys_addr);
}

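/*
 * INVVPID takes a 128-bit descriptor in memory: the VPID in bits 15:0,
 * reserved (zero) bits 63:16, and the guest virtual address in bits 127:64,
 * which is exactly what the anonymous struct below lays out.
 */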
static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };

	vmx_asm2(invvpid, "r"(ext), "m"(operand), ext, vpid, gva);
}

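/*
 * INVEPT likewise takes a 128-bit in-memory descriptor: the EPT pointer in
 * bits 63:0 and, per the SDM, reserved bits in 127:64 (named "gpa" here;
 * callers in this file always pass 0).
 */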
static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
{
	struct {
		u64 eptp, gpa;
	} operand = {eptp, gpa};

	vmx_asm2(invept, "r"(ext), "m"(operand), ext, eptp, gpa);
}

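/*
 * In the helpers below, vpid == 0 means VPID is not in use for the vCPU; in
 * that case the CPU already invalidates linear mappings on VM-Entry/VM-Exit,
 * so there is nothing to flush.
 */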
static inline void vpid_sync_vcpu_single(int vpid)
{
	if (vpid == 0)
		return;

	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
	__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

static inline void vpid_sync_context(int vpid)
{
	if (cpu_has_vmx_invvpid_single())
		vpid_sync_vcpu_single(vpid);
	else if (vpid != 0)
		vpid_sync_vcpu_global();
}

static inline void vpid_sync_vcpu_addr(int vpid, gva_t addr)
{
	if (vpid == 0)
		return;

	if (cpu_has_vmx_invvpid_individual_addr())
		__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
	else
		vpid_sync_context(vpid);
}

static inline void ept_sync_global(void)
{
	__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
	if (cpu_has_vmx_invept_context())
		__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
	else
		ept_sync_global();
}

#endif /* __KVM_X86_VMX_INSN_H */