#ifndef X86_EMULATE_H
#define X86_EMULATE_H

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
/*
 * Use of SSE registers must be disabled prior to the definition of
 * always_inline functions that would use them (memcpy, memset, etc),
 * so do this as early as possible, aiming to be before any always_inline
 * functions that are used are declared.
 * Unfortunately, this cannot be done prior to inclusion of <stdlib.h>,
 * due to functions such as 'atof' being declared to return values in
 * SSE registers, so do so here, immediately after that inclusion.
 */
#if __GNUC__ >= 6
# pragma GCC target("no-sse")
#endif
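/*
 * Rationale: the harness executes test instructions using the real
 * FPU/SSE registers, and that state must survive the harness' own C
 * code (cf. emul_save_fpu_state() below), so the compiler must not
 * emit SSE accesses of its own here.
 */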
/*
 * Attempt detection of unwanted prior inclusion of some headers known to use
 * always_inline with SSE registers in some library / compiler / optimization
 * combinations.
 */
#ifdef _STRING_H
# error "Must not include <string.h> before x86-emulate.h"
#endif
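/*
 * (_STRING_H is glibc's include guard for <string.h>; other C libraries
 * may need a different probe.)
 */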
#include <string.h>

/* EOF is a standard macro defined in <stdio.h> so use it for detection */
#ifdef EOF
# error "Must not include <stdio.h> before x86-emulate.h"
#endif
#include <stdio.h>

#include <xen/xen.h>

#include <xen/asm/msr-index.h>
#include <xen/asm/x86-defns.h>
#include <xen/asm/x86-vendors.h>

#include <xen-tools/common-macros.h>
#define ASSERT assert
#define ASSERT_UNREACHABLE() assert(!__LINE__)
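/*
 * __LINE__ is never zero, so ASSERT_UNREACHABLE() always fails, with
 * assert() reporting the file and line of the spot deemed unreachable.
 */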

#define DEFINE_PER_CPU(type, var) type per_cpu_##var
#define this_cpu(var) per_cpu_##var
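/*
 * The harness runs on a single (v)CPU, so per-CPU variables degenerate
 * to plain globals.
 */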

#define __init
#define __maybe_unused __attribute__((__unused__))

#define likely(x) __builtin_expect(!!(x), true)
#define unlikely(x) __builtin_expect(!!(x), false)
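/* Branch prediction hints, telling the compiler the expected truth value. */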

#define cf_check /* No Control Flow Integrity checking */

/*
 * Pseudo keyword 'fallthrough' to make explicit the fallthrough intention at
 * the end of a case statement block.
 */
#if !defined(__clang__) && (__GNUC__ >= 7)
# define fallthrough __attribute__((__fallthrough__))
#else
# define fallthrough do {} while (0) /* fallthrough */
#endif

#ifdef __GCC_ASM_FLAG_OUTPUTS__
# define ASM_FLAG_OUT(yes, no) yes
#else
# define ASM_FLAG_OUT(yes, no) no
#endif
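/*
 * ASM_FLAG_OUT() selects between GCC's flag-output asm constraints and a
 * fallback needing an explicit setcc.  Illustrative use (not part of this
 * header), testing two values for equality:
 *
 *     bool eq;
 *     asm ( "cmp %2, %1" ASM_FLAG_OUT(, "; setz %0")
 *           : ASM_FLAG_OUT("=@ccz", "=qm") (eq)
 *           : "r" (x), "r" (y) );
 */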

#define hweight32 __builtin_popcount
#define hweight64 __builtin_popcountll

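/*
 * A 64-bit address is canonical when bits 63:47 are all identical:
 * exactly then do the arithmetic right shifts by 47 and by 63 agree
 * (both yielding 0 or both yielding -1).
 */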
#define is_canonical_address(x) (((int64_t)(x) >> 47) == ((int64_t)(x) >> 63))

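/*
 * Plant a 'ret' instruction (opcode 0xc3) at @ptr, returning a pointer
 * just past it, for use when assembling executable test stubs.
 */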
static inline void *place_ret(void *ptr)
{
    *(uint8_t *)ptr = 0xc3;
    return ptr + 1;
}

extern uint32_t mxcsr_mask;
extern struct cpu_policy cpu_policy;

#define MMAP_SZ 16384
bool emul_test_init(void);

/* Must save and restore FPU state between any call into libc. */
void emul_save_fpu_state(void);
void emul_restore_fpu_state(void);

struct x86_fxsr *get_fpu_save_area(void);

/*
 * In order to reasonably use the above, wrap the library calls we use and
 * which we think might access any of the FPU state in wrappers which save
 * and restore that state around the actual function.
 */
#ifndef WRAP
# define WRAP(x) typeof(x) __wrap_ ## x
#endif
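/*
 * The __wrap_ prefix pairs with the linker's symbol wrapping (ld --wrap):
 * when linked with e.g. -Wl,--wrap=memcpy, every memcpy() call resolves to
 * __wrap_memcpy(), which can still reach the real function under the name
 * __real_memcpy().  An illustrative sketch of such a wrapper (the real
 * ones live elsewhere in the harness):
 *
 *     void *__wrap_memcpy(void *dst, const void *src, size_t n)
 *     {
 *         void *ret;
 *
 *         emul_save_fpu_state();
 *         ret = __real_memcpy(dst, src, n);
 *         emul_restore_fpu_state();
 *
 *         return ret;
 *     }
 */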

WRAP(fwrite);
WRAP(memcmp);
WRAP(memcpy);
WRAP(memset);
WRAP(printf);
WRAP(putchar);
WRAP(puts);
WRAP(snprintf);
WRAP(strstr);
WRAP(vprintf);
WRAP(vsnprintf);

#undef WRAP

#include "x86_emulate/x86_emulate.h"

void evex_disp8_test(void *instr, struct x86_emulate_ctxt *ctxt,
                     const struct x86_emulate_ops *ops);
void predicates_test(void *instr, struct x86_emulate_ctxt *ctxt,
                     int (*fetch)(unsigned long offset,
                                  void *p_data,
                                  unsigned int bytes,
                                  struct x86_emulate_ctxt *ctxt));

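/*
 * 0f 01 d0 is the encoding of XGETBV, emitted as raw bytes so that
 * assemblers lacking the mnemonic can still build the harness.
 */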
static inline uint64_t xgetbv(uint32_t xcr)
{
    uint32_t lo, hi;

    asm ( ".byte 0x0f, 0x01, 0xd0" : "=a" (lo), "=d" (hi) : "c" (xcr) );

    return ((uint64_t)hi << 32) | lo;
}

/*
 * Intentionally checking OSXSAVE here (CPUID.1:ECX bit 27) rather than
 * XSAVE (bit 26): xgetbv() would fault if the OS hasn't enabled XSAVE
 * via CR4.OSXSAVE.
 */
#define cpu_has_xsave (cpu_policy.basic.raw[1].c & (1u << 27))

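/*
 * Check that all XSAVE state components in @mask are enabled in XCR0.
 * A mask of 6 covers the SSE and AVX (XMM/YMM) components; 0xe6 adds
 * the three AVX-512 ones (opmask, ZMM_Hi256, Hi16_ZMM).
 */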
static inline bool xcr0_mask(uint64_t mask)
{
    return cpu_has_xsave && ((xgetbv(0) & mask) == mask);
}

unsigned int rdpkru(void);
void wrpkru(unsigned int val);

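/* CPUID expresses the CLFLUSH line size in 8-byte units, hence the scaling. */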
#define cache_line_size() (cpu_policy.basic.clflush_size * 8)
#define cpu_has_fpu cpu_policy.basic.fpu
#define cpu_has_mmx cpu_policy.basic.mmx
#define cpu_has_fxsr cpu_policy.basic.fxsr
#define cpu_has_sse cpu_policy.basic.sse
#define cpu_has_sse2 cpu_policy.basic.sse2
#define cpu_has_sse3 cpu_policy.basic.sse3
#define cpu_has_pclmulqdq cpu_policy.basic.pclmulqdq
#define cpu_has_ssse3 cpu_policy.basic.ssse3
#define cpu_has_fma (cpu_policy.basic.fma && xcr0_mask(6))
#define cpu_has_sse4_1 cpu_policy.basic.sse4_1
#define cpu_has_sse4_2 cpu_policy.basic.sse4_2
#define cpu_has_popcnt cpu_policy.basic.popcnt
#define cpu_has_aesni cpu_policy.basic.aesni
#define cpu_has_avx (cpu_policy.basic.avx && xcr0_mask(6))
#define cpu_has_f16c (cpu_policy.basic.f16c && xcr0_mask(6))

#define cpu_has_avx2 (cpu_policy.feat.avx2 && xcr0_mask(6))
#define cpu_has_bmi1 cpu_policy.feat.bmi1
#define cpu_has_bmi2 cpu_policy.feat.bmi2
#define cpu_has_avx512f (cpu_policy.feat.avx512f && \
                         xcr0_mask(0xe6))
#define cpu_has_avx512dq (cpu_policy.feat.avx512dq && \
                          xcr0_mask(0xe6))
#define cpu_has_avx512_ifma (cpu_policy.feat.avx512_ifma && \
                             xcr0_mask(0xe6))
#define cpu_has_avx512cd (cpu_policy.feat.avx512cd && \
                          xcr0_mask(0xe6))
#define cpu_has_sha cpu_policy.feat.sha
#define cpu_has_avx512bw (cpu_policy.feat.avx512bw && \
                          xcr0_mask(0xe6))
#define cpu_has_avx512vl (cpu_policy.feat.avx512vl && \
                          xcr0_mask(0xe6))
#define cpu_has_avx512_vbmi (cpu_policy.feat.avx512_vbmi && \
                             xcr0_mask(0xe6))
#define cpu_has_avx512_vbmi2 (cpu_policy.feat.avx512_vbmi2 && \
                              xcr0_mask(0xe6))
#define cpu_has_gfni cpu_policy.feat.gfni
#define cpu_has_vaes (cpu_policy.feat.vaes && xcr0_mask(6))
#define cpu_has_vpclmulqdq (cpu_policy.feat.vpclmulqdq && xcr0_mask(6))
#define cpu_has_avx512_vnni (cpu_policy.feat.avx512_vnni && \
                             xcr0_mask(0xe6))
#define cpu_has_avx512_bitalg (cpu_policy.feat.avx512_bitalg && \
                               xcr0_mask(0xe6))
#define cpu_has_avx512_vpopcntdq (cpu_policy.feat.avx512_vpopcntdq && \
                                  xcr0_mask(0xe6))
#define cpu_has_movdiri cpu_policy.feat.movdiri
#define cpu_has_movdir64b cpu_policy.feat.movdir64b
#define cpu_has_avx512_vp2intersect (cpu_policy.feat.avx512_vp2intersect && \
                                     xcr0_mask(0xe6))
#define cpu_has_serialize cpu_policy.feat.serialize
#define cpu_has_avx512_fp16 (cpu_policy.feat.avx512_fp16 && \
                             xcr0_mask(0xe6))
#define cpu_has_sha512 (cpu_policy.feat.sha512 && xcr0_mask(6))
#define cpu_has_sm3 (cpu_policy.feat.sm3 && xcr0_mask(6))
#define cpu_has_sm4 (cpu_policy.feat.sm4 && xcr0_mask(6))
#define cpu_has_avx_vnni (cpu_policy.feat.avx_vnni && xcr0_mask(6))
#define cpu_has_avx512_bf16 (cpu_policy.feat.avx512_bf16 && \
                             xcr0_mask(0xe6))
#define cpu_has_cmpccxadd cpu_policy.feat.cmpccxadd
#define cpu_has_avx_ifma (cpu_policy.feat.avx_ifma && xcr0_mask(6))
#define cpu_has_avx_vnni_int8 (cpu_policy.feat.avx_vnni_int8 && \
                               xcr0_mask(6))
#define cpu_has_avx_ne_convert (cpu_policy.feat.avx_ne_convert && \
                                xcr0_mask(6))
#define cpu_has_avx_vnni_int16 (cpu_policy.feat.avx_vnni_int16 && \
                                xcr0_mask(6))

#define cpu_has_xgetbv1 (cpu_has_xsave && cpu_policy.xstate.xgetbv1)

#define cpu_has_3dnow_ext cpu_policy.extd._3dnowext
#define cpu_has_sse4a cpu_policy.extd.sse4a
#define cpu_has_xop (cpu_policy.extd.xop && xcr0_mask(6))
#define cpu_has_fma4 (cpu_policy.extd.fma4 && xcr0_mask(6))
#define cpu_has_tbm cpu_policy.extd.tbm

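/*
 * Hook functions suitable for use via struct x86_emulate_ops when
 * driving x86_emulate() from the test harness.
 */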
int emul_test_cpuid(
    uint32_t leaf,
    uint32_t subleaf,
    struct cpuid_leaf *res,
    struct x86_emulate_ctxt *ctxt);

int emul_test_read_cr(
    unsigned int reg,
    unsigned long *val,
    struct x86_emulate_ctxt *ctxt);

int emul_test_read_xcr(
    unsigned int reg,
    uint64_t *val,
    struct x86_emulate_ctxt *ctxt);

int emul_test_get_fpu(
    enum x86_emulate_fpu_type type,
    struct x86_emulate_ctxt *ctxt);

void emul_test_put_fpu(
    struct x86_emulate_ctxt *ctxt,
    enum x86_emulate_fpu_type backout,
    const struct x86_emul_fpu_aux *aux);

#endif /* X86_EMULATE_H */