#ifndef __ASM_MSR_H
#define __ASM_MSR_H

#include "msr-index.h"

#ifndef __ASSEMBLY__

#include <xen/types.h>
#include <xen/percpu.h>
#include <xen/errno.h>
#include <asm/asm_defns.h>
#include <asm/cpufeature.h>

#define rdmsr(msr,val1,val2) \
    __asm__ __volatile__("rdmsr" \
                         : "=a" (val1), "=d" (val2) \
                         : "c" (msr))

#define rdmsrl(msr,val) do { unsigned long a__,b__; \
    __asm__ __volatile__("rdmsr" \
                         : "=a" (a__), "=d" (b__) \
                         : "c" (msr)); \
    val = a__ | ((u64)b__<<32); \
} while(0)

#define wrmsr(msr,val1,val2) \
    __asm__ __volatile__("wrmsr" \
                         : /* no outputs */ \
                         : "c" (msr), "a" (val1), "d" (val2))

static inline void wrmsrl(unsigned int msr, __u64 val)
{
    __u32 lo, hi;
    lo = (__u32)val;
    hi = (__u32)(val >> 32);
    wrmsr(msr, lo, hi);
}
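
/*
 * Illustrative sketch (not built): typical use of the unchecked accessors
 * above, assuming MSR_EFER and EFER_NX are defined in msr-index.h as usual.
 *
 *     uint64_t efer;
 *
 *     rdmsrl(MSR_EFER, efer);           // read IA32_EFER into 'efer'
 *     wrmsrl(MSR_EFER, efer | EFER_NX); // write it back with NX enabled
 *
 * Note that rdmsrl() takes the destination variable by name rather than by
 * pointer, as it is a macro which assigns to it directly.
 */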

/* rdmsr with exception handling */
#define rdmsr_safe(msr,val) ({\
    int _rc; \
    uint32_t lo, hi; \
    __asm__ __volatile__( \
        "1: rdmsr\n2:\n" \
        ".section .fixup,\"ax\"\n" \
        "3: xorl %0,%0\n; xorl %1,%1\n" \
        "   movl %5,%2\n; jmp 2b\n" \
        ".previous\n" \
        _ASM_EXTABLE(1b, 3b) \
        : "=a" (lo), "=d" (hi), "=&r" (_rc) \
        : "c" (msr), "2" (0), "i" (-EFAULT)); \
    val = lo | ((uint64_t)hi << 32); \
    _rc; })

/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned int msr, uint64_t val)
{
    int _rc;
    uint32_t lo, hi;
    lo = (uint32_t)val;
    hi = (uint32_t)(val >> 32);

    __asm__ __volatile__(
        "1: wrmsr\n2:\n"
        ".section .fixup,\"ax\"\n"
        "3: movl %5,%0\n; jmp 2b\n"
        ".previous\n"
        _ASM_EXTABLE(1b, 3b)
        : "=&r" (_rc)
        : "c" (msr), "a" (lo), "d" (hi), "0" (0), "i" (-EFAULT));
    return _rc;
}
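
/*
 * Illustrative sketch: the *_safe() variants return 0 on success and -EFAULT
 * if the access faults (e.g. the MSR does not exist), so probing for an
 * optional MSR can look like the following.  MSR_INTEL_PLATFORM_INFO is used
 * purely as an example of an MSR which may be absent.
 *
 *     uint64_t plat_info;
 *
 *     if ( rdmsr_safe(MSR_INTEL_PLATFORM_INFO, plat_info) == 0 )
 *         // MSR present: plat_info holds its value
 *     else
 *         // MSR absent/inaccessible: plat_info was zeroed by the fixup code
 */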

static inline uint64_t msr_fold(const struct cpu_user_regs *regs)
{
    return (regs->rdx << 32) | regs->eax;
}

static inline void msr_split(struct cpu_user_regs *regs, uint64_t val)
{
    regs->rdx = val >> 32;
    regs->rax = (uint32_t)val;
}
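
/*
 * Illustrative sketch: msr_fold()/msr_split() convert between the %edx:%eax
 * register pair used by the RDMSR/WRMSR instructions and a single 64-bit
 * value, e.g. when emulating a guest's MSR accesses:
 *
 *     // Guest executed WRMSR: combine its %edx:%eax into one value.
 *     uint64_t val = msr_fold(regs);
 *
 *     // Guest executed RDMSR: hand the result back in %edx:%eax.
 *     msr_split(regs, val);
 */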

static inline uint64_t rdtsc(void)
{
    uint32_t low, high;

    __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high));

    return ((uint64_t)high << 32) | low;
}

static inline uint64_t rdtsc_ordered(void)
{
    /*
     * The RDTSC instruction is not ordered relative to memory access.
     * The Intel SDM and the AMD APM are both vague on this point, but
     * empirically an RDTSC instruction can be speculatively executed
     * before prior loads.  An RDTSC immediately after an appropriate
     * barrier appears to be ordered as a normal load, that is, it
     * provides the same ordering guarantees as reading from a global
     * memory location that some other imaginary CPU is updating
     * continuously with a time stamp.
     */
    alternative("lfence", "mfence", X86_FEATURE_MFENCE_RDTSC);
    return rdtsc();
}
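
/*
 * Illustrative sketch: prefer rdtsc_ordered() over plain rdtsc() when the
 * reading must not be reordered ahead of earlier memory accesses, e.g. when
 * timing a section of code (do_work() is a hypothetical workload):
 *
 *     uint64_t start, end;
 *
 *     start = rdtsc_ordered();
 *     do_work();
 *     end = rdtsc_ordered();
 *     // 'end - start' is the elapsed time in TSC ticks
 */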

#define __write_tsc(val) wrmsrl(MSR_IA32_TSC, val)
#define write_tsc(val) ({ \
    /* Reliable TSCs are in lockstep across all CPUs. We should \
     * never write to them. */ \
    ASSERT(!boot_cpu_has(X86_FEATURE_TSC_RELIABLE)); \
    __write_tsc(val); \
})

#define write_rdtscp_aux(val) wrmsr(MSR_TSC_AUX, (val), 0)

#define rdpmc(counter,low,high) \
    __asm__ __volatile__("rdpmc" \
                         : "=a" (low), "=d" (high) \
                         : "c" (counter))

static inline unsigned long __rdfsbase(void)
{
    unsigned long base;

#ifdef HAVE_GAS_FSGSBASE
    asm volatile ( "rdfsbase %0" : "=r" (base) );
#else
    asm volatile ( ".byte 0xf3, 0x48, 0x0f, 0xae, 0xc0" : "=a" (base) );
#endif

    return base;
}

static inline unsigned long __rdgsbase(void)
{
    unsigned long base;

#ifdef HAVE_GAS_FSGSBASE
    asm volatile ( "rdgsbase %0" : "=r" (base) );
#else
    asm volatile ( ".byte 0xf3, 0x48, 0x0f, 0xae, 0xc8" : "=a" (base) );
#endif

    return base;
}

static inline unsigned long rdfsbase(void)
{
    unsigned long base;

    if ( cpu_has_fsgsbase )
        return __rdfsbase();

    rdmsrl(MSR_FS_BASE, base);

    return base;
}

static inline unsigned long rdgsbase(void)
{
    unsigned long base;

    if ( cpu_has_fsgsbase )
        return __rdgsbase();

    rdmsrl(MSR_GS_BASE, base);

    return base;
}

static inline void wrfsbase(unsigned long base)
{
    if ( cpu_has_fsgsbase )
#ifdef HAVE_GAS_FSGSBASE
        asm volatile ( "wrfsbase %0" :: "r" (base) );
#else
        asm volatile ( ".byte 0xf3, 0x48, 0x0f, 0xae, 0xd0" :: "a" (base) );
#endif
    else
        wrmsrl(MSR_FS_BASE, base);
}

static inline void wrgsbase(unsigned long base)
{
    if ( cpu_has_fsgsbase )
#ifdef HAVE_GAS_FSGSBASE
        asm volatile ( "wrgsbase %0" :: "r" (base) );
#else
        asm volatile ( ".byte 0xf3, 0x48, 0x0f, 0xae, 0xd8" :: "a" (base) );
#endif
    else
        wrmsrl(MSR_GS_BASE, base);
}
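
/*
 * Illustrative sketch: the {rd,wr}{fs,gs}base() helpers use the FSGSBASE
 * instructions when the CPU supports them and fall back to the
 * MSR_FS_BASE/MSR_GS_BASE MSRs otherwise, so callers can save and restore
 * segment bases without caring which mechanism is used:
 *
 *     unsigned long fs_base = rdfsbase();
 *     unsigned long gs_base = rdgsbase();
 *     ...
 *     wrfsbase(fs_base);
 *     wrgsbase(gs_base);
 */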

DECLARE_PER_CPU(u64, efer);
u64 read_efer(void);
void write_efer(u64 val);

DECLARE_PER_CPU(u32, ler_msr);

/* MSR policy object for shared per-domain MSRs */
struct msr_domain_policy
{
    /* 0x000000ce  MSR_INTEL_PLATFORM_INFO */
    struct {
        bool available; /* This MSR is non-architectural */
        bool cpuid_faulting;
    } platform_info;
};

/* MSR policy object for per-vCPU MSRs */
struct msr_vcpu_policy
{
    /* 0x00000140  MSR_INTEL_MISC_FEATURES_ENABLES */
    struct {
        bool available; /* This MSR is non-architectural */
        bool cpuid_faulting;
    } misc_features_enables;
};

void init_guest_msr_policy(void);
int init_domain_msr_policy(struct domain *d);
int init_vcpu_msr_policy(struct vcpu *v);

/*
 * The functions below can return X86EMUL_UNHANDLEABLE, which means the MSR is
 * not (yet) handled by them and must be processed by the legacy handlers.
 * This behaviour is needed for a transition period, until all rdmsr/wrmsr
 * handling is moved to the new MSR infrastructure.
 *
 * These functions are also used by the migration logic, so they need to cope
 * with being used outside of v's context.
 */
int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val);
int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val);
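
/*
 * Illustrative sketch of the dispatch pattern described above: a hypothetical
 * caller tries the new infrastructure first, and only falls back to its
 * legacy handling when X86EMUL_UNHANDLEABLE is returned.
 *
 *     uint64_t val;
 *     int rc = guest_rdmsr(v, msr, &val);
 *
 *     if ( rc != X86EMUL_UNHANDLEABLE )
 *         return rc;              // handled (successfully or otherwise)
 *
 *     // ... legacy per-vendor rdmsr handling ...
 */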

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_MSR_H */