1 #ifndef __X86_CPUID_H__
2 #define __X86_CPUID_H__
3 
4 #include <asm/cpufeatureset.h>
5 
6 #ifndef __ASSEMBLY__
7 #include <xen/types.h>
8 #include <xen/kernel.h>
9 #include <xen/percpu.h>
10 
11 #include <public/sysctl.h>
12 
/* Bitmap (one uint32_t per feature word) of every feature Xen knows about. */
extern const uint32_t known_features[FSCAPINTS];

/*
 * Expected levelling capabilities (given cpuid vendor/family information),
 * and levelling capabilities actually available (given MSR probing).
 *
 * Each LCAP_* constant groups the XEN_SYSCTL_CPU_LEVELCAP_* bits for the
 * registers of one maskable CPUID leaf:
 *   LCAP_faulting - CPUID faulting support (no masking MSR needed).
 *   LCAP_1cd      - leaf 1, %ecx and %edx.
 *   LCAP_e1cd     - extended leaf (0x80000001), %ecx and %edx.
 *   LCAP_Da1      - xsave leaf (0xd), subleaf 1, %eax.
 *   LCAP_6c       - thermal leaf (6), %ecx.
 *   LCAP_7ab0     - leaf 7, subleaf 0, %eax and %ebx.
 */
#define LCAP_faulting XEN_SYSCTL_CPU_LEVELCAP_faulting
#define LCAP_1cd      (XEN_SYSCTL_CPU_LEVELCAP_ecx |        \
                       XEN_SYSCTL_CPU_LEVELCAP_edx)
#define LCAP_e1cd     (XEN_SYSCTL_CPU_LEVELCAP_extd_ecx |   \
                       XEN_SYSCTL_CPU_LEVELCAP_extd_edx)
#define LCAP_Da1      XEN_SYSCTL_CPU_LEVELCAP_xsave_eax
#define LCAP_6c       XEN_SYSCTL_CPU_LEVELCAP_thermal_ecx
#define LCAP_7ab0     (XEN_SYSCTL_CPU_LEVELCAP_l7s0_eax |   \
                       XEN_SYSCTL_CPU_LEVELCAP_l7s0_ebx)
/*
 * expected_levelling_cap: LCAP_* bits predicted from vendor/family info.
 * levelling_caps:         LCAP_* bits confirmed by probing the MSRs.
 */
extern unsigned int expected_levelling_cap, levelling_caps;
29 
/*
 * Cached values for the CPUID masking MSRs, one 64-bit field per maskable
 * leaf.  Field names mirror the LCAP_* constants above; each field
 * presumably packs the two 32-bit register masks for its leaf into one
 * 64-bit MSR value — confirm against the vendor-specific MSR layout.
 */
struct cpuidmasks
{
    uint64_t _1cd;  /* Leaf 1, %ecx/%edx. */
    uint64_t e1cd;  /* Extended leaf 0x80000001, %ecx/%edx. */
    uint64_t Da1;   /* Xsave leaf 0xd, subleaf 1, %eax. */
    uint64_t _6c;   /* Thermal leaf 6, %ecx. */
    uint64_t _7ab0; /* Leaf 7, subleaf 0, %eax/%ebx. */
};
38 
/*
 * Per CPU shadows of masking MSR values, for lazy context switching.
 * Writing the masking MSRs is expensive; the shadow lets the context
 * switch path skip the write when the value is already loaded.
 */
DECLARE_PER_CPU(struct cpuidmasks, cpuidmasks);

/* Default masking MSR values, calculated at boot. */
extern struct cpuidmasks cpuidmask_defaults;

/*
 * Check that all previously present features are still available.
 * @cpu: CPU (re)coming online, e.g. after S3 or hotplug.
 * Returns false if any previously-seen feature has disappeared.
 */
bool recheck_cpu_features(unsigned int cpu);

struct vcpu;
struct cpuid_leaf;

/*
 * Compute the CPUID leaf/subleaf values as seen by a guest vcpu,
 * storing the four output registers in @res.
 */
void guest_cpuid(const struct vcpu *v, uint32_t leaf,
                 uint32_t subleaf, struct cpuid_leaf *res);
52 
53 #endif /* __ASSEMBLY__ */
54 #endif /* !__X86_CPUID_H__ */
55 
56 /*
57  * Local variables:
58  * mode: C
59  * c-file-style: "BSD"
60  * c-basic-offset: 4
61  * tab-width: 4
62  * indent-tabs-mode: nil
63  * End:
64  */
65