/******************************************************************************
 * arch/x86/msr.c
 *
 * Policy objects for Model-Specific Registers.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (c) 2017 Citrix Systems Ltd.
 */

#include <xen/init.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/msr.h>

struct msr_domain_policy __read_mostly hvm_max_msr_domain_policy,
                         __read_mostly  pv_max_msr_domain_policy;

struct msr_vcpu_policy __read_mostly hvm_max_msr_vcpu_policy,
                       __read_mostly  pv_max_msr_vcpu_policy;

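/*
 * The "max" policies describe the superset of MSR features which each type
 * of guest (HVM or PV) can be offered on this host.  Individual domain and
 * vcpu policies are initialised as copies of these.
 */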
static void __init calculate_hvm_max_policy(void)
{
    struct msr_domain_policy *dp = &hvm_max_msr_domain_policy;
    struct msr_vcpu_policy *vp = &hvm_max_msr_vcpu_policy;

    if ( !hvm_enabled )
        return;

    /* 0x000000ce  MSR_INTEL_PLATFORM_INFO */
    if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ||
         boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
    {
        dp->plaform_info.available = true;
        dp->plaform_info.cpuid_faulting = true;
    }

    /* 0x00000140  MSR_INTEL_MISC_FEATURES_ENABLES */
    vp->misc_features_enables.available = dp->plaform_info.available;
}

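/*
 * For PV guests, MSR_INTEL_PLATFORM_INFO and its CPUID faulting interface
 * are only offered when the hardware itself supports CPUID faulting.
 */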
static void __init calculate_pv_max_policy(void)
{
    struct msr_domain_policy *dp = &pv_max_msr_domain_policy;
    struct msr_vcpu_policy *vp = &pv_max_msr_vcpu_policy;

    /* 0x000000ce  MSR_INTEL_PLATFORM_INFO */
    if ( cpu_has_cpuid_faulting )
    {
        dp->plaform_info.available = true;
        dp->plaform_info.cpuid_faulting = true;
    }

    /* 0x00000140  MSR_INTEL_MISC_FEATURES_ENABLES */
    vp->misc_features_enables.available = dp->plaform_info.available;
}

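/* Calculate the maximum guest MSR policies at boot. */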
void __init init_guest_msr_policy(void)
{
    calculate_hvm_max_policy();
    calculate_pv_max_policy();
}

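/*
 * Allocate d->arch.msr and initialise it from the appropriate max policy.
 * CPUID faulting is not offered to the control domain (see the comment in
 * intel_ctxt_switch_levelling()).  Returns -ENOMEM on allocation failure.
 */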
int init_domain_msr_policy(struct domain *d)
{
    struct msr_domain_policy *dp;

    dp = xmalloc(struct msr_domain_policy);

    if ( !dp )
        return -ENOMEM;

    *dp = is_pv_domain(d) ? pv_max_msr_domain_policy :
                            hvm_max_msr_domain_policy;

    /* See comment in intel_ctxt_switch_levelling() */
    if ( is_control_domain(d) )
    {
        dp->plaform_info.available = false;
        dp->plaform_info.cpuid_faulting = false;
    }

    d->arch.msr = dp;

    return 0;
}

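/*
 * Allocate v->arch.msr and initialise it from the appropriate max policy,
 * with the same control-domain exception as init_domain_msr_policy().
 */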
int init_vcpu_msr_policy(struct vcpu *v)
{
    struct domain *d = v->domain;
    struct msr_vcpu_policy *vp;

    vp = xmalloc(struct msr_vcpu_policy);

    if ( !vp )
        return -ENOMEM;

    *vp = is_pv_domain(d) ? pv_max_msr_vcpu_policy :
                            hvm_max_msr_vcpu_policy;

    /* See comment in intel_ctxt_switch_levelling() */
    if ( is_control_domain(d) )
        vp->misc_features_enables.available = false;

    v->arch.msr = vp;

    return 0;
}

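/*
 * Emulate a RDMSR on behalf of a guest.  Returns X86EMUL_OKAY with *val
 * filled in, X86EMUL_EXCEPTION if the guest should see #GP (the MSR isn't
 * offered by the policy), or X86EMUL_UNHANDLEABLE for MSRs not handled
 * here, leaving them to the caller.
 */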
int guest_rdmsr(const struct vcpu *v, uint32_t msr, uint64_t *val)
{
    const struct msr_domain_policy *dp = v->domain->arch.msr;
    const struct msr_vcpu_policy *vp = v->arch.msr;

    switch ( msr )
    {
    case MSR_INTEL_PLATFORM_INFO:
        if ( !dp->plaform_info.available )
            goto gp_fault;
        *val = (uint64_t)dp->plaform_info.cpuid_faulting <<
               _MSR_PLATFORM_INFO_CPUID_FAULTING;
        break;

    case MSR_INTEL_MISC_FEATURES_ENABLES:
        if ( !vp->misc_features_enables.available )
            goto gp_fault;
        *val = (uint64_t)vp->misc_features_enables.cpuid_faulting <<
               _MSR_MISC_FEATURES_CPUID_FAULTING;
        break;

    default:
        return X86EMUL_UNHANDLEABLE;
    }

    return X86EMUL_OKAY;

 gp_fault:
    return X86EMUL_EXCEPTION;
}

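/*
 * Emulate a WRMSR on behalf of a guest.  Writes to MSR_INTEL_PLATFORM_INFO
 * (a read-only MSR) and writes setting reserved bits yield #GP.  When the
 * current vcpu of an HVM domain toggles CPUID faulting and the hardware
 * supports faulting, ctxt_switch_levelling() is invoked immediately so the
 * change takes effect straight away.
 */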
int guest_wrmsr(struct vcpu *v, uint32_t msr, uint64_t val)
{
    const struct vcpu *curr = current;
    struct domain *d = v->domain;
    struct msr_domain_policy *dp = d->arch.msr;
    struct msr_vcpu_policy *vp = v->arch.msr;

    switch ( msr )
    {
    case MSR_INTEL_PLATFORM_INFO:
        goto gp_fault;

    case MSR_INTEL_MISC_FEATURES_ENABLES:
    {
        uint64_t rsvd = ~0ull;
        bool old_cpuid_faulting = vp->misc_features_enables.cpuid_faulting;

        if ( !vp->misc_features_enables.available )
            goto gp_fault;

        if ( dp->plaform_info.cpuid_faulting )
            rsvd &= ~MSR_MISC_FEATURES_CPUID_FAULTING;

        if ( val & rsvd )
            goto gp_fault;

        vp->misc_features_enables.cpuid_faulting =
            val & MSR_MISC_FEATURES_CPUID_FAULTING;

        if ( v == curr && is_hvm_domain(d) && cpu_has_cpuid_faulting &&
             (old_cpuid_faulting ^ vp->misc_features_enables.cpuid_faulting) )
            ctxt_switch_levelling(v);
        break;
    }

    default:
        return X86EMUL_UNHANDLEABLE;
    }

    return X86EMUL_OKAY;

 gp_fault:
    return X86EMUL_EXCEPTION;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */