/*
 * Copyright (C) 2018-2022 Intel Corporation.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <types.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/cpufeatures.h>
#include <asm/cpuid.h>
#include <asm/cpu.h>
#include <asm/per_cpu.h>
#include <asm/vmx.h>
#include <asm/cpu_caps.h>
#include <errno.h>
#include <logmsg.h>
#include <asm/guest/vmcs.h>

/* TODO: add more capabilities per requirement */
/* APICv features */
#define VAPIC_FEATURE_VIRT_ACCESS	(1U << 0U)
#define VAPIC_FEATURE_VIRT_REG		(1U << 1U)
#define VAPIC_FEATURE_INTR_DELIVERY	(1U << 2U)
#define VAPIC_FEATURE_TPR_SHADOW	(1U << 3U)
#define VAPIC_FEATURE_POST_INTR		(1U << 4U)
#define VAPIC_FEATURE_VX2APIC_MODE	(1U << 5U)

/* BASIC features: must be supported by the physical platform and will be enabled by default */
#define APICV_BASIC_FEATURE	(VAPIC_FEATURE_TPR_SHADOW | VAPIC_FEATURE_VIRT_ACCESS | VAPIC_FEATURE_VX2APIC_MODE)
/* ADVANCED features: enabled by default if the physical platform supports them all; otherwise, all of them are disabled */
#define APICV_ADVANCED_FEATURE	(VAPIC_FEATURE_VIRT_REG | VAPIC_FEATURE_INTR_DELIVERY | VAPIC_FEATURE_POST_INTR)
static struct cpu_capability {
	uint8_t apicv_features;
	uint8_t ept_features;

	uint64_t vmx_ept_vpid;
	uint32_t core_caps;	/* value of MSR_IA32_CORE_CAPABILITIES */
} cpu_caps;

static struct cpuinfo_x86 boot_cpu_data;

struct vmx_capability {
	uint32_t msr;
	uint32_t bits;
};

/* SDM APPENDIX A:
 * Bits 31:0 indicate the allowed 0-settings of these controls. VM entry allows control X
 * to be 0 if bit X in the MSR is cleared to 0; if bit X in the MSR is set to 1,
 * VM entry fails if control X is 0.
 * Bits 63:32 indicate the allowed 1-settings of these controls. VM entry allows control X to be 1
 * if bit 32+X in the MSR is set to 1; if bit 32+X in the MSR is cleared to 0, VM entry fails if control X is 1.
 */
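/* VMX controls the hypervisor depends on; each entry is validated against
 * its capability MSR by check_essential_vmx_caps() below.
 */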
static struct vmx_capability vmx_caps[] = {
	{
		MSR_IA32_VMX_PINBASED_CTLS, VMX_PINBASED_CTLS_IRQ_EXIT
	},
	{
		MSR_IA32_VMX_PROCBASED_CTLS, VMX_PROCBASED_CTLS_TSC_OFF | VMX_PROCBASED_CTLS_TPR_SHADOW |
		VMX_PROCBASED_CTLS_IO_BITMAP | VMX_PROCBASED_CTLS_MSR_BITMAP |
		VMX_PROCBASED_CTLS_HLT | VMX_PROCBASED_CTLS_SECONDARY
	},
	{
		MSR_IA32_VMX_PROCBASED_CTLS2, VMX_PROCBASED_CTLS2_VAPIC | VMX_PROCBASED_CTLS2_EPT |
		VMX_PROCBASED_CTLS2_VPID | VMX_PROCBASED_CTLS2_RDTSCP |
		VMX_PROCBASED_CTLS2_UNRESTRICT
	},
	{
		MSR_IA32_VMX_EXIT_CTLS, VMX_EXIT_CTLS_ACK_IRQ | VMX_EXIT_CTLS_SAVE_PAT |
		VMX_EXIT_CTLS_LOAD_PAT | VMX_EXIT_CTLS_HOST_ADDR64
	},
	{
		MSR_IA32_VMX_ENTRY_CTLS, VMX_ENTRY_CTLS_LOAD_PAT | VMX_ENTRY_CTLS_IA32E_MODE
	}
};

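/* Test a cached CPUID feature bit. Each X86_FEATURE_* value encodes a
 * (leaf word, bit) pair: the upper bits select a word of cpuid_leaves[]
 * and the low five bits select a bit within that word, assuming the
 * ((word << 5U) + bit) encoding used by the X86_FEATURE_* macros.
 */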
bool pcpu_has_cap(uint32_t bit)
{
	uint32_t feat_idx = bit >> 5U;
	uint32_t feat_bit = bit & 0x1fU;
	bool ret;

	if (feat_idx >= FEATURE_WORDS) {
		ret = false;
	} else {
		ret = ((boot_cpu_data.cpuid_leaves[feat_idx] & (1U << feat_bit)) != 0U);
	}

	return ret;
}

bool has_monitor_cap(void)
{
	bool ret = false;

	if (pcpu_has_cap(X86_FEATURE_MONITOR)) {
		/* Don't use MONITOR in the hypervisor on this CPU
		 * (family: 0x6 model: 0x5c), but still expose it to the
		 * guests and let them handle it correctly.
		 */
		if (!is_apl_platform()) {
			ret = true;
		}
	}

	return ret;
}

bool disable_host_monitor_wait(void)
{
	bool ret = true;
	uint32_t eax = 0U, ebx = 0U, ecx = 0U, edx = 0U;

	cpuid_subleaf(0x1U, 0x0U, &eax, &ebx, &ecx, &edx);
	if ((ecx & CPUID_ECX_MONITOR) != 0U) {
		/* According to SDM Vol4 2.1 Table 2-2, updating the
		 * 'MSR_IA32_MISC_ENABLE_MONITOR_ENA' bit is not allowed
		 * if the SSE3 feature flag is set to 0.
		 */
		if ((ecx & CPUID_ECX_SSE3) != 0U) {
			msr_write(MSR_IA32_MISC_ENABLE, (msr_read(MSR_IA32_MISC_ENABLE) &
				~MSR_IA32_MISC_ENABLE_MONITOR_ENA));

			/* Update cpuid_leaves of boot_cpu_data to
			 * refresh 'has_monitor_cap' state.
			 */
			if (has_monitor_cap()) {
				cpuid_subleaf(CPUID_FEATURES, 0x0U, &eax, &ebx,
					&boot_cpu_data.cpuid_leaves[FEAT_1_ECX],
					&boot_cpu_data.cpuid_leaves[FEAT_1_EDX]);
			}

		} else {
			ret = false;
		}
	}
	return ret;
}

static inline bool is_fast_string_erms_supported_and_enabled(void)
{
	bool ret = false;
	uint64_t misc_enable = msr_read(MSR_IA32_MISC_ENABLE);

	if ((misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) == 0UL) {
		pr_fatal("%s, fast string is not enabled\n", __func__);
	} else {
		if (!pcpu_has_cap(X86_FEATURE_ERMS)) {
			pr_fatal("%s, enhanced rep movsb/stosb not supported\n", __func__);
		} else {
			ret = true;
		}
	}

	return ret;
}

/* Check allowed 1-settings in a VMX capability MSR. */
static bool is_ctrl_setting_allowed(uint64_t msr_val, uint32_t ctrl)
{
	/*
	 * Intel SDM Appendix A.3
	 * - bit X in ctrl can be set to 1
	 *   only if bit 32+X in msr_val is 1
	 */
	return ((((uint32_t)(msr_val >> 32UL)) & ctrl) == ctrl);
}

bool is_apl_platform(void)
{
	bool ret = false;

	if ((boot_cpu_data.displayfamily == 0x6U) && (boot_cpu_data.displaymodel == 0x5cU)) {
		ret = true;
	}

	return ret;
}

bool has_core_cap(uint32_t bit_mask)
{
	return ((cpu_caps.core_caps & bit_mask) != 0U);
}

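/* True when #AC on split-locked accesses is supported by the core and
 * currently enabled in MSR_TEST_CTL.
 */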
bool is_ac_enabled(void)
{
	bool ac_enabled = false;

	if (has_core_cap(CORE_CAP_SPLIT_LOCK) && ((msr_read(MSR_TEST_CTL) & MSR_TEST_CTL_AC_SPLITLOCK) != 0UL)) {
		ac_enabled = true;
	}

	return ac_enabled;
}

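/* True when #GP on locked accesses to UC memory is supported by the core
 * and currently enabled in MSR_TEST_CTL.
 */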
bool is_gp_enabled(void)
{
	bool gp_enabled = false;

	if (has_core_cap(CORE_CAP_UC_LOCK) && ((msr_read(MSR_TEST_CTL) & MSR_TEST_CTL_GP_UCLOCK) != 0UL)) {
		gp_enabled = true;
	}

	return gp_enabled;
}

static void detect_ept_cap(void)
{
	uint64_t msr_val;

	cpu_caps.ept_features = 0U;

	/* Read primary processor-based VM-execution controls. */
	msr_val = msr_read(MSR_IA32_VMX_PROCBASED_CTLS);

	/*
	 * According to SDM A.3.2 Primary Processor-Based VM-Execution Controls:
	 * The IA32_VMX_PROCBASED_CTLS MSR (index 482H) reports on the allowed
	 * settings of most of the primary processor-based VM-execution controls
	 * (see Section 24.6.2):
	 * Bits 63:32 indicate the allowed 1-settings of these controls.
	 * VM entry allows control X to be 1 if bit 32+X in the MSR is set to 1;
	 * if bit 32+X in the MSR is cleared to 0, VM entry fails if control X
	 * is 1.
	 */
	msr_val = msr_val >> 32U;

	/* Check if secondary processor-based VM-execution controls are available. */
	if ((msr_val & VMX_PROCBASED_CTLS_SECONDARY) != 0UL) {
		/* Read secondary processor-based VM-execution controls. */
		msr_val = msr_read(MSR_IA32_VMX_PROCBASED_CTLS2);

		if (is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_EPT)) {
			cpu_caps.ept_features = 1U;
		}
	}
}

static void detect_apicv_cap(void)
{
	uint8_t features = 0U;
	uint64_t msr_val;

	msr_val = msr_read(MSR_IA32_VMX_PROCBASED_CTLS);
	if (is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS_TPR_SHADOW)) {
		features |= VAPIC_FEATURE_TPR_SHADOW;
	}

	msr_val = msr_read(MSR_IA32_VMX_PROCBASED_CTLS2);
	if (is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_VAPIC)) {
		features |= VAPIC_FEATURE_VIRT_ACCESS;
	}
	if (is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_VX2APIC)) {
		features |= VAPIC_FEATURE_VX2APIC_MODE;
	}
	if (is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_VAPIC_REGS)) {
		features |= VAPIC_FEATURE_VIRT_REG;
	}
	if (is_ctrl_setting_allowed(msr_val, VMX_PROCBASED_CTLS2_VIRQ)) {
		features |= VAPIC_FEATURE_INTR_DELIVERY;
	}

	msr_val = msr_read(MSR_IA32_VMX_PINBASED_CTLS);
	if (is_ctrl_setting_allowed(msr_val, VMX_PINBASED_CTLS_POST_IRQ)) {
		features |= VAPIC_FEATURE_POST_INTR;
	}

	cpu_caps.apicv_features = features;

	vlapic_set_apicv_ops();
}

static void detect_vmx_mmu_cap(void)
{
	/* Read the MSR register of EPT and VPID Capability - SDM A.10 */
	cpu_caps.vmx_ept_vpid = msr_read(MSR_IA32_VMX_EPT_VPID_CAP);
}

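/* MSR_IA32_VMX_BASIC bit 48: when set, physical addresses used for VMX
 * structures (VMXON region, VMCS, referenced data) are limited to 32 bits;
 * this bit is always 0 on processors that support Intel 64 - SDM A.1.
 */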
static bool pcpu_vmx_set_32bit_addr_width(void)
{
	return ((msr_read(MSR_IA32_VMX_BASIC) & MSR_IA32_VMX_BASIC_ADDR_WIDTH) != 0UL);
}

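/* Cache CPUID.0DH sub-leaves 0 and 1, which enumerate the supported XSAVE
 * state components and the XSAVEOPT/XSAVEC/XSAVES capabilities.
 */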
static void detect_xsave_cap(void)
{
	uint32_t unused;

	cpuid_subleaf(CPUID_XSAVE_FEATURES, 0x0U,
		&boot_cpu_data.cpuid_leaves[FEAT_D_0_EAX],
		&unused,
		&unused,
		&boot_cpu_data.cpuid_leaves[FEAT_D_0_EDX]);
	cpuid_subleaf(CPUID_XSAVE_FEATURES, 1U,
		&boot_cpu_data.cpuid_leaves[FEAT_D_1_EAX],
		&unused,
		&boot_cpu_data.cpuid_leaves[FEAT_D_1_ECX],
		&boot_cpu_data.cpuid_leaves[FEAT_D_1_EDX]);
}

static void detect_core_caps(void)
{
	if (pcpu_has_cap(X86_FEATURE_CORE_CAP)) {
		cpu_caps.core_caps = (uint32_t)msr_read(MSR_IA32_CORE_CAPABILITIES);
	}
}

static void detect_pcpu_cap(void)
{
	detect_apicv_cap();
	detect_ept_cap();
	detect_vmx_mmu_cap();
	detect_xsave_cap();
	detect_core_caps();
}

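/* Build a mask of the low 'limit' address bits with the page-offset bits
 * cleared; e.g. limit = 39U yields 0x0000007FFFFFF000UL, assuming the
 * usual 4-KByte PAGE_MASK.
 */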
static uint64_t get_address_mask(uint8_t limit)
{
	return ((1UL << limit) - 1UL) & PAGE_MASK;
}

void init_pcpu_capabilities(void)
{
	uint32_t eax, unused;
	uint32_t family_id, model_id, displayfamily, displaymodel;

	cpuid_subleaf(CPUID_VENDORSTRING, 0x0U,
		&boot_cpu_data.cpuid_level,
		&unused, &unused, &unused);

	cpuid_subleaf(CPUID_FEATURES, 0x0U, &eax, &unused,
		&boot_cpu_data.cpuid_leaves[FEAT_1_ECX],
		&boot_cpu_data.cpuid_leaves[FEAT_1_EDX]);

	/* SDM Vol.2A 3-211 states the algorithm to calculate DisplayFamily and DisplayModel */
	family_id = (eax >> 8U) & 0xfU;
	displayfamily = family_id;
	if (family_id == 0xFU) {
		displayfamily += ((eax >> 20U) & 0xffU);
	}
	boot_cpu_data.displayfamily = (uint8_t)displayfamily;

	model_id = (eax >> 4U) & 0xfU;
	displaymodel = model_id;
	if ((family_id == 0x06U) || (family_id == 0xFU)) {
		displaymodel += ((eax >> 16U) & 0xfU) << 4U;
	}
	boot_cpu_data.displaymodel = (uint8_t)displaymodel;
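	/* Worked example: a part reporting CPUID.1:EAX = 0x506C9 has
	 * family_id = 06H and model_id = 0CH; since family_id == 06H the
	 * extended model (05H) is prepended, giving DisplayFamily 06H /
	 * DisplayModel 5CH, the signature that is_apl_platform() tests.
	 */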

	cpuid_subleaf(CPUID_EXTEND_FEATURE, 0x0U, &unused,
		&boot_cpu_data.cpuid_leaves[FEAT_7_0_EBX],
		&boot_cpu_data.cpuid_leaves[FEAT_7_0_ECX],
		&boot_cpu_data.cpuid_leaves[FEAT_7_0_EDX]);

	cpuid_subleaf(CPUID_EXTEND_FEATURE, 0x2U, &unused, &unused, &unused,
		&boot_cpu_data.cpuid_leaves[FEAT_7_2_EDX]);

	cpuid_subleaf(CPUID_MAX_EXTENDED_FUNCTION, 0x0U,
		&boot_cpu_data.extended_cpuid_level,
		&unused, &unused, &unused);

	if (boot_cpu_data.extended_cpuid_level >= CPUID_EXTEND_FUNCTION_1) {
		cpuid_subleaf(CPUID_EXTEND_FUNCTION_1, 0x0U, &unused, &unused,
			&boot_cpu_data.cpuid_leaves[FEAT_8000_0001_ECX],
			&boot_cpu_data.cpuid_leaves[FEAT_8000_0001_EDX]);
	}

	if (boot_cpu_data.extended_cpuid_level >= CPUID_EXTEND_INVA_TSC) {
		cpuid_subleaf(CPUID_EXTEND_INVA_TSC, 0x0U, &eax, &unused, &unused,
			&boot_cpu_data.cpuid_leaves[FEAT_8000_0007_EDX]);
	}

	if (boot_cpu_data.extended_cpuid_level >= CPUID_EXTEND_ADDRESS_SIZE) {
		cpuid_subleaf(CPUID_EXTEND_ADDRESS_SIZE, 0x0U, &eax,
			&boot_cpu_data.cpuid_leaves[FEAT_8000_0008_EBX],
			&unused, &unused);

		/* EAX bits 07-00: #Physical Address Bits
		 *     bits 15-08: #Linear Address Bits
		 */
		boot_cpu_data.virt_bits = (uint8_t)((eax >> 8U) & 0xffU);
		boot_cpu_data.phys_bits = (uint8_t)(eax & 0xffU);
		boot_cpu_data.physical_address_mask =
			get_address_mask(boot_cpu_data.phys_bits);
	}

	detect_pcpu_cap();
}

static bool is_ept_supported(void)
{
	return (cpu_caps.ept_features != 0U);
}

static inline bool is_apicv_basic_feature_supported(void)
{
	return ((cpu_caps.apicv_features & APICV_BASIC_FEATURE) == APICV_BASIC_FEATURE);
}

bool is_apicv_advanced_feature_supported(void)
{
	return ((cpu_caps.apicv_features & APICV_ADVANCED_FEATURE) == APICV_ADVANCED_FEATURE);
}

bool pcpu_has_vmx_ept_vpid_cap(uint64_t bit_mask)
{
	return ((cpu_caps.vmx_ept_vpid & bit_mask) != 0U);
}

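/* Read the 48-byte processor brand string, 16 bytes (EAX/EBX/ECX/EDX) per
 * leaf, from CPUID leaves 0x80000002 through 0x80000004.
 */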
void init_pcpu_model_name(void)
{
	cpuid_subleaf(CPUID_EXTEND_FUNCTION_2, 0x0U,
		(uint32_t *)(boot_cpu_data.model_name),
		(uint32_t *)(&boot_cpu_data.model_name[4]),
		(uint32_t *)(&boot_cpu_data.model_name[8]),
		(uint32_t *)(&boot_cpu_data.model_name[12]));
	cpuid_subleaf(CPUID_EXTEND_FUNCTION_3, 0x0U,
		(uint32_t *)(&boot_cpu_data.model_name[16]),
		(uint32_t *)(&boot_cpu_data.model_name[20]),
		(uint32_t *)(&boot_cpu_data.model_name[24]),
		(uint32_t *)(&boot_cpu_data.model_name[28]));
	cpuid_subleaf(CPUID_EXTEND_FUNCTION_4, 0x0U,
		(uint32_t *)(&boot_cpu_data.model_name[32]),
		(uint32_t *)(&boot_cpu_data.model_name[36]),
		(uint32_t *)(&boot_cpu_data.model_name[40]),
		(uint32_t *)(&boot_cpu_data.model_name[44]));

	boot_cpu_data.model_name[48] = '\0';
}

static inline bool is_vmx_disabled(void)
{
	uint64_t msr_val;
	bool ret = false;

	/* Read Feature Control MSR */
	msr_val = msr_read(MSR_IA32_FEATURE_CONTROL);

	/* Check if feature control is locked and VMX cannot be enabled */
	if (((msr_val & MSR_IA32_FEATURE_CONTROL_LOCK) != 0U) &&
		((msr_val & MSR_IA32_FEATURE_CONTROL_VMX_NO_SMX) == 0U)) {
		ret = true;
	}

	return ret;
}

static inline bool pcpu_has_vmx_unrestricted_guest_cap(void)
{
	return ((msr_read(MSR_IA32_VMX_MISC) & MSR_IA32_MISC_UNRESTRICTED_GUEST) != 0UL);
}

static int32_t check_vmx_mmu_cap(void)
{
	int32_t ret = 0;

	if (!pcpu_has_vmx_ept_vpid_cap(VMX_EPT_INVEPT)) {
		printf("%s, invept not supported\n", __func__);
		ret = -ENODEV;
	} else if (!pcpu_has_vmx_ept_vpid_cap(VMX_VPID_INVVPID) ||
		!pcpu_has_vmx_ept_vpid_cap(VMX_VPID_INVVPID_SINGLE_CONTEXT) ||
		!pcpu_has_vmx_ept_vpid_cap(VMX_VPID_INVVPID_GLOBAL_CONTEXT)) {
		printf("%s, invvpid not supported\n", __func__);
		ret = -ENODEV;
	} else if (!pcpu_has_vmx_ept_vpid_cap(VMX_EPT_2MB_PAGE)) {
		printf("%s, EPT does not support 2MB large pages\n", __func__);
		ret = -ENODEV;
	} else {
		/* No other state currently, do nothing */
	}

	return ret;
}

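/* A requested control passes only if it is flexible: the allowed-1 word
 * (bits 63:32) permits setting it, and the allowed-0 word (bits 31:0)
 * does not force it to 1.
 */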
static bool is_vmx_cap_supported(uint32_t msr, uint32_t bits)
{
	uint64_t vmx_msr;
	uint32_t vmx_msr_low, vmx_msr_high;

	vmx_msr = msr_read(msr);
	vmx_msr_low = (uint32_t)vmx_msr;
	vmx_msr_high = (uint32_t)(vmx_msr >> 32U);
	/* Bits 31:0 indicate the allowed 0-settings
	 * Bits 63:32 indicate the allowed 1-settings
	 */
	return (((vmx_msr_high & bits) == bits) && ((vmx_msr_low & bits) == 0U));
}

static int32_t check_essential_vmx_caps(void)
{
	int32_t ret = 0;
	uint32_t i;

	if (check_vmx_mmu_cap() != 0) {
		ret = -ENODEV;
	} else if (!pcpu_has_vmx_unrestricted_guest_cap()) {
		printf("%s, unrestricted guest not supported\n", __func__);
		ret = -ENODEV;
	} else if (pcpu_vmx_set_32bit_addr_width()) {
		printf("%s, only Intel 64 architecture is supported\n", __func__);
		ret = -ENODEV;
	} else {
		for (i = 0U; i < ARRAY_SIZE(vmx_caps); i++) {
			if (!is_vmx_cap_supported(vmx_caps[i].msr, vmx_caps[i].bits)) {
				printf("%s, check MSR[0x%x]:0x%lx bits:0x%x failed\n", __func__,
					vmx_caps[i].msr, msr_read(vmx_caps[i].msr), vmx_caps[i].bits);
				ret = -ENODEV;
				break;
			}
		}
	}

	return ret;
}

/*
 * Basic hardware capability check.
 * Features/capabilities that the hypervisor must support should be
 * supplemented here as requirements grow.
 */
int32_t detect_hardware_support(void)
{
	int32_t ret;

	/* Long Mode (x86-64, 64-bit support) */
	if (!pcpu_has_cap(X86_FEATURE_LM)) {
		printf("%s, LM not supported\n", __func__);
		ret = -ENODEV;
	} else if ((boot_cpu_data.phys_bits == 0U) ||
		(boot_cpu_data.virt_bits == 0U)) {
		printf("%s, can't detect Linear/Physical Address size\n", __func__);
		ret = -ENODEV;
	} else if (boot_cpu_data.phys_bits > MAXIMUM_PA_WIDTH) {
		printf("%s, physical-address width (%d) over maximum physical-address width (%d)\n",
			__func__, boot_cpu_data.phys_bits, MAXIMUM_PA_WIDTH);
		ret = -ENODEV;
	} else if ((boot_cpu_data.phys_bits > 39U) && (!pcpu_has_cap(X86_FEATURE_PAGE1GB) ||
		!pcpu_has_vmx_ept_vpid_cap(VMX_EPT_1GB_PAGE))) {
		printf("%s, physical-address width %d over 39 bits must support 1GB large page\n",
			__func__, boot_cpu_data.phys_bits);
		ret = -ENODEV;
	} else if (!pcpu_has_cap(X86_FEATURE_INVA_TSC)) {
		/* check invariant TSC */
		printf("%s, invariant TSC not supported\n", __func__);
		ret = -ENODEV;
	} else if (!pcpu_has_cap(X86_FEATURE_TSC_DEADLINE)) {
		/* LAPIC TSC deadline timer */
		printf("%s, TSC deadline not supported\n", __func__);
		ret = -ENODEV;
	} else if (!pcpu_has_cap(X86_FEATURE_NX)) {
		/* Execute Disable */
		printf("%s, NX not supported\n", __func__);
		ret = -ENODEV;
	} else if (!pcpu_has_cap(X86_FEATURE_SMEP)) {
		/* Supervisor-Mode Execution Prevention */
		printf("%s, SMEP not supported\n", __func__);
		ret = -ENODEV;
	} else if (!pcpu_has_cap(X86_FEATURE_SMAP)) {
		/* Supervisor-Mode Access Prevention */
		printf("%s, SMAP not supported\n", __func__);
		ret = -ENODEV;
	} else if (!pcpu_has_cap(X86_FEATURE_MTRR)) {
		printf("%s, MTRR not supported\n", __func__);
		ret = -ENODEV;
	} else if (!pcpu_has_cap(X86_FEATURE_CLFLUSHOPT)) {
		printf("%s, CLFLUSHOPT not supported\n", __func__);
		ret = -ENODEV;
	} else if (!pcpu_has_cap(X86_FEATURE_VMX)) {
		printf("%s, vmx not supported\n", __func__);
		ret = -ENODEV;
	} else if (!is_fast_string_erms_supported_and_enabled()) {
		ret = -ENODEV;
	} else if (!is_ept_supported()) {
		printf("%s, EPT not supported\n", __func__);
		ret = -ENODEV;
	} else if (!is_apicv_basic_feature_supported()) {
		printf("%s, APICV not supported\n", __func__);
		ret = -ENODEV;
	} else if (boot_cpu_data.cpuid_level < 0x15U) {
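		/* CPUID leaf 15H (Time Stamp Counter and Nominal Core Crystal
		 * Clock Information) must be available, presumably for TSC
		 * frequency enumeration.
		 */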
		printf("%s, required CPU feature not supported\n", __func__);
		ret = -ENODEV;
	} else if (is_vmx_disabled()) {
		printf("%s, VMX can not be enabled\n", __func__);
		ret = -ENODEV;
	} else if (!pcpu_has_cap(X86_FEATURE_X2APIC)) {
		printf("%s, x2APIC not supported\n", __func__);
		ret = -ENODEV;
	} else if (!pcpu_has_cap(X86_FEATURE_POPCNT)) {
		printf("%s, popcnt instruction not supported\n", __func__);
		ret = -ENODEV;
	} else if (!pcpu_has_cap(X86_FEATURE_SSE)) {
		printf("%s, SSE not supported\n", __func__);
		ret = -ENODEV;
	} else if (!pcpu_has_cap(X86_FEATURE_RDRAND)) {
		printf("%s, RDRAND is not supported\n", __func__);
		ret = -ENODEV;
	} else {
		ret = check_essential_vmx_caps();
	}

	return ret;
}

struct cpuinfo_x86 *get_pcpu_info(void)
{
	return &boot_cpu_data;
}