// SPDX-License-Identifier: GPL-2.0-only
/*
 * vmx_apic_access_test
 *
 * Copyright (C) 2020, Google LLC.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * The first subtest simply checks to see that an L2 guest can be
 * launched with a valid APIC-access address that is backed by a
 * page of L1 physical memory.
 *
 * The second subtest sets the APIC-access address to a (valid) L1
 * physical address that is not backed by memory. KVM can't handle
 * this situation, so resuming L2 should result in a KVM exit for
 * internal error (emulation). This is not an architectural
 * requirement. It is just a shortcoming of KVM. The internal error
 * is unfortunate, but it's better than what used to happen!
 */

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#include <string.h>
#include <sys/ioctl.h>

#include "kselftest.h"
30
/* L2 guest: do nothing but hand control straight back to L1. */
static void l2_guest_code(void)
{
	/* VMCALL unconditionally triggers a VM-exit to L1. */
	__asm__ __volatile__("vmcall");
}
36
l1_guest_code(struct vmx_pages * vmx_pages,unsigned long high_gpa)37 static void l1_guest_code(struct vmx_pages *vmx_pages, unsigned long high_gpa)
38 {
39 #define L2_GUEST_STACK_SIZE 64
40 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
41 uint32_t control;
42
43 GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
44 GUEST_ASSERT(load_vmcs(vmx_pages));
45
46 /* Prepare the VMCS for L2 execution. */
47 prepare_vmcs(vmx_pages, l2_guest_code,
48 &l2_guest_stack[L2_GUEST_STACK_SIZE]);
49 control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
50 control |= CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
51 vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
52 control = vmreadz(SECONDARY_VM_EXEC_CONTROL);
53 control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
54 vmwrite(SECONDARY_VM_EXEC_CONTROL, control);
55 vmwrite(APIC_ACCESS_ADDR, vmx_pages->apic_access_gpa);
56
57 /* Try to launch L2 with the memory-backed APIC-access address. */
58 GUEST_SYNC(vmreadz(APIC_ACCESS_ADDR));
59 GUEST_ASSERT(!vmlaunch());
60 GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
61
62 vmwrite(APIC_ACCESS_ADDR, high_gpa);
63
64 /* Try to resume L2 with the unbacked APIC-access address. */
65 GUEST_SYNC(vmreadz(APIC_ACCESS_ADDR));
66 GUEST_ASSERT(!vmresume());
67 GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
68
69 GUEST_DONE();
70 }
71
main(int argc,char * argv[])72 int main(int argc, char *argv[])
73 {
74 unsigned long apic_access_addr = ~0ul;
75 vm_vaddr_t vmx_pages_gva;
76 unsigned long high_gpa;
77 struct vmx_pages *vmx;
78 bool done = false;
79
80 struct kvm_vcpu *vcpu;
81 struct kvm_vm *vm;
82
83 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
84
85 vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
86
87 high_gpa = (vm->max_gfn - 1) << vm->page_shift;
88
89 vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
90 prepare_virtualize_apic_accesses(vmx, vm);
91 vcpu_args_set(vcpu, 2, vmx_pages_gva, high_gpa);
92
93 while (!done) {
94 volatile struct kvm_run *run = vcpu->run;
95 struct ucall uc;
96
97 vcpu_run(vcpu);
98 if (apic_access_addr == high_gpa) {
99 TEST_ASSERT(run->exit_reason ==
100 KVM_EXIT_INTERNAL_ERROR,
101 "Got exit reason other than KVM_EXIT_INTERNAL_ERROR: %u (%s)\n",
102 run->exit_reason,
103 exit_reason_str(run->exit_reason));
104 TEST_ASSERT(run->internal.suberror ==
105 KVM_INTERNAL_ERROR_EMULATION,
106 "Got internal suberror other than KVM_INTERNAL_ERROR_EMULATION: %u\n",
107 run->internal.suberror);
108 break;
109 }
110 TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
111 "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
112 run->exit_reason,
113 exit_reason_str(run->exit_reason));
114
115 switch (get_ucall(vcpu, &uc)) {
116 case UCALL_ABORT:
117 REPORT_GUEST_ASSERT(uc);
118 /* NOT REACHED */
119 case UCALL_SYNC:
120 apic_access_addr = uc.args[1];
121 break;
122 case UCALL_DONE:
123 done = true;
124 break;
125 default:
126 TEST_ASSERT(false, "Unknown ucall %lu", uc.cmd);
127 }
128 }
129 kvm_vm_free(vm);
130 return 0;
131 }
132