// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2024 Advanced Micro Devices, Inc.
 */
#include <linux/atomic.h>

#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"
#include "vmx.h"
#include "test_util.h"

#define NR_BUS_LOCKS_PER_LEVEL 100
#define CACHE_LINE_SIZE 64

/*
 * To generate a bus lock, carve out a buffer that precisely occupies two cache
 * lines and perform an atomic access that splits the two lines.
 */
static u8 buffer[CACHE_LINE_SIZE * 2] __aligned(CACHE_LINE_SIZE);
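/*
 * Point 'val' at the last sizeof(*val)/2 bytes of the first cache line so
 * that half of the atomic sits in each line, making every locked access to
 * it a split-lock (bus lock) access.
 */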
static atomic_t *val = (void *)&buffer[CACHE_LINE_SIZE - (sizeof(*val) / 2)];

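/* Run at both L1 and L2; each level contributes NR_BUS_LOCKS_PER_LEVEL locks. */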
static void guest_generate_buslocks(void)
{
	for (int i = 0; i < NR_BUS_LOCKS_PER_LEVEL; i++)
		atomic_inc(val);
}

#define L2_GUEST_STACK_SIZE 64

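/* L2 generates its own batch of bus locks, then tells the host it's done. */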
static void l2_guest_code(void)
{
	guest_generate_buslocks();
	GUEST_DONE();
}

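/*
 * As L1 on an SVM host, VMRUN into L2 so the test can verify that bus locks
 * in a nested guest are still reported to the (L0) host.
 */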
static void l1_svm_code(struct svm_test_data *svm)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	struct vmcb *vmcb = svm->vmcb;

	generic_svm_setup(svm, l2_guest_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	run_guest(vmcb, svm->vmcb_gpa);
}

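/* VMX flavor of the above: set up a minimal VMCS and VMLAUNCH into L2. */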
static void l1_vmx_code(struct vmx_pages *vmx)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	GUEST_ASSERT_EQ(prepare_for_vmx_operation(vmx), true);
	GUEST_ASSERT_EQ(load_vmcs(vmx), true);

	prepare_vmcs(vmx, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_ASSERT(!vmwrite(GUEST_RIP, (u64)l2_guest_code));
	GUEST_ASSERT(!vmlaunch());
}

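/*
 * L1 entry point: generate bus locks directly, then hand off to L2 if nested
 * virtualization is available.  Either L2 or the !nested path signals 'done',
 * so the TEST_FAIL below should be unreachable.
 */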
static void guest_code(void *test_data)
{
	guest_generate_buslocks();

	if (this_cpu_has(X86_FEATURE_SVM))
		l1_svm_code(test_data);
	else if (this_cpu_has(X86_FEATURE_VMX))
		l1_vmx_code(test_data);
	else
		GUEST_DONE();

	TEST_FAIL("L2 should have signaled 'done'");
}

int main(int argc, char *argv[])
{
	const bool has_nested = kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX);
	vm_vaddr_t nested_test_data_gva;
	struct kvm_vcpu *vcpu;
	struct kvm_run *run;
	struct kvm_vm *vm;
	int i, bus_locks = 0;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_BUS_LOCK_EXIT));

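	/* Create the VM and ask KVM to exit to userspace on every guest bus lock. */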
	vm = vm_create(1);
	vm_enable_cap(vm, KVM_CAP_X86_BUS_LOCK_EXIT, KVM_BUS_LOCK_DETECTION_EXIT);
	vcpu = vm_vcpu_add(vm, 0, guest_code);

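	/*
	 * Allocate nested test data unconditionally; if neither SVM nor VMX is
	 * supported, the guest simply never touches it.
	 */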
	if (kvm_cpu_has(X86_FEATURE_SVM))
		vcpu_alloc_svm(vm, &nested_test_data_gva);
	else
		vcpu_alloc_vmx(vm, &nested_test_data_gva);

	vcpu_args_set(vcpu, 1, nested_test_data_gva);

	run = vcpu->run;

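	/*
	 * Expect NR_BUS_LOCKS_PER_LEVEL bus lock exits from L1 and, if nested
	 * virt is available, another NR_BUS_LOCKS_PER_LEVEL from L2, plus one
	 * final exit for the guest's UCALL_DONE (hence '<=').
	 */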
	for (i = 0; i <= NR_BUS_LOCKS_PER_LEVEL * (1 + has_nested); i++) {
		struct ucall uc;

		vcpu_run(vcpu);

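		/*
		 * Ucalls (GUEST_ASSERT, GUEST_DONE) arrive as I/O exits; any
		 * other exit is expected to be a bus lock.
		 */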
		if (run->exit_reason == KVM_EXIT_IO) {
			switch (get_ucall(vcpu, &uc)) {
			case UCALL_ABORT:
				REPORT_GUEST_ASSERT(uc);
				goto done;
			case UCALL_SYNC:
				continue;
			case UCALL_DONE:
				goto done;
			default:
				TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
			}
		}

		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_X86_BUS_LOCK);

		/*
		 * Verify the counter is actually getting incremented, e.g. that
		 * KVM isn't skipping the instruction. On Intel, the exit is
		 * trap-like, i.e. the counter should already have been
		 * incremented. On AMD, it's fault-like, i.e. the counter will
		 * be incremented when the guest re-executes the instruction.
		 */
		sync_global_from_guest(vm, *val);
		TEST_ASSERT_EQ(atomic_read(val), bus_locks + host_cpu_is_intel);

		bus_locks++;
	}
130 TEST_FAIL("Didn't receive UCALL_DONE, took %u bus lock exits\n", bus_locks);
131 done:
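	/* Every exit prior to UCALL_DONE should have been a bus lock exit. */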
	TEST_ASSERT_EQ(i, bus_locks);
	kvm_vm_free(vm);
	return 0;
}