/*
 * Copyright 2024 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/arch/mm.h"

#include "hf/check.h"
#include "hf/hf_ipi.h"
#include "hf/mm.h"
}

#include <map>

#include "mm_test.hh"

namespace
{
using namespace ::std::placeholders;
using ::testing::AllOf;
using ::testing::Each;
using ::testing::SizeIs;
using struct_vm = struct vm;
using struct_vcpu = struct vcpu;
using struct_vm_locked = struct vm_locked;

/**
 * IPI tests to check that sent IPIs are correctly recorded as pending.
 */

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 64;
class ipi : public ::testing::Test
{
       protected:
	static std::unique_ptr<uint8_t[]> test_heap;
	struct mpool ppool;
	struct_vm *test_vm[4];
	void SetUp() override
	{
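		/*
		 * The test heap is static, so the pool and test VMs are only
		 * set up for the first test; later tests in this fixture
		 * reuse the vCPU state left behind by earlier ones.
		 */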
		if (test_heap) {
			return;
		}
		test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
		mpool_init(&ppool, sizeof(struct mm_page_table));
		mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
		for (size_t i = 0; i < std::size(test_vm); i++) {
			test_vm[i] = vm_init(i + HF_VM_ID_OFFSET, MAX_CPUS,
					     &ppool, false, 0);
		}

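		/*
		 * Give each test VM one vCPU per physical CPU and place the
		 * four VMs in the RUNNING, WAITING, BLOCKED and PREEMPTED
		 * states respectively, with the IPI virtual interrupt
		 * enabled, so the tests can cover every scheduling state.
		 */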
		for (size_t i = 0; i < MAX_CPUS; i++) {
			struct vcpu *running_vcpu = vm_get_vcpu(test_vm[0], i);
			struct vcpu *waiting_vcpu = vm_get_vcpu(test_vm[1], i);
			struct vcpu *blocked_vcpu = vm_get_vcpu(test_vm[2], i);
			struct vcpu *preempted_vcpu =
				vm_get_vcpu(test_vm[3], i);
			struct vcpu_locked running_locked =
				vcpu_lock(running_vcpu);
			struct vcpu_locked waiting_locked =
				vcpu_lock(waiting_vcpu);
			struct vcpu_locked blocked_locked =
				vcpu_lock(blocked_vcpu);
			struct vcpu_locked preempted_locked =
				vcpu_lock(preempted_vcpu);

			struct cpu *cpu = cpu_find_index(i);

			running_vcpu->cpu = cpu;
			running_vcpu->state = VCPU_STATE_RUNNING;
			vcpu_virt_interrupt_enable(running_locked, HF_IPI_INTID,
						   true);

			waiting_vcpu->cpu = cpu;
			waiting_vcpu->state = VCPU_STATE_WAITING;
			vcpu_virt_interrupt_enable(waiting_locked, HF_IPI_INTID,
						   true);

			blocked_vcpu->cpu = cpu;
			blocked_vcpu->state = VCPU_STATE_BLOCKED;
			vcpu_virt_interrupt_enable(blocked_locked, HF_IPI_INTID,
						   true);

			preempted_vcpu->cpu = cpu;
			preempted_vcpu->state = VCPU_STATE_PREEMPTED;
			vcpu_virt_interrupt_enable(preempted_locked,
						   HF_IPI_INTID, true);

			list_init(&cpu->pending_ipis);

			vcpu_unlock(&running_locked);
			vcpu_unlock(&waiting_locked);
			vcpu_unlock(&blocked_locked);
			vcpu_unlock(&preempted_locked);
		}
	}
};

std::unique_ptr<uint8_t[]> ipi::test_heap;

/**
 * Check that when an IPI is sent to vCPU0, vCPU0 is
 * stored as the pending target_vcpu within the IPI framework.
 *
 * This test also sets the vCPUs of the first test VM (test_vm[0]) to the
 * running state on every CPU; later tests rely on this.
 */
TEST_F(ipi, one_service_to_one_cpu)
{
	struct_vm *current_vm = ipi::test_vm[0];
	ffa_vcpu_count_t vcpu_count = current_vm->vcpu_count;

	CHECK(vcpu_count == MAX_CPUS);

	for (size_t i = 0; i < MAX_CPUS; i++) {
		struct vcpu *vcpu = vm_get_vcpu(current_vm, i);
		struct cpu *cpu = cpu_find_index(i);
		vcpu->cpu = cpu;
		vcpu->state = VCPU_STATE_RUNNING;
		list_init(&cpu->pending_ipis);
	}

	hf_ipi_send_interrupt(current_vm, 0);

	/* Check vCPU0 is stored as having a pending interrupt on CPU 0. */
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(current_vm, 0)),
		  vm_get_vcpu(current_vm, 0));
	/* Check that there are no longer pending interrupts on CPU 0. */
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(current_vm, 0)),
		  (struct vcpu *)NULL);
}

/**
 * Check that when one service sends IPIs to different target vCPUs they are
 * stored under the correct CPUs.
 */
TEST_F(ipi, one_service_to_different_cpus)
{
	struct_vm *current_vm = ipi::test_vm[0];
	ffa_vcpu_count_t vcpu_count = current_vm->vcpu_count;

	CHECK(vcpu_count >= 2);

	hf_ipi_send_interrupt(current_vm, 0);
	hf_ipi_send_interrupt(current_vm, 1);

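	/* Each CPU should report its own vCPU as the pending IPI target. */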
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(current_vm, 0)),
		  vm_get_vcpu(current_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(current_vm, 1)),
		  vm_get_vcpu(current_vm, 1));
}

/**
 * Multiple services target IPIs at CPUs 0, 1, 2 and 3 respectively.
 */
TEST_F(ipi, multiple_services_to_different_cpus)
{
	struct_vm *running_vm = ipi::test_vm[0];
	struct_vm *waiting_vm = ipi::test_vm[1];
	struct_vm *blocked_vm = ipi::test_vm[2];
	struct_vm *preempted_vm = ipi::test_vm[3];

	hf_ipi_send_interrupt(running_vm, 0);
	hf_ipi_send_interrupt(waiting_vm, 1);
	hf_ipi_send_interrupt(blocked_vm, 2);
	hf_ipi_send_interrupt(preempted_vm, 3);

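	/*
	 * Query each CPU (via the running VM's vCPU on that CPU) and check
	 * it reports the vCPU of the service that targeted it.
	 */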
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(running_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 1)),
		  vm_get_vcpu(waiting_vm, 1));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 2)),
		  vm_get_vcpu(blocked_vm, 2));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 3)),
		  vm_get_vcpu(preempted_vm, 3));
}

/**
 * Multiple services target IPIs at CPU0 and all of them are pending.
 */
TEST_F(ipi, multiple_services_to_same_cpu)
{
	struct_vm *running_vm = ipi::test_vm[0];
	struct_vm *waiting_vm = ipi::test_vm[1];
	struct_vm *blocked_vm = ipi::test_vm[2];
	struct_vm *preempted_vm = ipi::test_vm[3];

	hf_ipi_send_interrupt(running_vm, 0);
	hf_ipi_send_interrupt(waiting_vm, 0);
	hf_ipi_send_interrupt(blocked_vm, 0);
	hf_ipi_send_interrupt(preempted_vm, 0);

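	/*
	 * The pending vCPUs are returned in the order the IPIs were sent,
	 * and NULL once the list has been drained.
	 */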
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(running_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(waiting_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(blocked_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(preempted_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  (struct vcpu *)NULL);
}

/**
 * Check that if the same service sends an IPI to the same target_vcpu
 * multiple times it is only added to the list once and does not create
 * loops in the list.
 */
TEST_F(ipi, multiple_services_to_same_cpu_multiple_sends)
{
	struct_vm *running_vm = ipi::test_vm[0];
	struct_vm *waiting_vm = ipi::test_vm[1];

	hf_ipi_send_interrupt(running_vm, 0);
	hf_ipi_send_interrupt(waiting_vm, 0);
	hf_ipi_send_interrupt(running_vm, 0);

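	/*
	 * The repeated send from running_vm must not add a second entry:
	 * each vCPU appears in the pending list at most once.
	 */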
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(running_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(waiting_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  (struct vcpu *)NULL);
}

/**
 * Multiple services target IPIs at CPU0; all are pending and the vCPU in the
 * running state is returned first.
 */
TEST_F(ipi, multiple_services_to_same_cpu_running_prioritized)
{
	struct_vm *running_vm = ipi::test_vm[0];
	struct_vm *waiting_vm = ipi::test_vm[1];
	struct_vm *blocked_vm = ipi::test_vm[2];
	struct_vm *preempted_vm = ipi::test_vm[3];

	hf_ipi_send_interrupt(waiting_vm, 0);
	hf_ipi_send_interrupt(blocked_vm, 0);
	hf_ipi_send_interrupt(preempted_vm, 0);
	hf_ipi_send_interrupt(running_vm, 0);

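	/*
	 * Although the running vCPU's IPI was sent last it is returned
	 * first; the remaining vCPUs follow in send order.
	 */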
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(running_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(waiting_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(blocked_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  vm_get_vcpu(preempted_vm, 0));
	EXPECT_EQ(hf_ipi_get_pending_target_vcpu(vm_get_vcpu(running_vm, 0)),
		  (struct vcpu *)NULL);
}

/**
 * Multiple services target IPIs at every CPU and the full handling flow is
 * exercised: the running vCPU is returned first, the SRI is delayed, the IPI
 * becomes pending and enabled on every target vCPU, and
 * FFA_NOTIFICATION_INFO_GET only reports the vCPUs of the waiting service.
 */
TEST_F(ipi, multiple_services_to_same_cpu_full_handle)
{
	struct_vm *running_vm = ipi::test_vm[0];
	struct_vm *waiting_vm = ipi::test_vm[1];
	struct_vm *blocked_vm = ipi::test_vm[2];
	struct_vm *preempted_vm = ipi::test_vm[3];

	struct vcpu *top_priority_vcpu;
	struct vcpu_locked vcpu_locked;
	constexpr size_t test_service_count = 4;
	struct_vm *test_service[test_service_count] = {
		waiting_vm, blocked_vm, preempted_vm, running_vm};

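	/* Send an IPI from every test service to every CPU. */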
	for (size_t i = 0; i < test_service_count; i++) {
		for (size_t j = 0; j < MAX_CPUS; j++) {
			hf_ipi_send_interrupt(test_service[i], j);
		}
	}

	/* Handle the IPI on all CPUs and do some initial checks. */
	for (size_t i = 0; i < MAX_CPUS; i++) {
		top_priority_vcpu = hf_ipi_get_pending_target_vcpu(
			vm_get_vcpu(running_vm, i));
		vcpu_locked = vcpu_lock(top_priority_vcpu);
		/*
		 * Check running service is returned as the top priority vCPU.
		 */
		EXPECT_EQ(top_priority_vcpu, vm_get_vcpu(running_vm, i));
		/* Run the IPI handler on this CPU. */
		hf_ipi_handle(vcpu_locked);
		/*
		 * Since there is a running vCPU with a pending IPI when
		 * handling the WAITING vCPU we should have set the SRI to be
		 * delayed. Check this is the case.
		 */
		EXPECT_TRUE(top_priority_vcpu->cpu->is_sri_delayed);
		vcpu_unlock(&vcpu_locked);
	}

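	/*
	 * For each service check the FF-A notification info and that the
	 * IPI is pending and enabled on each of its vCPUs.
	 */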
	for (size_t i = 0; i < test_service_count; i++) {
		struct vm_locked vm_locked = vm_lock(test_service[i]);
		uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
		uint32_t ids_count = 0;
		uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
		uint32_t lists_count = 0;
		enum notifications_info_get_state current_state = INIT;
		const bool is_from_vm = false;
		/*
		 * Check response of FFA_NOTIFICATION_INFO_GET. The ID should
		 * only be returned if the service is in the waiting state.
		 */
		vm_notifications_info_get_pending(
			vm_locked, is_from_vm, ids, &ids_count, lists_sizes,
			&lists_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
			&current_state);
		/*
		 * In this test setup all vCPUs of a service are in the same
		 * state.
		 */
		if (vm_get_vcpu(test_service[i], 0)->state ==
		    VCPU_STATE_WAITING) {
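			/*
			 * The waiting service has a pending IPI on all four
			 * vCPUs. FFA_NOTIFICATION_INFO_GET packs these as two
			 * lists, each led by the VM ID and holding at most
			 * three vCPU IDs: {id, 0, 1, 2} and {id, 3}.
			 */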
			EXPECT_EQ(ids_count, 6);
			EXPECT_EQ(lists_count, 2);
			EXPECT_EQ(lists_sizes[0], 3);
			EXPECT_EQ(lists_sizes[1], 1);
			EXPECT_EQ(ids[0], test_service[i]->id);
			EXPECT_EQ(ids[1], 0);
			EXPECT_EQ(ids[2], 1);
			EXPECT_EQ(ids[3], 2);
			EXPECT_EQ(ids[4], test_service[i]->id);
			EXPECT_EQ(ids[5], 3);
		} else {
			EXPECT_EQ(ids_count, 0);
			EXPECT_EQ(lists_count, 0);
		}

		for (size_t j = 0; j < MAX_CPUS; j++) {
			/* Check the IPI interrupt is pending and enabled. */
			struct vcpu *vcpu = vm_get_vcpu(test_service[i], j);
			vcpu_locked = vcpu_lock(vcpu);
			EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(
					  vcpu_locked),
				  HF_IPI_INTID);
			vcpu_unlock(&vcpu_locked);
		}
		vm_unlock(&vm_locked);
	}

	for (size_t i = 0; i < MAX_CPUS; i++) {
		/* Check that there are no more vCPUs with pending IPIs. */
		EXPECT_EQ(hf_ipi_get_pending_target_vcpu(
				  vm_get_vcpu(running_vm, i)),
			  (struct vcpu *)NULL);
	}
}
} /* namespace */