/*
 * Copyright 2025 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/arch/mm.h"

#include "hf/check.h"
#include "hf/vcpu.h"
#include "hf/vm.h"
}

#include <map>

#include "mm_test.hh"

namespace
{
using namespace ::std::placeholders;
using ::testing::AllOf;
using ::testing::Each;
using ::testing::SizeIs;
using struct_vm = struct vm;
using struct_vcpu = struct vcpu;
using struct_vm_locked = struct vm_locked;

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 64;
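
/*
 * Test fixture that creates a VM, locks its first vCPU and enables two
 * virtual interrupt IDs for use by the tests below.
 */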
class vcpu : public ::testing::Test
{
       protected:
	static std::unique_ptr<uint8_t[]> test_heap;
	static struct mpool ppool;
	const uint32_t first_intid = HF_NUM_INTIDS - 2;
	const uint32_t second_intid = HF_NUM_INTIDS - 1;
	struct_vm *test_vm;
	struct_vcpu *test_vcpu;
	struct vcpu_locked vcpu_locked;
	struct interrupts *interrupts;

	void SetUp() override
	{
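		/*
		 * Allocate the test heap and initialise the page pool only
		 * once; the static test_heap and ppool members are shared by
		 * every test in the fixture.
		 */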
		if (!test_heap) {
			test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
			mpool_init(&ppool, sizeof(struct mm_page_table));
			mpool_add_chunk(&ppool, test_heap.get(),
					TEST_HEAP_SIZE);
		}

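		/*
		 * Create the test VM, lock its first vCPU and keep a pointer
		 * to that vCPU's interrupt state for use by the tests.
		 */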
		test_vm = vm_init(HF_VM_ID_OFFSET, 8, &ppool, false, 0);
		test_vcpu = vm_get_vcpu(test_vm, 0);
		vcpu_locked = vcpu_lock(test_vcpu);
		interrupts = &test_vcpu->interrupts;

		/* Enable the interrupts used in testing. */
		vcpu_virt_interrupt_enable(vcpu_locked, first_intid, true);
		vcpu_virt_interrupt_enable(vcpu_locked, second_intid, true);
	}

	void TearDown() override
	{
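		/* Release the vCPU lock taken in SetUp. */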
		vcpu_unlock(&vcpu_locked);
	}
};

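/* Storage for the fixture's shared test heap and page pool. */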
std::unique_ptr<uint8_t[]> vcpu::test_heap;
struct mpool vcpu::ppool;

/**
 * Check that interrupts that are set pending can later be fetched from the
 * queue.
 */
TEST_F(vcpu, pending_interrupts_are_fetched)
{
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);

	/* Pend the interrupts, and check the count is incremented. */
	vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
	vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 2);

	/*
	 * Check the pended interrupts are correctly returned, and once both
	 * have been returned the invalid intid is given to show there are no
	 * more pending interrupts.
	 */
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  first_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);

	/*
	 * Check, having been fetched, the interrupts are no longer marked as
	 * pending in the bitmap, and the interrupt count is 0.
	 */
	EXPECT_FALSE(vcpu_is_virt_interrupt_pending(interrupts, first_intid));
	EXPECT_FALSE(vcpu_is_virt_interrupt_pending(interrupts, second_intid));
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);

	/*
	 * Check that the expected behaviour repeats on a consecutive run.
	 * Invert the order of the interrupts to add some variation.
	 */
	vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
	vcpu_virt_interrupt_inject(vcpu_locked, first_intid);

	EXPECT_TRUE(vcpu_is_virt_interrupt_pending(interrupts, second_intid));
	EXPECT_TRUE(vcpu_is_virt_interrupt_pending(interrupts, first_intid));
	EXPECT_EQ(vcpu_virt_interrupt_irq_count_get(vcpu_locked), 2);

	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  first_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);

	EXPECT_FALSE(vcpu_is_virt_interrupt_pending(interrupts, second_intid));
	EXPECT_FALSE(vcpu_is_virt_interrupt_pending(interrupts, first_intid));
	EXPECT_EQ(vcpu_virt_interrupt_irq_count_get(vcpu_locked), 0);
}

/*
 * Check that a disabled interrupt will not be returned until it is
 * enabled.
 */
TEST_F(vcpu, pending_interrupts_not_enabled_are_not_returned)
{
	/*
	 * Pend the interrupts, check the count is incremented, the pending
	 * interrupts are returned correctly and this causes the count to
	 * return to 0.
	 */
	vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
	vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 2);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  first_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);

	/* Again pend the interrupts. */
	vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
	vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 2);

	/* Disable the first interrupt. */
	vcpu_virt_interrupt_enable(vcpu_locked, first_intid, false);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);

	/*
	 * Check that the disabled first interrupt is not returned: the
	 * second intid should be returned, and then the invalid intid to
	 * show there are no more pending and enabled interrupts.
	 */
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);

	/* Re-enable the first interrupt and disable the second interrupt. */
	vcpu_virt_interrupt_enable(vcpu_locked, first_intid, true);
	vcpu_virt_interrupt_enable(vcpu_locked, second_intid, false);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);

	/*
	 * Check that an interrupt injected while it is disabled will
	 * eventually be returned once the interrupt is enabled.
	 */
	vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);

	/*
	 * Check that the re-enabled first interrupt is now returned as a
	 * pending interrupt and is the only interrupt pending.
	 */
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  first_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);

	/* Enable the second interrupt to check it will now be returned. */
	vcpu_virt_interrupt_enable(vcpu_locked, second_intid, true);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);

	/*
	 * Check that it is now returned as a pending interrupt and is the only
	 * interrupt pending.
	 */
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);
}

/**
 * Check that interrupts can be injected and fetched repeatedly, cycling
 * through the queue more times than its capacity.
 */
TEST_F(vcpu, injecting_getting_interrupts_multiple_times)
{
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);

	/* Pend the interrupts for use in the loop below. */
	vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
	vcpu_virt_interrupt_inject(vcpu_locked, second_intid);

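	/*
	 * Fetch and re-inject the interrupts repeatedly, iterating more times
	 * than the queue capacity to check that wrap-around is handled.
	 */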
	for (uint32_t i = 0; i < VINT_QUEUE_MAX * 3; i++) {
		uint32_t it_intid = vcpu_virt_interrupt_get_pending_and_enabled(
			vcpu_locked);
		uint32_t peek_intid =
			vcpu_virt_interrupt_peek_pending_and_enabled(
				vcpu_locked);

		EXPECT_NE(it_intid, HF_INVALID_INTID);
		/*
		 * Sequence to validate the `first_intid` and `second_intid`
		 * are retrieved and left pending as expected.
		 */
		if (i % 2 == 0) {
			EXPECT_EQ(it_intid, first_intid);
			EXPECT_EQ(peek_intid, second_intid);
		} else {
			EXPECT_EQ(it_intid, second_intid);
			EXPECT_EQ(peek_intid, first_intid);
		}
		EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);
		vcpu_virt_interrupt_inject(vcpu_locked, it_intid);
		EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 2);
	}

	EXPECT_NE(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);
	EXPECT_NE(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);
}

/*
 * Test that each interrupt ID is only set pending once and that the count is
 * only incremented once per ID.
 */
TEST_F(vcpu, pending_interrupt_is_only_added_once)
{
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);

	/* Pend the interrupt, and check the count is incremented. */
	vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);

	/*
	 * Inject the same interrupt again; the count should not be
	 * incremented.
	 */
	vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);

	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  first_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);
}

/* Check that cleared interrupts are no longer marked pending. */
TEST_F(vcpu, pending_interrupts_can_be_cleared)
{
	/*
	 * Pend the interrupts, check the count is incremented, the pending
	 * interrupts are returned correctly and this causes the count to
	 * return to 0.
	 */
	vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
	vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 2);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  first_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);

	/* Again pend the interrupts. */
	vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
	vcpu_virt_interrupt_inject(vcpu_locked, second_intid);

	/* Remove the first interrupt. */
	vcpu_virt_interrupt_clear(vcpu_locked, first_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);

	/*
	 * Check that the first interrupt is cleared.
	 * The second intid should be returned and then the invalid
	 * intid to show there are no more pending and enabled interrupts.
	 */
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);

	/* Inject the interrupts again. */
	vcpu_virt_interrupt_inject(vcpu_locked, first_intid);
	vcpu_virt_interrupt_inject(vcpu_locked, second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 2);

	/* Remove the second interrupt. */
	vcpu_virt_interrupt_clear(vcpu_locked, second_intid);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);

	/*
	 * Check that the first interrupt is now returned as a pending
	 * interrupt and is the only interrupt pending.
	 */
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  first_intid);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_INVALID_INTID);
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);
}

/*
 * Check that when an interrupt is cleared, space is created for a new
 * interrupt to be injected. In addition, check that after clearing interrupts
 * the FIFO ordering of the remaining interrupts is maintained.
 */
TEST_F(vcpu, pending_interrupts_clear_full_list)
{
	/* Fill the interrupt queue for the vCPU. */
	for (int i = 0; i < VINT_QUEUE_MAX; i++) {
		vcpu_virt_interrupt_enable(vcpu_locked, i, true);
		vcpu_virt_interrupt_inject(vcpu_locked, i);
	}

	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), VINT_QUEUE_MAX);

	/* Check clearing an interrupt makes space for another interrupt. */
	vcpu_virt_interrupt_clear(vcpu_locked, 2);

	vcpu_virt_interrupt_enable(vcpu_locked, VINT_QUEUE_MAX, true);
	vcpu_virt_interrupt_inject(vcpu_locked, VINT_QUEUE_MAX);

	/* Check disabled interrupts are also cleared. */
	vcpu_virt_interrupt_enable(vcpu_locked, 1, false);
	vcpu_virt_interrupt_clear(vcpu_locked, 1);

	vcpu_virt_interrupt_inject(vcpu_locked, VINT_QUEUE_MAX + 1);
	/*
	 * Enable the interrupt after injecting it to check that an interrupt
	 * injected while disabled is still queued correctly and is returned
	 * by vcpu_virt_interrupt_get_pending_and_enabled once enabled.
	 */
	vcpu_virt_interrupt_enable(vcpu_locked, VINT_QUEUE_MAX + 1, true);

	/* Get the interrupts to check the FIFO order is maintained. */
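	/*
	 * Intids 1 and 2 were cleared, so the remaining interrupts are
	 * expected back in their original injection order, followed by the
	 * two interrupts injected into the freed slots.
	 */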
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked), 0);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked), 3);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked), 4);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked), 5);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked), 6);
}
} /* namespace */