/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/arch/mm.h"

#include "hf/check.h"
#include "hf/list.h"
#include "hf/mm.h"
#include "hf/mpool.h"
#include "hf/timer_mgmt.h"
#include "hf/vm.h"
}

#include <list>
#include <memory>
#include <span>
#include <vector>

#include "mm_test.hh"

namespace
{
using namespace ::std::placeholders;

using ::testing::AllOf;
using ::testing::Each;
using ::testing::SizeIs;

using struct_vm = struct vm;
using struct_vcpu = struct vcpu;
using struct_vm_locked = struct vm_locked;

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 64;
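/* Level of the entries in the stage-2 root tables (one below the root). */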
const mm_level_t TOP_LEVEL = arch_mm_stage2_root_level() - 1;

class vm : public ::testing::Test
{
       protected:
	static std::unique_ptr<uint8_t[]> test_heap;

	struct mpool ppool;

	void SetUp() override
	{
		if (!test_heap) {
			/*
			 * TODO: replace with direct use of stdlib allocator so
			 * sanitizers are more effective.
			 */
			test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
			mpool_init(&ppool, sizeof(struct mm_page_table));
			mpool_add_chunk(&ppool, test_heap.get(),
					TEST_HEAP_SIZE);
		}
	}

       public:
	static bool BootOrderSmallerThan(struct_vm *vm1, struct_vm *vm2)
	{
		return vm1->boot_order < vm2->boot_order;
	}
};

std::unique_ptr<uint8_t[]> vm::test_heap;

/**
 * If nothing is mapped, unmapping the hypervisor has no effect.
 */
TEST_F(vm, vm_unmap_hypervisor_not_mapped)
{
	struct_vm *vm;
	struct vm_locked vm_locked;

	/* TODO: check ptable usage (security state?) */
	EXPECT_TRUE(vm_init_next(1, &ppool, &vm, false, 0));
	vm_locked = vm_lock(vm);
	ASSERT_TRUE(mm_vm_init(&vm->ptable, vm->id, &ppool));
	EXPECT_TRUE(vm_unmap_hypervisor(vm_locked, &ppool));
	EXPECT_THAT(
		mm_test::get_ptable(vm->ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&vm->ptable, &ppool);
	vm_unlock(&vm_locked);
}

/**
 * Validate that the boot list is created properly, according to the VMs'
 * "boot_order" field.
 */
TEST_F(vm, vm_boot_order)
{
	struct_vm *vm_cur;
	struct_vm *vm;
	std::list<struct_vm *> expected_final_order;

	/*
	 * Insertion when no call to "vm_update_boot" has been made yet.
	 * The boot list is expected to be empty.
	 */
	EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false, 0));
	vm_cur->boot_order = 3;
	vm_update_boot(vm_cur);
	expected_final_order.push_back(vm_cur);

	EXPECT_EQ(vm_get_boot_vm()->id, vm_cur->id);

	/* Insertion at the head of the boot list. */
	EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false, 0));
	vm_cur->boot_order = 1;
	vm_update_boot(vm_cur);
	expected_final_order.push_back(vm_cur);

	EXPECT_EQ(vm_get_boot_vm()->id, vm_cur->id);

	/* Insertion of two VMs in the middle of the boot list. */
	for (uint32_t i = 0; i < 2; i++) {
		EXPECT_TRUE(vm_init_next(MAX_CPUS, &ppool, &vm_cur, false, 0));
		vm_cur->boot_order = 2;
		vm_update_boot(vm_cur);
		expected_final_order.push_back(vm_cur);
	}

	/*
	 * Insertion at the end of the list.
	 * This test shares state with "vm_unmap_hypervisor_not_mapped".
	 * As such, a VM is expected to have been initialized before this
	 * test, with ID 1 and boot_order 0.
	 */
	vm_cur = vm_find(1);
	EXPECT_FALSE(vm_cur == NULL);
	vm_update_boot(vm_cur);
	expected_final_order.push_back(vm_cur);

	/*
	 * The number of VMs initialized should match the size of
	 * "expected_final_order" before the final verification.
	 */
	EXPECT_EQ(expected_final_order.size(), vm_get_count())
		<< "Something went wrong with the test itself...\n";

	/* Sort VMs from lower to higher "boot_order" field. */
	expected_final_order.sort(vm::BootOrderSmallerThan);

	std::list<struct_vm *>::iterator it;
	vm = vm_get_boot_vm();
	for (it = expected_final_order.begin();
	     it != expected_final_order.end(); it++) {
		EXPECT_TRUE(vm != NULL);
		EXPECT_EQ((*it)->id, vm->id);
		vm = vm_get_next_boot(vm);
	}
}

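/**
 * Validates arch timer bookkeeping: vCPUs with an enabled timer are tracked
 * in their CPU's timer list, the vCPU with the nearest deadline can be looked
 * up, entries are removed when the timer is disabled, and entries can be
 * migrated to another CPU.
 */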
TEST_F(vm, vcpu_arch_timer)
{
	const cpu_id_t cpu_ids[2] = {0, 1};
	struct_vcpu *vm0_vcpu;
	struct_vcpu *vm1_vcpu;
	struct_vcpu *deadline_vcpu;
	struct_vcpu *target_vcpu;
	struct vcpu_locked vcpu_locked;
	struct cpu *cpu0;
	struct cpu *cpu1;

	/* Initialize the CPU module with two physical CPUs. */
	cpu_module_init(cpu_ids, 2);
	cpu0 = cpu_find_index(0);
	cpu1 = cpu_find_index(1);

	/* Two UP endpoints are deployed for this test. */
	CHECK(vm_get_count() >= 2);
	vm0_vcpu = vm_get_vcpu(vm_find_index(0), 0);
	vm1_vcpu = vm_get_vcpu(vm_find_index(1), 0);

	/* The execution context of each VM is scheduled on CPU0. */
	vm0_vcpu->cpu = cpu0;
	vm1_vcpu->cpu = cpu0;

	/*
	 * Enable the timer peripheral for each vCPU and set up an arbitrary
	 * countdown value.
	 */
	vm0_vcpu->regs.arch_timer.cval = 555555;
	vm1_vcpu->regs.arch_timer.cval = 999999;
	vm0_vcpu->regs.arch_timer.ctl = 1;
	vm1_vcpu->regs.arch_timer.ctl = 1;

	/* No vCPU is being tracked through either timer list. */
	deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu0);
	EXPECT_TRUE(deadline_vcpu == NULL);
	deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu1);
	EXPECT_TRUE(deadline_vcpu == NULL);

	/* The vCPUs of VM0 and VM1 are added to the CPU0 list. */
	timer_vcpu_manage(vm0_vcpu);
	timer_vcpu_manage(vm1_vcpu);

	deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu0);
	EXPECT_EQ(deadline_vcpu, vm0_vcpu);

	/* Remove one of the vCPUs from the CPU0 list. */
	vm0_vcpu->regs.arch_timer.cval = 0;
	vm0_vcpu->regs.arch_timer.ctl = 0;
	timer_vcpu_manage(vm0_vcpu);

	/* This leaves one vCPU entry on the CPU0 list. */
	deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu0);
	EXPECT_EQ(deadline_vcpu, vm1_vcpu);

	/* Attempt to migrate the VM1 vCPU from CPU0 to CPU1. */
	vcpu_locked = vcpu_lock(vm1_vcpu);
	timer_migrate_to_other_cpu(cpu1, vcpu_locked);
	vcpu_unlock(&vcpu_locked);

	/*
	 * After migration, ensure the list is empty on CPU0 but non-empty on
	 * CPU1.
	 */
	deadline_vcpu = timer_find_vcpu_nearest_deadline(cpu0);
	EXPECT_TRUE(deadline_vcpu == NULL);

	/*
	 * The vCPU of VM1 is now running on CPU1. It must be the target vCPU
	 * when its timer expires.
	 */
	target_vcpu = timer_find_target_vcpu(vm1_vcpu);
	EXPECT_EQ(target_vcpu, vm1_vcpu);
}

/**
 * Validates the update and check functions for binding notifications to
 * endpoints.
 */
TEST_F(vm, vm_notifications_bind_diff_senders)
{
	struct_vm *current_vm = nullptr;
	struct vm_locked current_vm_locked;
	std::vector<struct_vm *> dummy_senders;
	ffa_notifications_bitmap_t bitmaps[] = {
		0x00000000FFFFFFFFU, 0xFFFFFFFF00000000U, 0x0000FFFFFFFF0000U};
	bool is_from_vm = true;

	/* For the subsequent tests three VMs are used. */
	CHECK(vm_get_count() >= 3);

	current_vm = vm_find_index(0);

	dummy_senders.push_back(vm_find_index(1));
	dummy_senders.push_back(vm_find_index(2));

	current_vm_locked = vm_lock(current_vm);

	for (unsigned int i = 0; i < 2; i++) {
		/* Validate bindings condition after initialization. */
		EXPECT_TRUE(vm_notifications_validate_binding(
			current_vm_locked, is_from_vm, HF_INVALID_VM_ID,
			bitmaps[i], false));

		/*
		 * Validate bind-related operations. This test considers only
		 * global notifications.
		 */
		vm_notifications_update_bindings(current_vm_locked, is_from_vm,
						 dummy_senders[i]->id,
						 bitmaps[i], false);

		EXPECT_TRUE(vm_notifications_validate_binding(
			current_vm_locked, is_from_vm, dummy_senders[i]->id,
			bitmaps[i], false));

		EXPECT_FALSE(vm_notifications_validate_binding(
			current_vm_locked, is_from_vm, dummy_senders[1 - i]->id,
			bitmaps[i], false));

		EXPECT_FALSE(vm_notifications_validate_binding(
			current_vm_locked, is_from_vm, dummy_senders[i]->id,
			bitmaps[1 - i], false));

		EXPECT_FALSE(vm_notifications_validate_binding(
			current_vm_locked, is_from_vm, dummy_senders[i]->id,
			bitmaps[2], false));
	}

	/* Clean up the bindings for the other tests. */
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
					 bitmaps[0], false);
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
					 bitmaps[1], false);

	vm_unlock(&current_vm_locked);
}

/**
 * Validates the update and check functions for binding notifications, namely
 * the configuration of bindings of global and per-vCPU notifications.
 */
TEST_F(vm, vm_notification_bind_per_vcpu_vs_global)
{
	struct_vm *current_vm;
	struct vm_locked current_vm_locked;
	struct_vm *dummy_sender;
	ffa_notifications_bitmap_t global = 0x00000000FFFFFFFFU;
	ffa_notifications_bitmap_t per_vcpu = ~global;
	bool is_from_vm = true;

	CHECK(vm_get_count() >= 2);

	current_vm = vm_find_index(0);

	dummy_sender = vm_find_index(1);

	current_vm_locked = vm_lock(current_vm);

	vm_notifications_update_bindings(current_vm_locked, is_from_vm,
					 dummy_sender->id, global, false);
	vm_notifications_update_bindings(current_vm_locked, is_from_vm,
					 dummy_sender->id, per_vcpu, true);

	/* Check validation of global notifications bindings. */
	EXPECT_TRUE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id, global,
		false));

	/* Check validation of per-vCPU notifications bindings. */
	EXPECT_TRUE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id, per_vcpu,
		true));

	/*
	 * Check that global notifications are not validated as per-vCPU, and
	 * vice-versa.
	 */
	EXPECT_FALSE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id, global, true));
	EXPECT_FALSE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id, per_vcpu,
		false));
	EXPECT_FALSE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id,
		global | per_vcpu, true));
	EXPECT_FALSE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id,
		global | per_vcpu, false));

	/* Undo the bindings. */
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
					 global, false);
	EXPECT_TRUE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, 0, global, false));

	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
					 per_vcpu, false);
	EXPECT_TRUE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, 0, per_vcpu, false));

	vm_unlock(&current_vm_locked);
}

/**
 * Validates accesses to the notifications bitmaps.
 */
TEST_F(vm, vm_notifications_set_and_get)
{
	struct_vm *current_vm;
	struct vm_locked current_vm_locked;
	struct_vm *dummy_sender;
	ffa_notifications_bitmap_t global = 0x00000000FFFFFFFFU;
	ffa_notifications_bitmap_t per_vcpu = ~global;
	ffa_notifications_bitmap_t ret;
	const unsigned int vcpu_idx = 0;
	struct notifications *notifications;
	const bool is_from_vm = true;

	CHECK(vm_get_count() >= 2);

	current_vm = vm_find_index(0);
	dummy_sender = vm_find_index(1);

	notifications = &current_vm->notifications.from_vm;
	current_vm_locked = vm_lock(current_vm);

	vm_notifications_update_bindings(current_vm_locked, is_from_vm,
					 dummy_sender->id, global, false);
	vm_notifications_update_bindings(current_vm_locked, is_from_vm,
					 dummy_sender->id, per_vcpu, true);

	/*
	 * Validate the get of the notifications bitmap for global
	 * notifications.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       global, 0ull, false);

	EXPECT_EQ(notifications->global.pending, global);

	/* The counter should track the pending notifications. */
	EXPECT_FALSE(vm_is_notifications_pending_count_zero());

	ret = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, 0ull);
	EXPECT_EQ(ret, global);
	EXPECT_EQ(notifications->global.pending, 0ull);

	/*
	 * After getting the pending notifications, the pending count should
	 * be zeroed.
	 */
	EXPECT_TRUE(vm_is_notifications_pending_count_zero());

	/*
	 * Validate the get of the notifications bitmap for per-vCPU
	 * notifications.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       per_vcpu, vcpu_idx, true);

	/*
	 * Duplicate the call to check that it doesn't alter the state of the
	 * counters.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       per_vcpu, vcpu_idx, true);

	EXPECT_FALSE(vm_is_notifications_pending_count_zero());

	ret = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, vcpu_idx);
	EXPECT_EQ(ret, per_vcpu);
	EXPECT_EQ(notifications->per_vcpu[vcpu_idx].pending, 0ull);
	EXPECT_TRUE(vm_is_notifications_pending_count_zero());

	/*
	 * Validate that getting notifications for a specific vCPU also returns
	 * the global notifications.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       per_vcpu, vcpu_idx, true);

	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       global, 0ull, false);
	EXPECT_FALSE(vm_is_notifications_pending_count_zero());

	ret = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, vcpu_idx);
	EXPECT_EQ(ret, per_vcpu | global);
	EXPECT_EQ(notifications->per_vcpu[vcpu_idx].pending, 0ull);
	EXPECT_EQ(notifications->global.pending, 0ull);
	EXPECT_TRUE(vm_is_notifications_pending_count_zero());

	/* Undo the bindings. */
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0ull,
					 global, false);
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0ull,
					 per_vcpu, true);
	vm_unlock(&current_vm_locked);
}

/**
 * Validates simple getting of notifications info for global notifications.
 */
TEST_F(vm, vm_notifications_info_get_global)
{
	ffa_notifications_bitmap_t to_set = 0xFU;
	ffa_notifications_bitmap_t got;

	/*
	 * The following variables are also expected to be used when handling
	 * FFA_NOTIFICATION_INFO_GET.
	 */
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = 0;
	uint32_t lists_count = 0;
	enum notifications_info_get_state current_state = INIT;

	CHECK(vm_get_count() >= 2);

	for (unsigned int i = 0; i < 2; i++) {
		struct_vm *current_vm = vm_find_index(0);
		struct vm_locked current_vm_locked = vm_lock(current_vm);
		struct notifications *notifications =
			&current_vm->notifications.from_sp;
		const bool is_from_vm = false;

		vm_notifications_partition_set_pending(
			current_vm_locked, is_from_vm, to_set, 0, false);

		vm_notifications_info_get_pending(
			current_vm_locked, is_from_vm, ids, &ids_count,
			lists_sizes, &lists_count,
			FFA_NOTIFICATIONS_INFO_GET_MAX_IDS, &current_state);

		/*
		 * The number of IDs and the list count should be the same,
		 * as is expected for global notifications.
		 */
		EXPECT_EQ(ids_count, i + 1);
		EXPECT_EQ(lists_count, i + 1);
		EXPECT_EQ(lists_sizes[i], 0);
		EXPECT_EQ(to_set, notifications->global.info_get_retrieved);

		/* The state must be reset to INIT for each VM. */
		current_state = INIT;

		/*
		 * Check that getting pending notifications gives the expected
		 * return and cleans the 'pending' and 'info_get_retrieved'
		 * bitmaps.
		 */
		got = vm_notifications_partition_get_pending(current_vm_locked,
							     is_from_vm, 0);
		EXPECT_EQ(got, to_set);

		EXPECT_EQ(notifications->global.info_get_retrieved, 0U);
		EXPECT_EQ(notifications->global.pending, 0U);

		vm_unlock(&current_vm_locked);
	}
}

/**
 * Validates simple getting of notifications info for per-vCPU notifications.
 */
TEST_F(vm, vm_notifications_info_get_per_vcpu)
{
	const ffa_notifications_bitmap_t per_vcpu = 0xFU;
	ffa_notifications_bitmap_t got;

	/*
	 * The following variables are also expected to be used when handling
	 * ffa_notification_info_get.
	 */
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = 0;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 0;
	enum notifications_info_get_state current_state = INIT;

	CHECK(vm_get_count() >= 2);

	for (unsigned int i = 0; i < 2; i++) {
		struct_vm *current_vm = vm_find_index(0);
		struct vm_locked current_vm_locked = vm_lock(current_vm);
		struct notifications *notifications =
			&current_vm->notifications.from_sp;
		const bool is_from_vm = false;

		vm_notifications_partition_set_pending(
			current_vm_locked, is_from_vm, per_vcpu, 0, true);

		vm_notifications_info_get_pending(
			current_vm_locked, is_from_vm, ids, &ids_count,
			lists_sizes, &lists_count,
			FFA_NOTIFICATIONS_INFO_GET_MAX_IDS, &current_state);

		/*
		 * Each iteration adds both the VM ID and the vCPU ID to the
		 * list, so 'ids_count' grows twice as fast as 'lists_count',
		 * as is expected for per-vCPU notifications.
		 */
		EXPECT_EQ(ids_count, (i + 1) * 2);
		EXPECT_EQ(lists_count, i + 1);
		EXPECT_EQ(lists_sizes[i], 1);
		EXPECT_EQ(per_vcpu,
			  notifications->per_vcpu[0].info_get_retrieved);

		/* The state must be reset to INIT for each VM. */
		current_state = INIT;

		/*
		 * Check that getting pending notifications gives the expected
		 * return and cleans the 'pending' and 'info_get_retrieved'
		 * bitmaps.
		 */
		got = vm_notifications_partition_get_pending(current_vm_locked,
							     is_from_vm, 0);
		EXPECT_EQ(got, per_vcpu);

		EXPECT_EQ(notifications->per_vcpu[0].info_get_retrieved, 0U);
		EXPECT_EQ(notifications->per_vcpu[0].pending, 0U);

		vm_unlock(&current_vm_locked);
	}
}

/**
 * Validates getting of notifications info when all vCPUs have pending
 * notifications.
 */
TEST_F(vm, vm_notifications_info_get_per_vcpu_all_vcpus)
{
	struct_vm *current_vm = nullptr;
	struct vm_locked current_vm_locked;
	const ffa_vcpu_count_t vcpu_count = MAX_CPUS;
	ffa_notifications_bitmap_t got;
	const ffa_notifications_bitmap_t global = 0xF0000;

	/*
	 * The following variables are also expected to be used when handling
	 * ffa_notification_info_get.
	 */
	struct notifications *notifications;
	const bool is_from_sp = false;
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = 0;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 0;
	enum notifications_info_get_state current_state = INIT;

	EXPECT_TRUE(vm_init_next(vcpu_count, &ppool, &current_vm, false, 0));
	current_vm_locked = vm_lock(current_vm);
	notifications = &current_vm->notifications.from_sp;

	for (unsigned int i = 0; i < vcpu_count; i++) {
		vm_notifications_partition_set_pending(
			current_vm_locked, is_from_sp, FFA_NOTIFICATION_MASK(i),
			i, true);
	}

	/*
	 * Adding a global notification should not change the list of IDs,
	 * because global notifications only require the VM ID to be included
	 * in the list at least once.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_sp,
					       global, 0, false);

	vm_notifications_info_get_pending(current_vm_locked, is_from_sp, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	/*
	 * This test was conceived for the expected MAX_CPUS of 4.
	 * All vCPUs have notifications of the same VM, to be broken down into
	 * 2 lists of 3 vCPU IDs and 1 vCPU ID respectively.
	 * The list of IDs should look like: {<vm_id>, 0, 1, 2, <vm_id>, 3}.
	 */
	CHECK(MAX_CPUS == 4);
	EXPECT_EQ(ids_count, 6U);
	EXPECT_EQ(lists_count, 2U);
	EXPECT_EQ(lists_sizes[0], 3);
	EXPECT_EQ(lists_sizes[1], 1);

	for (unsigned int i = 0; i < vcpu_count; i++) {
		got = vm_notifications_partition_get_pending(current_vm_locked,
							     is_from_sp, i);

		/*
		 * The first call to
		 * vm_notifications_partition_get_pending should also
		 * include the global notifications in the return value.
		 */
		ffa_notifications_bitmap_t to_check =
			(i != 0) ? FFA_NOTIFICATION_MASK(i)
				 : FFA_NOTIFICATION_MASK(i) | global;

		EXPECT_EQ(got, to_check);

		EXPECT_EQ(notifications->per_vcpu[i].pending, 0);
		EXPECT_EQ(notifications->per_vcpu[i].info_get_retrieved, 0);
	}

	vm_unlock(&current_vm_locked);
}

/**
 * Validates the change of state from 'vm_notifications_info_get_pending' when
 * the list of IDs is full.
 */
TEST_F(vm, vm_notifications_info_get_full_per_vcpu)
{
	struct_vm *current_vm = vm_find_index(0);
	struct vm_locked current_vm_locked = vm_lock(current_vm);
	struct notifications *notifications =
		&current_vm->notifications.from_sp;
	const bool is_from_vm = false;
	ffa_notifications_bitmap_t got = 0;

	/*
	 * The following variables are also expected to be used when handling
	 * ffa_notification_info_get.
	 * Here 'ids_count' is initialized such that it indicates there is no
	 * space left in the list for a per-vCPU notification, which needs
	 * both a VM ID and a vCPU ID.
	 */
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = FFA_NOTIFICATIONS_INFO_GET_MAX_IDS - 1;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 10;
	enum notifications_info_get_state current_state = INIT;
	CHECK(vm_get_count() >= 2);

	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       FFA_NOTIFICATION_MASK(1), 0,
					       true);

	/* Call function to get notifications info, with only per-vCPU set. */
	vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	/*
	 * Verify that as soon as there isn't space for the required insertion
	 * in the list, 'vm_notifications_info_get_pending' returns and
	 * changes the list state to FULL. In this case it returns because it
	 * would need to add two IDs (VM ID and vCPU ID).
	 */
	EXPECT_EQ(current_state, FULL);
	EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS - 1);
	EXPECT_EQ(notifications->per_vcpu[0].info_get_retrieved, 0U);

	/*
	 * At this point there is still room for the information of a global
	 * notification (only the VM ID needs to be added). Reset
	 * 'current_state' for the insertion to happen at the last position of
	 * the array.
	 */
	current_state = INIT;

	/* Set a global notification. */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       FFA_NOTIFICATION_MASK(2), 0,
					       false);

	vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	/*
	 * Now the list must be full: the global notification must be part of
	 * 'info_get_retrieved', and 'current_state' should be set to FULL due
	 * to the per-vCPU notification still pending on vCPU 0.
	 */
	EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
	EXPECT_EQ(current_state, FULL);
	EXPECT_EQ(notifications->global.info_get_retrieved,
		  FFA_NOTIFICATION_MASK(2));

	got = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, 0);
	EXPECT_EQ(got, FFA_NOTIFICATION_MASK(1) | FFA_NOTIFICATION_MASK(2));

	vm_unlock(&current_vm_locked);
}

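/**
 * Validates that 'vm_notifications_info_get_pending' reports a FULL list
 * state when there is no room left even for a global notification (which
 * only needs the VM ID), and keeps the notification pending.
 */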
TEST_F(vm, vm_notifications_info_get_full_global)
{
	struct_vm *current_vm = vm_find_index(0);
	struct vm_locked current_vm_locked = vm_lock(current_vm);
	ffa_notifications_bitmap_t got;
	struct notifications *notifications;
	const bool is_from_vm = false;
	/*
	 * The following variables are also expected to be used when handling
	 * ffa_notification_info_get.
	 * Here 'ids_count' is initialized such that it indicates there is no
	 * space in the list for a global notification (VM ID only).
	 */
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = FFA_NOTIFICATIONS_INFO_GET_MAX_IDS;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 10;
	enum notifications_info_get_state current_state = INIT;

	CHECK(vm_get_count() >= 1);

	current_vm = vm_find_index(0);

	notifications = &current_vm->notifications.from_sp;

	/* Set a global notification. */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       FFA_NOTIFICATION_MASK(10), 0,
					       false);

	/* Get notifications info for the given notifications. */
	vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	/* Expect the 'info_get_retrieved' bitmap to be 0. */
	EXPECT_EQ(notifications->global.info_get_retrieved, 0U);
	EXPECT_EQ(notifications->global.pending, FFA_NOTIFICATION_MASK(10));
	EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
	EXPECT_EQ(current_state, FULL);

	got = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, 0);
	EXPECT_EQ(got, FFA_NOTIFICATION_MASK(10));

	vm_unlock(&current_vm_locked);
}

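/**
 * Validates getting of notifications info for pending framework
 * notifications.
 */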
TEST_F(vm, vm_notifications_info_get_from_framework)
{
	struct vm_locked vm_locked = vm_lock(vm_find_index(0));
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = 0;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 0;

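	/* Set a framework notification as pending for the receiver VM. */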
	vm_notifications_framework_set_pending(vm_locked, 0x1U);

	/* Get notifications info for the given notifications. */
	vm_notifications_info_get(vm_locked, ids, &ids_count, lists_sizes,
				  &lists_count,
				  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);

	EXPECT_EQ(ids[0], vm_locked.vm->id);
	EXPECT_EQ(ids_count, 1);
	EXPECT_EQ(lists_sizes[0], 0);
	EXPECT_EQ(lists_count, 1);

	EXPECT_EQ(vm_notifications_framework_get_pending(vm_locked), 0x1U);

	vm_unlock(&vm_locked);
}

/**
 * Validates simple getting of notifications info for a pending IPI.
 * Also checks that vCPUs with pending IPIs are only reported if the
 * vCPU is in the waiting state.
 */
TEST_F(vm, vm_notifications_info_get_ipi)
{
	/*
	 * The following variables are also expected to be used when handling
	 * ffa_notification_info_get.
	 */
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = 0;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 0;
	enum notifications_info_get_state current_state = INIT;
	struct_vm *current_vm = vm_find_index(4);
	struct vcpu *target_vcpu = vm_get_vcpu(current_vm, 1);
	struct vcpu_locked vcpu_locked;
	const bool is_from_vm = false;
	struct vm_locked current_vm_locked = vm_lock(current_vm);

	EXPECT_TRUE(current_vm->vcpu_count >= 2);

	vcpu_locked = vcpu_lock(target_vcpu);
	vcpu_virt_interrupt_inject(vcpu_locked, HF_IPI_INTID);
	vcpu_virt_interrupt_enable(vcpu_locked, HF_IPI_INTID, true);
	vcpu_unlock(&vcpu_locked);

	vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	EXPECT_EQ(ids_count, 0);
	EXPECT_EQ(lists_count, 0);

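	/* A pending IPI is only reported once the vCPU is waiting. */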
	target_vcpu->state = VCPU_STATE_WAITING;

	vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	EXPECT_EQ(ids_count, 2);
	EXPECT_EQ(lists_count, 1);
	EXPECT_EQ(lists_sizes[0], 1);
	EXPECT_EQ(ids[0], current_vm->id);
	EXPECT_EQ(ids[1], 1);
	EXPECT_EQ(target_vcpu->interrupts_info_get_retrieved, true);

	/* Check the info is not retrieved multiple times. */
	current_state = INIT;
	ids[0] = 0;
	ids[1] = 0;
	ids_count = 0;
	lists_sizes[0] = 0;
	lists_count = 0;

	vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);
	EXPECT_EQ(ids_count, 0);
	EXPECT_EQ(lists_count, 0);
	EXPECT_EQ(lists_sizes[0], 0);

	vcpu_locked = vcpu_lock(target_vcpu);

	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_IPI_INTID);
	EXPECT_FALSE(vcpu_locked.vcpu->interrupts_info_get_retrieved);

	vcpu_unlock(&vcpu_locked);

	vm_unlock(&current_vm_locked);
}

/**
 * Validates getting of notifications info for a pending IPI when a per-vCPU
 * notification for the same vCPU is also pending.
 */
TEST_F(vm, vm_notifications_info_get_ipi_with_per_vcpu)
{
	/*
	 * The following variables are also expected to be used when handling
	 * ffa_notification_info_get.
	 */
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = 0;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 0;
	enum notifications_info_get_state current_state = INIT;
	struct_vm *current_vm = vm_find_index(4);
	struct vcpu *target_vcpu = vm_get_vcpu(current_vm, 1);
	struct vcpu_locked vcpu_locked;
	const bool is_from_vm = false;
	struct vm_locked current_vm_locked = vm_lock(current_vm);

	EXPECT_TRUE(current_vm->vcpu_count >= 2);

	vcpu_locked = vcpu_lock(target_vcpu);
	vcpu_virt_interrupt_inject(vcpu_locked, HF_IPI_INTID);
	vcpu_virt_interrupt_enable(vcpu_locked, HF_IPI_INTID, true);
	vcpu_unlock(&vcpu_locked);

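	/* Set a per-vCPU notification (bit 0) as pending for vCPU 1. */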
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       true, 1, true);
	vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	EXPECT_EQ(ids_count, 2);
	EXPECT_EQ(lists_count, 1);
	EXPECT_EQ(lists_sizes[0], 1);
	EXPECT_EQ(ids[0], current_vm->id);
	EXPECT_EQ(ids[1], 1);
	EXPECT_EQ(target_vcpu->interrupts_info_get_retrieved, true);

	/* Reset the state and values. */
	current_state = INIT;
	ids[0] = 0;
	ids[1] = 0;
	ids_count = 0;
	lists_sizes[0] = 0;
	lists_count = 0;

	vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);
	EXPECT_EQ(ids_count, 0);
	EXPECT_EQ(lists_count, 0);
	EXPECT_EQ(lists_sizes[0], 0);

	vcpu_locked = vcpu_lock(target_vcpu);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_IPI_INTID);
	vcpu_unlock(&vcpu_locked);

	vm_unlock(&current_vm_locked);
}

/**
 * Validates that a mix of a pending IPI and notifications are correctly
 * reported across vCPUs.
 */
TEST_F(vm, vm_notifications_info_get_per_vcpu_all_vcpus_and_ipi)
{
	struct_vm *current_vm = vm_find_index(4);
	ffa_vcpu_count_t vcpu_count = current_vm->vcpu_count;
	CHECK(vcpu_count > 1);

	struct vm_locked current_vm_locked = vm_lock(current_vm);

	/*
	 * The following variables are also expected to be used when handling
	 * ffa_notification_info_get.
	 */
	const bool is_from_vm = false;
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = 0;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 0;
	enum notifications_info_get_state current_state = INIT;
	struct vcpu *target_vcpu = vm_get_vcpu(current_vm, 0);
	struct vcpu_locked vcpu_locked;

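	/* Pending IPIs are only reported for vCPUs in the waiting state. */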
	target_vcpu->state = VCPU_STATE_WAITING;

	vcpu_locked = vcpu_lock(target_vcpu);
	vcpu_virt_interrupt_inject(vcpu_locked, HF_IPI_INTID);
	vcpu_virt_interrupt_enable(vcpu_locked, HF_IPI_INTID, true);
	vcpu_unlock(&vcpu_locked);

	for (unsigned int i = 1; i < vcpu_count; i++) {
		vm_notifications_partition_set_pending(
			current_vm_locked, is_from_vm, FFA_NOTIFICATION_MASK(i),
			i, true);
	}

	vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	/*
	 * This test was conceived for the expected MAX_CPUS of 4.
	 * vCPU 0 has a pending IPI and vCPUs 1-3 have per-vCPU notifications,
	 * to be broken down into 2 lists of 3 vCPU IDs and 1 vCPU ID
	 * respectively.
	 * The list of IDs should look like: {<vm_id>, 0, 1, 2, <vm_id>, 3}.
	 */
	EXPECT_EQ(ids_count, 6U);
	EXPECT_EQ(lists_count, 2U);
	EXPECT_EQ(lists_sizes[0], 3);
	EXPECT_EQ(lists_sizes[1], 1);
	EXPECT_EQ(ids[0], current_vm->id);
	EXPECT_EQ(ids[1], 0);
	EXPECT_EQ(ids[2], 1);
	EXPECT_EQ(ids[3], 2);
	EXPECT_EQ(ids[4], current_vm->id);
	EXPECT_EQ(ids[5], 3);

	vcpu_locked = vcpu_lock(target_vcpu);
	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  HF_IPI_INTID);
	vcpu_unlock(&vcpu_locked);

	vm_unlock(&current_vm_locked);
}

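/**
 * Validates that a pending and enabled virtual interrupt is reported via
 * 'vm_notifications_info_get_pending' for a vCPU in the waiting state, and
 * that the info is not reported more than once.
 */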
TEST_F(vm, pending_interrupts_info_retrieved)
{
	struct_vm *test_vm = vm_find_index(4);
	struct_vcpu *vcpu = vm_get_vcpu(test_vm, 1);
	const uint32_t intid = HF_NUM_INTIDS - 2;
	struct vm_locked test_vm_locked;
	struct vcpu_locked vcpu_locked;

	/*
	 * The following variables are also expected to be used when handling
	 * ffa_notification_info_get.
	 */
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = 0;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 0;
	enum notifications_info_get_state current_state = INIT;

	/*
	 * Make it such that the FF-A endpoint ID and vCPU ID are included in
	 * the list when invoking notifications info get.
	 */
	test_vm->sri_policy.intr_while_waiting = true;

	vcpu_locked = vcpu_lock(vcpu);

	/* Check this is starting from a clean state. */
	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 0);
	EXPECT_FALSE(vcpu->interrupts_info_get_retrieved);

	/* Enable the interrupt and set it pending. */
	vcpu_virt_interrupt_enable(vcpu_locked, intid, true);

	vcpu_virt_interrupt_inject(vcpu_locked, intid);

	vcpu->state = VCPU_STATE_WAITING;

	EXPECT_EQ(vcpu_virt_interrupt_count_get(vcpu_locked), 1);

	/* Release the vCPU lock. */
	vcpu_unlock(&vcpu_locked);

	test_vm_locked = vm_lock(test_vm);

	vm_notifications_info_get_pending(test_vm_locked, true, ids, &ids_count,
					  lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	/* Assert the interrupt info has been marked as retrieved. */
	EXPECT_TRUE(vcpu->interrupts_info_get_retrieved);

	vm_unlock(&test_vm_locked);

	/* Pop the interrupt to clean up and check the intid is returned. */
	vcpu_locked = vcpu_lock(vcpu);

	EXPECT_EQ(vcpu_virt_interrupt_get_pending_and_enabled(vcpu_locked),
		  intid);

	EXPECT_FALSE(vcpu_locked.vcpu->interrupts_info_get_retrieved);

	vcpu_unlock(&vcpu_locked);
}
} /* namespace */