1 /*
2  * Copyright 2019 The Hafnium Authors.
3  *
4  * Use of this source code is governed by a BSD-style
5  * license that can be found in the LICENSE file or at
6  * https://opensource.org/licenses/BSD-3-Clause.
7  */
8 
9 #include <gmock/gmock.h>
10 
11 extern "C" {
12 #include "hf/check.h"
13 #include "hf/mpool.h"
14 #include "hf/vm.h"
15 }
16 
17 #include <list>
18 #include <memory>
19 #include <span>
20 #include <vector>
21 
22 #include "mm_test.hh"
23 
24 namespace
25 {
using namespace ::std::placeholders;

using ::testing::AllOf;
using ::testing::Each;
using ::testing::SizeIs;

/*
 * Aliases for the C structs: the plain names would collide with the test
 * fixture class 'vm' declared below.
 * NOTE(review): 'struct_vm_locked' appears unused in this file.
 */
using struct_vm = struct vm;
using struct_vcpu = struct vcpu;
using struct_vm_locked = struct vm_locked;

/* Size of the heap backing the page pool used by all tests (64 pages). */
constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 64;
/* Top level of the stage-2 page table, as reported by the architecture. */
const int TOP_LEVEL = arch_mm_stage2_max_level();
38 
39 class vm : public ::testing::Test
40 {
41        protected:
42 	static std::unique_ptr<uint8_t[]> test_heap;
43 
44 	struct mpool ppool;
45 
SetUp()46 	void SetUp() override
47 	{
48 		if (!test_heap) {
49 			/*
50 			 * TODO: replace with direct use of stdlib allocator so
51 			 * sanitizers are more effective.
52 			 */
53 			test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
54 			mpool_init(&ppool, sizeof(struct mm_page_table));
55 			mpool_add_chunk(&ppool, test_heap.get(),
56 					TEST_HEAP_SIZE);
57 		}
58 	}
59 
60        public:
BootOrderSmallerThan(struct_vm * vm1,struct_vm * vm2)61 	static bool BootOrderSmallerThan(struct_vm *vm1, struct_vm *vm2)
62 	{
63 		return vm1->boot_order < vm2->boot_order;
64 	}
65 };
66 
67 std::unique_ptr<uint8_t[]> vm::test_heap;
68 
69 /**
70  * If nothing is mapped, unmapping the hypervisor has no effect.
71  */
TEST_F(vm, vm_unmap_hypervisor_not_mapped)
{
	struct_vm *the_vm = nullptr;
	struct vm_locked locked_vm;

	/* TODO: check ptable usage (security state?) */
	EXPECT_TRUE(vm_init_next(1, &ppool, &the_vm, false, 0));
	locked_vm = vm_lock(the_vm);
	ASSERT_TRUE(mm_vm_init(&the_vm->ptable, the_vm->id, &ppool));

	/* Unmapping the hypervisor from an empty table must succeed. */
	EXPECT_TRUE(vm_unmap_hypervisor(locked_vm, &ppool));

	/* All entries of all four top-level tables must still be absent. */
	EXPECT_THAT(
		mm_test::get_ptable(the_vm->ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));

	mm_vm_fini(&the_vm->ptable, &ppool);
	vm_unlock(&locked_vm);
}
88 
89 /**
90  * Validate the "boot_list" is created properly, according to vm's "boot_order"
91  * field.
92  */
TEST_F(vm, vm_boot_order)
{
	struct_vm *cur;
	struct_vcpu *boot_vcpu;
	std::list<struct_vm *> expected_final_order;

	EXPECT_TRUE(vcpu_get_boot_vcpu() == NULL);

	/*
	 * Insertion when no call to "vcpu_update_boot" has been made yet.
	 * The "boot_list" is expected to be empty.
	 */
	EXPECT_TRUE(vm_init_next(1, &ppool, &cur, false, 0));
	cur->boot_order = 3;
	boot_vcpu = vm_get_vcpu(cur, 0);
	vcpu_update_boot(boot_vcpu);
	expected_final_order.push_back(cur);

	EXPECT_EQ(vcpu_get_boot_vcpu()->vm->id, cur->id);

	/* Insertion at the head of the boot list. */
	EXPECT_TRUE(vm_init_next(1, &ppool, &cur, false, 0));
	cur->boot_order = 1;
	boot_vcpu = vm_get_vcpu(cur, 0);
	vcpu_update_boot(boot_vcpu);
	expected_final_order.push_back(cur);

	EXPECT_EQ(vcpu_get_boot_vcpu()->vm->id, cur->id);

	/* Insertion of two in the middle of the boot list. */
	for (uint32_t i = 0; i < 2; i++) {
		EXPECT_TRUE(vm_init_next(1, &ppool, &cur, false, 0));
		cur->boot_order = 2;
		boot_vcpu = vm_get_vcpu(cur, 0);
		vcpu_update_boot(boot_vcpu);
		expected_final_order.push_back(cur);
	}

	/*
	 * Insertion at the end of the list.
	 * This test shares data with "vm_unmap_hypervisor_not_mapped": a VM
	 * is expected to have been initialized before this test, with ID 1
	 * and boot_order 0.
	 */
	cur = vm_find(1);
	EXPECT_FALSE(cur == NULL);
	boot_vcpu = vm_get_vcpu(cur, 0);
	vcpu_update_boot(boot_vcpu);
	expected_final_order.push_back(cur);

	/*
	 * Number of VMs initialized should be the same as in the
	 * "expected_final_order", before the final verification.
	 */
	EXPECT_EQ(expected_final_order.size(), vm_get_count())
		<< "Something went wrong with the test itself...\n";

	/* Sort VMs from lower to higher "boot_order" field. */
	expected_final_order.sort(vm::BootOrderSmallerThan);

	/* Walk the boot list and check it matches the sorted expectation. */
	boot_vcpu = vcpu_get_boot_vcpu();
	for (struct_vm *expected : expected_final_order) {
		EXPECT_TRUE(boot_vcpu != NULL);
		EXPECT_EQ(expected->id, boot_vcpu->vm->id);
		boot_vcpu = boot_vcpu->next_boot;
	}
}
162 
163 /**
164  * Validates updates and check functions for binding notifications to endpoints.
165  */
TEST_F(vm, vm_notifications_bind_diff_senders)
{
	struct_vm *current_vm = nullptr;
	struct vm_locked current_vm_locked;
	std::vector<struct_vm *> dummy_senders;
	/*
	 * bitmaps[0] and bitmaps[1] are disjoint halves of the 64-bit space;
	 * bitmaps[2] overlaps both and is never bound, so it serves to probe
	 * notifications that were not (fully) bound to a sender.
	 */
	ffa_notifications_bitmap_t bitmaps[] = {
		0x00000000FFFFFFFFU, 0xFFFFFFFF00000000U, 0x0000FFFFFFFF0000U};
	bool is_from_vm = true;

	/* For the subsequent tests three VMs are used. */
	CHECK(vm_get_count() >= 3);

	/* VM at index 0 is the receiver; indexes 1 and 2 act as senders. */
	current_vm = vm_find_index(0);

	dummy_senders.push_back(vm_find_index(1));
	dummy_senders.push_back(vm_find_index(2));

	current_vm_locked = vm_lock(current_vm);

	for (unsigned int i = 0; i < 2; i++) {
		/* Validate bindings condition after initialization. */
		EXPECT_TRUE(vm_notifications_validate_binding(
			current_vm_locked, is_from_vm, HF_INVALID_VM_ID,
			bitmaps[i], false));

		/*
		 * Validate bind related operations. For this test considering
		 * only global notifications.
		 */
		vm_notifications_update_bindings(current_vm_locked, is_from_vm,
						 dummy_senders[i]->id,
						 bitmaps[i], false);

		/* The bound sender validates against its own bitmap. */
		EXPECT_TRUE(vm_notifications_validate_binding(
			current_vm_locked, is_from_vm, dummy_senders[i]->id,
			bitmaps[i], false));

		/* A different sender must not validate for that bitmap. */
		EXPECT_FALSE(vm_notifications_validate_binding(
			current_vm_locked, is_from_vm, dummy_senders[1 - i]->id,
			bitmaps[i], false));

		/* The sender must not validate against the other bitmap. */
		EXPECT_FALSE(vm_notifications_validate_binding(
			current_vm_locked, is_from_vm, dummy_senders[i]->id,
			bitmaps[1 - i], false));

		/* Nor against a bitmap that was never bound to it as a whole. */
		EXPECT_FALSE(vm_notifications_validate_binding(
			current_vm_locked, is_from_vm, dummy_senders[i]->id,
			bitmaps[2], false));
	}

	/* Clean up bind for other tests (sender 0 resets the binding). */
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
					 bitmaps[0], false);
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
					 bitmaps[1], false);

	vm_unlock(&current_vm_locked);
}
224 
225 /**
226  * Validates updates and check functions for binding notifications, namely the
227  * configuration of bindings of global and per-vCPU notifications.
228  */
TEST_F(vm, vm_notification_bind_per_vcpu_vs_global)
{
	struct_vm *current_vm;
	struct vm_locked current_vm_locked;
	struct_vm *dummy_sender;
	/* Lower half is bound as global, upper half as per-vCPU. */
	ffa_notifications_bitmap_t global = 0x00000000FFFFFFFFU;
	ffa_notifications_bitmap_t per_vcpu = ~global;
	bool is_from_vm = true;

	CHECK(vm_get_count() >= 2);

	current_vm = vm_find_index(0);

	dummy_sender = vm_find_index(1);

	current_vm_locked = vm_lock(current_vm);

	/* Bind 'global' as global and 'per_vcpu' as per-vCPU notifications. */
	vm_notifications_update_bindings(current_vm_locked, is_from_vm,
					 dummy_sender->id, global, false);
	vm_notifications_update_bindings(current_vm_locked, is_from_vm,
					 dummy_sender->id, per_vcpu, true);

	/* Check validation of global notifications bindings. */
	EXPECT_TRUE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id, global,
		false));

	/* Check validation of per-vCPU notifications bindings. */
	EXPECT_TRUE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id, per_vcpu,
		true));

	/*
	 * Check that global notifications are not validated as per-vCPU, and
	 * vice-versa.
	 */
	EXPECT_FALSE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id, global, true));
	EXPECT_FALSE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id, per_vcpu,
		false));
	/* A bitmap mixing both kinds must not validate either way. */
	EXPECT_FALSE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id,
		global | per_vcpu, true));
	EXPECT_FALSE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id,
		global | per_vcpu, false));

	/* Undo the bindings (sender 0 resets them for later tests). */
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
					 global, false);
	EXPECT_TRUE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, 0, global, false));

	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
					 per_vcpu, false);
	EXPECT_TRUE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, 0, per_vcpu, false));

	vm_unlock(&current_vm_locked);
}
290 
291 /**
292  * Validates accesses to notifications bitmaps.
293  */
TEST_F(vm, vm_notifications_set_and_get)
{
	struct_vm *current_vm;
	struct vm_locked current_vm_locked;
	struct_vm *dummy_sender;
	/* Lower half is bound as global, upper half as per-vCPU. */
	ffa_notifications_bitmap_t global = 0x00000000FFFFFFFFU;
	ffa_notifications_bitmap_t per_vcpu = ~global;
	ffa_notifications_bitmap_t ret;
	const unsigned int vcpu_idx = 0;
	struct notifications *notifications;
	const bool is_from_vm = true;

	CHECK(vm_get_count() >= 2);

	current_vm = vm_find_index(0);
	dummy_sender = vm_find_index(1);

	/* Inspect the receiver's VM-originated notification state directly. */
	notifications = &current_vm->notifications.from_vm;
	current_vm_locked = vm_lock(current_vm);

	vm_notifications_update_bindings(current_vm_locked, is_from_vm,
					 dummy_sender->id, global, false);
	vm_notifications_update_bindings(current_vm_locked, is_from_vm,
					 dummy_sender->id, per_vcpu, true);

	/*
	 * Validate get notifications bitmap for global notifications.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       global, 0ull, false);

	ret = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, 0ull);
	EXPECT_EQ(ret, global);
	/* Getting the notifications must clear the pending bitmap. */
	EXPECT_EQ(notifications->global.pending, 0ull);

	/*
	 * Validate get notifications bitmap for per-vCPU notifications.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       per_vcpu, vcpu_idx, true);

	ret = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, vcpu_idx);
	EXPECT_EQ(ret, per_vcpu);
	EXPECT_EQ(notifications->per_vcpu[vcpu_idx].pending, 0ull);

	/*
	 * Validate that getting notifications for a specific vCPU also returns
	 * global notifications.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       per_vcpu, vcpu_idx, true);
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       global, 0ull, false);

	ret = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, vcpu_idx);
	EXPECT_EQ(ret, per_vcpu | global);
	EXPECT_EQ(notifications->per_vcpu[vcpu_idx].pending, 0ull);
	EXPECT_EQ(notifications->global.pending, 0ull);

	/* Undo the binding (sender 0 resets it for later tests). */
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0ull,
					 global, false);
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0ull,
					 per_vcpu, true);
	vm_unlock(&current_vm_locked);
}
363 
364 /**
365  * Validates simple getting of notifications info for global notifications.
366  */
TEST_F(vm, vm_notifications_info_get_global)
{
	ffa_notifications_bitmap_t to_set = 0xFU;
	ffa_notifications_bitmap_t got;

	/*
	 * Set of variables that are also expected to be used when handling
	 * FFA_NOTIFICATION_INFO_GET.
	 */
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = 0;
	uint32_t lists_count = 0;
	enum notifications_info_get_state current_state = INIT;

	CHECK(vm_get_count() >= 2);

	/*
	 * Iterate over two distinct VMs: the expectations below assume each
	 * iteration contributes one new VM ID (hence one new list) to the
	 * shared 'ids' array, which is why vm_find_index(i) — not a fixed
	 * index — must be used.
	 */
	for (unsigned int i = 0; i < 2; i++) {
		struct_vm *current_vm = vm_find_index(i);
		struct vm_locked current_vm_locked = vm_lock(current_vm);
		struct notifications *notifications =
			&current_vm->notifications.from_sp;
		const bool is_from_vm = false;

		vm_notifications_partition_set_pending(
			current_vm_locked, is_from_vm, to_set, 0, false);

		vm_notifications_info_get_pending(
			current_vm_locked, is_from_vm, ids, &ids_count,
			lists_sizes, &lists_count,
			FFA_NOTIFICATIONS_INFO_GET_MAX_IDS, &current_state);

		/*
		 * Here the number of IDs and list count should be the same.
		 * As we are testing with global notifications, this is
		 * expected.
		 */
		EXPECT_EQ(ids_count, i + 1);
		EXPECT_EQ(lists_count, i + 1);
		/* Global notifications carry no vCPU IDs in their list. */
		EXPECT_EQ(lists_sizes[i], 0);
		EXPECT_EQ(to_set, notifications->global.info_get_retrieved);

		/* State must be reset to its initial value for each VM. */
		current_state = INIT;

		/*
		 * Check that getting pending notifications gives the expected
		 * return and cleans the 'pending' and 'info_get_retrieved'
		 * bitmaps.
		 */
		got = vm_notifications_partition_get_pending(current_vm_locked,
							     is_from_vm, 0);
		EXPECT_EQ(got, to_set);

		EXPECT_EQ(notifications->global.info_get_retrieved, 0U);
		EXPECT_EQ(notifications->global.pending, 0U);

		vm_unlock(&current_vm_locked);
	}
}
427 
428 /**
429  * Validates simple getting of notifications info for per-vCPU notifications.
430  */
TEST_F(vm, vm_notifications_info_get_per_vcpu)
{
	const ffa_notifications_bitmap_t per_vcpu = 0xFU;
	ffa_notifications_bitmap_t got;

	/*
	 * Set of variables that are also expected to be used when handling
	 * ffa_notification_info_get.
	 */
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = 0;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 0;
	enum notifications_info_get_state current_state = INIT;

	CHECK(vm_get_count() >= 2);

	/*
	 * Iterate over two distinct VMs: each iteration is expected to append
	 * one new VM ID plus one vCPU ID to the shared 'ids' array, which is
	 * why vm_find_index(i) — not a fixed index — must be used.
	 */
	for (unsigned int i = 0; i < 2; i++) {
		struct_vm *current_vm = vm_find_index(i);
		struct vm_locked current_vm_locked = vm_lock(current_vm);
		struct notifications *notifications =
			&current_vm->notifications.from_sp;
		const bool is_from_vm = false;

		/* Pend per-vCPU notifications for vCPU 0. */
		vm_notifications_partition_set_pending(
			current_vm_locked, is_from_vm, per_vcpu, 0, true);

		vm_notifications_info_get_pending(
			current_vm_locked, is_from_vm, ids, &ids_count,
			lists_sizes, &lists_count,
			FFA_NOTIFICATIONS_INFO_GET_MAX_IDS, &current_state);

		/*
		 * Per-vCPU notifications add both the VM ID and the vCPU ID
		 * to the list, so the IDs count grows twice as fast as the
		 * lists count.
		 */
		EXPECT_EQ(ids_count, (i + 1) * 2);
		EXPECT_EQ(lists_count, i + 1);
		EXPECT_EQ(lists_sizes[i], 1);
		EXPECT_EQ(per_vcpu,
			  notifications->per_vcpu[0].info_get_retrieved);

		/* State must be reset to its initial value for each VM. */
		current_state = INIT;

		/*
		 * Check that getting pending notifications gives the expected
		 * return and cleans the 'pending' and 'info_get_retrieved'
		 * bitmaps.
		 */
		got = vm_notifications_partition_get_pending(current_vm_locked,
							     is_from_vm, 0);
		EXPECT_EQ(got, per_vcpu);

		EXPECT_EQ(notifications->per_vcpu[0].info_get_retrieved, 0U);
		EXPECT_EQ(notifications->per_vcpu[0].pending, 0U);

		vm_unlock(&current_vm_locked);
	}
}
492 
493 /**
494  * Validate getting of notifications information if all VCPUs have notifications
495  * pending.
496  */
TEST_F(vm,vm_notifications_info_get_per_vcpu_all_vcpus)497 TEST_F(vm, vm_notifications_info_get_per_vcpu_all_vcpus)
498 {
499 	struct_vm *current_vm = nullptr;
500 	struct vm_locked current_vm_locked;
501 	const ffa_vcpu_count_t vcpu_count = MAX_CPUS;
502 	ffa_notifications_bitmap_t got;
503 	const ffa_notifications_bitmap_t global = 0xF0000;
504 
505 	/*
506 	 * Following set of variables that are also expected to be used when
507 	 * handling ffa_notification_info_get.
508 	 */
509 	struct notifications *notifications;
510 	const bool is_from_sp = false;
511 	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
512 	uint32_t ids_count = 0;
513 	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
514 	uint32_t lists_count = 0;
515 	enum notifications_info_get_state current_state = INIT;
516 
517 	EXPECT_TRUE(vm_init_next(vcpu_count, &ppool, &current_vm, false, 0));
518 	current_vm_locked = vm_lock(current_vm);
519 	notifications = &current_vm->notifications.from_sp;
520 
521 	for (unsigned int i = 0; i < vcpu_count; i++) {
522 		vm_notifications_partition_set_pending(
523 			current_vm_locked, is_from_sp, FFA_NOTIFICATION_MASK(i),
524 			i, true);
525 	}
526 
527 	/*
528 	 * Adding a global notification should not change the list of IDs,
529 	 * because global notifications only require the VM ID to be included in
530 	 * the list, at least once.
531 	 */
532 	vm_notifications_partition_set_pending(current_vm_locked, is_from_sp,
533 					       global, 0, false);
534 
535 	vm_notifications_info_get_pending(current_vm_locked, is_from_sp, ids,
536 					  &ids_count, lists_sizes, &lists_count,
537 					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
538 					  &current_state);
539 
540 	/*
541 	 * This test has been conceived for the expected MAX_CPUS 4.
542 	 * All VCPUs have notifications of the same VM, to be broken down in 2
543 	 * lists with 3 VCPU IDs, and 1 VCPU ID respectively.
544 	 * The list of IDs should look like: {<vm_id>, 0, 1, 2, <vm_id>, 3}.
545 	 */
546 	CHECK(MAX_CPUS == 4);
547 	EXPECT_EQ(ids_count, 6U);
548 	EXPECT_EQ(lists_count, 2U);
549 	EXPECT_EQ(lists_sizes[0], 3);
550 	EXPECT_EQ(lists_sizes[1], 1);
551 
552 	for (unsigned int i = 0; i < vcpu_count; i++) {
553 		got = vm_notifications_partition_get_pending(current_vm_locked,
554 							     is_from_sp, i);
555 
556 		/*
557 		 * The first call to
558 		 * vm_notifications_partition_get_pending should also
559 		 * include the global notifications on the return.
560 		 */
561 		ffa_notifications_bitmap_t to_check =
562 			(i != 0) ? FFA_NOTIFICATION_MASK(i)
563 				 : FFA_NOTIFICATION_MASK(i) | global;
564 
565 		EXPECT_EQ(got, to_check);
566 
567 		EXPECT_EQ(notifications->per_vcpu[i].pending, 0);
568 		EXPECT_EQ(notifications->per_vcpu[i].info_get_retrieved, 0);
569 	}
570 
571 	vm_unlock(&current_vm_locked);
572 }
573 
574 /**
575  * Validate change of state from 'vm_notifications_info_get_pending', when the
576  * list of IDs is full.
577  */
TEST_F(vm, vm_notifications_info_get_full_per_vcpu)
{
	struct_vm *current_vm = vm_find_index(0);
	struct vm_locked current_vm_locked = vm_lock(current_vm);
	struct notifications *notifications =
		&current_vm->notifications.from_sp;
	const bool is_from_vm = false;
	ffa_notifications_bitmap_t got = 0;

	/*
	 * Set of variables that are also expected to be used when handling
	 * ffa_notification_info_get.
	 * 'ids_count' has been initialized such that it indicates there is no
	 * space in the list for a per-vCPU notification (which needs two
	 * slots: VM ID and VCPU ID), but still one slot left for a global
	 * one.
	 * NOTE(review): 'lists_count' starting at 10 looks arbitrary; the
	 * assertions below never check it — confirm intent.
	 */
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = FFA_NOTIFICATIONS_INFO_GET_MAX_IDS - 1;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 10;
	enum notifications_info_get_state current_state = INIT;
	CHECK(vm_get_count() >= 2);

	/* Pend a per-vCPU notification on vCPU 0. */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       FFA_NOTIFICATION_MASK(1), 0,
					       true);

	/* Call function to get notifications info, with only per-vCPU set. */
	vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	/*
	 * Verify that as soon as there isn't space to do the required
	 * insertion in the list, 'vm_notifications_info_get_pending' returns
	 * and changes the list state to FULL. In this case returning, because
	 * it would need to add two IDs (VM ID and VCPU ID).
	 */
	EXPECT_EQ(current_state, FULL);
	EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS - 1);
	EXPECT_EQ(notifications->per_vcpu[0].info_get_retrieved, 0U);

	/*
	 * At this point there is still room for the information of a global
	 * notification (only VM ID to be added). Reset 'current_state'
	 * for the insertion to happen at the last position of the array.
	 */
	current_state = INIT;

	/* Setting global notification */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       FFA_NOTIFICATION_MASK(2), 0,
					       false);

	vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	/*
	 * Now List must be full, the set global notification must be part of
	 * 'info_get_retrieved', and the 'current_state' should be set to FULL
	 * due to the pending per-vCPU notification in VCPU 0.
	 */
	EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
	EXPECT_EQ(current_state, FULL);
	EXPECT_EQ(notifications->global.info_get_retrieved,
		  FFA_NOTIFICATION_MASK(2));

	/* Both notifications are still retrievable by the partition. */
	got = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, 0);
	EXPECT_EQ(got, FFA_NOTIFICATION_MASK(1) | FFA_NOTIFICATION_MASK(2));

	vm_unlock(&current_vm_locked);
}
655 
/**
 * Validate that a global notification is left pending and unreported when the
 * IDs list is already full.
 */
TEST_F(vm, vm_notifications_info_get_full_global)
{
	struct_vm *current_vm = vm_find_index(0);
	struct vm_locked current_vm_locked = vm_lock(current_vm);
	ffa_notifications_bitmap_t got;
	/* State of the SP-originated notifications, inspected directly. */
	struct notifications *notifications =
		&current_vm->notifications.from_sp;
	const bool is_from_vm = false;
	/*
	 * Set of variables that are also expected to be used when handling
	 * ffa_notification_info_get.
	 * 'ids_count' has been initialized such that it indicates there is no
	 * space in the list even for a global notification (VM ID only).
	 */
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = FFA_NOTIFICATIONS_INFO_GET_MAX_IDS;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 10;
	enum notifications_info_get_state current_state = INIT;

	CHECK(vm_get_count() >= 1);

	/* Set global notification. */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       FFA_NOTIFICATION_MASK(10), 0,
					       false);

	/* Get notifications info for the given notifications. */
	vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	/*
	 * Nothing fits in the list: 'info_get_retrieved' stays 0, the
	 * notification remains pending and the state must report FULL.
	 */
	EXPECT_EQ(notifications->global.info_get_retrieved, 0U);
	EXPECT_EQ(notifications->global.pending, FFA_NOTIFICATION_MASK(10));
	EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
	EXPECT_EQ(current_state, FULL);

	/* The notification is still retrievable by the partition itself. */
	got = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, 0);
	EXPECT_EQ(got, FFA_NOTIFICATION_MASK(10));

	vm_unlock(&current_vm_locked);
}
704 
/* Validate notifications info retrieval for framework notifications. */
TEST_F(vm, vm_notifications_info_get_from_framework)
{
	struct vm_locked vm_locked = vm_lock(vm_find_index(0));
	uint16_t id_list[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t id_count = 0;
	uint32_t list_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t list_count = 0;

	/* Pend framework notification bit 0 for the first VM. */
	vm_notifications_framework_set_pending(vm_locked, 0x1U);

	/* Get notifications info for the given notifications. */
	vm_notifications_info_get(vm_locked, id_list, &id_count, list_sizes,
				  &list_count,
				  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);

	/* Expect one list holding only the VM's own ID (no vCPU IDs). */
	EXPECT_EQ(id_list[0], vm_locked.vm->id);
	EXPECT_EQ(id_count, 1);
	EXPECT_EQ(list_sizes[0], 0);
	EXPECT_EQ(list_count, 1);

	/* The framework notification is still pending until fetched here. */
	EXPECT_EQ(vm_notifications_framework_get_pending(vm_locked), 0x1U);

	vm_unlock(&vm_locked);
}
729 
730 } /* namespace */
731