/*
 * Copyright 2019 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include <gmock/gmock.h>

extern "C" {
#include "hf/check.h"
#include "hf/mpool.h"
#include "hf/vm.h"
}

#include <list>
#include <memory>
#include <span>
#include <vector>

#include "mm_test.hh"

namespace
{
using namespace ::std::placeholders;

using ::testing::AllOf;
using ::testing::Each;
using ::testing::SizeIs;

using struct_vm = struct vm;
using struct_vm_locked = struct vm_locked;

constexpr size_t TEST_HEAP_SIZE = PAGE_SIZE * 32;
const int TOP_LEVEL = arch_mm_stage2_max_level();

class vm : public ::testing::Test
{
	void SetUp() override
	{
		/*
		 * TODO: replace with direct use of stdlib allocator so
		 * sanitizers are more effective.
		 */
		test_heap = std::make_unique<uint8_t[]>(TEST_HEAP_SIZE);
		mpool_init(&ppool, sizeof(struct mm_page_table));
		mpool_add_chunk(&ppool, test_heap.get(), TEST_HEAP_SIZE);
	}

	std::unique_ptr<uint8_t[]> test_heap;

       protected:
	struct mpool ppool;

       public:
	static bool BootOrderSmallerThan(struct_vm *vm1, struct_vm *vm2)
	{
		return vm1->boot_order < vm2->boot_order;
	}
};

/**
 * If nothing is mapped, unmapping the hypervisor has no effect.
 */
TEST_F(vm, vm_unmap_hypervisor_not_mapped)
{
	struct_vm *vm;
	struct vm_locked vm_locked;

	EXPECT_TRUE(vm_init_next(1, &ppool, &vm, false));
	vm_locked = vm_lock(vm);
	ASSERT_TRUE(mm_vm_init(&vm->ptable, vm->id, &ppool));
	EXPECT_TRUE(vm_unmap_hypervisor(vm_locked, &ppool));
	EXPECT_THAT(
		mm_test::get_ptable(vm->ptable),
		AllOf(SizeIs(4), Each(Each(arch_mm_absent_pte(TOP_LEVEL)))));
	mm_vm_fini(&vm->ptable, &ppool);
	vm_unlock(&vm_locked);
}

/**
 * Validate that the "boot_list" is created properly, according to the VMs'
 * "boot_order" field.
 */
TEST_F(vm, vm_boot_order)
{
	struct_vm *vm_cur;
	std::list<struct_vm *> expected_final_order;

	EXPECT_FALSE(vm_get_first_boot());

	/*
	 * Insertion when no call to "vm_update_boot" has been made yet.
	 * The "boot_list" is expected to be empty.
	 */
	EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false));
	vm_cur->boot_order = 3;
	vm_update_boot(vm_cur);
	expected_final_order.push_back(vm_cur);

	EXPECT_EQ(vm_get_first_boot()->id, vm_cur->id);

	/* Insertion at the head of the boot list. */
	EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false));
	vm_cur->boot_order = 1;
	vm_update_boot(vm_cur);
	expected_final_order.push_back(vm_cur);

	EXPECT_EQ(vm_get_first_boot()->id, vm_cur->id);

	/* Insertion of two in the middle of the boot list. */
	for (int i = 0; i < 2; i++) {
		EXPECT_TRUE(vm_init_next(1, &ppool, &vm_cur, false));
		vm_cur->boot_order = 2;
		vm_update_boot(vm_cur);
		expected_final_order.push_back(vm_cur);
	}

	/*
	 * Insertion at the end of the list.
	 * This test shares data with "vm_unmap_hypervisor_not_mapped".
	 * As such, a VM is expected to have been initialized before this
	 * test, with ID 1 and boot_order 0.
	 */
	vm_cur = vm_find(1);
	EXPECT_FALSE(vm_cur == NULL);
	vm_update_boot(vm_cur);
	expected_final_order.push_back(vm_cur);

	/*
	 * The number of VMs initialized should be the same as in
	 * "expected_final_order", before the final verification.
	 */
	EXPECT_EQ(expected_final_order.size(), vm_get_count())
		<< "Something went wrong with the test itself...\n";

	/* Sort VMs from lower to higher "boot_order" field. */
	expected_final_order.sort(vm::BootOrderSmallerThan);

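	/* Traverse the boot list and check it matches the expected order. */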
	std::list<struct_vm *>::iterator it;
	for (it = expected_final_order.begin(), vm_cur = vm_get_first_boot();
	     it != expected_final_order.end() && vm_cur != NULL;
	     it++, vm_cur = vm_cur->next_boot) {
		EXPECT_EQ((*it)->id, vm_cur->id);
	}
}

/**
 * Validates the update and check functions for binding notifications to
 * endpoints.
 */
TEST_F(vm, vm_notifications_bind_diff_senders)
{
	struct_vm *current_vm = nullptr;
	struct vm_locked current_vm_locked;
	std::vector<struct_vm *> dummy_senders;
	ffa_notifications_bitmap_t bitmaps[] = {
		0x00000000FFFFFFFFU, 0xFFFFFFFF00000000U, 0x0000FFFFFFFF0000U};
	bool is_from_vm = true;

	/* For the subsequent tests three VMs are used. */
	CHECK(vm_get_count() >= 3);

	current_vm = vm_find_index(0);

	dummy_senders.push_back(vm_find_index(1));
	dummy_senders.push_back(vm_find_index(2));

	current_vm_locked = vm_lock(current_vm);

	for (unsigned int i = 0; i < 2; i++) {
		/* Validate bindings condition after initialization. */
		EXPECT_TRUE(vm_notifications_validate_binding(
			current_vm_locked, is_from_vm, HF_INVALID_VM_ID,
			bitmaps[i], false));

		/*
		 * Validate bind-related operations. This test considers only
		 * global notifications.
		 */
		vm_notifications_update_bindings(current_vm_locked, is_from_vm,
						 dummy_senders[i]->id,
						 bitmaps[i], false);

		EXPECT_TRUE(vm_notifications_validate_binding(
			current_vm_locked, is_from_vm, dummy_senders[i]->id,
			bitmaps[i], false));

		EXPECT_FALSE(vm_notifications_validate_binding(
			current_vm_locked, is_from_vm, dummy_senders[1 - i]->id,
			bitmaps[i], false));

		EXPECT_FALSE(vm_notifications_validate_binding(
			current_vm_locked, is_from_vm, dummy_senders[i]->id,
			bitmaps[1 - i], false));

		EXPECT_FALSE(vm_notifications_validate_binding(
			current_vm_locked, is_from_vm, dummy_senders[i]->id,
			bitmaps[2], false));
	}

	/* Clean up bindings for other tests. */
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
					 bitmaps[0], false);
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
					 bitmaps[1], false);

	vm_unlock(&current_vm_locked);
}

/**
 * Validates the update and check functions for binding notifications, namely
 * the configuration of global and per-vCPU notification bindings.
 */
TEST_F(vm, vm_notification_bind_per_vcpu_vs_global)
{
	struct_vm *current_vm;
	struct vm_locked current_vm_locked;
	struct_vm *dummy_sender;
	ffa_notifications_bitmap_t global = 0x00000000FFFFFFFFU;
	ffa_notifications_bitmap_t per_vcpu = ~global;
	bool is_from_vm = true;

	CHECK(vm_get_count() >= 2);

	current_vm = vm_find_index(0);

	dummy_sender = vm_find_index(1);

	current_vm_locked = vm_lock(current_vm);

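	/*
	 * Bind 'global' as global notifications and 'per_vcpu' as per-vCPU
	 * notifications, both to the same dummy sender.
	 */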
	vm_notifications_update_bindings(current_vm_locked, is_from_vm,
					 dummy_sender->id, global, false);
	vm_notifications_update_bindings(current_vm_locked, is_from_vm,
					 dummy_sender->id, per_vcpu, true);

	/* Check validation of global notifications bindings. */
	EXPECT_TRUE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id, global,
		false));

	/* Check validation of per-vCPU notifications bindings. */
	EXPECT_TRUE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id, per_vcpu,
		true));

	/*
	 * Check that global notifications are not validated as per-vCPU, and
	 * vice-versa.
	 */
	EXPECT_FALSE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id, global, true));
	EXPECT_FALSE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id, per_vcpu,
		false));
	EXPECT_FALSE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id,
		global | per_vcpu, true));
	EXPECT_FALSE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, dummy_sender->id,
		global | per_vcpu, false));

	/* Undo the bindings. */
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
					 global, false);
	EXPECT_TRUE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, 0, global, false));

	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0,
					 per_vcpu, false);
	EXPECT_TRUE(vm_notifications_validate_binding(
		current_vm_locked, is_from_vm, 0, per_vcpu, false));

	vm_unlock(&current_vm_locked);
}

/**
 * Validates accesses to notifications bitmaps.
 */
TEST_F(vm, vm_notifications_set_and_get)
{
	struct_vm *current_vm;
	struct vm_locked current_vm_locked;
	struct_vm *dummy_sender;
	ffa_notifications_bitmap_t global = 0x00000000FFFFFFFFU;
	ffa_notifications_bitmap_t per_vcpu = ~global;
	ffa_notifications_bitmap_t ret;
	const unsigned int vcpu_idx = 0;
	struct notifications *notifications;
	const bool is_from_vm = true;

	CHECK(vm_get_count() >= 2);

	current_vm = vm_find_index(0);
	dummy_sender = vm_find_index(1);

	notifications = &current_vm->notifications.from_vm;
	current_vm_locked = vm_lock(current_vm);

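	/* Bind the global and per-vCPU bitmaps to the dummy sender. */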
	vm_notifications_update_bindings(current_vm_locked, is_from_vm,
					 dummy_sender->id, global, false);
	vm_notifications_update_bindings(current_vm_locked, is_from_vm,
					 dummy_sender->id, per_vcpu, true);

	/*
	 * Validate get notifications bitmap for global notifications.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       global, 0ull, false);

	ret = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, 0ull);
	EXPECT_EQ(ret, global);
	EXPECT_EQ(notifications->global.pending, 0ull);

	/*
	 * Validate get notifications bitmap for per-vCPU notifications.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       per_vcpu, vcpu_idx, true);

	ret = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, vcpu_idx);
	EXPECT_EQ(ret, per_vcpu);
	EXPECT_EQ(notifications->per_vcpu[vcpu_idx].pending, 0ull);

	/*
	 * Validate that getting notifications for a specific vCPU also returns
	 * global notifications.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       per_vcpu, vcpu_idx, true);
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       global, 0ull, false);

	ret = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, vcpu_idx);
	EXPECT_EQ(ret, per_vcpu | global);
	EXPECT_EQ(notifications->per_vcpu[vcpu_idx].pending, 0ull);
	EXPECT_EQ(notifications->global.pending, 0ull);

	/* Undo the bindings. */
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0ull,
					 global, false);
	vm_notifications_update_bindings(current_vm_locked, is_from_vm, 0ull,
					 per_vcpu, true);
	vm_unlock(&current_vm_locked);
}

/**
 * Validates simple getting of notifications info for global notifications.
 */
TEST_F(vm, vm_notifications_info_get_global)
{
	ffa_notifications_bitmap_t to_set = 0xFU;
	ffa_notifications_bitmap_t got;

	/*
	 * The following variables are also expected to be used when handling
	 * FFA_NOTIFICATION_INFO_GET.
	 */
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = 0;
	uint32_t lists_count = 0;
	enum notifications_info_get_state current_state = INIT;

	CHECK(vm_get_count() >= 2);

	for (unsigned int i = 0; i < 2; i++) {
		struct_vm *current_vm = vm_find_index(0);
		struct vm_locked current_vm_locked = vm_lock(current_vm);
		struct notifications *notifications =
			&current_vm->notifications.from_sp;
		const bool is_from_vm = false;

		vm_notifications_partition_set_pending(
			current_vm_locked, is_from_vm, to_set, 0, false);

		vm_notifications_info_get_pending(
			current_vm_locked, is_from_vm, ids, &ids_count,
			lists_sizes, &lists_count,
			FFA_NOTIFICATIONS_INFO_GET_MAX_IDS, &current_state);

		/*
		 * Here the number of IDs and the list count should be the
		 * same. As we are testing with global notifications, this is
		 * expected.
		 */
		EXPECT_EQ(ids_count, i + 1);
		EXPECT_EQ(lists_count, i + 1);
		EXPECT_EQ(lists_sizes[i], 0);
		EXPECT_EQ(to_set, notifications->global.info_get_retrieved);

		/* The state must be reset to INIT before each iteration. */
		current_state = INIT;

		/*
		 * Check that getting pending notifications gives the expected
		 * return and cleans the 'pending' and 'info_get_retrieved'
		 * bitmaps.
		 */
		got = vm_notifications_partition_get_pending(current_vm_locked,
							     is_from_vm, 0);
		EXPECT_EQ(got, to_set);

		EXPECT_EQ(notifications->global.info_get_retrieved, 0U);
		EXPECT_EQ(notifications->global.pending, 0U);

		vm_unlock(&current_vm_locked);
	}
}

/**
 * Validates simple getting of notifications info for per-vCPU notifications.
 */
TEST_F(vm, vm_notifications_info_get_per_vcpu)
{
	const ffa_notifications_bitmap_t per_vcpu = 0xFU;
	ffa_notifications_bitmap_t got;

	/*
	 * The following variables are also expected to be used when handling
	 * ffa_notification_info_get.
	 */
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = 0;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 0;
	enum notifications_info_get_state current_state = INIT;

	CHECK(vm_get_count() >= 2);

	for (unsigned int i = 0; i < 2; i++) {
		struct_vm *current_vm = vm_find_index(0);
		struct vm_locked current_vm_locked = vm_lock(current_vm);
		struct notifications *notifications =
			&current_vm->notifications.from_sp;
		const bool is_from_vm = false;

		vm_notifications_partition_set_pending(
			current_vm_locked, is_from_vm, per_vcpu, 0, true);

		vm_notifications_info_get_pending(
			current_vm_locked, is_from_vm, ids, &ids_count,
			lists_sizes, &lists_count,
			FFA_NOTIFICATIONS_INFO_GET_MAX_IDS, &current_state);

		/*
		 * Here each per-vCPU notification adds both the VM ID and the
		 * vCPU ID to the list, so 'ids_count' is expected to grow
		 * twice as fast as 'lists_count'.
		 */
		EXPECT_EQ(ids_count, (i + 1) * 2);
		EXPECT_EQ(lists_count, i + 1);
		EXPECT_EQ(lists_sizes[i], 1);
		EXPECT_EQ(per_vcpu,
			  notifications->per_vcpu[0].info_get_retrieved);

		/* The state must be reset to INIT before each iteration. */
		current_state = INIT;

		/*
		 * Check that getting pending notifications gives the expected
		 * return and cleans the 'pending' and 'info_get_retrieved'
		 * bitmaps.
		 */
		got = vm_notifications_partition_get_pending(current_vm_locked,
							     is_from_vm, 0);
		EXPECT_EQ(got, per_vcpu);

		EXPECT_EQ(notifications->per_vcpu[0].info_get_retrieved, 0U);
		EXPECT_EQ(notifications->per_vcpu[0].pending, 0U);

		vm_unlock(&current_vm_locked);
	}
}

/**
 * Validates getting of notifications info when all vCPUs have notifications
 * pending.
 */
TEST_F(vm, vm_notifications_info_get_per_vcpu_all_vcpus)
{
	struct_vm *current_vm = nullptr;
	struct vm_locked current_vm_locked;
	const ffa_vcpu_count_t vcpu_count = MAX_CPUS;
	ffa_notifications_bitmap_t got;
	const ffa_notifications_bitmap_t global = 0xF0000;

	/*
	 * The following variables are also expected to be used when handling
	 * ffa_notification_info_get.
	 */
	struct notifications *notifications;
	const bool is_from_sp = false;
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = 0;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 0;
	enum notifications_info_get_state current_state = INIT;

	EXPECT_TRUE(vm_init_next(vcpu_count, &ppool, &current_vm, false));
	current_vm_locked = vm_lock(current_vm);
	notifications = &current_vm->notifications.from_sp;

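	/* Set a distinct per-vCPU notification pending on each vCPU. */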
	for (unsigned int i = 0; i < vcpu_count; i++) {
		vm_notifications_partition_set_pending(
			current_vm_locked, is_from_sp, FFA_NOTIFICATION_MASK(i),
			i, true);
	}

	/*
	 * Adding a global notification should not change the list of IDs,
	 * because global notifications only require the VM ID to be included
	 * in the list, at least once.
	 */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_sp,
					       global, 0, false);

	vm_notifications_info_get_pending(current_vm_locked, is_from_sp, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	/*
	 * This test was conceived for the expected MAX_CPUS of 4.
	 * All vCPUs have notifications of the same VM, to be broken down into
	 * 2 lists with 3 vCPU IDs and 1 vCPU ID, respectively.
	 * The list of IDs should look like: {<vm_id>, 0, 1, 2, <vm_id>, 3}.
	 */
	CHECK(MAX_CPUS == 4);
	EXPECT_EQ(ids_count, 6U);
	EXPECT_EQ(lists_count, 2U);
	EXPECT_EQ(lists_sizes[0], 3);
	EXPECT_EQ(lists_sizes[1], 1);

	for (unsigned int i = 0; i < vcpu_count; i++) {
		got = vm_notifications_partition_get_pending(current_vm_locked,
							     is_from_sp, i);

		/*
		 * The first call to
		 * vm_notifications_partition_get_pending should also
		 * include the global notifications on the return.
		 */
		ffa_notifications_bitmap_t to_check =
			(i != 0) ? FFA_NOTIFICATION_MASK(i)
				 : FFA_NOTIFICATION_MASK(i) | global;

		EXPECT_EQ(got, to_check);

		EXPECT_EQ(notifications->per_vcpu[i].pending, 0);
		EXPECT_EQ(notifications->per_vcpu[i].info_get_retrieved, 0);
	}

	vm_unlock(&current_vm_locked);
}

/**
 * Validate change of state from 'vm_notifications_info_get_pending', when the
 * list of IDs is full.
 */
TEST_F(vm, vm_notifications_info_get_full_per_vcpu)
{
	struct_vm *current_vm = vm_find_index(0);
	struct vm_locked current_vm_locked = vm_lock(current_vm);
	struct notifications *notifications =
		&current_vm->notifications.from_sp;
	const bool is_from_vm = false;
	ffa_notifications_bitmap_t got = 0;

	/*
	 * The following variables are also expected to be used when handling
	 * ffa_notification_info_get.
	 * For this test, 'ids_count' has been initialized such that there is
	 * no space left in the list for a per-vCPU notification (VM ID and
	 * vCPU ID).
	 */
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = FFA_NOTIFICATIONS_INFO_GET_MAX_IDS - 1;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 10;
	enum notifications_info_get_state current_state = INIT;
	CHECK(vm_get_count() >= 2);

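	/* Set a per-vCPU notification pending on vCPU 0. */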
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       FFA_NOTIFICATION_MASK(1), 0,
					       true);

	/* Call function to get notifications info, with only per-vCPU set. */
	vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	/*
	 * Verify that as soon as there isn't space to do the required
	 * insertion in the list, 'vm_notifications_info_get_pending' returns
	 * and changes the list state to FULL. In this case it returns because
	 * it would need to add two IDs (VM ID and vCPU ID).
	 */
	EXPECT_EQ(current_state, FULL);
	EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS - 1);
	EXPECT_EQ(notifications->per_vcpu[0].info_get_retrieved, 0U);

	/*
	 * At this point there is still room for the information of a global
	 * notification (only the VM ID needs to be added). Reset
	 * 'current_state' for the insertion to happen at the last position of
	 * the array.
	 */
	current_state = INIT;

	/* Setting global notification. */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       FFA_NOTIFICATION_MASK(2), 0,
					       false);

	vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	/*
	 * Now the list must be full, the set global notification must be part
	 * of 'info_get_retrieved', and the 'current_state' should be set to
	 * FULL due to the pending per-vCPU notification in vCPU 0.
	 */
	EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
	EXPECT_EQ(current_state, FULL);
	EXPECT_EQ(notifications->global.info_get_retrieved,
		  FFA_NOTIFICATION_MASK(2));

	got = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, 0);
	EXPECT_EQ(got, FFA_NOTIFICATION_MASK(1) | FFA_NOTIFICATION_MASK(2));

	vm_unlock(&current_vm_locked);
}

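/**
 * Validate change of state from 'vm_notifications_info_get_pending', when the
 * list of IDs has no space left even for a global notification.
 */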
TEST_F(vm, vm_notifications_info_get_full_global)
{
	struct_vm *current_vm = vm_find_index(0);
	struct vm_locked current_vm_locked = vm_lock(current_vm);
	ffa_notifications_bitmap_t got;
	struct notifications *notifications;
	const bool is_from_vm = false;
	/*
	 * The following variables are also expected to be used when handling
	 * ffa_notification_info_get.
	 * For this test, 'ids_count' has been initialized such that there is
	 * no space left in the list even for a global notification (VM ID
	 * only).
	 */
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = FFA_NOTIFICATIONS_INFO_GET_MAX_IDS;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 10;
	enum notifications_info_get_state current_state = INIT;

	CHECK(vm_get_count() >= 1);

	current_vm = vm_find_index(0);

	notifications = &current_vm->notifications.from_sp;

	/* Set global notification. */
	vm_notifications_partition_set_pending(current_vm_locked, is_from_vm,
					       FFA_NOTIFICATION_MASK(10), 0,
					       false);

	/* Get notifications info for the given notifications. */
	vm_notifications_info_get_pending(current_vm_locked, is_from_vm, ids,
					  &ids_count, lists_sizes, &lists_count,
					  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS,
					  &current_state);

	/* Expect 'info_get_retrieved' bitmap to be 0. */
	EXPECT_EQ(notifications->global.info_get_retrieved, 0U);
	EXPECT_EQ(notifications->global.pending, FFA_NOTIFICATION_MASK(10));
	EXPECT_EQ(ids_count, FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);
	EXPECT_EQ(current_state, FULL);

	got = vm_notifications_partition_get_pending(current_vm_locked,
						     is_from_vm, 0);
	EXPECT_EQ(got, FFA_NOTIFICATION_MASK(10));

	vm_unlock(&current_vm_locked);
}

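/**
 * Validates getting of notifications info for framework notifications.
 */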
TEST_F(vm, vm_notifications_info_get_from_framework)
{
	struct vm_locked vm_locked = vm_lock(vm_find_index(0));
	uint16_t ids[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t ids_count = 0;
	uint32_t lists_sizes[FFA_NOTIFICATIONS_INFO_GET_MAX_IDS] = {0};
	uint32_t lists_count = 0;

	vm_notifications_framework_set_pending(vm_locked, 0x1U);

	/* Get notifications info for the given notifications. */
	vm_notifications_info_get(vm_locked, ids, &ids_count, lists_sizes,
				  &lists_count,
				  FFA_NOTIFICATIONS_INFO_GET_MAX_IDS);

	EXPECT_EQ(ids[0], vm_locked.vm->id);
	EXPECT_EQ(ids_count, 1);
	EXPECT_EQ(lists_sizes[0], 0);
	EXPECT_EQ(lists_count, 1);

	EXPECT_EQ(vm_notifications_framework_get_pending(vm_locked), 0x1U);

	vm_unlock(&vm_locked);
}

} /* namespace */