/*
 * Copyright 2018 The Hafnium Authors.
 *
 * Use of this source code is governed by a BSD-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/BSD-3-Clause.
 */

#include "hf/ffa.h"

#include "hf/arch/mmu.h"

#include "hf/check.h"
#include "hf/mm.h"
#include "hf/static_assert.h"

#include "vmapi/hf/call.h"
#include "vmapi/hf/ffa_v1_0.h"

#include "test/hftest.h"
#include "test/vmapi/ffa.h"

static alignas(PAGE_SIZE) uint8_t send_page[PAGE_SIZE];
static alignas(PAGE_SIZE) uint8_t recv_page[PAGE_SIZE];
static_assert(sizeof(send_page) == PAGE_SIZE, "Send page is not a page.");
static_assert(sizeof(recv_page) == PAGE_SIZE, "Recv page is not a page.");

static hf_ipaddr_t send_page_addr = (hf_ipaddr_t)send_page;
static hf_ipaddr_t recv_page_addr = (hf_ipaddr_t)recv_page;

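/**
 * Map the statically allocated send and receive pages as the TX/RX buffer
 * pair for the current partition and return pointers to them.
 */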
struct mailbox_buffers set_up_mailbox(void)
{
	ASSERT_EQ(ffa_rxtx_map(send_page_addr, recv_page_addr).func,
		  FFA_SUCCESS_32);
	return (struct mailbox_buffers){
		.send = send_page,
		.recv = recv_page,
	};
}

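/**
 * Unmap the RX/TX buffer pair and clear the pointers in the given mailbox
 * structure.
 */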
void mailbox_unmap_buffers(struct mailbox_buffers *mb)
{
	ASSERT_EQ(ffa_rxtx_unmap().func, FFA_SUCCESS_32);
	mb->send = NULL;
	mb->recv = NULL;
}

/**
 * Try to receive a message from the mailbox, blocking if necessary, and
 * retrying if interrupted.
 */
void mailbox_receive_retry(void *buffer, size_t buffer_size, void *recv,
			   struct ffa_partition_rxtx_header *header)
{
	const struct ffa_partition_msg *message;
	const uint32_t *payload;
	ffa_id_t sender;
	struct ffa_value ret;
	ffa_notifications_bitmap_t fwk_notif = 0U;
	const ffa_id_t own_id = hf_vm_get_id();

	ASSERT_LE(buffer_size, FFA_MSG_PAYLOAD_MAX);
	ASSERT_TRUE(header != NULL);
	ASSERT_TRUE(recv != NULL);

	/* Check for a notification and wait if there are no messages. */
	while (fwk_notif == 0U) {
		ret = ffa_notification_get(
			own_id, 0,
			FFA_NOTIFICATION_FLAG_BITMAP_SPM |
				FFA_NOTIFICATION_FLAG_BITMAP_HYP);
		if (ret.func == FFA_SUCCESS_32) {
			fwk_notif = ffa_notification_get_from_framework(ret);
		}

		if (fwk_notif == 0U) {
			ffa_msg_wait();
		}
	}

	message = (const struct ffa_partition_msg *)recv;
	memcpy_s(header, sizeof(*header), message,
		 sizeof(struct ffa_partition_rxtx_header));

	sender = ffa_rxtx_header_sender(header);

	if (is_ffa_hyp_buffer_full_notification(fwk_notif)) {
		EXPECT_TRUE(ffa_is_vm_id(sender));
	} else {
		FAIL("Unexpected framework notification.\n");
	}

	/* Check receiver ID against own ID. */
	ASSERT_EQ(ffa_rxtx_header_receiver(header), own_id);
	ASSERT_LE(header->size, buffer_size);

	payload = (const uint32_t *)message->payload;

	/* Get message to free the RX buffer. */
	memcpy_s(buffer, buffer_size, payload, header->size);

	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
}

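/**
 * Send the remaining fragments of a memory region that did not fit in the
 * first fragment, then check that the returned handle was allocated by the
 * expected component.
 */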
void send_fragmented_memory_region(
	struct ffa_value *send_ret, void *tx_buffer,
	struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, uint32_t remaining_constituent_count,
	uint32_t sent_length, uint32_t total_length,
	ffa_memory_handle_t *handle, uint64_t allocator_mask)
{
	const ffa_memory_handle_t INVALID_FRAGMENT_HANDLE =
		0xffffffffffffffffULL;
	ffa_memory_handle_t fragment_handle = INVALID_FRAGMENT_HANDLE;
	uint32_t fragment_length;

	/* Send the remaining fragments. */
	while (remaining_constituent_count != 0) {
		dlog_verbose("%u constituents left to send.\n",
			     remaining_constituent_count);
		EXPECT_EQ(send_ret->func, FFA_MEM_FRAG_RX_32);
		if (fragment_handle == INVALID_FRAGMENT_HANDLE) {
			fragment_handle = ffa_frag_handle(*send_ret);
		} else {
			EXPECT_EQ(ffa_frag_handle(*send_ret), fragment_handle);
		}
		EXPECT_EQ(send_ret->arg3, sent_length);

		remaining_constituent_count = ffa_memory_fragment_init(
			tx_buffer, HF_MAILBOX_SIZE,
			constituents + constituent_count -
				remaining_constituent_count,
			remaining_constituent_count, &fragment_length);

		*send_ret = ffa_mem_frag_tx(fragment_handle, fragment_length);
		sent_length += fragment_length;
	}

	EXPECT_EQ(sent_length, total_length);
	EXPECT_EQ(send_ret->func, FFA_SUCCESS_32);
	*handle = ffa_mem_success_handle(*send_ret);
	EXPECT_EQ(*handle & FFA_MEMORY_HANDLE_ALLOCATOR_MASK, allocator_mask);
	if (fragment_handle != INVALID_FRAGMENT_HANDLE) {
		EXPECT_EQ(*handle, fragment_handle);
	}
}

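/**
 * Send memory to a set of receivers and then send each of them the retrieve
 * request it needs to retrieve the memory. Returns the memory region handle.
 */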
ffa_memory_handle_t send_memory_and_retrieve_request_multi_receiver(
	uint32_t share_func, void *tx_buffer, ffa_id_t sender,
	struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, struct ffa_memory_access receivers_send[],
	uint32_t receivers_send_count,
	struct ffa_memory_access receivers_retrieve[],
	uint32_t receivers_retrieve_count, ffa_memory_region_flags_t send_flags,
	ffa_memory_region_flags_t retrieve_flags,
	enum ffa_memory_type send_memory_type,
	enum ffa_memory_type receive_memory_type,
	enum ffa_memory_cacheability send_cacheability,
	enum ffa_memory_cacheability receive_cacheability)
{
	uint32_t total_length;
	uint32_t fragment_length;
	uint32_t msg_size;
	struct ffa_value ret;
	ffa_memory_handle_t handle;
	uint32_t remaining_constituent_count;
	uint32_t i;
	struct ffa_partition_msg *retrieve_message = tx_buffer;
	uint64_t allocator_mask;
	bool contains_secure_receiver = false;

	/* Send the first fragment of the memory. */
	remaining_constituent_count = ffa_memory_region_init(
		tx_buffer, HF_MAILBOX_SIZE, sender, receivers_send,
		receivers_send_count, sizeof(struct ffa_memory_access),
		constituents, constituent_count, 0, send_flags,
		send_memory_type, send_cacheability, FFA_MEMORY_INNER_SHAREABLE,
		&total_length, &fragment_length);

	if (remaining_constituent_count == 0) {
		EXPECT_EQ(total_length, fragment_length);
	}
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		ret = ffa_mem_donate(total_length, fragment_length);
		break;
	case FFA_MEM_LEND_32:
		ret = ffa_mem_lend(total_length, fragment_length);
		break;
	case FFA_MEM_SHARE_32:
		ret = ffa_mem_share(total_length, fragment_length);
		break;
	default:
		FAIL("Invalid share_func %#x.\n", share_func);
		/* Never reached, but needed to keep clang-analyzer happy. */
		return 0;
	}

	/* Check if any of the receivers is a secure endpoint. */
	for (i = 0; i < receivers_send_count; i++) {
		if (!ffa_is_vm_id(
			    receivers_send[i].receiver_permissions.receiver)) {
			contains_secure_receiver = true;
			break;
		}
	}

	/*
	 * If the sender is a secure endpoint, or at least one of the
	 * receivers in a multi-endpoint memory sharing is a secure endpoint,
	 * the allocator will be the SPMC. Otherwise, it will be the
	 * hypervisor.
	 */
	allocator_mask = (!ffa_is_vm_id(sender) || contains_secure_receiver)
				 ? FFA_MEMORY_HANDLE_ALLOCATOR_SPMC
				 : FFA_MEMORY_HANDLE_ALLOCATOR_HYPERVISOR;

	send_fragmented_memory_region(
		&ret, tx_buffer, constituents, constituent_count,
		remaining_constituent_count, fragment_length, total_length,
		&handle, allocator_mask);

	msg_size = ffa_memory_retrieve_request_init(
		(struct ffa_memory_region *)retrieve_message->payload, handle,
		sender, receivers_retrieve, receivers_retrieve_count,
		sizeof(struct ffa_memory_access), 0, retrieve_flags,
		receive_memory_type, receive_cacheability,
		FFA_MEMORY_INNER_SHAREABLE);

	for (i = 0; i < receivers_send_count; i++) {
		struct ffa_memory_region_attributes *receiver =
			&(receivers_send[i].receiver_permissions);
		dlog_verbose(
			"Sending the retrieve request message to receiver: "
			"%x\n",
			receiver->receiver);

		/*
		 * Send the appropriate retrieve request to the VM so that it
		 * can use it to retrieve the memory.
		 */
		EXPECT_LE(msg_size, HF_MAILBOX_SIZE);
		ffa_rxtx_header_init(sender, receiver->receiver, msg_size,
				     &retrieve_message->header);
		ASSERT_EQ(ffa_msg_send2(0).func, FFA_SUCCESS_32);
	}

	return handle;
}

/*
 * Helper function to send memory to a VM and then send it a message with the
 * retrieve request it needs to retrieve the memory.
 */
ffa_memory_handle_t send_memory_and_retrieve_request(
	uint32_t share_func, void *tx_buffer, ffa_id_t sender,
	ffa_id_t recipient, struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, ffa_memory_region_flags_t send_flags,
	ffa_memory_region_flags_t retrieve_flags,
	enum ffa_data_access send_data_access,
	enum ffa_data_access retrieve_data_access,
	enum ffa_instruction_access send_instruction_access,
	enum ffa_instruction_access retrieve_instruction_access,
	enum ffa_memory_type send_memory_type,
	enum ffa_memory_type receive_memory_type,
	enum ffa_memory_cacheability send_cacheability,
	enum ffa_memory_cacheability receive_cacheability)
{
	struct ffa_memory_access receiver_send_permissions;
	struct ffa_memory_access receiver_retrieve_permissions;
	/*
	 * Use the sender id as the impdef value so we can use this in later
	 * testing.
	 */
	struct ffa_memory_access_impdef impdef_val =
		ffa_memory_access_impdef_init(sender, sender + 1);

	ffa_memory_access_init(&receiver_send_permissions, recipient,
			       send_data_access, send_instruction_access, 0,
			       &impdef_val);

	ffa_memory_access_init(&receiver_retrieve_permissions, recipient,
			       retrieve_data_access,
			       retrieve_instruction_access, 0, &impdef_val);

	return send_memory_and_retrieve_request_multi_receiver(
		share_func, tx_buffer, sender, constituents, constituent_count,
		&receiver_send_permissions, 1, &receiver_retrieve_permissions,
		1, send_flags, retrieve_flags, send_memory_type,
		receive_memory_type, send_cacheability, receive_cacheability);
}

/*
 * Helper function to send memory to a VM and then send it a message with the
 * retrieve request it needs to retrieve the memory, forcing the request to be
 * made in at least two fragments even if it could fit in one.
 * TODO: check if it can be based on a base function like the above functions.
 */
ffa_memory_handle_t send_memory_and_retrieve_request_force_fragmented(
	uint32_t share_func, void *tx_buffer, ffa_id_t sender,
	ffa_id_t recipient, struct ffa_memory_region_constituent constituents[],
	uint32_t constituent_count, ffa_memory_region_flags_t flags,
	enum ffa_data_access send_data_access,
	enum ffa_data_access retrieve_data_access,
	enum ffa_instruction_access send_instruction_access,
	enum ffa_instruction_access retrieve_instruction_access)
{
	uint32_t total_length;
	uint32_t fragment_length;
	uint32_t msg_size;
	uint32_t remaining_constituent_count;
	struct ffa_value ret;
	ffa_memory_handle_t handle;
	struct ffa_partition_msg *retrieve_message;
	bool not_specify_memory_type = (share_func == FFA_MEM_DONATE_32) ||
				       (share_func == FFA_MEM_LEND_32);
	struct ffa_memory_access_impdef impdef_val =
		ffa_memory_access_impdef_init(sender, sender + 1);

	/* Send everything except the last constituent in the first fragment. */
	remaining_constituent_count = ffa_memory_region_init_single_receiver(
		tx_buffer, HF_MAILBOX_SIZE, sender, recipient, constituents,
		constituent_count, 0, flags, send_data_access,
		send_instruction_access,
		not_specify_memory_type ? FFA_MEMORY_NOT_SPECIFIED_MEM
					: FFA_MEMORY_NORMAL_MEM,
		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE,
		&impdef_val, &total_length, &fragment_length);
	EXPECT_EQ(remaining_constituent_count, 0);
	EXPECT_EQ(total_length, fragment_length);
	/* Don't include the last constituent in the first fragment. */
	fragment_length -= sizeof(struct ffa_memory_region_constituent);
	switch (share_func) {
	case FFA_MEM_DONATE_32:
		ret = ffa_mem_donate(total_length, fragment_length);
		break;
	case FFA_MEM_LEND_32:
		ret = ffa_mem_lend(total_length, fragment_length);
		break;
	case FFA_MEM_SHARE_32:
		ret = ffa_mem_share(total_length, fragment_length);
		break;
	default:
		FAIL("Invalid share_func %#x.\n", share_func);
		/* Never reached, but needed to keep clang-analyzer happy. */
		return 0;
	}
	EXPECT_EQ(ret.func, FFA_MEM_FRAG_RX_32);
	EXPECT_EQ(ret.arg3, fragment_length);

	handle = ffa_frag_handle(ret);

	/* Send the last constituent in a separate fragment. */
	remaining_constituent_count = ffa_memory_fragment_init(
		tx_buffer, HF_MAILBOX_SIZE,
		&constituents[constituent_count - 1], 1, &fragment_length);
	EXPECT_EQ(remaining_constituent_count, 0);
	ret = ffa_mem_frag_tx(handle, fragment_length);
	EXPECT_EQ(ret.func, FFA_SUCCESS_32);
	EXPECT_EQ(ffa_mem_success_handle(ret), handle);

	retrieve_message = (struct ffa_partition_msg *)tx_buffer;
	/*
	 * Send the appropriate retrieve request to the VM so that it can use
	 * it to retrieve the memory.
	 */
	msg_size = ffa_memory_retrieve_request_init_single_receiver(
		(struct ffa_memory_region *)retrieve_message->payload, handle,
		sender, recipient, 0, flags, retrieve_data_access,
		retrieve_instruction_access, FFA_MEMORY_NORMAL_MEM,
		FFA_MEMORY_CACHE_WRITE_BACK, FFA_MEMORY_INNER_SHAREABLE,
		&impdef_val);
	ffa_rxtx_header_init(sender, recipient, msg_size,
			     &retrieve_message->header);
	EXPECT_LE(msg_size, HF_MAILBOX_SIZE);
	ASSERT_EQ(ffa_msg_send2(0).func, FFA_SUCCESS_32);

	return handle;
}

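/**
 * Build the receiver permissions for a single receiver and send it the
 * retrieve request for the given handle as an indirect message.
 */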
void send_retrieve_request_single_receiver(
	void *send, ffa_memory_handle_t handle, ffa_id_t sender,
	ffa_id_t receiver, uint32_t tag, ffa_memory_region_flags_t flags,
	enum ffa_data_access data_access,
	enum ffa_instruction_access instruction_access,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability,
	struct ffa_memory_access_impdef *impdef_val)
{
	struct ffa_memory_access receiver_retrieve_permissions;

	ffa_memory_access_init(&receiver_retrieve_permissions, receiver,
			       data_access, instruction_access, 0, impdef_val);

	send_retrieve_request(send, handle, sender,
			      &receiver_retrieve_permissions, 1, tag, flags,
			      type, cacheability, shareability, receiver);
}

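/**
 * Initialize a memory retrieve request in the TX buffer and send it to the
 * given recipient as an indirect message.
 */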
void send_retrieve_request(
	void *send, ffa_memory_handle_t handle, ffa_id_t sender,
	struct ffa_memory_access receivers[], uint32_t receiver_count,
	uint32_t tag, ffa_memory_region_flags_t flags,
	enum ffa_memory_type type, enum ffa_memory_cacheability cacheability,
	enum ffa_memory_shareability shareability, ffa_id_t recipient)
{
	size_t msg_size;
	struct ffa_partition_msg *retrieve_message = send;

	msg_size = ffa_memory_retrieve_request_init(
		(struct ffa_memory_region *)retrieve_message->payload, handle,
		sender, receivers, receiver_count,
		sizeof(struct ffa_memory_access), tag, flags, type,
		cacheability, shareability);

	EXPECT_LE(msg_size, HF_MAILBOX_SIZE);

	ffa_rxtx_header_init(sender, recipient, msg_size,
			     &retrieve_message->header);

	ASSERT_EQ(ffa_msg_send2(0).func, FFA_SUCCESS_32);
}

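/**
 * Fetch a pending indirect message from the RX buffer, checking the framework
 * notification and the sender and receiver IDs. Returns NULL if no message is
 * pending.
 */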
static struct ffa_partition_msg *get_mailbox_message(void *recv)
{
	ffa_id_t sender;
	ffa_id_t receiver;
	struct ffa_partition_msg *msg = (struct ffa_partition_msg *)recv;
	ffa_id_t own_id = hf_vm_get_id();
	struct ffa_value ret =
		ffa_notification_get(own_id, 0,
				     FFA_NOTIFICATION_FLAG_BITMAP_HYP |
					     FFA_NOTIFICATION_FLAG_BITMAP_SPM);
	ffa_notifications_bitmap_t fwk_notif =
		ffa_notification_get_from_framework(ret);

	if (fwk_notif == 0U) {
		HFTEST_LOG("There are no framework notifications.");
		return NULL;
	}

	sender = ffa_rxtx_header_sender(&(msg->header));
	receiver = ffa_rxtx_header_receiver(&(msg->header));

	EXPECT_EQ(receiver, own_id);

	if (is_ffa_spm_buffer_full_notification(fwk_notif)) {
		EXPECT_FALSE(ffa_is_vm_id(sender));
	} else if (is_ffa_hyp_buffer_full_notification(fwk_notif)) {
		EXPECT_TRUE(ffa_is_vm_id(sender));
	}

	return msg;
}

/**
 * Retrieve a memory region descriptor from fragments in the rx buffer.
 * We keep building the memory region descriptor from the rx buffer until
 * the fragment offset matches the total length we expect.
 */
void memory_region_desc_from_rx_fragments(uint32_t fragment_length,
					  uint32_t total_length,
					  ffa_memory_handle_t handle,
					  void *memory_region, void *recv_buf,
					  uint32_t memory_region_max_size)
{
	struct ffa_value ret;
	uint32_t fragment_offset = fragment_length;

	while (fragment_offset < total_length) {
		ret = ffa_mem_frag_rx(handle, fragment_offset);
		EXPECT_EQ(ret.func, FFA_MEM_FRAG_TX_32);
		EXPECT_EQ(ffa_frag_handle(ret), handle);
		fragment_length = ret.arg3;
		EXPECT_GT(fragment_length, 0);
		ASSERT_LE(fragment_offset + fragment_length,
			  memory_region_max_size);
		/* Copy received fragment. */
		memcpy_s((uint8_t *)memory_region + fragment_offset,
			 memory_region_max_size - fragment_offset, recv_buf,
			 fragment_length);
		fragment_offset += fragment_length;
		ASSERT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);
	}
	EXPECT_EQ(fragment_offset, total_length);
}

/*
 * Retrieve a memory region from `recv_buf`. Copies all the fragments into
 * `memory_region_ret` if non-null, and checks that the total length of all
 * fragments is no more than `memory_region_max_size`.
 */
void retrieve_memory(void *recv_buf, ffa_memory_handle_t handle,
		     struct ffa_memory_region *memory_region_ret,
		     size_t memory_region_max_size, uint32_t msg_size)
{
	struct ffa_value ret;
	struct ffa_memory_region *memory_region;
	struct ffa_memory_access *receiver;
	uint32_t fragment_length;
	uint32_t total_length;
	ffa_id_t own_id = hf_vm_get_id();

	ret = ffa_mem_retrieve_req(msg_size, msg_size);
	ASSERT_EQ(ret.func, FFA_MEM_RETRIEVE_RESP_32);
	total_length = ret.arg1;
	fragment_length = ret.arg2;
	EXPECT_GE(fragment_length,
		  sizeof(struct ffa_memory_region) +
			  sizeof(struct ffa_memory_access_v1_0) +
			  sizeof(struct ffa_composite_memory_region));
	EXPECT_LE(fragment_length, HF_MAILBOX_SIZE);
	EXPECT_LE(fragment_length, total_length);
	memory_region = (struct ffa_memory_region *)recv_buf;
	EXPECT_EQ(memory_region->receiver_count, 1);
	receiver = ffa_memory_region_get_receiver(memory_region, 0);
	EXPECT_TRUE(receiver != NULL);
	EXPECT_EQ(receiver->receiver_permissions.receiver, own_id);

	/* Copy into the return buffer. */
	if (memory_region_ret != NULL) {
		memcpy_s(memory_region_ret, memory_region_max_size,
			 memory_region, fragment_length);
	}

	/*
	 * Release the RX buffer now that we have read everything we need from
	 * it.
	 */
	memory_region = NULL;
	ASSERT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);

	/* Retrieve the remaining fragments. */
	memory_region_desc_from_rx_fragments(fragment_length, total_length,
					     handle, memory_region_ret,
					     recv_buf, memory_region_max_size);
}

/*
 * Use the retrieve request from the receive buffer (`recv_buf`) to retrieve a
 * memory region which has been sent to us. Copies all the fragments into
 * `memory_region_ret` if non-null, and checks that the total length of all
 * fragments is no more than `memory_region_max_size`. Returns the sender, and
 * returns the handle via `ret_handle`.
 */
ffa_id_t retrieve_memory_from_message(
	void *recv_buf, void *send_buf, ffa_memory_handle_t *ret_handle,
	struct ffa_memory_region *memory_region_ret,
	size_t memory_region_max_size)
{
	uint32_t msg_size;
	ffa_id_t sender;
	struct ffa_memory_region *retrieve_request;
	ffa_memory_handle_t retrieved_handle;
	const struct ffa_partition_msg *retrv_message =
		get_mailbox_message(recv_buf);
	ffa_id_t own_id = hf_vm_get_id();

	ASSERT_TRUE(retrv_message != NULL);

	sender = ffa_rxtx_header_sender(&retrv_message->header);
	msg_size = retrv_message->header.size;

	retrieve_request = (struct ffa_memory_region *)retrv_message->payload;

	retrieved_handle = retrieve_request->handle;
	if (ret_handle != NULL) {
		*ret_handle = retrieved_handle;
	}
	memcpy_s(send_buf, HF_MAILBOX_SIZE, retrv_message->payload, msg_size);

	ASSERT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);

	retrieve_memory(recv_buf, retrieved_handle, memory_region_ret,
			memory_region_max_size, msg_size);

	/*
	 * If the sender is a VM and the receiver is an SP, the NS bit
	 * should be set in the retrieve response.
	 */
	if (!ffa_is_vm_id(own_id) && ffa_is_vm_id(sender) &&
	    memory_region_ret != NULL) {
		enum ffa_memory_security retrieved_security =
			memory_region_ret->attributes.security;

		EXPECT_EQ(retrieved_security, FFA_MEMORY_SECURITY_NON_SECURE);
	}

	return sender;
}

/*
 * Use the retrieve request from the receive buffer to retrieve a memory region
 * which has been sent to us, expecting it to fail with the given error code.
 * Returns the sender.
 */
ffa_id_t retrieve_memory_from_message_expect_fail(void *recv_buf,
						  void *send_buf,
						  enum ffa_error expected_error)
{
	uint32_t msg_size;
	struct ffa_value ret;
	ffa_id_t sender;
	struct ffa_memory_region *retrieve_request;
	const struct ffa_partition_msg *retrv_message =
		get_mailbox_message(recv_buf);

	ASSERT_TRUE(retrv_message != NULL);

	sender = ffa_rxtx_header_sender(&retrv_message->header);
	msg_size = retrv_message->header.size;

	retrieve_request = (struct ffa_memory_region *)retrv_message->payload;

	memcpy_s(send_buf, HF_MAILBOX_SIZE, retrieve_request, msg_size);
	ASSERT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);

	ret = ffa_mem_retrieve_req(msg_size, msg_size);
	EXPECT_FFA_ERROR(ret, expected_error);

	return sender;
}

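/**
 * Get partition information for the given UUID via FFA_PARTITION_INFO_GET,
 * copying the descriptors from the RX buffer into `info`. Returns the number
 * of partitions found, or 0 if the call fails.
 */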
ffa_vm_count_t get_ffa_partition_info(struct ffa_uuid *uuid,
				      struct ffa_partition_info *info,
				      size_t info_size, void *recv)
{
	struct ffa_value ret;
	struct ffa_partition_info *ret_info = recv;

	CHECK(uuid != NULL);
	CHECK(info != NULL);

	ret = ffa_partition_info_get(uuid, 0);

	if (ffa_func_id(ret) != FFA_SUCCESS_32) {
		return 0;
	}

	if (ret.arg2 != 0) {
		size_t src_size = ret.arg2 * sizeof(struct ffa_partition_info);
		size_t dest_size =
			info_size * sizeof(struct ffa_partition_info);

		memcpy_s(info, dest_size, ret_info, src_size);
	}

	ffa_rx_release();

	return ret.arg2;
}

/**
 * Dump the boot information passed to the partition.
 */
void dump_boot_info(struct ffa_boot_info_header *boot_info_header)
{
	struct ffa_boot_info_desc *boot_info_desc;

	if (boot_info_header == NULL) {
		HFTEST_LOG("SP doesn't have boot arguments!\n");
		return;
	}

	HFTEST_LOG("SP boot info (%lx):", (uintptr_t)boot_info_header);
	HFTEST_LOG("  Signature: %x", boot_info_header->signature);
	HFTEST_LOG("  Version: %x", boot_info_header->version);
	HFTEST_LOG("  Blob Size: %u", boot_info_header->info_blob_size);
	HFTEST_LOG("  Descriptor Size: %u", boot_info_header->desc_size);
	HFTEST_LOG("  Descriptor Count: %u", boot_info_header->desc_count);

	boot_info_desc = boot_info_header->boot_info;

	if (boot_info_desc == NULL) {
		dlog_error("Boot info descriptors missing.");
		return;
	}

	for (uint32_t i = 0; i < boot_info_header->desc_count; i++) {
		HFTEST_LOG("      Type: %u", boot_info_desc[i].type);
		HFTEST_LOG("      Flags:");
		HFTEST_LOG("        Name Format: %x",
			   ffa_boot_info_name_format(&boot_info_desc[i]));
		HFTEST_LOG("        Content Format: %x",
			   ffa_boot_info_content_format(&boot_info_desc[i]));
		HFTEST_LOG("      Size: %u", boot_info_desc[i].size);
		HFTEST_LOG("      Value: %lx", boot_info_desc[i].content);
	}
}

/**
 * Retrieve the boot info descriptor related to the provided type and type ID.
 */
struct ffa_boot_info_desc *get_boot_info_desc(
	struct ffa_boot_info_header *boot_info_header, uint8_t type,
	uint8_t type_id)
{
	struct ffa_boot_info_desc *boot_info_desc;

	assert(boot_info_header != NULL);

	ASSERT_EQ(boot_info_header->signature, 0xFFAU);
	ASSERT_GE(boot_info_header->version, 0x10001U);
	ASSERT_EQ(boot_info_header->desc_size,
		  sizeof(struct ffa_boot_info_desc));
	ASSERT_EQ((uintptr_t)boot_info_header + boot_info_header->desc_offset,
		  (uintptr_t)boot_info_header->boot_info);

	boot_info_desc = boot_info_header->boot_info;

	for (uint32_t i = 0; i < boot_info_header->desc_count; i++) {
		if (ffa_boot_info_type_id(&boot_info_desc[i]) == type_id &&
		    ffa_boot_info_type(&boot_info_desc[i]) == type) {
			return &boot_info_desc[i];
		}
	}

	return NULL;
}

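/**
 * Send an indirect message: write the partition message header and payload to
 * the TX buffer and transmit it with FFA_MSG_SEND2.
 */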
struct ffa_value send_indirect_message(ffa_id_t from, ffa_id_t to, void *send,
				       const void *payload, size_t payload_size,
				       uint32_t send_flags)
{
	struct ffa_partition_msg *message = (struct ffa_partition_msg *)send;

	/* Initialize message header. */
	ffa_rxtx_header_init(from, to, payload_size, &message->header);

	/* Fill TX buffer with payload. */
	memcpy_s(message->payload, FFA_PARTITION_MSG_PAYLOAD_MAX, payload,
		 payload_size);

	/* Send the message. */
	return ffa_msg_send2(send_flags);
}

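/**
 * Receive an indirect message from the RX buffer, expecting an RX buffer full
 * framework notification to be pending. Copies the payload into `buffer` and,
 * if `sender` is non-null, returns the sender ID through it.
 */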
void receive_indirect_message(void *buffer, size_t buffer_size, void *recv,
			      ffa_id_t *sender)
{
	const struct ffa_partition_msg *message;
	struct ffa_partition_rxtx_header header;
	ffa_id_t source_vm_id;
	const uint32_t *payload;
	struct ffa_value ret;
	ffa_notifications_bitmap_t fwk_notif;
	const ffa_id_t own_id = hf_vm_get_id();

	EXPECT_LE(buffer_size, FFA_MSG_PAYLOAD_MAX);

	/* Check notification. */
	ret = ffa_notification_get(own_id, 0,
				   FFA_NOTIFICATION_FLAG_BITMAP_SPM |
					   FFA_NOTIFICATION_FLAG_BITMAP_HYP);
	ASSERT_EQ(ret.func, FFA_SUCCESS_32);

	fwk_notif = ffa_notification_get_from_framework(ret);

	if (fwk_notif == 0U) {
		FAIL("Expected Rx buffer full notification.");
	}

	message = (const struct ffa_partition_msg *)recv;
	memcpy_s(&header, sizeof(header), message,
		 sizeof(struct ffa_partition_rxtx_header));

	source_vm_id = ffa_rxtx_header_sender(&header);

	if (is_ffa_hyp_buffer_full_notification(fwk_notif)) {
		EXPECT_TRUE(ffa_is_vm_id(source_vm_id));
	} else if (is_ffa_spm_buffer_full_notification(fwk_notif)) {
		EXPECT_FALSE(ffa_is_vm_id(source_vm_id));
	}

	/* Check receiver ID against own ID. */
	ASSERT_EQ(ffa_rxtx_header_receiver(&header), own_id);
	ASSERT_LE(header.size, buffer_size);

	payload = (const uint32_t *)message->payload;

	/* Get message to free the RX buffer. */
	memcpy_s(buffer, buffer_size, payload, header.size);

	EXPECT_EQ(ffa_rx_release().func, FFA_SUCCESS_32);

	if (sender != NULL) {
		*sender = source_vm_id;
	}
}

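/**
 * Decode the `idx`-th partition information descriptor from the registers
 * returned by FFA_PARTITION_INFO_GET_REGS. Each descriptor spans three
 * registers: one packs the partition ID, vCPU count and properties, and the
 * next two hold the low and high halves of the UUID.
 */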
bool ffa_partition_info_regs_get_part_info(
	struct ffa_value args, uint8_t idx,
	struct ffa_partition_info *partition_info)
{
	/* List of pointers to args in the return value. */
	uint64_t *arg_ptrs[15] = {
		&args.arg3,
		&args.arg4,
		&args.arg5,
		&args.arg6,
		&args.arg7,
		&args.extended_val.arg8,
		&args.extended_val.arg9,
		&args.extended_val.arg10,
		&args.extended_val.arg11,
		&args.extended_val.arg12,
		&args.extended_val.arg13,
		&args.extended_val.arg14,
		&args.extended_val.arg15,
		&args.extended_val.arg16,
		&args.extended_val.arg17,
	};

	/*
	 * Each partition information is encoded in 3 registers, so there can be
	 * a maximum of 5 entries.
	 */
	if (idx >= 5 || !partition_info) {
		return false;
	}

	uint64_t info = *(arg_ptrs[(ptrdiff_t)(idx * 3)]);
	uint64_t uuid_lo = *(arg_ptrs[(ptrdiff_t)(idx * 3) + 1]);
	uint64_t uuid_high = *(arg_ptrs[(ptrdiff_t)(idx * 3) + 2]);

	partition_info->vm_id = info & 0xFFFF;
	partition_info->vcpu_count = (info >> 16) & 0xFFFF;
	partition_info->properties = (info >> 32);
	partition_info->uuid.uuid[0] = uuid_lo & 0xFFFFFFFF;
	partition_info->uuid.uuid[1] = (uuid_lo >> 32) & 0xFFFFFFFF;
	partition_info->uuid.uuid[2] = uuid_high & 0xFFFFFFFF;
	partition_info->uuid.uuid[3] = (uuid_high >> 32) & 0xFFFFFFFF;

	return true;
}

/*
 * Update the security state in the S1 page tables based on the attributes
 * set in the memory region structure.
 */
void update_mm_security_state(struct ffa_composite_memory_region *composite,
			      ffa_memory_attributes_t attributes)
{
	if (attributes.security == FFA_MEMORY_SECURITY_NON_SECURE &&
	    !ffa_is_vm_id(hf_vm_get_id())) {
		for (uint32_t i = 0; i < composite->constituent_count; i++) {
			uint32_t mode;

			if (!hftest_mm_get_mode(
				    // NOLINTNEXTLINE(performance-no-int-to-ptr)
				    (const void *)composite->constituents[i]
					    .address,
				    FFA_PAGE_SIZE * composite->constituents[i]
							    .page_count,
				    &mode)) {
				FAIL("Couldn't get the mode of the "
				     "composite.\n");
			}

			hftest_mm_identity_map(
				// NOLINTNEXTLINE(performance-no-int-to-ptr)
				(const void *)composite->constituents[i]
					.address,
				FFA_PAGE_SIZE *
					composite->constituents[i].page_count,
				mode | MM_MODE_NS);
		}
	}
}