/*
 * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>

#include "../../services/std_svc/spm/el3_spmc/spmc.h"
#include "../../services/std_svc/spm/el3_spmc/spmc_shared_mem.h"
#include <arch_features.h>
#include <arch_helpers.h>
#include <bl32/tsp/tsp.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include "ffa_helpers.h"
#include <lib/psci/psci.h>
#include <lib/spinlock.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/platform.h>
#include <platform_tsp.h>
#include <services/ffa_svc.h>
#include "tsp_private.h"

#include <platform_def.h>

static ffa_endpoint_id16_t tsp_id, spmc_id;
uint8_t mem_region_buffer[4096 * 2] __aligned(PAGE_SIZE);

/* Partition Mailbox. */
static uint8_t send_page[PAGE_SIZE] __aligned(PAGE_SIZE);
static uint8_t recv_page[PAGE_SIZE] __aligned(PAGE_SIZE);

/*
 * Declare a global mailbox for use within the TSP.
 * This will be initialized appropriately when the buffers
 * are mapped with the SPMC.
 */
static struct mailbox mailbox;

/*******************************************************************************
 * This enum is used to handle test cases driven from the FF-A Test Driver.
 ******************************************************************************/
/* Keep in Sync with FF-A Test Driver. */
enum message_t {
	/* Partition Only Messages. */
	FF_A_RELAY_MESSAGE = 0,

	/* Basic Functionality. */
	FF_A_ECHO_MESSAGE,
	FF_A_RELAY_MESSAGE_EL3,

	/* Memory Sharing. */
	FF_A_MEMORY_SHARE,
	FF_A_MEMORY_SHARE_FRAGMENTED,
	FF_A_MEMORY_LEND,
	FF_A_MEMORY_LEND_FRAGMENTED,

	FF_A_MEMORY_SHARE_MULTI_ENDPOINT,
	FF_A_MEMORY_LEND_MULTI_ENDPOINT,

	LAST,
	FF_A_RUN_ALL = 255,
	FF_A_OP_MAX = 256
};

#if SPMC_AT_EL3
extern void tsp_cpu_on_entry(void);
#endif

/*******************************************************************************
 * Test Functions.
 ******************************************************************************/

/*******************************************************************************
 * Enable the TSP to forward the received message to another partition and ask
 * it to echo the value back in order to validate direct messaging
 * functionality.
 ******************************************************************************/
static int ffa_test_relay(uint64_t arg0,
			  uint64_t arg1,
			  uint64_t arg2,
			  uint64_t arg3,
			  uint64_t arg4,
			  uint64_t arg5,
			  uint64_t arg6,
			  uint64_t arg7)
{
	smc_args_t ffa_forward_result;
	ffa_endpoint_id16_t receiver = arg5;

	ffa_forward_result = ffa_msg_send_direct_req(ffa_endpoint_source(arg1),
						     receiver,
						     FF_A_ECHO_MESSAGE, arg4,
						     0, 0, 0);
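	/*
	 * The first payload register of the direct response (_regs[3])
	 * carries the value echoed back by the receiver.
	 */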
	return ffa_forward_result._regs[3];
}

/*******************************************************************************
 * This function handles memory management tests, currently share and lend.
 * It supports the use of FRAG_RX to handle memory descriptors that do not
 * fit in a single 4KB buffer.
 ******************************************************************************/
static int test_memory_send(ffa_endpoint_id16_t sender, uint64_t handle,
			    ffa_mtd_flag32_t flags, bool multi_endpoint)
{
	struct ffa_mtd *m;
	struct ffa_emad_v1_0 *receivers;
	struct ffa_comp_mrd *composite;
	int ret, status = 0;
	unsigned int mem_attrs;
	char *ptr;
	ffa_endpoint_id16_t source = sender;
	uint32_t total_length, recv_length = 0;

	/*
	 * In the case that we're testing multiple endpoints choose a partition
	 * ID that resides in the normal world so the SPMC won't detect it as
	 * invalid.
	 * TODO: Should get endpoint receiver id and flag as input from NWd.
	 */
	uint32_t receiver_count = multi_endpoint ? 2 : 1;
	ffa_endpoint_id16_t test_receivers[2] = { tsp_id, 0x10 };

	/* Ensure that the sender ID resides in the normal world. */
	if (ffa_is_secure_world_id(sender)) {
		ERROR("Invalid sender ID 0x%x.\n", sender);
		return FFA_ERROR_DENIED;
	}

	if (!memory_retrieve(&mailbox, &m, handle, source, test_receivers,
			     receiver_count, flags, &recv_length,
			     &total_length)) {
		return FFA_ERROR_INVALID_PARAMETER;
	}

	receivers = (struct ffa_emad_v1_0 *)
		    ((uint8_t *) m + m->emad_offset);
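	/*
	 * If the full transaction descriptor did not fit in the first
	 * response, fetch the remaining fragments with FFA_MEM_FRAG_RX until
	 * the whole descriptor has been received.
	 */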
	while (total_length != recv_length) {
		smc_args_t ffa_return;
		uint32_t frag_length;

		ffa_return = ffa_mem_frag_rx(handle, recv_length);

		if (ffa_return._regs[0] == FFA_ERROR) {
			WARN("TSP: failed to resume mem with handle %lx\n",
			     handle);
			return ffa_return._regs[2];
		}
		frag_length = ffa_return._regs[3];

		/* Validate frag_length is less than total_length and mailbox size. */
		if (frag_length > total_length ||
				frag_length > (mailbox.rxtx_page_count * PAGE_SIZE)) {
			ERROR("Invalid parameters!\n");
			return FFA_ERROR_INVALID_PARAMETER;
		}

		/* Validate frag_length is less than remaining mem_region_buffer size. */
		if (frag_length + recv_length >= REGION_BUF_SIZE) {
			ERROR("Out of memory!\n");
			return FFA_ERROR_INVALID_PARAMETER;
		}

		memcpy(&mem_region_buffer[recv_length], mailbox.rx_buffer,
		       frag_length);

		if (ffa_rx_release()) {
			ERROR("Failed to release buffer!\n");
			return FFA_ERROR_DENIED;
		}

		recv_length += frag_length;

		assert(recv_length <= total_length);
	}

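	/*
	 * Locate the composite memory region descriptor listing the
	 * constituent address ranges of the shared/lent memory.
	 */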
	composite = ffa_memory_region_get_composite(m, 0);
	if (composite == NULL) {
		WARN("Failed to get composite descriptor!\n");
		return FFA_ERROR_INVALID_PARAMETER;
	}

	VERBOSE("Address: %p; page_count: %x %lx\n",
		(void *)composite->address_range_array[0].address,
		composite->address_range_array[0].page_count, PAGE_SIZE);

	/* This test is only concerned with RW permissions. */
	if (ffa_get_data_access_attr(
	    receivers[0].mapd.memory_access_permissions) != FFA_MEM_PERM_RW) {
		ERROR("Data permission in retrieve response %x does not match share/lend %x!\n",
		      ffa_get_data_access_attr(receivers[0].mapd.memory_access_permissions),
		      FFA_MEM_PERM_RW);
		return FFA_ERROR_INVALID_PARAMETER;
	}

	mem_attrs = MT_RW_DATA | MT_EXECUTE_NEVER;

	/* Only expecting to be sent memory from NWd so map accordingly. */
	mem_attrs |= MT_NS;

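	/*
	 * Map each constituent address range dynamically and write to it to
	 * confirm that the mapping and access permissions are usable.
	 */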
	for (uint32_t i = 0U; i < composite->address_range_count; i++) {
		size_t size = composite->address_range_array[i].page_count * PAGE_SIZE;

		ptr = (char *) composite->address_range_array[i].address;
		ret = mmap_add_dynamic_region(
				(uint64_t)ptr,
				(uint64_t)ptr,
				size, mem_attrs);

		if (ret != 0) {
			ERROR("Failed [%u] mmap_add_dynamic_region %u (%lx) (%lx) (%x)!\n",
				i, ret,
				(uint64_t)composite->address_range_array[i].address,
				size, mem_attrs);

			/*
			 * Remove the mappings already created in this
			 * transaction, taking care not to underflow the
			 * unsigned index.
			 */
			while (i-- > 0U) {
				ret = mmap_remove_dynamic_region(
					(uint64_t)composite->address_range_array[i].address,
					composite->address_range_array[i].page_count * PAGE_SIZE);

				if (ret != 0) {
					ERROR("Failed [%d] mmap_remove_dynamic_region!\n", i);
					panic();
				}
			}
			return FFA_ERROR_NO_MEMORY;
		}

		/* Increment memory region for validation purposes. */
		++(*ptr);

		/*
		 * Record the magic number from the first region (after the
		 * increment above) for validation purposes.
		 */
		if (i == 0U) {
			status = *ptr;
		}
	}

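	/* Tear down the dynamic mappings created above. */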
	for (uint32_t i = 0U; i < composite->address_range_count; i++) {
		ret = mmap_remove_dynamic_region(
			(uint64_t)composite->address_range_array[i].address,
			composite->address_range_array[i].page_count * PAGE_SIZE);

		if (ret != 0) {
			ERROR("Failed [%d] mmap_remove_dynamic_region!\n", i);
			return FFA_ERROR_NO_MEMORY;
		}
	}

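	/*
	 * Relinquish the memory region so that its owner can reclaim it once
	 * the test has completed.
	 */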
	if (!memory_relinquish((struct ffa_mem_relinquish_descriptor *)mailbox.tx_buffer,
				m->handle, tsp_id)) {
		ERROR("Failed to relinquish memory region!\n");
		return FFA_ERROR_INVALID_PARAMETER;
	}
	return status;
}

static smc_args_t *send_ffa_pm_success(void)
{
	return set_smc_args(FFA_MSG_SEND_DIRECT_RESP_SMC32,
			    ((tsp_id & FFA_DIRECT_MSG_ENDPOINT_ID_MASK)
			    << FFA_DIRECT_MSG_SOURCE_SHIFT) | spmc_id,
			    FFA_FWK_MSG_BIT |
			    (FFA_PM_MSG_PM_RESP & FFA_FWK_MSG_MASK),
			    0, 0, 0, 0, 0);
}

/*******************************************************************************
 * This function performs any remaining bookkeeping in the test secure payload
 * before this cpu is turned off in response to a psci cpu_off request.
 ******************************************************************************/
smc_args_t *tsp_cpu_off_main(uint64_t arg0,
			     uint64_t arg1,
			     uint64_t arg2,
			     uint64_t arg3,
			     uint64_t arg4,
			     uint64_t arg5,
			     uint64_t arg6,
			     uint64_t arg7)
{
	uint32_t linear_id = plat_my_core_pos();

	/*
	 * This cpu is being turned off, so disable the timer to prevent the
	 * secure timer interrupt from interfering with power down. A pending
	 * interrupt will be lost but we do not care as we are turning off.
	 */
	tsp_generic_timer_stop();

	/* Update this cpu's statistics. */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_off_count++;

	INFO("TSP: cpu 0x%lx off request\n", read_mpidr());
	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu off requests\n",
		read_mpidr(),
		tsp_stats[linear_id].smc_count,
		tsp_stats[linear_id].eret_count,
		tsp_stats[linear_id].cpu_off_count);

	return send_ffa_pm_success();
}

/*******************************************************************************
 * This function performs any bookkeeping in the test secure payload before
 * this cpu's architectural state is saved in response to an earlier psci
 * cpu_suspend request.
 ******************************************************************************/
smc_args_t *tsp_cpu_suspend_main(uint64_t arg0,
				 uint64_t arg1,
				 uint64_t arg2,
				 uint64_t arg3,
				 uint64_t arg4,
				 uint64_t arg5,
				 uint64_t arg6,
				 uint64_t arg7)
{
	uint32_t linear_id = plat_my_core_pos();

	/*
	 * Save the timer context and disable it to prevent the secure timer
	 * interrupt from interfering with wakeup from the suspend state.
	 */
	tsp_generic_timer_save();
	tsp_generic_timer_stop();

	/* Update this cpu's statistics. */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_suspend_count++;

	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu suspend requests\n",
		read_mpidr(),
		tsp_stats[linear_id].smc_count,
		tsp_stats[linear_id].eret_count,
		tsp_stats[linear_id].cpu_suspend_count);

	return send_ffa_pm_success();
}

/*******************************************************************************
 * This function performs any bookkeeping in the test secure payload after this
 * cpu's architectural state has been restored after wakeup from an earlier psci
 * cpu_suspend request.
 ******************************************************************************/
smc_args_t *tsp_cpu_resume_main(uint64_t max_off_pwrlvl,
				uint64_t arg1,
				uint64_t arg2,
				uint64_t arg3,
				uint64_t arg4,
				uint64_t arg5,
				uint64_t arg6,
				uint64_t arg7)
{
	uint32_t linear_id = plat_my_core_pos();

	/* Restore the generic timer context. */
	tsp_generic_timer_restore();

	/* Update this cpu's statistics. */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_resume_count++;

	INFO("TSP: cpu 0x%lx resumed. maximum off power level %" PRId64 "\n",
	     read_mpidr(), max_off_pwrlvl);
	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu resume requests\n",
		read_mpidr(),
		tsp_stats[linear_id].smc_count,
		tsp_stats[linear_id].eret_count,
		tsp_stats[linear_id].cpu_resume_count);

	return send_ffa_pm_success();
}

/*******************************************************************************
 * This function handles framework messages. Currently only PM.
 ******************************************************************************/
static smc_args_t *handle_framework_message(uint64_t arg0,
					    uint64_t arg1,
					    uint64_t arg2,
					    uint64_t arg3,
					    uint64_t arg4,
					    uint64_t arg5,
					    uint64_t arg6,
					    uint64_t arg7)
{
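	/*
	 * Framework messages arrive as direct requests: arg1 carries the
	 * source/destination endpoint IDs, arg2 the framework message flags
	 * and arg3 the message payload (e.g. the PSCI function ID).
	 */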
	/* Check if it is a power management message from the SPMC. */
	if (ffa_endpoint_source(arg1) != spmc_id) {
		goto err;
	}

	/* Check if it is a PM request message. */
	if ((arg2 & FFA_FWK_MSG_MASK) == FFA_FWK_MSG_PSCI) {
		/* Check if it is a PSCI CPU_OFF request. */
		if (arg3 == PSCI_CPU_OFF) {
			return tsp_cpu_off_main(arg0, arg1, arg2, arg3,
						arg4, arg5, arg6, arg7);
		} else if (arg3 == PSCI_CPU_SUSPEND_AARCH64) {
			return tsp_cpu_suspend_main(arg0, arg1, arg2, arg3,
						arg4, arg5, arg6, arg7);
		}
	} else if ((arg2 & FFA_FWK_MSG_MASK) == FFA_PM_MSG_WB_REQ) {
		/* Check if it is a PSCI Warm Boot request. */
		if (arg3 == FFA_WB_TYPE_NOTS2RAM) {
			return tsp_cpu_resume_main(arg0, arg1, arg2, arg3,
						arg4, arg5, arg6, arg7);
		}
	}

err:
	ERROR("%s: Unknown framework message!\n", __func__);
	panic();
}

/*******************************************************************************
 * Handles partition messages. Exercised from the FF-A Test Driver.
 ******************************************************************************/
static smc_args_t *handle_partition_message(uint64_t arg0,
					    uint64_t arg1,
					    uint64_t arg2,
					    uint64_t arg3,
					    uint64_t arg4,
					    uint64_t arg5,
					    uint64_t arg6,
					    uint64_t arg7)
{
	uint16_t sender = ffa_endpoint_source(arg1);
	uint16_t receiver = ffa_endpoint_destination(arg1);
	int status = -1;
	const bool multi_endpoint = true;

	switch (arg3) {
	case FF_A_MEMORY_SHARE:
		INFO("TSP Tests: Memory Share Request--\n");
		status = test_memory_send(sender, arg4, FFA_FLAG_SHARE_MEMORY, !multi_endpoint);
		break;

	case FF_A_MEMORY_LEND:
		INFO("TSP Tests: Memory Lend Request--\n");
		status = test_memory_send(sender, arg4, FFA_FLAG_LEND_MEMORY, !multi_endpoint);
		break;

	case FF_A_MEMORY_SHARE_MULTI_ENDPOINT:
		INFO("TSP Tests: Multi Endpoint Memory Share Request--\n");
		status = test_memory_send(sender, arg4, FFA_FLAG_SHARE_MEMORY, multi_endpoint);
		break;

	case FF_A_MEMORY_LEND_MULTI_ENDPOINT:
		INFO("TSP Tests: Multi Endpoint Memory Lend Request--\n");
		status = test_memory_send(sender, arg4, FFA_FLAG_LEND_MEMORY, multi_endpoint);
		break;

	case FF_A_RELAY_MESSAGE:
		INFO("TSP Tests: Relaying message--\n");
		status = ffa_test_relay(arg0, arg1, arg2, arg3, arg4,
					arg5, arg6, arg7);
		break;

	case FF_A_ECHO_MESSAGE:
		INFO("TSP Tests: echo message--\n");
		status = arg4;
		break;

	default:
		INFO("TSP Tests: Unknown request ID %d--\n", (int) arg3);
	}

	/* Swap the sender and receiver in the response. */
	return ffa_msg_send_direct_resp(receiver, sender, status, 0, 0, 0, 0);
}

/*******************************************************************************
 * This function implements the event loop for handling FF-A ABI invocations.
 ******************************************************************************/
static smc_args_t *tsp_event_loop(uint64_t smc_fid,
				  uint64_t arg1,
				  uint64_t arg2,
				  uint64_t arg3,
				  uint64_t arg4,
				  uint64_t arg5,
				  uint64_t arg6,
				  uint64_t arg7)
{
	/* Panic if the SPMC did not forward an FF-A call. */
	if (!is_ffa_fid(smc_fid)) {
		ERROR("%s: Unknown SMC FID (0x%lx)\n", __func__, smc_fid);
		panic();
	}

	switch (smc_fid) {
	case FFA_INTERRUPT:
		/*
		 * IRQs were enabled upon re-entry into the TSP. The interrupt
		 * must have been handled by now. Return to the SPMC indicating
		 * the same.
		 */
		return set_smc_args(FFA_MSG_WAIT, 0, 0, 0, 0, 0, 0, 0);

	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
		/* If this is a framework message, handle it accordingly. */
		if ((arg2 & FFA_FWK_MSG_BIT)) {
			return handle_framework_message(smc_fid, arg1, arg2, arg3,
							arg4, arg5, arg6, arg7);
		}
		return handle_partition_message(smc_fid, arg1, arg2, arg3,
							arg4, arg5, arg6, arg7);
	}

	ERROR("%s: Unsupported FF-A FID (0x%lx)\n", __func__, smc_fid);
	panic();
}

static smc_args_t *tsp_loop(smc_args_t *args)
{
	smc_args_t ret;

	do {
		/* ---------------------------------------------
		 * Mask FIQs to avoid preemption in case the EL3
		 * SPMC delegates an IRQ or a managed exit next.
		 * Then unmask IRQs so that they can be handled
		 * immediately upon re-entry.
		 * ---------------------------------------------
		 */
		write_daifset(DAIF_FIQ_BIT);
		write_daifclr(DAIF_IRQ_BIT);
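		/*
		 * Convey the previous response to the SPMC and block until
		 * the next FF-A call is delegated to this partition.
		 */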
		ret = smc_helper(args->_regs[0], args->_regs[1], args->_regs[2],
			       args->_regs[3], args->_regs[4], args->_regs[5],
			       args->_regs[6], args->_regs[7]);
		args = tsp_event_loop(ret._regs[0], ret._regs[1], ret._regs[2],
				ret._regs[3], ret._regs[4], ret._regs[5],
				ret._regs[6], ret._regs[7]);
	} while (1);

	/* Not reached. */
	return NULL;
}

/*******************************************************************************
 * TSP main entry point where it gets the opportunity to initialize its secure
 * state/applications. Once the state is initialized, it must return to the
 * SPD with a pointer to the 'tsp_vector_table' jump table.
 ******************************************************************************/
uint64_t tsp_main(void)
{
	smc_args_t smc_args = {0};

	NOTICE("TSP: %s\n", version_string);
	NOTICE("TSP: %s\n", build_message);
	INFO("TSP: Total memory base : 0x%lx\n", (unsigned long) BL32_BASE);
	INFO("TSP: Total memory size : 0x%lx bytes\n", BL32_TOTAL_SIZE);
	uint32_t linear_id = plat_my_core_pos();

	/* Initialize the platform. */
	tsp_platform_setup();

	/* Initialize secure/applications state here. */
	tsp_generic_timer_start();

	/* Register the secondary entrypoint with the SPMC. */
	smc_args = smc_helper(FFA_SECONDARY_EP_REGISTER_SMC64,
			(uint64_t) tsp_cpu_on_entry,
			0, 0, 0, 0, 0, 0);
	if (smc_args._regs[SMC_ARG0] != FFA_SUCCESS_SMC32) {
		ERROR("TSP could not register secondary ep (0x%lx)\n",
				smc_args._regs[2]);
		panic();
	}
	/* Get the TSP's endpoint id. */
	smc_args = smc_helper(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0);
	if (smc_args._regs[SMC_ARG0] != FFA_SUCCESS_SMC32) {
		ERROR("TSP could not get own ID (0x%lx) on core%d\n",
				smc_args._regs[2], linear_id);
		panic();
	}

	tsp_id = smc_args._regs[2];
	INFO("TSP FF-A endpoint id = 0x%x\n", tsp_id);

	/* Get the SPMC ID. */
	smc_args = smc_helper(FFA_SPM_ID_GET, 0, 0, 0, 0, 0, 0, 0);
	if (smc_args._regs[SMC_ARG0] != FFA_SUCCESS_SMC32) {
		ERROR("TSP could not get SPMC ID (0x%lx) on core%d\n",
				smc_args._regs[2], linear_id);
		panic();
	}

	spmc_id = smc_args._regs[2];

	/* Call RXTX_MAP to map a 4k RX and TX buffer. */
	if (ffa_rxtx_map((uintptr_t) send_page,
			 (uintptr_t) recv_page, 1)) {
		ERROR("TSP could not map its RX/TX buffers\n");
		panic();
	}

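	/*
	 * Record the mapped buffers in the partition mailbox for use by the
	 * memory sharing tests.
	 */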
	mailbox.tx_buffer = send_page;
	mailbox.rx_buffer = recv_page;
	mailbox.rxtx_page_count = 1;

	/* Update this cpu's statistics. */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_on_count++;

	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
			read_mpidr(),
			tsp_stats[linear_id].smc_count,
			tsp_stats[linear_id].eret_count,
			tsp_stats[linear_id].cpu_on_count);

	/* Tell the SPMD that we are done initialising. */
	tsp_loop(set_smc_args(FFA_MSG_WAIT, 0, 0, 0, 0, 0, 0, 0));

	/* Not reached. */
	return 0;
}

/*******************************************************************************
 * This function performs any remaining bookkeeping in the test secure payload
 * after this cpu's architectural state has been set up in response to an
 * earlier psci cpu_on request.
 ******************************************************************************/
smc_args_t *tsp_cpu_on_main(void)
{
	uint32_t linear_id = plat_my_core_pos();

	/* Initialize secure/applications state here. */
	tsp_generic_timer_start();

	/* Update this cpu's statistics. */
	tsp_stats[linear_id].smc_count++;
	tsp_stats[linear_id].eret_count++;
	tsp_stats[linear_id].cpu_on_count++;
	INFO("TSP: cpu 0x%lx turned on\n", read_mpidr());
	INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
			read_mpidr(),
			tsp_stats[linear_id].smc_count,
			tsp_stats[linear_id].eret_count,
			tsp_stats[linear_id].cpu_on_count);
	/* ---------------------------------------------
	 * Jump to the main event loop to return to EL3
	 * and be ready for the next request on this cpu.
	 * ---------------------------------------------
	 */
	return tsp_loop(set_smc_args(FFA_MSG_WAIT, 0, 0, 0, 0, 0, 0, 0));
}