1 /*
2  * Copyright (c) 2022, ARM Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <errno.h>
9 
10 #include <arch_helpers.h>
11 #include <bl31/bl31.h>
12 #include <bl31/ehf.h>
13 #include <bl31/interrupt_mgmt.h>
14 #include <common/debug.h>
15 #include <common/fdt_wrappers.h>
16 #include <common/runtime_svc.h>
17 #include <common/uuid.h>
18 #include <lib/el3_runtime/context_mgmt.h>
19 #include <lib/smccc.h>
20 #include <lib/utils.h>
21 #include <lib/xlat_tables/xlat_tables_v2.h>
22 #include <libfdt.h>
23 #include <plat/common/platform.h>
24 #include <services/el3_spmc_logical_sp.h>
25 #include <services/ffa_svc.h>
26 #include <services/spmc_svc.h>
27 #include <services/spmd_svc.h>
28 #include "spmc.h"
29 #include "spmc_shared_mem.h"
30 
31 #include <platform_def.h>
32 
33 /* Declare the maximum number of SPs and EL3 LPs. */
34 #define MAX_SP_LP_PARTITIONS (SECURE_PARTITION_COUNT + MAX_EL3_LP_DESCS_COUNT)
35 
36 /*
37  * Allocate a secure partition descriptor to describe each SP in the system that
38  * does not reside at EL3.
39  */
40 static struct secure_partition_desc sp_desc[SECURE_PARTITION_COUNT];
41 
42 /*
43  * Allocate an NS endpoint descriptor to describe each VM and the Hypervisor in
44  * the system that interacts with a SP. It is used to track the Hypervisor
45  * buffer pair, version and ID for now. It could be extended to track VM
46  * properties when the SPMC supports indirect messaging.
47  */
48 static struct ns_endpoint_desc ns_ep_desc[NS_PARTITION_COUNT];
49 
50 static uint64_t spmc_sp_interrupt_handler(uint32_t id,
51 					  uint32_t flags,
52 					  void *handle,
53 					  void *cookie);
54 
55 /*
56  * Helper function to obtain the array storing the EL3
57  * Logical Partition descriptors.
58  */
59 struct el3_lp_desc *get_el3_lp_array(void)
60 {
61 	return (struct el3_lp_desc *) EL3_LP_DESCS_START;
62 }
63 
64 /*
65  * Helper function to obtain the descriptor of the last SP to which control
66  * was handed on this physical cpu. Currently, we assume there is only one SP.
67  * TODO: Expand to track multiple partitions when required.
68  */
69 struct secure_partition_desc *spmc_get_current_sp_ctx(void)
70 {
71 	return &(sp_desc[ACTIVE_SP_DESC_INDEX]);
72 }
73 
74 /*
75  * Helper function to obtain the execution context of an SP on the
76  * current physical cpu.
77  */
78 struct sp_exec_ctx *spmc_get_sp_ec(struct secure_partition_desc *sp)
79 {
80 	return &(sp->ec[get_ec_index(sp)]);
81 }
82 
83 /* Helper function to get pointer to SP context from its ID. */
84 struct secure_partition_desc *spmc_get_sp_ctx(uint16_t id)
85 {
86 	/* Check for Secure World Partitions. */
87 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
88 		if (sp_desc[i].sp_id == id) {
89 			return &(sp_desc[i]);
90 		}
91 	}
92 	return NULL;
93 }
94 
95 /*
96  * Helper function to obtain the descriptor of the Hypervisor or OS kernel.
97  * We assume that the first descriptor is reserved for this entity.
98  */
99 struct ns_endpoint_desc *spmc_get_hyp_ctx(void)
100 {
101 	return &(ns_ep_desc[0]);
102 }
103 
104 /*
105  * Helper function to obtain the RX/TX buffer pair descriptor of the Hypervisor
106  * or OS kernel in the normal world or the last SP that was run.
107  */
108 struct mailbox *spmc_get_mbox_desc(bool secure_origin)
109 {
110 	/* Obtain the RX/TX buffer pair descriptor. */
111 	if (secure_origin) {
112 		return &(spmc_get_current_sp_ctx()->mailbox);
113 	} else {
114 		return &(spmc_get_hyp_ctx()->mailbox);
115 	}
116 }
117 
118 /******************************************************************************
119  * This function returns to the place where spmc_sp_synchronous_entry() was
120  * called originally.
121  ******************************************************************************/
122 __dead2 void spmc_sp_synchronous_exit(struct sp_exec_ctx *ec, uint64_t rc)
123 {
124 	/*
125 	 * The SPM must have initiated the original request through a
126 	 * synchronous entry into the secure partition. Jump back to the
127 	 * original C runtime context with the value of rc in x0.
128 	 */
129 	spm_secure_partition_exit(ec->c_rt_ctx, rc);
130 
131 	panic();
132 }
133 
134 /*******************************************************************************
135  * Return FFA_ERROR with specified error code.
136  ******************************************************************************/
137 uint64_t spmc_ffa_error_return(void *handle, int error_code)
138 {
139 	SMC_RET8(handle, FFA_ERROR,
140 		 FFA_TARGET_INFO_MBZ, error_code,
141 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ, FFA_PARAM_MBZ,
142 		 FFA_PARAM_MBZ, FFA_PARAM_MBZ);
143 }
144 
145 /******************************************************************************
146  * Helper function to validate a secure partition ID to ensure it does not
147  * conflict with any other FF-A component and follows the convention to
148  * indicate it resides within the secure world.
149  ******************************************************************************/
150 bool is_ffa_secure_id_valid(uint16_t partition_id)
151 {
152 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
153 
154 	/* Ensure the ID is not the invalid partition ID. */
155 	if (partition_id == INV_SP_ID) {
156 		return false;
157 	}
158 
159 	/* Ensure the ID is not the SPMD ID. */
160 	if (partition_id == SPMD_DIRECT_MSG_ENDPOINT_ID) {
161 		return false;
162 	}
163 
164 	/*
165 	 * Ensure the ID follows the convention to indicate it resides
166 	 * in the secure world.
167 	 */
168 	if (!ffa_is_secure_world_id(partition_id)) {
169 		return false;
170 	}
171 
172 	/* Ensure we don't conflict with the SPMC partition ID. */
173 	if (partition_id == FFA_SPMC_ID) {
174 		return false;
175 	}
176 
177 	/* Ensure we do not already have an SP context with this ID. */
178 	if (spmc_get_sp_ctx(partition_id)) {
179 		return false;
180 	}
181 
182 	/* Ensure we don't clash with any Logical SPs. */
183 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
184 		if (el3_lp_descs[i].sp_id == partition_id) {
185 			return false;
186 		}
187 	}
188 
189 	return true;
190 }
191 
192 /*******************************************************************************
193  * This function either forwards the request to the other world or returns
194  * with an ERET depending on the source of the call.
195  * We can assume that the destination is for an entity at a lower exception
196  * level as any messages destined for a logical SP resident in EL3 will have
197  * already been taken care of by the SPMC before entering this function.
198  ******************************************************************************/
199 static uint64_t spmc_smc_return(uint32_t smc_fid,
200 				bool secure_origin,
201 				uint64_t x1,
202 				uint64_t x2,
203 				uint64_t x3,
204 				uint64_t x4,
205 				void *handle,
206 				void *cookie,
207 				uint64_t flags,
208 				uint16_t dst_id)
209 {
210 	/* If the destination is in the normal world always go via the SPMD. */
211 	if (ffa_is_normal_world_id(dst_id)) {
212 		return spmd_smc_handler(smc_fid, x1, x2, x3, x4,
213 					cookie, handle, flags);
214 	}
215 	/*
216 	 * If the caller is secure and we want to return to the secure world,
217 	 * ERET directly.
218 	 */
219 	else if (secure_origin && ffa_is_secure_world_id(dst_id)) {
220 		SMC_RET5(handle, smc_fid, x1, x2, x3, x4);
221 	}
222 	/* If we originated in the normal world then switch contexts. */
223 	else if (!secure_origin && ffa_is_secure_world_id(dst_id)) {
224 		return spmd_smc_switch_state(smc_fid, secure_origin, x1, x2,
225 					     x3, x4, handle);
226 	} else {
227 		/* Unknown State. */
228 		panic();
229 	}
230 
231 	/* Shouldn't be Reached. */
232 	return 0;
233 }
234 
235 /*******************************************************************************
236  * FF-A ABI Handlers.
237  ******************************************************************************/
238 
239 /*******************************************************************************
240  * Helper function to validate arg2 as part of a direct message.
241  ******************************************************************************/
242 static inline bool direct_msg_validate_arg2(uint64_t x2)
243 {
244 	/* Check message type. */
245 	if (x2 & FFA_FWK_MSG_BIT) {
246 		/* We have a framework message, ensure it is a known message. */
247 		if (x2 & ~(FFA_FWK_MSG_MASK | FFA_FWK_MSG_BIT)) {
248 			VERBOSE("Invalid message format 0x%lx.\n", x2);
249 			return false;
250 		}
251 	} else {
252 		/* We have a partition message, ensure x2 is not set. */
253 		if (x2 != (uint64_t) 0) {
254 			VERBOSE("Arg2 MBZ for partition messages. (0x%lx).\n",
255 				x2);
256 			return false;
257 		}
258 	}
259 	return true;
260 }
261 
262 /*******************************************************************************
263  * Handle direct request messages and route to the appropriate destination.
264  ******************************************************************************/
265 static uint64_t direct_req_smc_handler(uint32_t smc_fid,
266 				       bool secure_origin,
267 				       uint64_t x1,
268 				       uint64_t x2,
269 				       uint64_t x3,
270 				       uint64_t x4,
271 				       void *cookie,
272 				       void *handle,
273 				       uint64_t flags)
274 {
275 	uint16_t dst_id = ffa_endpoint_destination(x1);
276 	struct el3_lp_desc *el3_lp_descs;
277 	struct secure_partition_desc *sp;
278 	unsigned int idx;
279 
280 	/* Check if arg2 has been populated correctly based on message type. */
281 	if (!direct_msg_validate_arg2(x2)) {
282 		return spmc_ffa_error_return(handle,
283 					     FFA_ERROR_INVALID_PARAMETER);
284 	}
285 
286 	el3_lp_descs = get_el3_lp_array();
287 
288 	/* Check if the request is destined for a Logical Partition. */
289 	for (unsigned int i = 0U; i < MAX_EL3_LP_DESCS_COUNT; i++) {
290 		if (el3_lp_descs[i].sp_id == dst_id) {
291 			return el3_lp_descs[i].direct_req(
292 					smc_fid, secure_origin, x1, x2, x3, x4,
293 					cookie, handle, flags);
294 		}
295 	}
296 
297 	/*
298 	 * If the request was not targeted to an LSP and came from the secure
299 	 * world then it is invalid, since an SP cannot call into the Normal
300 	 * world and there is no other SP to call into. If there are other SPs
301 	 * in future, the partition runtime model would need to be validated too.
302 	 */
303 	if (secure_origin) {
304 		VERBOSE("Direct request not supported to the Normal World.\n");
305 		return spmc_ffa_error_return(handle,
306 					     FFA_ERROR_INVALID_PARAMETER);
307 	}
308 
309 	/* Check if the SP ID is valid. */
310 	sp = spmc_get_sp_ctx(dst_id);
311 	if (sp == NULL) {
312 		VERBOSE("Direct request to unknown partition ID (0x%x).\n",
313 			dst_id);
314 		return spmc_ffa_error_return(handle,
315 					     FFA_ERROR_INVALID_PARAMETER);
316 	}
317 
318 	/*
319 	 * Check that the target execution context is in a waiting state before
320 	 * forwarding the direct request to it.
321 	 */
322 	idx = get_ec_index(sp);
323 	if (sp->ec[idx].rt_state != RT_STATE_WAITING) {
324 		VERBOSE("SP context on core%u is not waiting (%u).\n",
325 			idx, sp->ec[idx].rt_model);
326 		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
327 	}
328 
329 	/*
330 	 * Everything checks out so forward the request to the SP after updating
331 	 * its state and runtime model.
332 	 */
333 	sp->ec[idx].rt_state = RT_STATE_RUNNING;
334 	sp->ec[idx].rt_model = RT_MODEL_DIR_REQ;
335 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
336 			       handle, cookie, flags, dst_id);
337 }
338 
339 /*******************************************************************************
340  * Handle direct response messages and route to the appropriate destination.
341  ******************************************************************************/
342 static uint64_t direct_resp_smc_handler(uint32_t smc_fid,
343 					bool secure_origin,
344 					uint64_t x1,
345 					uint64_t x2,
346 					uint64_t x3,
347 					uint64_t x4,
348 					void *cookie,
349 					void *handle,
350 					uint64_t flags)
351 {
352 	uint16_t dst_id = ffa_endpoint_destination(x1);
353 	struct secure_partition_desc *sp;
354 	unsigned int idx;
355 
356 	/* Check if arg2 has been populated correctly based on message type. */
357 	if (!direct_msg_validate_arg2(x2)) {
358 		return spmc_ffa_error_return(handle,
359 					     FFA_ERROR_INVALID_PARAMETER);
360 	}
361 
362 	/* Check that the response did not originate from the Normal world. */
363 	if (!secure_origin) {
364 		VERBOSE("Direct Response not supported from Normal World.\n");
365 		return spmc_ffa_error_return(handle,
366 					     FFA_ERROR_INVALID_PARAMETER);
367 	}
368 
369 	/*
370 	 * Check that the response is targeted either at the Normal world or at
371 	 * the SPMC, e.g. a PM response.
372 	 */
373 	if ((dst_id != FFA_SPMC_ID) && ffa_is_secure_world_id(dst_id)) {
374 		VERBOSE("Direct response to invalid partition ID (0x%x).\n",
375 			dst_id);
376 		return spmc_ffa_error_return(handle,
377 					     FFA_ERROR_INVALID_PARAMETER);
378 	}
379 
380 	/* Obtain the SP descriptor and update its runtime state. */
381 	sp = spmc_get_sp_ctx(ffa_endpoint_source(x1));
382 	if (sp == NULL) {
383 		VERBOSE("Direct response to unknown partition ID (0x%x).\n",
384 			dst_id);
385 		return spmc_ffa_error_return(handle,
386 					     FFA_ERROR_INVALID_PARAMETER);
387 	}
388 
389 	/* Sanity check state is being tracked correctly in the SPMC. */
390 	idx = get_ec_index(sp);
391 	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
392 
393 	/* Ensure SP execution context was in the right runtime model. */
394 	if (sp->ec[idx].rt_model != RT_MODEL_DIR_REQ) {
395 		VERBOSE("SP context on core%u not handling direct req (%u).\n",
396 			idx, sp->ec[idx].rt_model);
397 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
398 	}
399 
400 	/* Update the state of the SP execution context. */
401 	sp->ec[idx].rt_state = RT_STATE_WAITING;
402 
403 	/*
404 	 * If the receiver is the SPMC then conclude the synchronous entry into
405 	 * the SP, otherwise forward the response to the Normal world.
406 	 */
407 	if (dst_id == FFA_SPMC_ID) {
408 		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
409 		/* Should not get here. */
410 		panic();
411 	}
412 
413 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
414 			       handle, cookie, flags, dst_id);
415 }
416 
417 /*******************************************************************************
418  * This function handles the FFA_MSG_WAIT SMC to allow an SP to relinquish its
419  * cycles.
420  ******************************************************************************/
421 static uint64_t msg_wait_handler(uint32_t smc_fid,
422 				 bool secure_origin,
423 				 uint64_t x1,
424 				 uint64_t x2,
425 				 uint64_t x3,
426 				 uint64_t x4,
427 				 void *cookie,
428 				 void *handle,
429 				 uint64_t flags)
430 {
431 	struct secure_partition_desc *sp;
432 	unsigned int idx;
433 
434 	/*
435 	 * Check that the request did not originate from the Normal world as
436 	 * only the secure world can call this ABI.
437 	 */
438 	if (!secure_origin) {
439 		VERBOSE("Normal world cannot call FFA_MSG_WAIT.\n");
440 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
441 	}
442 
443 	/* Get the descriptor of the SP that invoked FFA_MSG_WAIT. */
444 	sp = spmc_get_current_sp_ctx();
445 	if (sp == NULL) {
446 		return spmc_ffa_error_return(handle,
447 					     FFA_ERROR_INVALID_PARAMETER);
448 	}
449 
450 	/*
451 	 * Get the execution context of the SP that invoked FFA_MSG_WAIT.
452 	 */
453 	idx = get_ec_index(sp);
454 
455 	/* Ensure SP execution context was in the right runtime model. */
456 	if (sp->ec[idx].rt_model == RT_MODEL_DIR_REQ) {
457 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
458 	}
459 
460 	/* Sanity check the state is being tracked correctly in the SPMC. */
461 	assert(sp->ec[idx].rt_state == RT_STATE_RUNNING);
462 
463 	/*
464 	 * Perform a synchronous exit if the partition was initialising. The
465 	 * state is updated after the exit.
466 	 */
467 	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
468 		spmc_sp_synchronous_exit(&sp->ec[idx], x4);
469 		/* Should not get here */
470 		panic();
471 	}
472 
473 	/* Update the state of the SP execution context. */
474 	sp->ec[idx].rt_state = RT_STATE_WAITING;
475 
476 	/* Resume normal world if a secure interrupt was handled. */
477 	if (sp->ec[idx].rt_model == RT_MODEL_INTR) {
478 		/* FFA_MSG_WAIT can only be called from the secure world. */
479 		unsigned int secure_state_in = SECURE;
480 		unsigned int secure_state_out = NON_SECURE;
481 
482 		cm_el1_sysregs_context_save(secure_state_in);
483 		cm_el1_sysregs_context_restore(secure_state_out);
484 		cm_set_next_eret_context(secure_state_out);
485 		SMC_RET0(cm_get_context(secure_state_out));
486 	}
487 
488 	/* Forward the response to the Normal world. */
489 	return spmc_smc_return(smc_fid, secure_origin, x1, x2, x3, x4,
490 			       handle, cookie, flags, FFA_NWD_ID);
491 }
492 
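/*
 * Handle FFA_ERROR. This is only expected from an SP that fails during its
 * initialisation, in which case the synchronous entry is concluded with the
 * reported error code.
 */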
493 static uint64_t ffa_error_handler(uint32_t smc_fid,
494 				 bool secure_origin,
495 				 uint64_t x1,
496 				 uint64_t x2,
497 				 uint64_t x3,
498 				 uint64_t x4,
499 				 void *cookie,
500 				 void *handle,
501 				 uint64_t flags)
502 {
503 	struct secure_partition_desc *sp;
504 	unsigned int idx;
505 
506 	/* Check that the call did not originate from the Normal world. */
507 	if (!secure_origin) {
508 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
509 	}
510 
511 	/* Get the descriptor of the SP that invoked FFA_ERROR. */
512 	sp = spmc_get_current_sp_ctx();
513 	if (sp == NULL) {
514 		return spmc_ffa_error_return(handle,
515 					     FFA_ERROR_INVALID_PARAMETER);
516 	}
517 
518 	/* Get the execution context of the SP that invoked FFA_ERROR. */
519 	idx = get_ec_index(sp);
520 
521 	/*
522 	 * We only expect FFA_ERROR to be received during SP initialisation,
523 	 * otherwise this is an invalid call.
524 	 */
525 	if (sp->ec[idx].rt_model == RT_MODEL_INIT) {
526 		ERROR("SP 0x%x failed to initialize.\n", sp->sp_id);
527 		spmc_sp_synchronous_exit(&sp->ec[idx], x2);
528 		/* Should not get here. */
529 		panic();
530 	}
531 
532 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
533 }
534 
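/*
 * Handle FFA_VERSION. Record the version requested by the caller where
 * appropriate and report the FF-A version implemented by the SPMC.
 */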
535 static uint64_t ffa_version_handler(uint32_t smc_fid,
536 				    bool secure_origin,
537 				    uint64_t x1,
538 				    uint64_t x2,
539 				    uint64_t x3,
540 				    uint64_t x4,
541 				    void *cookie,
542 				    void *handle,
543 				    uint64_t flags)
544 {
545 	uint32_t requested_version = x1 & FFA_VERSION_MASK;
546 
547 	if (requested_version & FFA_VERSION_BIT31_MASK) {
548 		/* Invalid encoding, return an error. */
549 		SMC_RET1(handle, FFA_ERROR_NOT_SUPPORTED);
550 		/* Execution stops here. */
551 	}
552 
553 	/* Determine the caller to store the requested version. */
554 	if (secure_origin) {
555 		/*
556 		 * Ensure that the SP is reporting the same version as
557 		 * specified in its manifest. If these do not match there is
558 		 * something wrong with the SP.
559 		 * TODO: Should we abort the SP? For now assert this is not
560 		 *       the case.
561 		 */
562 		assert(requested_version ==
563 		       spmc_get_current_sp_ctx()->ffa_version);
564 	} else {
565 		/*
566 		 * If this is called by the normal world, record this
567 		 * information in its descriptor.
568 		 */
569 		spmc_get_hyp_ctx()->ffa_version = requested_version;
570 	}
571 
572 	SMC_RET1(handle, MAKE_FFA_VERSION(FFA_VERSION_MAJOR,
573 					  FFA_VERSION_MINOR));
574 }
575 
576 /*******************************************************************************
577  * Helper function to obtain the FF-A version of the calling partition.
578  ******************************************************************************/
579 uint32_t get_partition_ffa_version(bool secure_origin)
580 {
581 	if (secure_origin) {
582 		return spmc_get_current_sp_ctx()->ffa_version;
583 	} else {
584 		return spmc_get_hyp_ctx()->ffa_version;
585 	}
586 }
587 
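/*
 * Handle FFA_RXTX_MAP. Validate the caller's RX/TX buffer pair and map it
 * into the EL3 translation regime.
 */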
588 static uint64_t rxtx_map_handler(uint32_t smc_fid,
589 				 bool secure_origin,
590 				 uint64_t x1,
591 				 uint64_t x2,
592 				 uint64_t x3,
593 				 uint64_t x4,
594 				 void *cookie,
595 				 void *handle,
596 				 uint64_t flags)
597 {
598 	int ret;
599 	uint32_t error_code;
600 	uint32_t mem_atts = secure_origin ? MT_SECURE : MT_NS;
601 	struct mailbox *mbox;
602 	uintptr_t tx_address = x1;
603 	uintptr_t rx_address = x2;
604 	uint32_t page_count = x3 & FFA_RXTX_PAGE_COUNT_MASK; /* Bits [5:0] */
605 	uint32_t buf_size = page_count * FFA_PAGE_SIZE;
606 
607 	/*
608 	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
609 	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
610 	 * ABI on behalf of a VM and reject it if this is the case.
611 	 */
612 	if (tx_address == 0 || rx_address == 0) {
613 		WARN("Mapping RX/TX Buffers on behalf of VM not supported.\n");
614 		return spmc_ffa_error_return(handle,
615 					     FFA_ERROR_INVALID_PARAMETER);
616 	}
617 
618 	/* Ensure the specified buffers are not the same. */
619 	if (tx_address == rx_address) {
620 		WARN("TX Buffer must not be the same as RX Buffer.\n");
621 		return spmc_ffa_error_return(handle,
622 					     FFA_ERROR_INVALID_PARAMETER);
623 	}
624 
625 	/* Ensure the buffer size is not 0. */
626 	if (buf_size == 0U) {
627 		WARN("Buffer size must not be 0\n");
628 		return spmc_ffa_error_return(handle,
629 					     FFA_ERROR_INVALID_PARAMETER);
630 	}
631 
632 	/*
633 	 * Ensure the buffer size is a multiple of the translation granule size
634 	 * in TF-A.
635 	 */
636 	if (buf_size % PAGE_SIZE != 0U) {
637 		WARN("Buffer size must be aligned to translation granule.\n");
638 		return spmc_ffa_error_return(handle,
639 					     FFA_ERROR_INVALID_PARAMETER);
640 	}
641 
642 	/* Obtain the RX/TX buffer pair descriptor. */
643 	mbox = spmc_get_mbox_desc(secure_origin);
644 
645 	spin_lock(&mbox->lock);
646 
647 	/* Check if buffers have already been mapped. */
648 	if (mbox->rx_buffer != 0 || mbox->tx_buffer != 0) {
649 		WARN("RX/TX Buffers already mapped (%p/%p)\n",
650 		     (void *) mbox->rx_buffer, (void *)mbox->tx_buffer);
651 		error_code = FFA_ERROR_DENIED;
652 		goto err;
653 	}
654 
655 	/* memmap the TX buffer as read only. */
656 	ret = mmap_add_dynamic_region(tx_address, /* PA */
657 			tx_address, /* VA */
658 			buf_size, /* size */
659 			mem_atts | MT_RO_DATA); /* attrs */
660 	if (ret != 0) {
661 		/* Return the correct error code. */
662 		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
663 						FFA_ERROR_INVALID_PARAMETER;
664 		WARN("Unable to map TX buffer: %d\n", error_code);
665 		goto err;
666 	}
667 
668 	/* memmap the RX buffer as read write. */
669 	ret = mmap_add_dynamic_region(rx_address, /* PA */
670 			rx_address, /* VA */
671 			buf_size, /* size */
672 			mem_atts | MT_RW_DATA); /* attrs */
673 
674 	if (ret != 0) {
675 		error_code = (ret == -ENOMEM) ? FFA_ERROR_NO_MEMORY :
676 						FFA_ERROR_INVALID_PARAMETER;
677 		WARN("Unable to map RX buffer: %d\n", error_code);
678 		/* Unmap the TX buffer again. */
679 		mmap_remove_dynamic_region(tx_address, buf_size);
680 		goto err;
681 	}
682 
683 	mbox->tx_buffer = (void *) tx_address;
684 	mbox->rx_buffer = (void *) rx_address;
685 	mbox->rxtx_page_count = page_count;
686 	spin_unlock(&mbox->lock);
687 
688 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
689 	/* Execution stops here. */
690 err:
691 	spin_unlock(&mbox->lock);
692 	return spmc_ffa_error_return(handle, error_code);
693 }
694 
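/*
 * Handle FFA_RXTX_UNMAP. Unmap the caller's RX/TX buffer pair from the EL3
 * translation regime and clear the mailbox descriptor.
 */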
695 static uint64_t rxtx_unmap_handler(uint32_t smc_fid,
696 				   bool secure_origin,
697 				   uint64_t x1,
698 				   uint64_t x2,
699 				   uint64_t x3,
700 				   uint64_t x4,
701 				   void *cookie,
702 				   void *handle,
703 				   uint64_t flags)
704 {
705 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
706 	uint32_t buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
707 
708 	/*
709 	 * The SPMC does not support mapping of VM RX/TX pairs to facilitate
710 	 * indirect messaging with SPs. Check if the Hypervisor has invoked this
711 	 * ABI on behalf of a VM and reject it if this is the case.
712 	 */
713 	if (x1 != 0UL) {
714 		return spmc_ffa_error_return(handle,
715 					     FFA_ERROR_INVALID_PARAMETER);
716 	}
717 
718 	spin_lock(&mbox->lock);
719 
720 	/* Check if buffers are currently mapped. */
721 	if (mbox->rx_buffer == 0 || mbox->tx_buffer == 0) {
722 		spin_unlock(&mbox->lock);
723 		return spmc_ffa_error_return(handle,
724 					     FFA_ERROR_INVALID_PARAMETER);
725 	}
726 
727 	/* Unmap RX Buffer */
728 	if (mmap_remove_dynamic_region((uintptr_t) mbox->rx_buffer,
729 				       buf_size) != 0) {
730 		WARN("Unable to unmap RX buffer!\n");
731 	}
732 
733 	mbox->rx_buffer = 0;
734 
735 	/* Unmap TX Buffer */
736 	if (mmap_remove_dynamic_region((uintptr_t) mbox->tx_buffer,
737 				       buf_size) != 0) {
738 		WARN("Unable to unmap TX buffer!\n");
739 	}
740 
741 	mbox->tx_buffer = 0;
742 	mbox->rxtx_page_count = 0;
743 
744 	spin_unlock(&mbox->lock);
745 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
746 }
747 
748 /*
749  * Collate the partition information in a v1.1 partition information
750  * descriptor format; this will be converted later if required.
751  */
752 static int partition_info_get_handler_v1_1(uint32_t *uuid,
753 					   struct ffa_partition_info_v1_1
754 						  *partitions,
755 					   uint32_t max_partitions,
756 					   uint32_t *partition_count)
757 {
758 	uint32_t index;
759 	struct ffa_partition_info_v1_1 *desc;
760 	bool null_uuid = is_null_uuid(uuid);
761 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
762 
763 	/* Deal with Logical Partitions. */
764 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
765 		if (null_uuid || uuid_match(uuid, el3_lp_descs[index].uuid)) {
766 			/* Found a matching UUID, populate appropriately. */
767 			if (*partition_count >= max_partitions) {
768 				return FFA_ERROR_NO_MEMORY;
769 			}
770 
771 			desc = &partitions[*partition_count];
772 			desc->ep_id = el3_lp_descs[index].sp_id;
773 			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
774 			desc->properties = el3_lp_descs[index].properties;
775 			if (null_uuid) {
776 				copy_uuid(desc->uuid, el3_lp_descs[index].uuid);
777 			}
778 			(*partition_count)++;
779 		}
780 	}
781 
782 	/* Deal with physical SPs. */
783 	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
784 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
785 			/* Found a matching UUID, populate appropriately. */
786 			if (*partition_count >= max_partitions) {
787 				return FFA_ERROR_NO_MEMORY;
788 			}
789 
790 			desc = &partitions[*partition_count];
791 			desc->ep_id = sp_desc[index].sp_id;
792 			/*
793 			 * Execution context count must match the number of
794 			 * cores for S-EL1 SPs.
795 			 */
796 			desc->execution_ctx_count = PLATFORM_CORE_COUNT;
797 			desc->properties = sp_desc[index].properties;
798 			if (null_uuid) {
799 				copy_uuid(desc->uuid, sp_desc[index].uuid);
800 			}
801 			(*partition_count)++;
802 		}
803 	}
804 	return 0;
805 }
806 
807 /*
808  * Handle the case where the caller only wants the count of partitions
809  * matching a given UUID and does not want the corresponding descriptors
810  * populated.
811  */
812 static uint32_t partition_info_get_handler_count_only(uint32_t *uuid)
813 {
814 	uint32_t index = 0;
815 	uint32_t partition_count = 0;
816 	bool null_uuid = is_null_uuid(uuid);
817 	struct el3_lp_desc *el3_lp_descs = get_el3_lp_array();
818 
819 	/* Deal with Logical Partitions. */
820 	for (index = 0U; index < EL3_LP_DESCS_COUNT; index++) {
821 		if (null_uuid ||
822 		    uuid_match(uuid, el3_lp_descs[index].uuid)) {
823 			(partition_count)++;
824 		}
825 	}
826 
827 	/* Deal with physical SPs. */
828 	for (index = 0U; index < SECURE_PARTITION_COUNT; index++) {
829 		if (null_uuid || uuid_match(uuid, sp_desc[index].uuid)) {
830 			(partition_count)++;
831 		}
832 	}
833 	return partition_count;
834 }
835 
836 /*
837  * If the caller of the PARTITION_INFO_GET ABI was a v1.0 caller, populate
838  * the corresponding descriptor format from the v1.1 descriptor array.
839  */
840 static uint64_t partition_info_populate_v1_0(struct ffa_partition_info_v1_1
841 					     *partitions,
842 					     struct mailbox *mbox,
843 					     int partition_count)
844 {
845 	uint32_t index;
846 	uint32_t buf_size;
847 	uint32_t descriptor_size;
848 	struct ffa_partition_info_v1_0 *v1_0_partitions =
849 		(struct ffa_partition_info_v1_0 *) mbox->rx_buffer;
850 
851 	buf_size = mbox->rxtx_page_count * FFA_PAGE_SIZE;
852 	descriptor_size = partition_count *
853 			  sizeof(struct ffa_partition_info_v1_0);
854 
855 	if (descriptor_size > buf_size) {
856 		return FFA_ERROR_NO_MEMORY;
857 	}
858 
859 	for (index = 0U; index < partition_count; index++) {
860 		v1_0_partitions[index].ep_id = partitions[index].ep_id;
861 		v1_0_partitions[index].execution_ctx_count =
862 			partitions[index].execution_ctx_count;
863 		v1_0_partitions[index].properties =
864 			partitions[index].properties;
865 	}
866 	return 0;
867 }
868 
869 /*
870  * Main handler for FFA_PARTITION_INFO_GET which supports both FF-A v1.1 and
871  * v1.0 implementations.
872  */
873 static uint64_t partition_info_get_handler(uint32_t smc_fid,
874 					   bool secure_origin,
875 					   uint64_t x1,
876 					   uint64_t x2,
877 					   uint64_t x3,
878 					   uint64_t x4,
879 					   void *cookie,
880 					   void *handle,
881 					   uint64_t flags)
882 {
883 	int ret;
884 	uint32_t partition_count = 0;
885 	uint32_t size = 0;
886 	uint32_t ffa_version = get_partition_ffa_version(secure_origin);
887 	struct mailbox *mbox;
888 	uint64_t info_get_flags;
889 	bool count_only;
890 	uint32_t uuid[4];
891 
892 	uuid[0] = x1;
893 	uuid[1] = x2;
894 	uuid[2] = x3;
895 	uuid[3] = x4;
896 
897 	/* Determine if the Partition descriptors should be populated. */
898 	info_get_flags = SMC_GET_GP(handle, CTX_GPREG_X5);
899 	count_only = (info_get_flags & FFA_PARTITION_INFO_GET_COUNT_FLAG_MASK);
900 
901 	/* Handle the case where we don't need to populate the descriptors. */
902 	if (count_only) {
903 		partition_count = partition_info_get_handler_count_only(uuid);
904 		if (partition_count == 0) {
905 			return spmc_ffa_error_return(handle,
906 						FFA_ERROR_INVALID_PARAMETER);
907 		}
908 	} else {
909 		struct ffa_partition_info_v1_1 partitions[MAX_SP_LP_PARTITIONS];
910 
911 		/*
912 		 * Handle the case where the partition descriptors are required,
913 		 * check we have the buffers available and populate the
914 		 * appropriate structure version.
915 		 */
916 
917 		/* Obtain the v1.1 format of the descriptors. */
918 		ret = partition_info_get_handler_v1_1(uuid, partitions,
919 						      MAX_SP_LP_PARTITIONS,
920 						      &partition_count);
921 
922 		/* Check if an error occurred during discovery. */
923 		if (ret != 0) {
924 			goto err;
925 		}
926 
927 		/* If we didn't find any matches the UUID is unknown. */
928 		if (partition_count == 0) {
929 			ret = FFA_ERROR_INVALID_PARAMETER;
930 			goto err;
931 		}
932 
933 		/* Obtain the partition mailbox RX/TX buffer pair descriptor. */
934 		mbox = spmc_get_mbox_desc(secure_origin);
935 
936 		/*
937 		 * If the caller has not bothered registering its RX/TX pair
938 		 * then return an error code.
939 		 */
940 		spin_lock(&mbox->lock);
941 		if (mbox->rx_buffer == NULL) {
942 			ret = FFA_ERROR_BUSY;
943 			goto err_unlock;
944 		}
945 
946 		/* Ensure the RX buffer is currently free. */
947 		if (mbox->state != MAILBOX_STATE_EMPTY) {
948 			ret = FFA_ERROR_BUSY;
949 			goto err_unlock;
950 		}
951 
952 		/* Zero the RX buffer before populating. */
953 		(void)memset(mbox->rx_buffer, 0,
954 			     mbox->rxtx_page_count * FFA_PAGE_SIZE);
955 
956 		/*
957 		 * Depending on the FF-A version of the requesting partition
958 		 * we may need to convert to a v1.0 format otherwise we can copy
959 		 * directly.
960 		 */
961 		if (ffa_version == MAKE_FFA_VERSION(U(1), U(0))) {
962 			ret = partition_info_populate_v1_0(partitions,
963 							   mbox,
964 							   partition_count);
965 			if (ret != 0) {
966 				goto err_unlock;
967 			}
968 		} else {
969 			uint32_t buf_size = mbox->rxtx_page_count *
970 					    FFA_PAGE_SIZE;
971 
972 			/* Ensure the descriptor will fit in the buffer. */
973 			size = sizeof(struct ffa_partition_info_v1_1);
974 			if (partition_count * size  > buf_size) {
975 				ret = FFA_ERROR_NO_MEMORY;
976 				goto err_unlock;
977 			}
978 			memcpy(mbox->rx_buffer, partitions,
979 			       partition_count * size);
980 		}
981 
982 		mbox->state = MAILBOX_STATE_FULL;
983 		spin_unlock(&mbox->lock);
984 	}
985 	SMC_RET4(handle, FFA_SUCCESS_SMC32, 0, partition_count, size);
986 
987 err_unlock:
988 	spin_unlock(&mbox->lock);
989 err:
990 	return spmc_ffa_error_return(handle, ret);
991 }
992 
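/* Helper to return FFA_SUCCESS with the given feature properties in x2. */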
993 static uint64_t ffa_feature_success(void *handle, uint32_t arg2)
994 {
995 	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0, arg2);
996 }
997 
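/*
 * Report the FFA_FEATURES properties for FFA_MEM_RETRIEVE_REQ, in particular
 * whether the NS bit is used, depending on the caller and its FF-A version.
 */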
998 static uint64_t ffa_features_retrieve_request(bool secure_origin,
999 					      uint32_t input_properties,
1000 					      void *handle)
1001 {
1002 	/*
1003 	 * If we're called by the normal world we don't support any
1004 	 * additional features.
1005 	 */
1006 	if (!secure_origin) {
1007 		if ((input_properties & FFA_FEATURES_RET_REQ_NS_BIT) != 0U) {
1008 			return spmc_ffa_error_return(handle,
1009 						     FFA_ERROR_NOT_SUPPORTED);
1010 		}
1011 
1012 	} else {
1013 		struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
1014 		/*
1015 		 * If v1.1 the NS bit must be set otherwise it is an invalid
1016 		 * call. If v1.0 check and store whether the SP has requested
1017 		 * the use of the NS bit.
1018 		 */
1019 		if (sp->ffa_version == MAKE_FFA_VERSION(1, 1)) {
1020 			if ((input_properties &
1021 			     FFA_FEATURES_RET_REQ_NS_BIT) == 0U) {
1022 				return spmc_ffa_error_return(handle,
1023 						       FFA_ERROR_NOT_SUPPORTED);
1024 			}
1025 			return ffa_feature_success(handle,
1026 						   FFA_FEATURES_RET_REQ_NS_BIT);
1027 		} else {
1028 			sp->ns_bit_requested = (input_properties &
1029 					       FFA_FEATURES_RET_REQ_NS_BIT) !=
1030 					       0U;
1031 		}
1032 		if (sp->ns_bit_requested) {
1033 			return ffa_feature_success(handle,
1034 						   FFA_FEATURES_RET_REQ_NS_BIT);
1035 		}
1036 	}
1037 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1038 }
1039 
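/*
 * Handle FFA_FEATURES. Report whether the queried FF-A ABI is implemented by
 * the SPMC for the calling world.
 */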
1040 static uint64_t ffa_features_handler(uint32_t smc_fid,
1041 				     bool secure_origin,
1042 				     uint64_t x1,
1043 				     uint64_t x2,
1044 				     uint64_t x3,
1045 				     uint64_t x4,
1046 				     void *cookie,
1047 				     void *handle,
1048 				     uint64_t flags)
1049 {
1050 	uint32_t function_id = (uint32_t) x1;
1051 	uint32_t input_properties = (uint32_t) x2;
1052 
1053 	/* Check if a Feature ID was requested. */
1054 	if ((function_id & FFA_FEATURES_BIT31_MASK) == 0U) {
1055 		/* We currently don't support any additional features. */
1056 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1057 	}
1058 
1059 	/*
1060 	 * Handle the cases where we have separate handlers due to additional
1061 	 * properties.
1062 	 */
1063 	switch (function_id) {
1064 	case FFA_MEM_RETRIEVE_REQ_SMC32:
1065 	case FFA_MEM_RETRIEVE_REQ_SMC64:
1066 		return ffa_features_retrieve_request(secure_origin,
1067 						     input_properties,
1068 						     handle);
1069 	}
1070 
1071 	/*
1072 	 * We don't currently support additional input properties for these
1073 	 * other ABIs therefore ensure this value is set to 0.
1074 	 */
1075 	if (input_properties != 0U) {
1076 		return spmc_ffa_error_return(handle,
1077 					     FFA_ERROR_NOT_SUPPORTED);
1078 	}
1079 
1080 	/* Report if any other FF-A ABI is supported. */
1081 	switch (function_id) {
1082 	/* Supported features from both worlds. */
1083 	case FFA_ERROR:
1084 	case FFA_SUCCESS_SMC32:
1085 	case FFA_INTERRUPT:
1086 	case FFA_SPM_ID_GET:
1087 	case FFA_ID_GET:
1088 	case FFA_FEATURES:
1089 	case FFA_VERSION:
1090 	case FFA_RX_RELEASE:
1091 	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
1092 	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
1093 	case FFA_PARTITION_INFO_GET:
1094 	case FFA_RXTX_MAP_SMC32:
1095 	case FFA_RXTX_MAP_SMC64:
1096 	case FFA_RXTX_UNMAP:
1097 	case FFA_MEM_FRAG_TX:
1098 	case FFA_MSG_RUN:
1099 
1100 		/*
1101 		 * We are relying on the fact that the other registers
1102 		 * will be set to 0 as these values align with the
1103 		 * currently implemented features of the SPMC. If this
1104 		 * changes this function must be extended to handle
1105 		 * reporting the additional functionality.
1106 		 */
1107 
1108 		SMC_RET1(handle, FFA_SUCCESS_SMC32);
1109 		/* Execution stops here. */
1110 
1111 	/* Supported ABIs only from the secure world. */
1112 	case FFA_SECONDARY_EP_REGISTER_SMC64:
1113 	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
1114 	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
1115 	case FFA_MEM_RELINQUISH:
1116 	case FFA_MSG_WAIT:
1117 
1118 		if (!secure_origin) {
1119 			return spmc_ffa_error_return(handle,
1120 				FFA_ERROR_NOT_SUPPORTED);
1121 		}
1122 		SMC_RET1(handle, FFA_SUCCESS_SMC32);
1123 		/* Execution stops here. */
1124 
1125 	/* Supported features only from the normal world. */
1126 	case FFA_MEM_SHARE_SMC32:
1127 	case FFA_MEM_SHARE_SMC64:
1128 	case FFA_MEM_LEND_SMC32:
1129 	case FFA_MEM_LEND_SMC64:
1130 	case FFA_MEM_RECLAIM:
1131 	case FFA_MEM_FRAG_RX:
1132 
1133 		if (secure_origin) {
1134 			return spmc_ffa_error_return(handle,
1135 					FFA_ERROR_NOT_SUPPORTED);
1136 		}
1137 		SMC_RET1(handle, FFA_SUCCESS_SMC32);
1138 		/* Execution stops here. */
1139 
1140 	default:
1141 		return spmc_ffa_error_return(handle,
1142 					FFA_ERROR_NOT_SUPPORTED);
1143 	}
1144 }
1145 
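/* Handle FFA_ID_GET by returning the FF-A ID of the calling endpoint. */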
1146 static uint64_t ffa_id_get_handler(uint32_t smc_fid,
1147 				   bool secure_origin,
1148 				   uint64_t x1,
1149 				   uint64_t x2,
1150 				   uint64_t x3,
1151 				   uint64_t x4,
1152 				   void *cookie,
1153 				   void *handle,
1154 				   uint64_t flags)
1155 {
1156 	if (secure_origin) {
1157 		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1158 			 spmc_get_current_sp_ctx()->sp_id);
1159 	} else {
1160 		SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0,
1161 			 spmc_get_hyp_ctx()->ns_ep_id);
1162 	}
1163 }
1164 
1165 /*
1166  * Enable an SP to query the ID assigned to the SPMC.
1167  */
1168 static uint64_t ffa_spm_id_get_handler(uint32_t smc_fid,
1169 				       bool secure_origin,
1170 				       uint64_t x1,
1171 				       uint64_t x2,
1172 				       uint64_t x3,
1173 				       uint64_t x4,
1174 				       void *cookie,
1175 				       void *handle,
1176 				       uint64_t flags)
1177 {
1178 	assert(x1 == 0UL);
1179 	assert(x2 == 0UL);
1180 	assert(x3 == 0UL);
1181 	assert(x4 == 0UL);
1182 	assert(SMC_GET_GP(handle, CTX_GPREG_X5) == 0UL);
1183 	assert(SMC_GET_GP(handle, CTX_GPREG_X6) == 0UL);
1184 	assert(SMC_GET_GP(handle, CTX_GPREG_X7) == 0UL);
1185 
1186 	SMC_RET3(handle, FFA_SUCCESS_SMC32, 0x0, FFA_SPMC_ID);
1187 }
1188 
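/*
 * Handle FFA_RUN. Validate the target execution context and resume it if it
 * is not already running.
 */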
1189 static uint64_t ffa_run_handler(uint32_t smc_fid,
1190 				bool secure_origin,
1191 				uint64_t x1,
1192 				uint64_t x2,
1193 				uint64_t x3,
1194 				uint64_t x4,
1195 				void *cookie,
1196 				void *handle,
1197 				uint64_t flags)
1198 {
1199 	struct secure_partition_desc *sp;
1200 	uint16_t target_id = FFA_RUN_EP_ID(x1);
1201 	uint16_t vcpu_id = FFA_RUN_VCPU_ID(x1);
1202 	unsigned int idx;
1203 	unsigned int *rt_state;
1204 	unsigned int *rt_model;
1205 
1206 	/* Can only be called from the normal world. */
1207 	if (secure_origin) {
1208 		ERROR("FFA_RUN can only be called from NWd.\n");
1209 		return spmc_ffa_error_return(handle,
1210 					     FFA_ERROR_INVALID_PARAMETER);
1211 	}
1212 
1213 	/* Cannot run a Normal world partition. */
1214 	if (ffa_is_normal_world_id(target_id)) {
1215 		ERROR("Cannot run a NWd partition (0x%x).\n", target_id);
1216 		return spmc_ffa_error_return(handle,
1217 					     FFA_ERROR_INVALID_PARAMETER);
1218 	}
1219 
1220 	/* Check that the target SP exists. */
1221 	sp = spmc_get_sp_ctx(target_id);
1222 	if (sp == NULL) {
1223 		ERROR("Unknown partition ID (0x%x).\n", target_id);
1224 		return spmc_ffa_error_return(handle,
1225 					     FFA_ERROR_INVALID_PARAMETER);
1226 	}
1227 
1228 	idx = get_ec_index(sp);
1229 	if (idx != vcpu_id) {
1230 		ERROR("Cannot run vcpu %d != %d.\n", idx, vcpu_id);
1231 		return spmc_ffa_error_return(handle,
1232 					     FFA_ERROR_INVALID_PARAMETER);
1233 	}
1234 	rt_state = &((sp->ec[idx]).rt_state);
1235 	rt_model = &((sp->ec[idx]).rt_model);
1236 	if (*rt_state == RT_STATE_RUNNING) {
1237 		ERROR("Partition (0x%x) is already running.\n", target_id);
1238 		return spmc_ffa_error_return(handle, FFA_ERROR_BUSY);
1239 	}
1240 
1241 	/*
1242 	 * Sanity check that if the execution context was not waiting then it
1243 	 * was either in the direct request or the run partition runtime model.
1244 	 */
1245 	if (*rt_state == RT_STATE_PREEMPTED || *rt_state == RT_STATE_BLOCKED) {
1246 		assert(*rt_model == RT_MODEL_RUN ||
1247 		       *rt_model == RT_MODEL_DIR_REQ);
1248 	}
1249 
1250 	/*
1251 	 * If the context was waiting then update the partition runtime model.
1252 	 */
1253 	if (*rt_state == RT_STATE_WAITING) {
1254 		*rt_model = RT_MODEL_RUN;
1255 	}
1256 
1257 	/*
1258 	 * Forward the request to the correct SP vCPU after updating
1259 	 * its state.
1260 	 */
1261 	*rt_state = RT_STATE_RUNNING;
1262 
1263 	return spmc_smc_return(smc_fid, secure_origin, x1, 0, 0, 0,
1264 			       handle, cookie, flags, target_id);
1265 }
1266 
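/*
 * Handle FFA_RX_RELEASE. The caller relinquishes ownership of its RX buffer
 * so that the SPMC can reuse it.
 */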
1267 static uint64_t rx_release_handler(uint32_t smc_fid,
1268 				   bool secure_origin,
1269 				   uint64_t x1,
1270 				   uint64_t x2,
1271 				   uint64_t x3,
1272 				   uint64_t x4,
1273 				   void *cookie,
1274 				   void *handle,
1275 				   uint64_t flags)
1276 {
1277 	struct mailbox *mbox = spmc_get_mbox_desc(secure_origin);
1278 
1279 	spin_lock(&mbox->lock);
1280 
1281 	if (mbox->state != MAILBOX_STATE_FULL) {
1282 		spin_unlock(&mbox->lock);
1283 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1284 	}
1285 
1286 	mbox->state = MAILBOX_STATE_EMPTY;
1287 	spin_unlock(&mbox->lock);
1288 
1289 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1290 }
1291 
1292 /*
1293  * Perform initial validation on the provided secondary entry point.
1294  * For now ensure it does not lie within the BL31 Image or the SP's
1295  * RX/TX buffers as these are mapped within EL3.
1296  * TODO: perform validation for additional invalid memory regions.
1297  */
1298 static int validate_secondary_ep(uintptr_t ep, struct secure_partition_desc *sp)
1299 {
1300 	struct mailbox *mb;
1301 	uintptr_t buffer_size;
1302 	uintptr_t sp_rx_buffer;
1303 	uintptr_t sp_tx_buffer;
1304 	uintptr_t sp_rx_buffer_limit;
1305 	uintptr_t sp_tx_buffer_limit;
1306 
1307 	mb = &sp->mailbox;
1308 	buffer_size = (uintptr_t) (mb->rxtx_page_count * FFA_PAGE_SIZE);
1309 	sp_rx_buffer = (uintptr_t) mb->rx_buffer;
1310 	sp_tx_buffer = (uintptr_t) mb->tx_buffer;
1311 	sp_rx_buffer_limit = sp_rx_buffer + buffer_size;
1312 	sp_tx_buffer_limit = sp_tx_buffer + buffer_size;
1313 
1314 	/*
1315 	 * Check if the entry point lies within BL31, or the
1316 	 * SP's RX or TX buffer.
1317 	 */
1318 	if ((ep >= BL31_BASE && ep < BL31_LIMIT) ||
1319 	    (ep >= sp_rx_buffer && ep < sp_rx_buffer_limit) ||
1320 	    (ep >= sp_tx_buffer && ep < sp_tx_buffer_limit)) {
1321 		return -EINVAL;
1322 	}
1323 	return 0;
1324 }
1325 
1326 /*******************************************************************************
1327  * This function handles the FFA_SECONDARY_EP_REGISTER SMC to allow an SP to
1328  * register an entry point for initialization during a secondary cold boot.
1329  ******************************************************************************/
1330 static uint64_t ffa_sec_ep_register_handler(uint32_t smc_fid,
1331 					    bool secure_origin,
1332 					    uint64_t x1,
1333 					    uint64_t x2,
1334 					    uint64_t x3,
1335 					    uint64_t x4,
1336 					    void *cookie,
1337 					    void *handle,
1338 					    uint64_t flags)
1339 {
1340 	struct secure_partition_desc *sp;
1341 	struct sp_exec_ctx *sp_ctx;
1342 
1343 	/* This request cannot originate from the Normal world. */
1344 	if (!secure_origin) {
1345 		WARN("%s: Can only be called from SWd.\n", __func__);
1346 		return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1347 	}
1348 
1349 	/* Get the context of the current SP. */
1350 	sp = spmc_get_current_sp_ctx();
1351 	if (sp == NULL) {
1352 		WARN("%s: Cannot find SP context.\n", __func__);
1353 		return spmc_ffa_error_return(handle,
1354 					     FFA_ERROR_INVALID_PARAMETER);
1355 	}
1356 
1357 	/* Only an S-EL1 SP should be invoking this ABI. */
1358 	if (sp->runtime_el != S_EL1) {
1359 		WARN("%s: Can only be called for a S-EL1 SP.\n", __func__);
1360 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1361 	}
1362 
1363 	/* Ensure the SP is in its initialization state. */
1364 	sp_ctx = spmc_get_sp_ec(sp);
1365 	if (sp_ctx->rt_model != RT_MODEL_INIT) {
1366 		WARN("%s: Can only be called during SP initialization.\n",
1367 		     __func__);
1368 		return spmc_ffa_error_return(handle, FFA_ERROR_DENIED);
1369 	}
1370 
1371 	/* Perform initial validation of the secondary entry point. */
1372 	if (validate_secondary_ep(x1, sp)) {
1373 		WARN("%s: Invalid entry point provided (0x%lx).\n",
1374 		     __func__, x1);
1375 		return spmc_ffa_error_return(handle,
1376 					     FFA_ERROR_INVALID_PARAMETER);
1377 	}
1378 
1379 	/*
1380 	 * Update the secondary entrypoint in SP context.
1381 	 * We don't need a lock here as during partition initialization there
1382 	 * will only be a single core online.
1383 	 */
1384 	sp->secondary_ep = x1;
1385 	VERBOSE("%s: 0x%lx\n", __func__, sp->secondary_ep);
1386 
1387 	SMC_RET1(handle, FFA_SUCCESS_SMC32);
1388 }
1389 
1390 /*******************************************************************************
1391  * This function parses the Secure Partition manifest. From the manifest it
1392  * fetches the details needed to prepare the Secure Partition image context
1393  * and any Secure Partition image boot arguments.
1394  ******************************************************************************/
1395 static int sp_manifest_parse(void *sp_manifest, int offset,
1396 			     struct secure_partition_desc *sp,
1397 			     entry_point_info_t *ep_info,
1398 			     int32_t *boot_info_reg)
1399 {
1400 	int32_t ret, node;
1401 	uint32_t config_32;
1402 
1403 	/*
1404 	 * Look for the mandatory fields that are expected to be present in
1405 	 * the SP manifests.
1406 	 */
1407 	node = fdt_path_offset(sp_manifest, "/");
1408 	if (node < 0) {
1409 		ERROR("Did not find root node.\n");
1410 		return node;
1411 	}
1412 
1413 	ret = fdt_read_uint32_array(sp_manifest, node, "uuid",
1414 				    ARRAY_SIZE(sp->uuid), sp->uuid);
1415 	if (ret != 0) {
1416 		ERROR("Missing Secure Partition UUID.\n");
1417 		return ret;
1418 	}
1419 
1420 	ret = fdt_read_uint32(sp_manifest, node, "exception-level", &config_32);
1421 	if (ret != 0) {
1422 		ERROR("Missing SP Exception Level information.\n");
1423 		return ret;
1424 	}
1425 
1426 	sp->runtime_el = config_32;
1427 
1428 	ret = fdt_read_uint32(sp_manifest, node, "ffa-version", &config_32);
1429 	if (ret != 0) {
1430 		ERROR("Missing Secure Partition FF-A Version.\n");
1431 		return ret;
1432 	}
1433 
1434 	sp->ffa_version = config_32;
1435 
1436 	ret = fdt_read_uint32(sp_manifest, node, "execution-state", &config_32);
1437 	if (ret != 0) {
1438 		ERROR("Missing Secure Partition Execution State.\n");
1439 		return ret;
1440 	}
1441 
1442 	sp->execution_state = config_32;
1443 
1444 	ret = fdt_read_uint32(sp_manifest, node,
1445 			      "messaging-method", &config_32);
1446 	if (ret != 0) {
1447 		ERROR("Missing Secure Partition messaging method.\n");
1448 		return ret;
1449 	}
1450 
1451 	/* Validate this entry, we currently only support direct messaging. */
1452 	if ((config_32 & ~(FFA_PARTITION_DIRECT_REQ_RECV |
1453 			  FFA_PARTITION_DIRECT_REQ_SEND)) != 0U) {
1454 		WARN("Invalid Secure Partition messaging method (0x%x)\n",
1455 		     config_32);
1456 		return -EINVAL;
1457 	}
1458 
1459 	sp->properties = config_32;
1460 
1461 	ret = fdt_read_uint32(sp_manifest, node,
1462 			      "execution-ctx-count", &config_32);
1463 
1464 	if (ret != 0) {
1465 		ERROR("Missing SP Execution Context Count.\n");
1466 		return ret;
1467 	}
1468 
1469 	/*
1470 	 * Ensure this field is set correctly in the manifest. However,
1471 	 * since this is currently a hardcoded value for S-EL1 partitions,
1472 	 * we don't need to save it here, just validate it.
1473 	 */
1474 	if (config_32 != PLATFORM_CORE_COUNT) {
1475 		ERROR("SP Execution Context Count (%u) must be %u.\n",
1476 			config_32, PLATFORM_CORE_COUNT);
1477 		return -EINVAL;
1478 	}
1479 
1480 	/*
1481 	 * Look for the optional fields that are expected to be present in
1482 	 * an SP manifest.
1483 	 */
1484 	ret = fdt_read_uint32(sp_manifest, node, "id", &config_32);
1485 	if (ret != 0) {
1486 		WARN("Missing Secure Partition ID.\n");
1487 	} else {
1488 		if (!is_ffa_secure_id_valid(config_32)) {
1489 			ERROR("Invalid Secure Partition ID (0x%x).\n",
1490 			      config_32);
1491 			return -EINVAL;
1492 		}
1493 		sp->sp_id = config_32;
1494 	}
1495 
1496 	ret = fdt_read_uint32(sp_manifest, node,
1497 			      "power-management-messages", &config_32);
1498 	if (ret != 0) {
1499 		WARN("Missing Power Management Messages entry.\n");
1500 	} else {
1501 		/*
1502 		 * Ensure only the currently supported power messages have
1503 		 * been requested.
1504 		 */
1505 		if (config_32 & ~(FFA_PM_MSG_SUB_CPU_OFF |
1506 				  FFA_PM_MSG_SUB_CPU_SUSPEND |
1507 				  FFA_PM_MSG_SUB_CPU_SUSPEND_RESUME)) {
1508 			ERROR("Requested unsupported PM messages (%x)\n",
1509 			      config_32);
1510 			return -EINVAL;
1511 		}
1512 		sp->pwr_mgmt_msgs = config_32;
1513 	}
1514 
1515 	ret = fdt_read_uint32(sp_manifest, node,
1516 			      "gp-register-num", &config_32);
1517 	if (ret != 0) {
1518 		WARN("Missing boot information register.\n");
1519 	} else {
1520 		/* Check if a register number between 0-3 is specified. */
1521 		if (config_32 < 4) {
1522 			*boot_info_reg = config_32;
1523 		} else {
1524 			WARN("Incorrect boot information register (%u).\n",
1525 			     config_32);
1526 		}
1527 	}
1528 
1529 	return 0;
1530 }
1531 
1532 /*******************************************************************************
1533  * This function gets the Secure Partition Manifest base and maps the manifest
1534  * region.
1535  * Currently only one Secure Partition manifest is considered which is used to
1536  * prepare the context for the single Secure Partition.
1537  ******************************************************************************/
1538 static int find_and_prepare_sp_context(void)
1539 {
1540 	void *sp_manifest;
1541 	uintptr_t manifest_base;
1542 	uintptr_t manifest_base_align;
1543 	entry_point_info_t *next_image_ep_info;
1544 	int32_t ret, boot_info_reg = -1;
1545 	struct secure_partition_desc *sp;
1546 
1547 	next_image_ep_info = bl31_plat_get_next_image_ep_info(SECURE);
1548 	if (next_image_ep_info == NULL) {
1549 		WARN("No Secure Partition image provided by BL2.\n");
1550 		return -ENOENT;
1551 	}
1552 
1553 	sp_manifest = (void *)next_image_ep_info->args.arg0;
1554 	if (sp_manifest == NULL) {
1555 		WARN("Secure Partition manifest absent.\n");
1556 		return -ENOENT;
1557 	}
1558 
1559 	manifest_base = (uintptr_t)sp_manifest;
1560 	manifest_base_align = page_align(manifest_base, DOWN);
1561 
1562 	/*
1563 	 * Map the secure partition manifest region in the EL3 translation
1564 	 * regime.
1565 	 * Map an area equal to (2 * PAGE_SIZE) for now. Since the manifest base
1566 	 * is aligned down to a page boundary, a single PAGE_SIZE region starting
1567 	 * at the aligned base may not completely cover the manifest.
1568 	 */
1569 	ret = mmap_add_dynamic_region((unsigned long long)manifest_base_align,
1570 				      manifest_base_align,
1571 				      PAGE_SIZE * 2,
1572 				      MT_RO_DATA);
1573 	if (ret != 0) {
1574 		ERROR("Error while mapping SP manifest (%d).\n", ret);
1575 		return ret;
1576 	}
1577 
1578 	ret = fdt_node_offset_by_compatible(sp_manifest, -1,
1579 					    "arm,ffa-manifest-1.0");
1580 	if (ret < 0) {
1581 		ERROR("Error happened in SP manifest reading.\n");
1582 		return -EINVAL;
1583 	}
1584 
1585 	/*
1586 	 * Store the size of the manifest so that it can later be used to pass
1587 	 * the manifest as boot information.
1588 	 */
1589 	next_image_ep_info->args.arg1 = fdt_totalsize(sp_manifest);
1590 	INFO("Manifest size = %lu bytes.\n", next_image_ep_info->args.arg1);
1591 
1592 	/*
1593 	 * Select an SP descriptor for initialising the partition's execution
1594 	 * context on the primary CPU.
1595 	 */
1596 	sp = spmc_get_current_sp_ctx();
1597 
1598 	/* Initialize entry point information for the SP */
1599 	SET_PARAM_HEAD(next_image_ep_info, PARAM_EP, VERSION_1,
1600 		       SECURE | EP_ST_ENABLE);
1601 
1602 	/* Parse the SP manifest. */
1603 	ret = sp_manifest_parse(sp_manifest, ret, sp, next_image_ep_info,
1604 				&boot_info_reg);
1605 	if (ret != 0) {
1606 		ERROR("Error in Secure Partition manifest parsing.\n");
1607 		return ret;
1608 	}
1609 
1610 	/* Check that the runtime EL in the manifest was correct. */
1611 	if (sp->runtime_el != S_EL1) {
1612 		ERROR("Unexpected runtime EL: %d\n", sp->runtime_el);
1613 		return -EINVAL;
1614 	}
1615 
1616 	/* Perform any common initialisation. */
1617 	spmc_sp_common_setup(sp, next_image_ep_info, boot_info_reg);
1618 
1619 	/* Perform any initialisation specific to S-EL1 SPs. */
1620 	spmc_el1_sp_setup(sp, next_image_ep_info);
1621 
1622 	/* Initialize the SP context with the required ep info. */
1623 	spmc_sp_common_ep_commit(sp, next_image_ep_info);
1624 
1625 	return 0;
1626 }
1627 
1628 /*******************************************************************************
1629  * This function validates the EL3 Logical Partition descriptors and then
1630  * initialises each EL3 Logical Partition in turn.
1631  ******************************************************************************/
1632 static int32_t logical_sp_init(void)
1633 {
1634 	int32_t rc = 0;
1635 	struct el3_lp_desc *el3_lp_descs;
1636 
1637 	/* Perform initial validation of the Logical Partitions. */
1638 	rc = el3_sp_desc_validate();
1639 	if (rc != 0) {
1640 		ERROR("Logical Partition validation failed!\n");
1641 		return rc;
1642 	}
1643 
1644 	el3_lp_descs = get_el3_lp_array();
1645 
1646 	INFO("Logical Secure Partition init start.\n");
1647 	for (unsigned int i = 0U; i < EL3_LP_DESCS_COUNT; i++) {
1648 		rc = el3_lp_descs[i].init();
1649 		if (rc != 0) {
1650 			ERROR("Logical SP (0x%x) failed to initialize.\n",
1651 			      el3_lp_descs[i].sp_id);
1652 			return rc;
1653 		}
1654 		VERBOSE("Logical SP (0x%x) initialized.\n",
1655 			el3_lp_descs[i].sp_id);
1656 	}
1657 
1658 	INFO("Logical Secure Partition init completed.\n");
1659 
1660 	return rc;
1661 }
1662 
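/*******************************************************************************
 * This function takes an SP execution context pointer and performs a
 * synchronous entry into the SP on the current physical cpu.
 ******************************************************************************/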
1663 uint64_t spmc_sp_synchronous_entry(struct sp_exec_ctx *ec)
1664 {
1665 	uint64_t rc;
1666 
1667 	assert(ec != NULL);
1668 
1669 	/* Assign the context of the SP to this CPU */
1670 	cm_set_context(&(ec->cpu_ctx), SECURE);
1671 
1672 	/* Restore the context assigned above */
1673 	cm_el1_sysregs_context_restore(SECURE);
1674 	cm_set_next_eret_context(SECURE);
1675 
1676 	/* Invalidate TLBs at EL1. */
1677 	tlbivmalle1();
1678 	dsbish();
1679 
1680 	/* Enter Secure Partition */
1681 	rc = spm_secure_partition_enter(&ec->c_rt_ctx);
1682 
1683 	/* Save secure state */
1684 	cm_el1_sysregs_context_save(SECURE);
1685 
1686 	return rc;
1687 }
1688 
1689 /*******************************************************************************
1690  * SPMC Helper Functions.
1691  ******************************************************************************/
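/*
 * Perform deferred initialisation of the physical Secure Partition by
 * entering it synchronously on the primary CPU. Returns a non-zero value on
 * success and 0 on failure, matching the convention expected of BL32 init
 * functions registered via bl31_register_bl32_init().
 */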
1692 static int32_t sp_init(void)
1693 {
1694 	uint64_t rc;
1695 	struct secure_partition_desc *sp;
1696 	struct sp_exec_ctx *ec;
1697 
1698 	sp = spmc_get_current_sp_ctx();
1699 	ec = spmc_get_sp_ec(sp);
1700 	ec->rt_model = RT_MODEL_INIT;
1701 	ec->rt_state = RT_STATE_RUNNING;
1702 
1703 	INFO("Secure Partition (0x%x) init start.\n", sp->sp_id);
1704 
1705 	rc = spmc_sp_synchronous_entry(ec);
1706 	if (rc != 0) {
1707 		/* Indicate SP init was not successful. */
1708 		ERROR("SP (0x%x) failed to initialize (%lu).\n",
1709 		      sp->sp_id, rc);
1710 		return 0;
1711 	}
1712 
1713 	ec->rt_state = RT_STATE_WAITING;
1714 	INFO("Secure Partition initialized.\n");
1715 
1716 	return 1;
1717 }
1718 
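/* Initialise each SP descriptor to a known empty state. */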
1719 static void initialize_sp_descs(void)
1720 {
1721 	struct secure_partition_desc *sp;
1722 
1723 	for (unsigned int i = 0U; i < SECURE_PARTITION_COUNT; i++) {
1724 		sp = &sp_desc[i];
1725 		sp->sp_id = INV_SP_ID;
1726 		sp->mailbox.rx_buffer = NULL;
1727 		sp->mailbox.tx_buffer = NULL;
1728 		sp->mailbox.state = MAILBOX_STATE_EMPTY;
1729 		sp->secondary_ep = 0;
1730 	}
1731 }
1732 
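/* Initialise each NS endpoint descriptor to a known empty state. */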
1733 static void initialize_ns_ep_descs(void)
1734 {
1735 	struct ns_endpoint_desc *ns_ep;
1736 
1737 	for (unsigned int i = 0U; i < NS_PARTITION_COUNT; i++) {
1738 		ns_ep = &ns_ep_desc[i];
1739 		/*
1740 		 * Clashes with the Hypervisor ID but will not be a
1741 		 * problem in practice.
1742 		 */
1743 		ns_ep->ns_ep_id = 0;
1744 		ns_ep->ffa_version = 0;
1745 		ns_ep->mailbox.rx_buffer = NULL;
1746 		ns_ep->mailbox.tx_buffer = NULL;
1747 		ns_ep->mailbox.state = MAILBOX_STATE_EMPTY;
1748 	}
1749 }
1750 
1751 /*******************************************************************************
1752  * Initialize SPMC attributes for the SPMD.
1753  ******************************************************************************/
1754 void spmc_populate_attrs(spmc_manifest_attribute_t *spmc_attrs)
1755 {
1756 	spmc_attrs->major_version = FFA_VERSION_MAJOR;
1757 	spmc_attrs->minor_version = FFA_VERSION_MINOR;
1758 	spmc_attrs->exec_state = MODE_RW_64;
1759 	spmc_attrs->spmc_id = FFA_SPMC_ID;
1760 }
1761 
1762 /*******************************************************************************
1763  * Initialize contexts of all Secure Partitions.
1764  ******************************************************************************/
1765 int32_t spmc_setup(void)
1766 {
1767 	int32_t ret;
1768 	uint32_t flags;
1769 
1770 	/* Initialize endpoint descriptors */
1771 	initialize_sp_descs();
1772 	initialize_ns_ep_descs();
1773 
1774 	/*
1775 	 * Retrieve the platform-allocated datastore used to track shared
1776 	 * memory requests, then zero the region before use.
1777 	 */
1778 	ret = plat_spmc_shmem_datastore_get(&spmc_shmem_obj_state.data,
1779 					    &spmc_shmem_obj_state.data_size);
1780 	if (ret != 0) {
1781 		ERROR("Failed to obtain memory descriptor backing store!\n");
1782 		return ret;
1783 	}
1784 	memset(spmc_shmem_obj_state.data, 0, spmc_shmem_obj_state.data_size);
1785 
1786 	/* Setup logical SPs. */
1787 	ret = logical_sp_init();
1788 	if (ret != 0) {
1789 		ERROR("Failed to initialize Logical Partitions.\n");
1790 		return ret;
1791 	}
1792 
1793 	/* Perform physical SP setup. */
1794 
1795 	/* Disable MMU at EL1 (initialized by BL2) */
1796 	disable_mmu_icache_el1();
1797 
1798 	/* Initialize context of the SP */
1799 	INFO("Secure Partition context setup start.\n");
1800 
1801 	ret = find_and_prepare_sp_context();
1802 	if (ret != 0) {
1803 		ERROR("Failed to find and prepare the SP context.\n");
1804 		return ret;
1805 	}
1806 
1807 	/* Register power management hooks with PSCI */
1808 	psci_register_spd_pm_hook(&spmc_pm);
1809 
1810 	/*
1811 	 * Register a handler for S-EL1 interrupts that are
1812 	 * triggered while execution is in the non-secure
1813 	 * state.
1814 	 */
1815 	flags = 0;
1816 	set_interrupt_rm_flag(flags, NON_SECURE);
1817 	ret = register_interrupt_type_handler(INTR_TYPE_S_EL1,
1818 					      spmc_sp_interrupt_handler,
1819 					      flags);
1820 	if (ret != 0) {
1821 		ERROR("Failed to register interrupt handler! (%d)\n", ret);
1822 		panic();
1823 	}
1824 
1825 	/* Register the SP init function with BL31 for deferred initialisation. */
1826 	bl31_register_bl32_init(&sp_init);
1827 
1828 	INFO("Secure Partition setup done.\n");
1829 
1830 	return 0;
1831 }
1832 
1833 /*******************************************************************************
1834  * Secure Partition Manager SMC handler.
1835  ******************************************************************************/
1836 uint64_t spmc_smc_handler(uint32_t smc_fid,
1837 			  bool secure_origin,
1838 			  uint64_t x1,
1839 			  uint64_t x2,
1840 			  uint64_t x3,
1841 			  uint64_t x4,
1842 			  void *cookie,
1843 			  void *handle,
1844 			  uint64_t flags)
1845 {
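	/*
	 * Dispatch each FF-A ABI, received from either security state, to its
	 * handler. Unrecognised function IDs are rejected with
	 * FFA_ERROR(NOT_SUPPORTED).
	 */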
1846 	switch (smc_fid) {
1847 
1848 	case FFA_VERSION:
1849 		return ffa_version_handler(smc_fid, secure_origin, x1, x2, x3,
1850 					   x4, cookie, handle, flags);
1851 
1852 	case FFA_SPM_ID_GET:
1853 		return ffa_spm_id_get_handler(smc_fid, secure_origin, x1, x2,
1854 					     x3, x4, cookie, handle, flags);
1855 
1856 	case FFA_ID_GET:
1857 		return ffa_id_get_handler(smc_fid, secure_origin, x1, x2, x3,
1858 					  x4, cookie, handle, flags);
1859 
1860 	case FFA_FEATURES:
1861 		return ffa_features_handler(smc_fid, secure_origin, x1, x2, x3,
1862 					    x4, cookie, handle, flags);
1863 
1864 	case FFA_SECONDARY_EP_REGISTER_SMC64:
1865 		return ffa_sec_ep_register_handler(smc_fid, secure_origin, x1,
1866 						   x2, x3, x4, cookie, handle,
1867 						   flags);
1868 
1869 	case FFA_MSG_SEND_DIRECT_REQ_SMC32:
1870 	case FFA_MSG_SEND_DIRECT_REQ_SMC64:
1871 		return direct_req_smc_handler(smc_fid, secure_origin, x1, x2,
1872 					      x3, x4, cookie, handle, flags);
1873 
1874 	case FFA_MSG_SEND_DIRECT_RESP_SMC32:
1875 	case FFA_MSG_SEND_DIRECT_RESP_SMC64:
1876 		return direct_resp_smc_handler(smc_fid, secure_origin, x1, x2,
1877 					       x3, x4, cookie, handle, flags);
1878 
1879 	case FFA_RXTX_MAP_SMC32:
1880 	case FFA_RXTX_MAP_SMC64:
1881 		return rxtx_map_handler(smc_fid, secure_origin, x1, x2, x3, x4,
1882 					cookie, handle, flags);
1883 
1884 	case FFA_RXTX_UNMAP:
1885 		return rxtx_unmap_handler(smc_fid, secure_origin, x1, x2, x3,
1886 					  x4, cookie, handle, flags);
1887 
1888 	case FFA_PARTITION_INFO_GET:
1889 		return partition_info_get_handler(smc_fid, secure_origin, x1,
1890 						  x2, x3, x4, cookie, handle,
1891 						  flags);
1892 
1893 	case FFA_RX_RELEASE:
1894 		return rx_release_handler(smc_fid, secure_origin, x1, x2, x3,
1895 					  x4, cookie, handle, flags);
1896 
1897 	case FFA_MSG_WAIT:
1898 		return msg_wait_handler(smc_fid, secure_origin, x1, x2, x3, x4,
1899 					cookie, handle, flags);
1900 
1901 	case FFA_ERROR:
1902 		return ffa_error_handler(smc_fid, secure_origin, x1, x2, x3, x4,
1903 					cookie, handle, flags);
1904 
1905 	case FFA_MSG_RUN:
1906 		return ffa_run_handler(smc_fid, secure_origin, x1, x2, x3, x4,
1907 				       cookie, handle, flags);
1908 
1909 	case FFA_MEM_SHARE_SMC32:
1910 	case FFA_MEM_SHARE_SMC64:
1911 	case FFA_MEM_LEND_SMC32:
1912 	case FFA_MEM_LEND_SMC64:
1913 		return spmc_ffa_mem_send(smc_fid, secure_origin, x1, x2, x3, x4,
1914 					 cookie, handle, flags);
1915 
1916 	case FFA_MEM_FRAG_TX:
1917 		return spmc_ffa_mem_frag_tx(smc_fid, secure_origin, x1, x2, x3,
1918 					    x4, cookie, handle, flags);
1919 
1920 	case FFA_MEM_FRAG_RX:
1921 		return spmc_ffa_mem_frag_rx(smc_fid, secure_origin, x1, x2, x3,
1922 					    x4, cookie, handle, flags);
1923 
1924 	case FFA_MEM_RETRIEVE_REQ_SMC32:
1925 	case FFA_MEM_RETRIEVE_REQ_SMC64:
1926 		return spmc_ffa_mem_retrieve_req(smc_fid, secure_origin, x1, x2,
1927 						 x3, x4, cookie, handle, flags);
1928 
1929 	case FFA_MEM_RELINQUISH:
1930 		return spmc_ffa_mem_relinquish(smc_fid, secure_origin, x1, x2,
1931 					       x3, x4, cookie, handle, flags);
1932 
1933 	case FFA_MEM_RECLAIM:
1934 		return spmc_ffa_mem_reclaim(smc_fid, secure_origin, x1, x2, x3,
1935 					    x4, cookie, handle, flags);
1936 
1937 	default:
1938 		WARN("Unsupported FF-A call 0x%08x.\n", smc_fid);
1939 		break;
1940 	}
1941 	return spmc_ffa_error_return(handle, FFA_ERROR_NOT_SUPPORTED);
1942 }
1943 
1944 /*******************************************************************************
1945  * This function is the handler registered for S-EL1 interrupts by the SPMC. It
1946  * validates the interrupt and upon success arranges entry into the SP for
1947  * handling the interrupt.
1948  ******************************************************************************/
1949 static uint64_t spmc_sp_interrupt_handler(uint32_t id,
1950 					  uint32_t flags,
1951 					  void *handle,
1952 					  void *cookie)
1953 {
1954 	struct secure_partition_desc *sp = spmc_get_current_sp_ctx();
1955 	struct sp_exec_ctx *ec;
1956 	uint32_t linear_id = plat_my_core_pos();
1957 
1958 	/* Sanity check to avoid a NULL pointer dereference. */
1959 	assert(sp != NULL);
1960 
1961 	/* Check the security state when the exception was generated. */
1962 	assert(get_interrupt_src_ss(flags) == NON_SECURE);
1963 
1964 	/* Panic if not an S-EL1 Partition. */
1965 	if (sp->runtime_el != S_EL1) {
1966 		ERROR("Interrupt received for a non S-EL1 SP on core%u.\n",
1967 		      linear_id);
1968 		panic();
1969 	}
1970 
1971 	/* Obtain a reference to the SP execution context. */
1972 	ec = spmc_get_sp_ec(sp);
1973 
1974 	/* Ensure the execution context is in the waiting state, else panic. */
1975 	if (ec->rt_state != RT_STATE_WAITING) {
1976 		ERROR("SP EC on core%u is not waiting (expected %u, got %u).\n",
1977 		      linear_id, RT_STATE_WAITING, ec->rt_state);
1978 		panic();
1979 	}
1980 
1981 	/* Update the runtime model and state of the partition. */
1982 	ec->rt_model = RT_MODEL_INTR;
1983 	ec->rt_state = RT_STATE_RUNNING;
1984 
1985 	VERBOSE("SP (0x%x) interrupt start on core%u.\n", sp->sp_id, linear_id);
1986 
1987 	/*
1988 	 * Forward the interrupt to the S-EL1 SP. The interrupt ID is not
1989 	 * populated as the SP can determine this by itself.
1990 	 */
1991 	return spmd_smc_switch_state(FFA_INTERRUPT, false,
1992 				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1993 				     FFA_PARAM_MBZ, FFA_PARAM_MBZ,
1994 				     handle);
1995 }
1996