/*
 * Copyright (c) 2021-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <string.h>

#include <arch_helpers.h>
#include <arch_features.h>
#include <bl31/bl31.h>
#include <common/debug.h>
#include <common/runtime_svc.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/pubsub.h>
#include <lib/gpt_rme/gpt_rme.h>

#include <lib/spinlock.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables_v2.h>
#include <plat/common/common_def.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include <services/rmmd_svc.h>
#include <smccc_helpers.h>
#include <lib/extensions/sve.h>
#include "rmmd_initial_context.h"
#include "rmmd_private.h"

/*******************************************************************************
 * RMM boot failure flag
 ******************************************************************************/
static bool rmm_boot_failed;

/*******************************************************************************
 * RMM context information.
 ******************************************************************************/
rmmd_rmm_context_t rmm_context[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * RMM entry point information. Discovered on the primary core and reused
 * on secondary cores.
 ******************************************************************************/
static entry_point_info_t *rmm_ep_info;

/*******************************************************************************
 * Static function declaration.
 ******************************************************************************/
static int32_t rmm_init(void);

/*******************************************************************************
 * This function takes an RMM context pointer and performs a synchronous entry
 * into it.
 ******************************************************************************/
uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *rmm_ctx)
{
	uint64_t rc;

	assert(rmm_ctx != NULL);

	cm_set_context(&(rmm_ctx->cpu_ctx), REALM);

	/* Restore the realm context assigned above */
	cm_el1_sysregs_context_restore(REALM);
	cm_el2_sysregs_context_restore(REALM);
	cm_set_next_eret_context(REALM);

	/* Enter RMM */
	rc = rmmd_rmm_enter(&rmm_ctx->c_rt_ctx);

	/*
	 * Save realm context. EL1 and EL2 Non-secure
	 * contexts will be restored before exiting to
	 * Non-secure world, therefore there is no need
	 * to clear EL1 and EL2 context registers.
	 */
	cm_el1_sysregs_context_save(REALM);
	cm_el2_sysregs_context_save(REALM);

	return rc;
}

/*******************************************************************************
 * This function returns to the place where rmmd_rmm_sync_entry() was
 * called originally.
 ******************************************************************************/
__dead2 void rmmd_rmm_sync_exit(uint64_t rc)
{
	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

	/* Get context of the RMM in use by this CPU. */
	assert(cm_get_context(REALM) == &(ctx->cpu_ctx));

	/*
	 * The RMMD must have initiated the original request through a
	 * synchronous entry into RMM. Jump back to the original C runtime
	 * context with the value of rc in x0.
	 */
	rmmd_rmm_exit(ctx->c_rt_ctx, rc);

	panic();
}

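/*******************************************************************************
 * Seed the initial Realm EL2 system register context. The CTX_* macros are
 * byte offsets into the saved register area, so they are shifted right by 3
 * to index the array of 64-bit register slots.
 ******************************************************************************/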
static void rmm_el2_context_init(el2_sysregs_t *regs)
{
	regs->ctx_regs[CTX_SPSR_EL2 >> 3] = REALM_SPSR_EL2;
	regs->ctx_regs[CTX_SCTLR_EL2 >> 3] = SCTLR_EL2_RES1;
}

/*******************************************************************************
 * Enable architecture extensions on first entry to Realm world.
 ******************************************************************************/
static void manage_extensions_realm(cpu_context_t *ctx)
{
#if ENABLE_SVE_FOR_NS
	/*
	 * Enable SVE and FPU in realm context when it is enabled for NS.
	 * Realm manager must ensure that the SVE and FPU register
	 * contexts are properly managed.
	 */
	sve_enable(ctx);
#else
	/*
	 * Disable SVE and FPU in realm context when it is disabled for NS.
	 */
	sve_disable(ctx);
#endif /* ENABLE_SVE_FOR_NS */
}

/*******************************************************************************
 * Jump to the RMM for the first time.
 ******************************************************************************/
static int32_t rmm_init(void)
{
	long rc;
	rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()];

	INFO("RMM init start.\n");

	/* Enable architecture extensions */
	manage_extensions_realm(&ctx->cpu_ctx);

	/* Initialize RMM EL2 context. */
	rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);

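	/*
	 * Perform a synchronous entry into RMM. RMM reports its boot status
	 * back through RMM_BOOT_COMPLETE, which returns here via
	 * rmmd_rmm_sync_exit().
	 */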
	rc = rmmd_rmm_sync_entry(ctx);
	if (rc != E_RMM_BOOT_SUCCESS) {
		ERROR("RMM init failed: %ld\n", rc);
		/* Mark the boot as failed for all the CPUs */
		rmm_boot_failed = true;
		return 0;
	}

	INFO("RMM init end.\n");

	return 1;
}

/*******************************************************************************
 * Load and read RMM manifest, setup RMM.
 ******************************************************************************/
int rmmd_setup(void)
{
	size_t shared_buf_size __unused;
	uintptr_t shared_buf_base;
	uint32_t ep_attr;
	unsigned int linear_id = plat_my_core_pos();
	rmmd_rmm_context_t *rmm_ctx = &rmm_context[linear_id];
	rmm_manifest_t *manifest;
	int rc;

	/* Make sure RME is supported. */
	assert(get_armv9_2_feat_rme_support() != 0U);

	rmm_ep_info = bl31_plat_get_next_image_ep_info(REALM);
	if (rmm_ep_info == NULL) {
		WARN("No RMM image provided by BL2 boot loader. Booting "
		     "device without RMM initialization. SMCs destined for "
		     "RMM will return SMC_UNK\n");
		return -ENOENT;
	}

	/* Under no circumstances will this parameter be 0 */
	assert(rmm_ep_info->pc == RMM_BASE);

	/* Initialise an entrypoint to set up the CPU context */
	ep_attr = EP_REALM;
	if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0U) {
		ep_attr |= EP_EE_BIG;
	}

	SET_PARAM_HEAD(rmm_ep_info, PARAM_EP, VERSION_1, ep_attr);
	rmm_ep_info->spsr = SPSR_64(MODE_EL2,
					MODE_SP_ELX,
					DISABLE_ALL_EXCEPTIONS);

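	/*
	 * Retrieve the EL3 <-> RMM shared buffer. The boot manifest is loaded
	 * at the start of this area and its base address is passed to RMM as
	 * a boot argument below.
	 */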
	shared_buf_size =
			plat_rmmd_get_el3_rmm_shared_mem(&shared_buf_base);

	assert((shared_buf_size == SZ_4K) &&
					((void *)shared_buf_base != NULL));

	/* Load the boot manifest at the beginning of the shared area */
	manifest = (rmm_manifest_t *)shared_buf_base;
	rc = plat_rmmd_load_manifest(manifest);
	if (rc != 0) {
		ERROR("Error loading RMM Boot Manifest (%i)\n", rc);
		return rc;
	}
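	/*
	 * Flush the manifest to memory so that RMM, which may read the shared
	 * area before enabling its own MMU and caches, observes the data.
	 */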
	flush_dcache_range((uintptr_t)shared_buf_base, shared_buf_size);

	/*
	 * Prepare coldboot arguments for RMM:
	 * arg0: This CPUID (primary processor).
	 * arg1: Version for this Boot Interface.
	 * arg2: PLATFORM_CORE_COUNT.
	 * arg3: Base address for the EL3 <-> RMM shared area. The boot
	 *       manifest will be stored at the beginning of this area.
	 */
	rmm_ep_info->args.arg0 = linear_id;
	rmm_ep_info->args.arg1 = RMM_EL3_INTERFACE_VERSION;
	rmm_ep_info->args.arg2 = PLATFORM_CORE_COUNT;
	rmm_ep_info->args.arg3 = shared_buf_base;

	/* Initialise RMM context with this entry point information */
	cm_setup_context(&rmm_ctx->cpu_ctx, rmm_ep_info);

	INFO("RMM setup done.\n");

	/* Register init function for deferred init. */
	bl31_register_rmm_init(&rmm_init);

	return 0;
}

/*******************************************************************************
 * Forward SMC to the other security state
 ******************************************************************************/
static uint64_t rmmd_smc_forward(uint32_t src_sec_state,
				 uint32_t dst_sec_state, uint64_t x0,
				 uint64_t x1, uint64_t x2, uint64_t x3,
				 uint64_t x4, void *handle)
{
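	/*
	 * Get the context of the destination security state; the return
	 * values below are written into its saved general purpose registers.
	 */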
	cpu_context_t *ctx = cm_get_context(dst_sec_state);

	/* Save incoming security state */
	cm_el1_sysregs_context_save(src_sec_state);
	cm_el2_sysregs_context_save(src_sec_state);

	/* Restore outgoing security state */
	cm_el1_sysregs_context_restore(dst_sec_state);
	cm_el2_sysregs_context_restore(dst_sec_state);
	cm_set_next_eret_context(dst_sec_state);

	/*
	 * As per SMCCC v1.2, x4 to x7 must be preserved unless they are used
	 * as return arguments. Hence the onward and return paths are handled
	 * differently: up to 8 arguments are passed on the onward path and
	 * up to 4 on the return path. Register x4 is preserved by RMM when it
	 * is not used as a return argument.
	 */
	if (src_sec_state == NON_SECURE) {
		SMC_RET8(ctx, x0, x1, x2, x3, x4,
			 SMC_GET_GP(handle, CTX_GPREG_X5),
			 SMC_GET_GP(handle, CTX_GPREG_X6),
			 SMC_GET_GP(handle, CTX_GPREG_X7));
	}

	SMC_RET5(ctx, x0, x1, x2, x3, x4);
}

/*******************************************************************************
 * This function handles all SMCs in the range reserved for RMI. Each call is
 * either forwarded to the other security state or handled by the RMM dispatcher
 ******************************************************************************/
uint64_t rmmd_rmi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
			  uint64_t x3, uint64_t x4, void *cookie,
			  void *handle, uint64_t flags)
{
	uint32_t src_sec_state;

	/* If RMM failed to boot, treat any RMI SMC as unknown */
	if (rmm_boot_failed) {
		WARN("RMMD: Failed to boot up RMM. Ignoring RMI call\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/* Determine which security state this SMC originated from */
	src_sec_state = caller_sec_state(flags);

	/* RMI must not be invoked by the Secure world */
	if (src_sec_state == SMC_FROM_SECURE) {
		WARN("RMMD: RMI invoked by secure world.\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/*
	 * Forward an RMI call from the Normal world to the Realm world as it
	 * is.
	 */
	if (src_sec_state == SMC_FROM_NON_SECURE) {
		VERBOSE("RMMD: RMI call from non-secure world.\n");
		return rmmd_smc_forward(NON_SECURE, REALM, smc_fid,
					x1, x2, x3, x4, handle);
	}

	if (src_sec_state != SMC_FROM_REALM) {
		SMC_RET1(handle, SMC_UNK);
	}

	switch (smc_fid) {
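	/*
	 * RMM has completed a previously forwarded RMI call. Its x1 to x5
	 * become x0 to x4 of the Non-secure caller on the return path.
	 */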
	case RMM_RMI_REQ_COMPLETE: {
		uint64_t x5 = SMC_GET_GP(handle, CTX_GPREG_X5);

		return rmmd_smc_forward(REALM, NON_SECURE, x1,
					x2, x3, x4, x5, handle);
	}
	default:
		WARN("RMMD: Unsupported RMM call 0x%08x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}

/*******************************************************************************
 * This CPU has been turned on. Enter RMM to initialise R-EL2. Entry into RMM
 * is done after initialising the minimal architectural state that guarantees
 * safe execution.
 ******************************************************************************/
static void *rmmd_cpu_on_finish_handler(const void *arg)
{
	long rc;
	uint32_t linear_id = plat_my_core_pos();
	rmmd_rmm_context_t *ctx = &rmm_context[linear_id];

	if (rmm_boot_failed) {
		/* RMM boot failed on a previous CPU. Abort. */
		ERROR("RMM failed to initialize. Ignoring for CPU%d\n",
								linear_id);
		return NULL;
	}

	/*
	 * Prepare warmboot arguments for RMM:
	 * arg0: This CPUID.
	 * arg1 to arg3: Not used.
	 */
	rmm_ep_info->args.arg0 = linear_id;
	rmm_ep_info->args.arg1 = 0ULL;
	rmm_ep_info->args.arg2 = 0ULL;
	rmm_ep_info->args.arg3 = 0ULL;

	/* Initialise RMM context with this entry point information */
	cm_setup_context(&ctx->cpu_ctx, rmm_ep_info);

	/* Enable architecture extensions */
	manage_extensions_realm(&ctx->cpu_ctx);

	/* Initialize RMM EL2 context. */
	rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx);

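	/* Perform the warm-boot synchronous entry into RMM on this CPU. */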
	rc = rmmd_rmm_sync_entry(ctx);

	if (rc != E_RMM_BOOT_SUCCESS) {
		ERROR("RMM init failed on CPU%d: %ld\n", linear_id, rc);
		/* Mark the boot as failed for any other booting CPU */
		rmm_boot_failed = true;
	}

	return NULL;
}

/* Subscribe to the PSCI CPU_ON event to initialize RMM on secondary CPUs */
SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, rmmd_cpu_on_finish_handler);

/* Convert GPT lib error to RMMD GTS error */
static int gpt_to_gts_error(int error, uint32_t smc_fid, uint64_t address)
{
	int ret;

	if (error == 0) {
		return E_RMM_OK;
	}

	if (error == -EINVAL) {
		ret = E_RMM_BAD_ADDR;
	} else {
		/* This is the only other error code we expect */
		assert(error == -EPERM);
		ret = E_RMM_BAD_PAS;
	}

	ERROR("RMMD: PAS Transition failed. GPT ret = %d, PA: 0x%"PRIx64 ", FID = 0x%x\n",
				error, address, smc_fid);
	return ret;
}

/*******************************************************************************
 * This function handles RMM-EL3 interface SMCs
 ******************************************************************************/
uint64_t rmmd_rmm_el3_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
				uint64_t x3, uint64_t x4, void *cookie,
				void *handle, uint64_t flags)
{
	uint32_t src_sec_state;
	int ret;

	/* If RMM failed to boot, treat any RMM-EL3 interface SMC as unknown */
	if (rmm_boot_failed) {
		WARN("RMMD: Failed to boot up RMM. Ignoring RMM-EL3 call\n");
		SMC_RET1(handle, SMC_UNK);
	}

	/* Determine which security state this SMC originated from */
	src_sec_state = caller_sec_state(flags);

	if (src_sec_state != SMC_FROM_REALM) {
		WARN("RMMD: RMM-EL3 call originated from secure or normal world\n");
		SMC_RET1(handle, SMC_UNK);
	}

	switch (smc_fid) {
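	/*
	 * Granule Transition Service: the GPT library moves the 4KB granule
	 * at the requested PA between the Non-secure and Realm PAS.
	 */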
	case RMM_GTSI_DELEGATE:
		ret = gpt_delegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
		SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
	case RMM_GTSI_UNDELEGATE:
		ret = gpt_undelegate_pas(x1, PAGE_SIZE_4KB, SMC_FROM_REALM);
		SMC_RET1(handle, gpt_to_gts_error(ret, smc_fid, x1));
	case RMM_ATTEST_GET_PLAT_TOKEN:
		ret = rmmd_attest_get_platform_token(x1, &x2, x3);
		SMC_RET2(handle, ret, x2);
	case RMM_ATTEST_GET_REALM_KEY:
		ret = rmmd_attest_get_signing_key(x1, &x2, x3);
		SMC_RET2(handle, ret, x2);

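	/*
	 * End of the RMM boot sequence: return to the rmmd_rmm_sync_entry()
	 * call for this CPU with the boot status passed by RMM in x1.
	 */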
	case RMM_BOOT_COMPLETE:
		VERBOSE("RMMD: running rmmd_rmm_sync_exit\n");
		rmmd_rmm_sync_exit(x1);

	default:
		WARN("RMMD: Unsupported RMM-EL3 call 0x%08x\n", smc_fid);
		SMC_RET1(handle, SMC_UNK);
	}
}