/*
 * SPDX-License-Identifier: BSD-3-Clause
 * SPDX-FileCopyrightText: Copyright TF-RMM Contributors.
 */

#include <app.h>
#include <app_common.h>
#include <app_header.h>
#include <app_header_private.h>
#include <app_services.h>
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <buffer.h>
#include <debug.h>
#include <errno.h>
#include <granule.h>
#include <import_sym.h>
#include <utils_def.h>
#include <xlat_contexts.h>
#include <xlat_tables.h>

#define GRANULE_PA_IDX_APP_REG_CTX	0U
#define GRANULE_PA_IDX_APP_PAGE_TABLE	1U
#define GRANULE_PA_IDX_COUNT		2U

#define RMM_APP_APP_REG_CTX_MMAP_IDX	0U
#define RMM_APP_TEXT_MMAP_IDX		1U
#define RMM_APP_RODATA_MMAP_IDX		2U
#define RMM_APP_DATA_MMAP_IDX		3U
#define RMM_APP_BSS_MMAP_IDX		4U
#define RMM_APP_SHARED_IDX		5U
#define RMM_APP_HEAP_IDX		6U
#define RMM_APP_STACK_IDX		7U
#define RMM_APP_MMAP_REGION_COUNT	8U

#define ASID_SIZE_NO_FEAT_ASID16	8U

struct app_id_data {
	struct xlat_ctx_cfg app_va_xlat_ctx_cfg_base;
	struct xlat_mmap_region mm_regions_array[RMM_APP_MMAP_REGION_COUNT];
	uintptr_t el0_shared_page_va;
	uintptr_t heap_va;
	uintptr_t stack_buf_start_va;
};

static struct app_id_data app_id_data_array[APP_COUNT];

struct app_bss_memory_t {
	uintptr_t pa;
	size_t size;
};
static struct app_bss_memory_t app_bss_memory_array[APP_COUNT];

/* This function is implemented in assembly */
/* TODO: get these declarations properly from a header */
int run_app(struct app_reg_ctx *app_reg_ctx, uint64_t heap_properties);

IMPORT_SYM(uintptr_t, rmm_rw_start, RMM_RW_RANGE_START); /* NOLINT */
IMPORT_SYM(uintptr_t, rmm_rw_end, RMM_RW_RANGE_END); /* NOLINT */

static bool in_rmm_rw_range(uintptr_t address)
{
	return (address >= RMM_RW_RANGE_START) && (address < RMM_RW_RANGE_END);
}

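/*
 * Map the page at @pa into EL2 VA space using the given buffer slot.
 * Addresses in the RMM RW range are accessible directly, so @pa is
 * returned as the VA. Otherwise the granule is looked up and locked as
 * REC_AUX (or PDEV_AUX) and mapped via the slot buffer.
 */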
static void *map_page_to_slot(uintptr_t pa, enum buffer_slot slot)
{
	/* See whether the pa is in the rmm RW area */
	if (in_rmm_rw_range(pa)) {
		return (void *)pa;
	}
	/*
	 * It is assumed that the caller has provided the list of granules
	 * after validating they belong to the particular type: REC_AUX or
	 * PDEV_AUX.
	 */
	/* First assume delegated REC_AUX granule */
	struct granule *app_data_granule = find_lock_granule(pa, GRANULE_STATE_REC_AUX);

	if (app_data_granule == NULL) {
		/* Try PDEV_AUX granule next */
		app_data_granule = find_lock_granule(pa, GRANULE_STATE_PDEV_AUX);
		if (app_data_granule == NULL) {
			ERROR("ERROR %s:%d\n", __func__, __LINE__);
			return NULL;
		}
	}
	return buffer_granule_map(app_data_granule, slot);
}

static void *slot_map_app_pagetable(uintptr_t pa)
{
	return map_page_to_slot(pa, SLOT_APP_PAGE_TABLE);
}

static void *slot_map_page_to_init(uintptr_t pa)
{
	return map_page_to_slot(pa, SLOT_APP_INIT);
}

static void *slot_map_app_reg_ctx_page(uintptr_t pa)
{
	return map_page_to_slot(pa, SLOT_APP_INIT);
}

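/*
 * Undo a mapping created by map_page_to_slot(). Pages in the RMM RW range
 * were never mapped through a buffer slot nor locked, so there is nothing
 * to do. Otherwise unmap the buffer slot and unlock the granule.
 */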
static void unmap_page(uintptr_t pa, void *va)
{
	struct granule *g;

	if (in_rmm_rw_range(pa)) {
		return;
	}
	buffer_unmap(va);
	g = find_granule(pa);
	granule_unlock(g);
}

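/*
 * Set up the translation context of an app instance. The base config
 * prepared by app_framework_setup() is copied into the instance, the
 * translation tables are initialised in the donated page table granule,
 * the MMU configuration is derived from the context, and the last level
 * table covering APP_VA_START is cached for later transient mappings.
 */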
static int init_app_translation(size_t app_id,
				struct app_data_cfg *app_data,
				uintptr_t page_table_pa,
				void *page_table)
{
	int ret;
	size_t app_index;

	if (!GRANULE_ALIGNED(page_table_pa)) {
		return -EINVAL;
	}

	ret = app_get_index(app_id, &app_index);
	if (ret != 0) {
		return ret;
	}

	/* To prevent "array subscript <unknown> is outside array bounds" warning */
	/* cppcheck-suppress unsignedPositive
	 * As app_index is unsigned, app_index >= APP_COUNT is always true if
	 * APP_COUNT is zero.
	 */
	/* coverity[no_effect:SUPPRESS] */
	/* coverity[misra_c_2012_rule_14_3_violation:SUPPRESS] */
	if (app_index >= APP_COUNT) {
		return -EINVAL;
	}

	/* Copy the prepared base config into the app instance's own config */
	/* coverity[deadcode:SUPPRESS] */
	/* coverity[misra_c_2012_rule_14_3_violation:SUPPRESS] */
	app_data->app_va_xlat_ctx_cfg = app_id_data_array[app_index].app_va_xlat_ctx_cfg_base;
	app_data->el0_shared_page_va = app_id_data_array[app_index].el0_shared_page_va;
	app_data->heap_va = app_id_data_array[app_index].heap_va;
	app_data->stack_buf_start_va = app_id_data_array[app_index].stack_buf_start_va;

	/*
	 * Initialize the translation tables for the APP.
	 */
	ret = xlat_ctx_init(&app_data->app_va_xlat_ctx,
				&app_data->app_va_xlat_ctx_cfg,
				&app_data->app_va_tbls,
				page_table,
				APP_XLAT_TABLE_COUNT,
				page_table_pa);
	if (ret != 0) {
		return ret;
	}

	ret = xlat_arch_setup_mmu_cfg(&app_data->app_va_xlat_ctx, &app_data->mmu_config);
	if (ret != 0) {
		return ret;
	}

	/*
	 * TODO: This limits the max APP VA size (i.e. a single 3rd level table
	 * is used). This is 2MB of address space. Provide a more general
	 * solution (updating the cache when mapping the pages and llt changes,
	 * etc.)
	 */
	return xlat_get_llt_from_va(&app_data->cached_app_llt_info,
					&app_data->app_va_xlat_ctx,
					APP_VA_START);
}

/* Map a page in the transient region in the APP VA space */
static int app_xlat_map(struct app_data_cfg *app_data,
			  uintptr_t va,
			  uintptr_t pa,
			  uint64_t attr)
{
	struct xlat_llt_info *entry = &app_data->cached_app_llt_info;

	assert(GRANULE_ALIGNED(pa));
	/* TODO: Some xlat_... functions assume they are modifying the
	 * in-context xlat tables (and hence issue dsb and isb), but these
	 * barriers are not required when modifying an out-of-context xlat
	 * table.
	 */
	return xlat_map_memory_page_with_attrs(entry, va, pa, attr);
}

static int allocate_bss(size_t app_id, size_t bss_size, uintptr_t *pa)
{
	/* TODO: For each application RMM should allocate the required
	 * amount of zero-initialised memory (from EL3). Currently this
	 * allocation mechanism is not available, so as a temporary workaround
	 * the BSS memory for an app is allocated in the app's rmm_stub
	 * library.
	 */
	int ret __unused;
	size_t app_index;
	struct app_header *app_header;

	(void)bss_size;

	ret = app_get_index(app_id, &app_index);
	if (ret != 0) {
		return ret;
	}
	ret = app_get_header_ptr_at_index(app_index, &app_header);
	assert(ret == 0);
	if (app_bss_memory_array[app_index].size != bss_size) {
		ERROR("App id %lu requested %lu bytes, got %lu bytes.\n",
			app_id, bss_size, app_bss_memory_array[app_index].size);
		assert(false);
	}
	*pa = app_bss_memory_array[app_index].pa;
	return 0;
}

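/*
 * Return the number of granules that need to be donated for an instance
 * of the app identified by app_id, or 0 if no app with that ID exists.
 */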
size_t app_get_required_granule_count(unsigned long app_id)
{
	struct app_header *app_header;
	int ret;

	ret = app_get_header_ptr(app_id, &app_header);
	if (ret != 0) {
		return 0UL;
	}

	return app_get_required_granule_count_from_header(app_header);
}

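/*
 * Return the PA of an app section, given the PA of the app header and the
 * section offset within the app image. The padded app header occupies the
 * first APP_HEADER_SIZE bytes of the image.
 */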
static uintptr_t section_start_pa(uintptr_t app_header, size_t section_offset)
{
	return app_header +
	       APP_HEADER_SIZE +    /* Skip the padded app header */
	       section_offset;
}

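/* Map/unmap the app instance's shared page in EL2 VA space via SLOT_APP_SHARED. */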
void app_map_shared_page(struct app_data_cfg *app_data)
{
	assert(app_data->el2_shared_page == NULL);
	app_data->el2_shared_page = map_page_to_slot(app_data->shared_page_pa, SLOT_APP_SHARED);
}

void app_unmap_shared_page(struct app_data_cfg *app_data)
{
	assert(app_data->el2_shared_page != NULL);
	unmap_page(app_data->shared_page_pa, app_data->el2_shared_page);
	app_data->el2_shared_page = NULL;
}

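/*
 * Map a read-write section of section_size bytes at va in the app VA
 * space, consuming granules from granule_pas[] starting at
 * *next_granule_idx. On success *next_granule_idx is advanced past the
 * consumed granules.
 */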
static int app_rw_page_xlat_map(struct app_data_cfg *app_data,
	      uintptr_t va,
	      size_t section_size,
	      const char *section_name,
	      size_t *next_granule_idx,
	      uintptr_t granule_pas[],
	      size_t granule_count)
{
	size_t section_bytes_mapped;

	for (section_bytes_mapped = 0;
	     section_bytes_mapped < section_size;
	     section_bytes_mapped += GRANULE_SIZE) {
		int ret;

		if (*next_granule_idx >= granule_count) {
			return -EINVAL;
		}

		LOG_APP_FW("    mapping %s page: 0x%lx -> 0x%lx\n",
			section_name, granule_pas[*next_granule_idx], va);
		ret = app_xlat_map(
			app_data,
			va,
			granule_pas[*next_granule_idx],
			(MT_RW_DATA | MT_REALM | MT_AP_UNPRIV | MT_NG));
		if (ret != 0) {
			return ret;
		}
		*next_granule_idx += 1UL;
		va += GRANULE_SIZE;
	}
	return 0;
}

static int app_shared_xlat_map(struct app_data_cfg *app_data,
	       uintptr_t va,
	       size_t *next_granule_idx,
	       uintptr_t granule_pas[],
	       size_t granule_count)
{
	size_t shared_page_idx = *next_granule_idx;
	int ret;

	ret = app_rw_page_xlat_map(app_data, va, GRANULE_SIZE, ".shared",
	      next_granule_idx, granule_pas, granule_count);
	if (ret != 0) {
		return ret;
	}
	app_data->shared_page_pa = granule_pas[shared_page_idx];
	return ret;
}

static int app_stack_xlat_map(struct app_data_cfg *app_data,
	      uintptr_t va,
	      size_t stack_size,
	      size_t *next_granule_idx,
	      uintptr_t granule_pas[],
	      size_t granule_count)
{
	return app_rw_page_xlat_map(app_data, va, stack_size, ".stack",
	      next_granule_idx, granule_pas, granule_count);
}

static int app_heap_xlat_map(struct app_data_cfg *app_data,
	      uintptr_t va,
	      size_t heap_size,
	      size_t *next_granule_idx,
	      uintptr_t granule_pas[],
	      size_t granule_count)
{
	return app_rw_page_xlat_map(app_data, va, heap_size, ".heap",
	      next_granule_idx, granule_pas, granule_count);
}

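/*
 * Initialise the saved register context used when entering the app:
 * TTBR1_EL2 for the app translation regime, SP_EL0 at the top of the app
 * stack, PSTATE for AArch64 EL0t with interrupts masked, and PC at the
 * app entry point.
 */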
static int init_app_reg_ctx(struct app_data_cfg *app_data)
{
	struct app_reg_ctx *app_reg_ctx =
		(struct app_reg_ctx *)slot_map_page_to_init(app_data->app_reg_ctx_pa);

	if (app_reg_ctx == NULL) {
		ERROR("%s (%u): Failed to map app_reg_ctx page\n", __func__, __LINE__);
		return -EINVAL;
	}

	app_reg_ctx->app_ttbr1_el2 = app_data->mmu_config.ttbrx;
	app_reg_ctx->sp_el0 = app_data->stack_top;
	app_reg_ctx->pstate = SPSR_EL2_MODE_EL0t |
				       SPSR_EL2_nRW_AARCH64 |
				       SPSR_EL2_F_BIT |
				       SPSR_EL2_I_BIT |
				       SPSR_EL2_A_BIT |
				       SPSR_EL2_D_BIT;
	app_reg_ctx->pc = app_data->entry_point;

	unmap_page(app_data->app_reg_ctx_pa, app_reg_ctx);
	return 0;
}

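/*
 * Initialise an app instance from the donated granules: set up the app
 * translation tables, map the register context page, shared page, stack
 * and heap in the app VA space, and populate the initial register
 * context. granule_pas[0] and granule_pas[1] are reserved for the
 * register context page and the page table respectively; the remaining
 * granules back the shared page, stack and heap.
 */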
int app_init_data(struct app_data_cfg *app_data,
		      unsigned long app_id,
		      uintptr_t granule_pas[],
		      size_t granule_count,
		      void *granule_va_start)
{
	struct app_header *app_header = NULL;
	int ret = 0;
	/* idx 0 and 1 are used for the app_reg_ctx and the page table */
	size_t next_granule_idx = GRANULE_PA_IDX_COUNT;

	LOG_APP_FW("Initialising app %lu\n", app_id);

	if (app_data == NULL) {
		ERROR("%s (%u): app data is NULL\n", __func__, __LINE__);
		return -EINVAL;
	}

	if (app_get_header_ptr(app_id, &app_header) < 0) {
		ERROR("%s (%u): failed to get header ptr for app_id %lu:\n",
			__func__, __LINE__, app_id);
		return -EINVAL;
	}

	if (granule_count < app_get_required_granule_count(app_id)) {
		ERROR("%s (%u): Not enough RW pages: %lu instead of %lu\n",
			__func__, __LINE__, granule_count, app_get_required_granule_count(app_id));
		return -ENOMEM;
	}

	/* Initialise the app_data structure */
	(void)memset(app_data, 0, sizeof(app_data[0]));

	size_t stack_size = app_header->stack_page_count * GRANULE_SIZE;
	size_t heap_size = app_header->heap_page_count * GRANULE_SIZE;

	LOG_APP_FW("    stack_size = %lu\n", stack_size);
	LOG_APP_FW("    heap_size = %lu\n", heap_size);

	void *page_table = slot_map_app_pagetable(granule_pas[GRANULE_PA_IDX_APP_PAGE_TABLE]);

	ret = init_app_translation(
		app_id, app_data, granule_pas[GRANULE_PA_IDX_APP_PAGE_TABLE], page_table);
	if (ret != 0) {
		goto unmap_page_table;
	}

	/* Map the app_reg_ctx page to the dedicated transient region */
	ret = app_xlat_map(app_data,
			  APP_VA_START,
			  granule_pas[GRANULE_PA_IDX_APP_REG_CTX],
			  XLAT_NG_DATA_ATTR);
	if (ret != 0) {
		goto unmap_page_table;
	}

	ret = app_shared_xlat_map(app_data, app_data->el0_shared_page_va,
		&next_granule_idx, granule_pas, granule_count);
	if (ret != 0) {
		goto unmap_page_table;
	}
	ret = app_stack_xlat_map(app_data, app_data->stack_buf_start_va, stack_size,
		&next_granule_idx, granule_pas, granule_count);
	if (ret != 0) {
		goto unmap_page_table;
	}
	app_data->stack_top = app_data->stack_buf_start_va + stack_size;

	app_data->heap_size = heap_size;
	app_data->el2_heap_start = (void *)&(((char *)granule_va_start)[next_granule_idx * GRANULE_SIZE]);
	ret = app_heap_xlat_map(app_data, app_data->heap_va, app_data->heap_size,
		&next_granule_idx, granule_pas, granule_count);
	if (ret != 0) {
		goto unmap_page_table;
	}

	/* Set up register initial values for entering the app */
	app_data->entry_point = app_header->section_text_va;

	app_data->app_reg_ctx_pa = granule_pas[GRANULE_PA_IDX_APP_REG_CTX];

	ret = init_app_reg_ctx(app_data);
	if (ret != 0) {
		goto unmap_page_table;
	}

unmap_page_table:
	unmap_page(granule_pas[GRANULE_PA_IDX_APP_PAGE_TABLE], page_table);
	return ret;
}

void *app_get_heap_ptr(struct app_data_cfg *app_data)
{
	return app_data->el2_heap_start;
}

/* TODO:
 * Collect the bss memory addresses allocated by the app rmm stub.
 * Remove this once RMM memory allocation is sorted out.
 */
static void collect_app_bss(void)
{
	int ret __unused;
	size_t app_index;

	void attest_app_get_bss(uintptr_t *bss_pa, size_t *bss_size);
	void random_app_get_bss(uintptr_t *bss_pa, size_t *bss_size);
	void dev_assign_app_get_bss(uintptr_t *bss_pa, size_t *bss_size);

	ret = app_get_index(ATTESTATION_APP_ID, &app_index);
	assert(ret == 0);
	attest_app_get_bss(&app_bss_memory_array[app_index].pa,
		&app_bss_memory_array[app_index].size);
	ret = app_get_index(RMM_RANDOM_APP_ID, &app_index);
	assert(ret == 0);
	random_app_get_bss(&app_bss_memory_array[app_index].pa,
		&app_bss_memory_array[app_index].size);
	ret = app_get_index(RMM_DEV_ASSIGN_APP_ID, &app_index);
	assert(ret == 0);
	dev_assign_app_get_bss(&app_bss_memory_array[app_index].pa,
			&app_bss_memory_array[app_index].size);
}

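/*
 * One-time setup of the app framework at RMM boot. For each app, build
 * the base set of xlat mmap regions (register context, text, rodata,
 * data, BSS, plus transient regions for the shared page, heap and stack
 * separated by guard granules) and initialise the base translation
 * context config that each app instance copies in init_app_translation().
 */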
void app_framework_setup(void)
{
	size_t app_index;
	struct app_header *app_header;
	struct app_id_data *app_id_data;

	/* coverity[misra_c_2012_rule_2_2_violation:SUPPRESS] */
	collect_app_bss();

	/* cppcheck-suppress unsignedLessThanZero
	 * As app_index is unsigned, app_index < APP_COUNT cannot be true when
	 * APP_COUNT is 0.
	 */
	/* coverity[no_effect:SUPPRESS] */
	/* coverity[misra_c_2012_rule_14_3_violation:SUPPRESS] */
	for (app_index = 0; app_index < APP_COUNT; ++app_index) {
		/* coverity[deadcode:SUPPRESS] */
		/* coverity[misra_c_2012_rule_14_3_violation:SUPPRESS] */
		int ret __unused;
		uintptr_t bss_pa;

		ret = app_get_header_ptr_at_index(app_index, &app_header);
		assert(ret == 0);
		app_id_data = &app_id_data_array[app_index];

		struct xlat_mmap_region region_app_reg_ctx = MAP_REGION_TRANSIENT(
					APP_VA_START,
					GRANULE_SIZE,
					PAGE_SIZE);
		app_id_data->mm_regions_array[RMM_APP_APP_REG_CTX_MMAP_IDX] = region_app_reg_ctx;

		struct xlat_mmap_region region_text = {
			section_start_pa((uintptr_t)app_header, app_header->section_text_offset),
			app_header->section_text_va,
			app_header->section_text_size,
			MT_CODE | MT_REALM | MT_EXEC_UNPRIV | MT_NG,
			PAGE_SIZE
		};
		app_id_data->mm_regions_array[RMM_APP_TEXT_MMAP_IDX] = region_text;

		struct xlat_mmap_region region_rodata = {
			section_start_pa((uintptr_t)app_header, app_header->section_rodata_offset),
			app_header->section_rodata_va,
			app_header->section_rodata_size,
			MT_RO_DATA | MT_REALM | MT_AP_UNPRIV | MT_NG,
			PAGE_SIZE
		};
		app_id_data->mm_regions_array[RMM_APP_RODATA_MMAP_IDX] = region_rodata;

		struct xlat_mmap_region region_data = {
			section_start_pa((uintptr_t)app_header, app_header->section_data_offset),
			app_header->section_data_va,
			app_header->section_data_size,
			(MT_RW_DATA | MT_REALM | MT_AP_UNPRIV | MT_NG),
			PAGE_SIZE
		};
		app_id_data->mm_regions_array[RMM_APP_DATA_MMAP_IDX] = region_data;

		ret = allocate_bss(app_header->app_id, app_header->section_bss_size, &bss_pa);
		if (ret != 0) {
			panic();
		}
		struct xlat_mmap_region region_bss = {
			bss_pa,
			app_header->section_bss_va,
			app_header->section_bss_size,
			(MT_RW_DATA | MT_REALM | MT_AP_UNPRIV | MT_NG),
			PAGE_SIZE
		};
		app_id_data->mm_regions_array[RMM_APP_BSS_MMAP_IDX] = region_bss;

		/* Pages for sections below are allocated per instantiation of
		 * the app.
		 */
		struct xlat_mmap_region region_shared = MAP_REGION_TRANSIENT(
			app_header->section_shared_va,
			GRANULE_SIZE,
			PAGE_SIZE);
		app_id_data->mm_regions_array[RMM_APP_SHARED_IDX] = region_shared;
		app_id_data->el0_shared_page_va = region_shared.base_va;

		struct xlat_mmap_region region_heap = MAP_REGION_TRANSIENT(
			/* Additional granule offset to base_va for heap underflow protection */
			region_shared.base_va + region_shared.size + GRANULE_SIZE,
			app_header->heap_page_count * GRANULE_SIZE,
			PAGE_SIZE);
		app_id_data->mm_regions_array[RMM_APP_HEAP_IDX] = region_heap;
		app_id_data->heap_va = region_heap.base_va;

		struct xlat_mmap_region region_stack = MAP_REGION_TRANSIENT(
			/* Additional granule offset to base_va for stack overflow protection */
			region_heap.base_va + region_heap.size + GRANULE_SIZE,
			app_header->stack_page_count * GRANULE_SIZE,
			PAGE_SIZE);
		app_id_data->mm_regions_array[RMM_APP_STACK_IDX] = region_stack;
		app_id_data->stack_buf_start_va = region_stack.base_va;

		/* Use the same VA size as is configured for the high VA
		 * range, so that only ttbrx_el2 needs to be set up for the
		 * MMU configuration.
		 */
		ret = xlat_ctx_cfg_init(&app_id_data->app_va_xlat_ctx_cfg_base, VA_HIGH_REGION,
					app_id_data->mm_regions_array,
					RMM_APP_MMAP_REGION_COUNT,
					XLAT_HIGH_VA_SIZE,
					app_header->app_id);
		if (ret != 0) {
			panic();
		}
	}
}

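/*
 * Pack the app heap VA and heap page count into the single argument that
 * is passed to run_app(). The asserts check that each value fits within
 * its mask (HEAP_VA_MASK and HEAP_PAGE_COUNT_MASK) so the two fields can
 * be combined with a bitwise OR.
 */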
static uint64_t encode_heap_data(unsigned long heap_va, size_t heap_size)
{
	size_t heap_page_count = heap_size / GRANULE_SIZE;

	assert((heap_va & HEAP_VA_MASK) == heap_va);
	assert((heap_page_count & HEAP_PAGE_COUNT_MASK) == heap_page_count);
	return heap_va | heap_page_count;
}

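/*
 * Enter the app and run it until it exits via an SVC. The EL2
 * exception-return state (HCR_EL2, ELR_EL2, SPSR_EL2) is saved and
 * restored around the app execution. Service calls (APP_SERVICE_CALL)
 * are dispatched to call_app_service() and the app is re-entered; exit
 * and yield SVCs leave the loop. Any other exception taken from the app
 * results in a panic.
 */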
static void app_run_internal(struct app_data_cfg *app_data,
				struct app_reg_ctx *app_reg_ctx)
{
	unsigned long old_hcr_el2 = read_hcr_el2();
	unsigned long old_elr_el2 = read_elr_el2();
	unsigned long old_spsr_el2 = read_spsr_el2();

	write_hcr_el2(HCR_EL2_INIT);

	assert(app_reg_ctx != NULL);

	write_elr_el2(app_reg_ctx->pc);
	write_spsr_el2(app_reg_ctx->pstate);

	assert(!app_data->app_entered);
	app_data->app_entered = true;

	while (true) {
		int app_exception_code;
		unsigned long esr;

		app_exception_code = run_app(app_reg_ctx,
			encode_heap_data(app_data->heap_va, app_data->heap_size));

		app_reg_ctx->pc = read_elr_el2();
		app_reg_ctx->pstate = read_spsr_el2();

		esr = read_esr_el2();

		if ((app_exception_code == ARM_EXCEPTION_SYNC_LEL) &&
		    ((esr & MASK(ESR_EL2_EC)) == ESR_EL2_EC_SVC)) {
			/* The EL0 app called SVC as expected. In case of SVC,
			 * the low 16 bits of the ISS contain the imm16 value
			 * of the SVC instruction executed by the app.
			 */
			/* TODO: in future an app could be pre-empted by an
			 * interrupt or there could be other valid exceptions.
			 */
			uint16_t imm16 = (uint16_t)EXTRACT(ESR_EL2_ISS, esr);

			if (imm16 == APP_EXIT_CALL) {
				app_data->exit_flag = (uint32_t)APP_EXIT_SVC_EXIT_FLAG;
				break;
			} else if (imm16 == APP_YIELD_CALL) {
				app_data->exit_flag = (uint32_t)APP_EXIT_SVC_YIELD_FLAG;
				break;
			} else if (imm16 == APP_SERVICE_CALL) {
				app_data->exit_flag = (uint32_t)APP_EXIT_SVC_SERVICE_FLAG;
				app_reg_ctx->app_regs[0] =
					call_app_service(app_reg_ctx->app_regs[0],
							 app_data,
							 app_reg_ctx->app_regs[1],
							 app_reg_ctx->app_regs[2],
							 app_reg_ctx->app_regs[3],
							 app_reg_ctx->app_regs[4]);
				continue;
			}
		}

		unsigned long elr_el2 = read_elr_el2();

		ERROR("Failed to return properly from the EL0 app\n");
		ERROR("    ELR_EL2 = 0x%lx\n", elr_el2);

		panic();
	}

	assert(app_data->app_entered);
	app_data->app_entered = false;

	write_hcr_el2(old_hcr_el2);
	write_elr_el2(old_elr_el2);
	write_spsr_el2(old_spsr_el2);
	isb();
}

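/*
 * Run an app function: store the function ID and up to four arguments in
 * the app's register context, enter the app, and return the value the
 * app left in X0. If the app yielded instead of exiting, the init
 * pattern is returned instead and the app can later be continued with
 * app_resume().
 */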
unsigned long app_run(struct app_data_cfg *app_data,
			  unsigned long app_func_id,
			  unsigned long arg0,
			  unsigned long arg1,
			  unsigned long arg2,
			  unsigned long arg3)
{
	/* Special init pattern to detect incorrect use of retval when yielded */
	unsigned long retval = 0x0F0F0F0F;
	struct app_reg_ctx *app_reg_ctx =
		(struct app_reg_ctx *)
		slot_map_app_reg_ctx_page(app_data->app_reg_ctx_pa);

	assert(app_reg_ctx != NULL);

	/* This function should not be called if the EL0 app was yielded */
	assert(app_data->exit_flag != APP_EXIT_SVC_YIELD_FLAG);

	app_reg_ctx->app_regs[0] = app_func_id;
	app_reg_ctx->app_regs[1] = arg0;
	app_reg_ctx->app_regs[2] = arg1;
	app_reg_ctx->app_regs[3] = arg2;
	app_reg_ctx->app_regs[4] = arg3;

	app_run_internal(app_data, app_reg_ctx);

	/* Return the value in X0 as the EL0 app return value if not yielded */
	if (app_data->exit_flag != APP_EXIT_SVC_YIELD_FLAG) {
		retval = app_reg_ctx->app_regs[0];
	}

	unmap_page(app_data->app_reg_ctx_pa, app_reg_ctx);

	return retval;
}

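/*
 * Resume an app that previously yielded via APP_YIELD_CALL. Execution
 * continues from the saved register context and the value in X0 is
 * returned once the app exits (rather than yielding again).
 */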
unsigned long app_resume(struct app_data_cfg *app_data)
{
	unsigned long retval = 0xF0F0F0F0U;
	struct app_reg_ctx *app_reg_ctx =
		(struct app_reg_ctx *)
		slot_map_app_reg_ctx_page(app_data->app_reg_ctx_pa);

	assert(app_reg_ctx != NULL);

	/* This function should only be called if the EL0 app was yielded */
	assert(app_data->exit_flag == APP_EXIT_SVC_YIELD_FLAG);

	app_run_internal(app_data, app_reg_ctx);

	/* Return the value in X0 as the EL0 app return value if not yielded */
	if (app_data->exit_flag != APP_EXIT_SVC_YIELD_FLAG) {
		retval = app_reg_ctx->app_regs[0];
	}

	unmap_page(app_data->app_reg_ctx_pa, app_reg_ctx);

	return retval;
}

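/*
 * Abort any in-progress (e.g. yielded) execution of the app by resetting
 * its register context to the initial entry state.
 */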
void app_abort(struct app_data_cfg *app_data)
{
	(void)init_app_reg_ctx(app_data);
}