1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2015-2022, Linaro Limited
4  */
5 
6 #include <arm.h>
7 #include <assert.h>
8 #include <compiler.h>
9 #include <config.h>
10 #include <console.h>
11 #include <crypto/crypto.h>
12 #include <drivers/gic.h>
13 #include <initcall.h>
14 #include <inttypes.h>
15 #include <keep.h>
16 #include <kernel/asan.h>
17 #include <kernel/boot.h>
18 #include <kernel/linker.h>
19 #include <kernel/misc.h>
20 #include <kernel/panic.h>
21 #include <kernel/tee_misc.h>
22 #include <kernel/thread.h>
23 #include <kernel/tpm.h>
24 #include <libfdt.h>
25 #include <malloc.h>
26 #include <memtag.h>
27 #include <mm/core_memprot.h>
28 #include <mm/core_mmu.h>
29 #include <mm/fobj.h>
30 #include <mm/tee_mm.h>
31 #include <mm/tee_pager.h>
32 #include <sm/psci.h>
33 #include <stdio.h>
34 #include <trace.h>
35 #include <utee_defines.h>
36 #include <util.h>
37 
38 #include <platform_config.h>
39 
40 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
41 #include <sm/sm.h>
42 #endif
43 
44 #if defined(CFG_WITH_VFP)
45 #include <kernel/vfp.h>
46 #endif
47 
48 /*
49  * In this file we're using unsigned long to represent physical pointers as
50  * they are received in a single register when OP-TEE is initially entered.
 * This limits 32-bit systems to only make use of the lower 32 bits
52  * of a physical address for initial parameters.
53  *
54  * 64-bit systems on the other hand can use full 64-bit physical pointers.
55  */
56 #define PADDR_INVALID		ULONG_MAX
57 
58 #if defined(CFG_BOOT_SECONDARY_REQUEST)
59 struct ns_entry_context {
60 	uintptr_t entry_point;
61 	uintptr_t context_id;
62 };
63 struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
64 static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
65 #endif
66 
67 #ifdef CFG_BOOT_SYNC_CPU
68 /*
 * Array used during boot to synchronize the CPUs.
 * When 0, the CPU has not started.
 * When 1, it has started.
72  */
73 uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
74 DECLARE_KEEP_PAGER(sem_cpu_sync);
75 #endif
76 
77 #ifdef CFG_DT
78 struct dt_descriptor {
79 	void *blob;
80 #ifdef _CFG_USE_DTB_OVERLAY
81 	int frag_id;
82 #endif
83 };
84 
85 static struct dt_descriptor external_dt __nex_bss;
86 #endif
87 
88 #ifdef CFG_SECONDARY_INIT_CNTFRQ
89 static uint32_t cntfrq;
90 #endif
91 
92 /* May be overridden in plat-$(PLATFORM)/main.c */
__weak void plat_primary_init_early(void)
94 {
95 }
96 DECLARE_KEEP_PAGER(plat_primary_init_early);
97 
98 /* May be overridden in plat-$(PLATFORM)/main.c */
__weak void main_init_gic(void)
100 {
101 }
102 
103 /* May be overridden in plat-$(PLATFORM)/main.c */
__weak void main_secondary_init_gic(void)
105 {
106 }
107 
108 /* May be overridden in plat-$(PLATFORM)/main.c */
__weak unsigned long plat_get_aslr_seed(void)
110 {
111 	DMSG("Warning: no ASLR seed");
112 
113 	return 0;
114 }
115 
116 #if defined(_CFG_CORE_STACK_PROTECTOR)
117 /* Generate random stack canary value on boot up */
__weak uintptr_t plat_get_random_stack_canary(void)
119 {
120 	uintptr_t canary = 0xbaaaad00;
121 	TEE_Result ret = TEE_ERROR_GENERIC;
122 
123 	/*
	 * With virtualization the RNG is not initialized in the Nexus core,
	 * so this needs to be overridden with a platform-specific
	 * implementation.
126 	 */
127 	if (IS_ENABLED(CFG_VIRTUALIZATION)) {
128 		IMSG("WARNING: Using fixed value for stack canary");
129 		return canary;
130 	}
131 
132 	ret = crypto_rng_read(&canary, sizeof(canary));
133 	if (ret != TEE_SUCCESS)
134 		panic("Failed to generate random stack canary");
135 
	/* Leave a null byte in the canary to defeat string-based exploits */
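	/*
	 * Illustrative example (arbitrary value): a generated value of
	 * 0x8899aabb is returned as 0x8899aa00, i.e. the low byte is always
	 * a terminating NUL, which a string-copy based overflow cannot
	 * reproduce without terminating early.
	 */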
137 	return canary & ~0xffUL;
138 }
139 #endif /*_CFG_CORE_STACK_PROTECTOR*/
140 
141 /*
142  * This function is called as a guard after each smc call which is not
143  * supposed to return.
144  */
void __panic_at_smc_return(void)
146 {
147 	panic();
148 }
149 
150 #if defined(CFG_WITH_ARM_TRUSTED_FW)
void init_sec_mon(unsigned long nsec_entry __maybe_unused)
152 {
153 	assert(nsec_entry == PADDR_INVALID);
154 	/* Do nothing as we don't have a secure monitor */
155 }
156 #else
157 /* May be overridden in plat-$(PLATFORM)/main.c */
__weak void init_sec_mon(unsigned long nsec_entry)
159 {
160 	struct sm_nsec_ctx *nsec_ctx;
161 
162 	assert(nsec_entry != PADDR_INVALID);
163 
164 	/* Initialize secure monitor */
165 	nsec_ctx = sm_get_nsec_ctx();
166 	nsec_ctx->mon_lr = nsec_entry;
167 	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
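	/*
	 * As with an interworking branch, bit 0 of the entry address selects
	 * the instruction set of the normal world entry point: an odd
	 * address means Thumb code, so the T bit is set in the saved SPSR
	 * below.
	 */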
168 	if (nsec_entry & 1)
169 		nsec_ctx->mon_spsr |= CPSR_T;
170 }
171 #endif
172 
173 #if defined(CFG_WITH_ARM_TRUSTED_FW)
static void init_vfp_nsec(void)
175 {
176 }
177 #else
static void init_vfp_nsec(void)
179 {
180 	/* Normal world can use CP10 and CP11 (SIMD/VFP) */
181 	write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
182 }
183 #endif
184 
185 #if defined(CFG_WITH_VFP)
186 
187 #ifdef ARM32
static void init_vfp_sec(void)
189 {
190 	uint32_t cpacr = read_cpacr();
191 
192 	/*
193 	 * Enable Advanced SIMD functionality.
194 	 * Enable use of D16-D31 of the Floating-point Extension register
195 	 * file.
196 	 */
197 	cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
198 	/*
	 * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and user
	 * mode.
201 	 */
202 	cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
203 	cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
204 	write_cpacr(cpacr);
205 }
206 #endif /* ARM32 */
207 
208 #ifdef ARM64
static void init_vfp_sec(void)
210 {
211 	/* Not using VFP until thread_kernel_enable_vfp() */
212 	vfp_disable();
213 }
214 #endif /* ARM64 */
215 
216 #else /* CFG_WITH_VFP */
217 
static void init_vfp_sec(void)
219 {
220 	/* Not using VFP */
221 }
222 #endif
223 
224 #ifdef CFG_SECONDARY_INIT_CNTFRQ
static void primary_save_cntfrq(void)
226 {
227 	assert(cntfrq == 0);
228 
229 	/*
230 	 * CNTFRQ should be initialized on the primary CPU by a
231 	 * previous boot stage
232 	 */
233 	cntfrq = read_cntfrq();
234 }
235 
static void secondary_init_cntfrq(void)
237 {
238 	assert(cntfrq != 0);
239 	write_cntfrq(cntfrq);
240 }
241 #else /* CFG_SECONDARY_INIT_CNTFRQ */
static void primary_save_cntfrq(void)
243 {
244 }
245 
static void secondary_init_cntfrq(void)
247 {
248 }
249 #endif
250 
251 #ifdef CFG_CORE_SANITIZE_KADDRESS
static void init_run_constructors(void)
253 {
254 	const vaddr_t *ctor;
255 
256 	for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
257 		((void (*)(void))(*ctor))();
258 }
259 
static void init_asan(void)
261 {
262 
263 	/*
264 	 * CFG_ASAN_SHADOW_OFFSET is also supplied as
265 	 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
	 * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
	 * aren't available to make, we have to calculate it in advance and
	 * hard code it into the platform conf.mk. Here, where all the needed
	 * values are available, we double check that the compiler was
	 * supplied the correct value.
271 	 */
272 
273 #define __ASAN_SHADOW_START \
274 	ROUNDUP(TEE_RAM_VA_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
275 	assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
276 #define __CFG_ASAN_SHADOW_OFFSET \
277 	(__ASAN_SHADOW_START - (TEE_RAM_VA_START / 8))
278 	COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
279 #undef __ASAN_SHADOW_START
280 #undef __CFG_ASAN_SHADOW_OFFSET
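	/*
	 * Worked example with hypothetical values: with
	 * TEE_RAM_VA_START = 0x10000000 and TEE_RAM_VA_SIZE = 0x00900000
	 * the shadow area starts at
	 * ROUNDUP(0x10000000 + (0x00900000 * 8) / 9 - 8, 8) = 0x107ffff8,
	 * i.e. the last ninth of the VA range, and CFG_ASAN_SHADOW_OFFSET
	 * must be 0x107ffff8 - 0x10000000 / 8 = 0x0e7ffff8 so that
	 * shadow(addr) = addr / 8 + offset maps the start of TEE RAM to the
	 * start of the shadow area.
	 */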
281 
282 	/*
	 * Mark the area covered by the shadow memory: everything from the
	 * start of the image up to the beginning of the shadow area.
285 	 */
286 	asan_set_shadowed((void *)TEE_TEXT_VA_START, &__asan_shadow_start);
287 
288 	/*
289 	 * Add access to areas that aren't opened automatically by a
290 	 * constructor.
291 	 */
292 	asan_tag_access(&__ctor_list, &__ctor_end);
293 	asan_tag_access(__rodata_start, __rodata_end);
294 #ifdef CFG_WITH_PAGER
295 	asan_tag_access(__pageable_start, __pageable_end);
296 #endif /*CFG_WITH_PAGER*/
297 	asan_tag_access(__nozi_start, __nozi_end);
298 	asan_tag_access(__exidx_start, __exidx_end);
299 	asan_tag_access(__extab_start, __extab_end);
300 
301 	init_run_constructors();
302 
303 	/* Everything is tagged correctly, let's start address sanitizing. */
304 	asan_start();
305 }
306 #else /*CFG_CORE_SANITIZE_KADDRESS*/
static void init_asan(void)
308 {
309 }
310 #endif /*CFG_CORE_SANITIZE_KADDRESS*/
311 
312 #if defined(CFG_MEMTAG)
313 /* Called from entry_a64.S only when MEMTAG is configured */
void boot_init_memtag(void)
315 {
316 	memtag_init_ops(feat_mte_implemented());
317 	memtag_set_tags((void *)TEE_RAM_START, TEE_RAM_PH_SIZE, 0);
318 }
319 #endif
320 
321 #ifdef CFG_WITH_PAGER
322 
323 #ifdef CFG_CORE_SANITIZE_KADDRESS
static void carve_out_asan_mem(tee_mm_pool_t *pool)
325 {
326 	const size_t s = pool->hi - pool->lo;
327 	tee_mm_entry_t *mm;
328 	paddr_t apa = ASAN_MAP_PA;
329 	size_t asz = ASAN_MAP_SZ;
330 
331 	if (core_is_buffer_outside(apa, asz, pool->lo, s))
332 		return;
333 
334 	/* Reserve the shadow area */
335 	if (!core_is_buffer_inside(apa, asz, pool->lo, s)) {
336 		if (apa < pool->lo) {
337 			/*
338 			 * ASAN buffer is overlapping with the beginning of
339 			 * the pool.
340 			 */
341 			asz -= pool->lo - apa;
342 			apa = pool->lo;
343 		} else {
344 			/*
345 			 * ASAN buffer is overlapping with the end of the
346 			 * pool.
347 			 */
348 			asz = pool->hi - apa;
349 		}
350 	}
351 	mm = tee_mm_alloc2(pool, apa, asz);
352 	assert(mm);
353 }
354 #else
static void carve_out_asan_mem(tee_mm_pool_t *pool __unused)
356 {
357 }
358 #endif
359 
static void print_pager_pool_size(void)
361 {
362 	struct tee_pager_stats __maybe_unused stats;
363 
364 	tee_pager_get_stats(&stats);
365 	IMSG("Pager pool size: %zukB",
366 		stats.npages_all * SMALL_PAGE_SIZE / 1024);
367 }
368 
static void init_vcore(tee_mm_pool_t *mm_vcore)
370 {
371 	const vaddr_t begin = VCORE_START_VA;
372 	size_t size = TEE_RAM_VA_SIZE;
373 
374 #ifdef CFG_CORE_SANITIZE_KADDRESS
	/* Carve out the ASAN memory, flat mapped after core memory */
376 	if (begin + size > ASAN_SHADOW_PA)
377 		size = ASAN_MAP_PA - begin;
378 #endif
379 
380 	if (!tee_mm_init(mm_vcore, begin, size, SMALL_PAGE_SHIFT,
381 			 TEE_MM_POOL_NO_FLAGS))
382 		panic("tee_mm_vcore init failed");
383 }
384 
385 /*
386  * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
387  * The init part is also paged just as the rest of the normal paged code, with
388  * the difference that it's preloaded during boot. When the backing store
389  * is configured the entire paged binary is copied in place and then also
390  * the init part. Since the init part has been relocated (references to
391  * addresses updated to compensate for the new load address) this has to be
392  * undone for the hashes of those pages to match with the original binary.
393  *
394  * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
395  * unchanged.
396  */
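/*
 * Illustrative example (hypothetical offset): with load_offset = 0x02000000,
 * a pointer in the init part that was relocated from 0x0e105000 to
 * 0x10105000 at boot is rewritten back to 0x0e105000 in the copy placed in
 * the backing store, so the page containing it hashes the same as in the
 * original binary.
 */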
static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
398 {
399 #ifdef CFG_CORE_ASLR
400 	unsigned long *ptr = NULL;
401 	const uint32_t *reloc = NULL;
402 	const uint32_t *reloc_end = NULL;
403 	unsigned long offs = boot_mmu_config.load_offset;
404 	const struct boot_embdata *embdata = (const void *)__init_end;
405 	vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_RAM_START;
406 	vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_RAM_START;
407 
408 	reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
409 	reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);
410 
411 	for (; reloc < reloc_end; reloc++) {
412 		if (*reloc < addr_start)
413 			continue;
414 		if (*reloc >= addr_end)
415 			break;
416 		ptr = (void *)(paged_store + *reloc - addr_start);
417 		*ptr -= offs;
418 	}
419 #endif
420 }
421 
static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
423 				   void *store)
424 {
425 	const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
426 #ifdef CFG_CORE_ASLR
427 	unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
428 	const struct boot_embdata *embdata = (const void *)__init_end;
429 	const void *reloc = __init_end + embdata->reloc_offset;
430 
431 	return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
432 					 reloc, embdata->reloc_len, store);
433 #else
434 	return fobj_ro_paged_alloc(num_pages, hashes, store);
435 #endif
436 }
437 
static void init_runtime(unsigned long pageable_part)
439 {
440 	size_t n;
441 	size_t init_size = (size_t)(__init_end - __init_start);
442 	size_t pageable_start = (size_t)__pageable_start;
443 	size_t pageable_end = (size_t)__pageable_end;
444 	size_t pageable_size = pageable_end - pageable_start;
445 	vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
446 			     VCORE_START_VA;
447 	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
448 			   TEE_SHA256_HASH_SIZE;
449 	const struct boot_embdata *embdata = (const void *)__init_end;
450 	const void *tmp_hashes = NULL;
451 	tee_mm_entry_t *mm = NULL;
452 	struct fobj *fobj = NULL;
453 	uint8_t *paged_store = NULL;
454 	uint8_t *hashes = NULL;
455 
456 	assert(pageable_size % SMALL_PAGE_SIZE == 0);
457 	assert(embdata->total_len >= embdata->hashes_offset +
458 				     embdata->hashes_len);
459 	assert(hash_size == embdata->hashes_len);
460 
461 	tmp_hashes = __init_end + embdata->hashes_offset;
462 
463 	init_asan();
464 
465 	/* Add heap2 first as heap1 may be too small as initial bget pool */
466 	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
467 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
468 
469 	/*
470 	 * This needs to be initialized early to support address lookup
471 	 * in MEM_AREA_TEE_RAM
472 	 */
473 	tee_pager_early_init();
474 
475 	hashes = malloc(hash_size);
476 	IMSG_RAW("\n");
477 	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
478 	assert(hashes);
479 	asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);
480 
481 	/*
482 	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
483 	 * DDR below.
484 	 */
485 	core_mmu_init_ta_ram();
486 
487 	carve_out_asan_mem(&tee_mm_sec_ddr);
488 
489 	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
490 	assert(mm);
491 	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM,
492 				   pageable_size);
493 	/*
494 	 * Load pageable part in the dedicated allocated area:
495 	 * - Move pageable non-init part into pageable area. Note bootloader
496 	 *   may have loaded it anywhere in TA RAM hence use memmove().
497 	 * - Copy pageable init part from current location into pageable area.
498 	 */
499 	memmove(paged_store + init_size,
500 		phys_to_virt(pageable_part,
501 			     core_mmu_get_type_by_pa(pageable_part),
502 			     __pageable_part_end - __pageable_part_start),
503 		__pageable_part_end - __pageable_part_start);
504 	asan_memcpy_unchecked(paged_store, __init_start, init_size);
505 	/*
506 	 * Undo eventual relocation for the init part so the hash checks
507 	 * can pass.
508 	 */
509 	undo_init_relocation(paged_store);
510 
	/* Check that the hashes of what's in the pageable area are OK */
512 	DMSG("Checking hashes of pageable area");
513 	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
514 		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
515 		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
516 		TEE_Result res;
517 
518 		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
519 		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
520 		if (res != TEE_SUCCESS) {
521 			EMSG("Hash failed for page %zu at %p: res 0x%x",
522 			     n, (void *)page, res);
523 			panic();
524 		}
525 	}
526 
527 	/*
	 * Assert that the prepaged init sections are page aligned so that
	 * nothing trails uninitialized at the end of the premapped init area.
530 	 */
531 	assert(!(init_size & SMALL_PAGE_MASK));
532 
533 	/*
534 	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
535 	 * is supplied to tee_pager_init() below.
536 	 */
537 	init_vcore(&tee_mm_vcore);
538 
539 	/*
	 * Assign the pager's alias area at the end of the small page block
	 * that the rest of the binary is loaded into. We're taking more than
	 * needed, but we're guaranteed to not need more than the physical
	 * amount of TZSRAM.
544 	 */
545 	mm = tee_mm_alloc2(&tee_mm_vcore,
546 			   (vaddr_t)tee_mm_vcore.lo +
547 			   tee_mm_vcore.size - TZSRAM_SIZE,
548 			   TZSRAM_SIZE);
549 	assert(mm);
550 	tee_pager_set_alias_area(mm);
551 
552 	/*
553 	 * Claim virtual memory which isn't paged.
554 	 * Linear memory (flat map core memory) ends there.
555 	 */
556 	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
557 			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
558 	assert(mm);
559 
560 	/*
561 	 * Allocate virtual memory for the pageable area and let the pager
562 	 * take charge of all the pages already assigned to that memory.
563 	 */
564 	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
565 			   pageable_size);
566 	assert(mm);
567 	fobj = ro_paged_alloc(mm, hashes, paged_store);
568 	assert(fobj);
569 	tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
570 				  fobj);
571 	fobj_put(fobj);
572 
573 	tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
574 	tee_pager_add_pages(pageable_start + init_size,
575 			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
576 			    true);
577 	if (pageable_end < tzsram_end)
578 		tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
579 						   SMALL_PAGE_SIZE, true);
580 
581 	/*
582 	 * There may be physical pages in TZSRAM before the core load address.
583 	 * These pages can be added to the physical pages pool of the pager.
	 * This setup may happen when the secure bootloader runs in TZSRAM
	 * and its memory can be reused by OP-TEE once boot stages complete.
586 	 */
587 	tee_pager_add_pages(tee_mm_vcore.lo,
588 			(VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
589 			true);
590 
591 	print_pager_pool_size();
592 }
593 #else
594 
static void init_runtime(unsigned long pageable_part __unused)
596 {
597 	init_asan();
598 
599 	/*
	 * By default the whole of OP-TEE uses malloc, so we need to
	 * initialize it early. But when virtualization is enabled, malloc
	 * is used only by the TEE runtime, so it should be initialized
	 * later, for every virtual partition separately. Core code uses
	 * nex_malloc instead.
605 	 */
606 #ifdef CFG_VIRTUALIZATION
607 	nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
608 					      __nex_heap_start);
609 #else
610 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
611 #endif
612 
613 	IMSG_RAW("\n");
614 }
615 #endif
616 
void *get_dt(void)
618 {
619 	void *fdt = get_embedded_dt();
620 
621 	if (!fdt)
622 		fdt = get_external_dt();
623 
624 	return fdt;
625 }
626 
void *get_secure_dt(void)
628 {
629 	void *fdt = get_embedded_dt();
630 
631 	if (!fdt && IS_ENABLED(CFG_MAP_EXT_DT_SECURE))
632 		fdt = get_external_dt();
633 
634 	return fdt;
635 }
636 
637 #if defined(CFG_EMBED_DTB)
void *get_embedded_dt(void)
639 {
640 	static bool checked;
641 
642 	assert(cpu_mmu_enabled());
643 
644 	if (!checked) {
645 		IMSG("Embedded DTB found");
646 
647 		if (fdt_check_header(embedded_secure_dtb))
648 			panic("Invalid embedded DTB");
649 
650 		checked = true;
651 	}
652 
653 	return embedded_secure_dtb;
654 }
655 #else
void *get_embedded_dt(void)
657 {
658 	return NULL;
659 }
660 #endif /*CFG_EMBED_DTB*/
661 
662 #if defined(CFG_DT)
void *get_external_dt(void)
664 {
665 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
666 		return NULL;
667 
668 	assert(cpu_mmu_enabled());
669 	return external_dt.blob;
670 }
671 
static TEE_Result release_external_dt(void)
673 {
674 	int ret = 0;
675 
676 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
677 		return TEE_SUCCESS;
678 
679 	if (!external_dt.blob)
680 		return TEE_SUCCESS;
681 
682 	ret = fdt_pack(external_dt.blob);
683 	if (ret < 0) {
684 		EMSG("Failed to pack Device Tree at 0x%" PRIxPA ": error %d",
685 		     virt_to_phys(external_dt.blob), ret);
686 		panic();
687 	}
688 
689 	if (core_mmu_remove_mapping(MEM_AREA_EXT_DT, external_dt.blob,
690 				    CFG_DTB_MAX_SIZE))
691 		panic("Failed to remove temporary Device Tree mapping");
692 
	/* The external DTB can no longer be reached, reset the pointer to invalid */
694 	external_dt.blob = NULL;
695 
696 	return TEE_SUCCESS;
697 }
698 boot_final(release_external_dt);
699 
700 #ifdef _CFG_USE_DTB_OVERLAY
static int add_dt_overlay_fragment(struct dt_descriptor *dt, int ioffs)
702 {
703 	char frag[32];
704 	int offs;
705 	int ret;
706 
707 	snprintf(frag, sizeof(frag), "fragment@%d", dt->frag_id);
708 	offs = fdt_add_subnode(dt->blob, ioffs, frag);
709 	if (offs < 0)
710 		return offs;
711 
712 	dt->frag_id += 1;
713 
714 	ret = fdt_setprop_string(dt->blob, offs, "target-path", "/");
715 	if (ret < 0)
716 		return -1;
717 
718 	return fdt_add_subnode(dt->blob, offs, "__overlay__");
719 }
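/*
 * Each call adds one more fragment to the overlay, which once applied ends
 * up roughly as follows (sketch):
 *
 *	fragment@0 {
 *		target-path = "/";
 *		__overlay__ {
 *			... nodes subsequently added by OP-TEE ...
 *		};
 *	};
 */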
720 
static int init_dt_overlay(struct dt_descriptor *dt, int __maybe_unused dt_size)
722 {
723 	int fragment;
724 
725 	if (IS_ENABLED(CFG_EXTERNAL_DTB_OVERLAY)) {
726 		if (!fdt_check_header(dt->blob)) {
727 			fdt_for_each_subnode(fragment, dt->blob, 0)
728 				dt->frag_id += 1;
729 			return 0;
730 		}
731 	}
732 
733 	return fdt_create_empty_tree(dt->blob, dt_size);
734 }
735 #else
static int add_dt_overlay_fragment(struct dt_descriptor *dt __unused, int offs)
737 {
738 	return offs;
739 }
740 
static int init_dt_overlay(struct dt_descriptor *dt __unused,
742 			   int dt_size __unused)
743 {
744 	return 0;
745 }
746 #endif /* _CFG_USE_DTB_OVERLAY */
747 
static int add_dt_path_subnode(struct dt_descriptor *dt, const char *path,
749 			       const char *subnode)
750 {
751 	int offs;
752 
753 	offs = fdt_path_offset(dt->blob, path);
754 	if (offs < 0)
755 		return -1;
756 	offs = add_dt_overlay_fragment(dt, offs);
757 	if (offs < 0)
758 		return -1;
759 	offs = fdt_add_subnode(dt->blob, offs, subnode);
760 	if (offs < 0)
761 		return -1;
762 	return offs;
763 }
764 
static int add_optee_dt_node(struct dt_descriptor *dt)
766 {
767 	int offs;
768 	int ret;
769 
770 	if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
771 		DMSG("OP-TEE Device Tree node already exists!");
772 		return 0;
773 	}
774 
775 	offs = fdt_path_offset(dt->blob, "/firmware");
776 	if (offs < 0) {
777 		offs = add_dt_path_subnode(dt, "/", "firmware");
778 		if (offs < 0)
779 			return -1;
780 	}
781 
782 	offs = fdt_add_subnode(dt->blob, offs, "optee");
783 	if (offs < 0)
784 		return -1;
785 
786 	ret = fdt_setprop_string(dt->blob, offs, "compatible",
787 				 "linaro,optee-tz");
788 	if (ret < 0)
789 		return -1;
790 	ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
791 	if (ret < 0)
792 		return -1;
793 	if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
794 		/*
795 		 * The format of the interrupt property is defined by the
796 		 * binding of the interrupt domain root. In this case it's
		 * an Arm GIC v1, v2 or v3, so we must be compatible with
798 		 * these.
799 		 *
800 		 * An SPI type of interrupt is indicated with a 0 in the
801 		 * first cell.
802 		 *
803 		 * The interrupt number goes in the second cell where
804 		 * SPIs ranges from 0 to 987.
805 		 *
806 		 * Flags are passed in the third cell where a 1 means edge
807 		 * triggered.
808 		 */
809 		const uint32_t gic_spi = 0;
810 		const uint32_t irq_type_edge = 1;
811 		uint32_t val[] = {
812 			TEE_U32_TO_BIG_ENDIAN(gic_spi),
813 			TEE_U32_TO_BIG_ENDIAN(CFG_CORE_ASYNC_NOTIF_GIC_INTID -
814 					      GIC_SPI_BASE),
815 			TEE_U32_TO_BIG_ENDIAN(irq_type_edge),
816 		};
817 
818 		ret = fdt_setprop(dt->blob, offs, "interrupts", val,
819 				  sizeof(val));
820 		if (ret < 0)
821 			return -1;
822 	}
823 	return 0;
824 }
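/*
 * On a non-FF-A configuration the code above results in roughly the
 * following node (sketch, property values depend on the configuration):
 *
 *	firmware {
 *		optee {
 *			compatible = "linaro,optee-tz";
 *			method = "smc";
 *			interrupts = <0 spi_number 1>;
 *		};
 *	};
 */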
825 
826 #ifdef CFG_PSCI_ARM32
static int append_psci_compatible(void *fdt, int offs, const char *str)
828 {
829 	return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
830 }
831 
static int dt_add_psci_node(struct dt_descriptor *dt)
833 {
834 	int offs;
835 
836 	if (fdt_path_offset(dt->blob, "/psci") >= 0) {
837 		DMSG("PSCI Device Tree node already exists!");
838 		return 0;
839 	}
840 
841 	offs = add_dt_path_subnode(dt, "/", "psci");
842 	if (offs < 0)
843 		return -1;
844 	if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
845 		return -1;
846 	if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
847 		return -1;
848 	if (append_psci_compatible(dt->blob, offs, "arm,psci"))
849 		return -1;
850 	if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
851 		return -1;
852 	if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
853 		return -1;
854 	if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
855 		return -1;
856 	if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
857 		return -1;
858 	if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
859 		return -1;
860 	if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
861 		return -1;
862 	return 0;
863 }
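/*
 * The resulting node looks roughly like this (sketch, function IDs shown
 * symbolically):
 *
 *	psci {
 *		compatible = "arm,psci-1.0", "arm,psci-0.2", "arm,psci";
 *		method = "smc";
 *		cpu_suspend = <PSCI_CPU_SUSPEND>;
 *		cpu_off = <PSCI_CPU_OFF>;
 *		cpu_on = <PSCI_CPU_ON>;
 *		sys_poweroff = <PSCI_SYSTEM_OFF>;
 *		sys_reset = <PSCI_SYSTEM_RESET>;
 *	};
 */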
864 
static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
866 				    const char *prefix)
867 {
868 	const size_t prefix_len = strlen(prefix);
869 	size_t l;
870 	int plen;
871 	const char *prop;
872 
873 	prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
874 	if (!prop)
875 		return -1;
876 
877 	while (plen > 0) {
878 		if (memcmp(prop, prefix, prefix_len) == 0)
879 			return 0; /* match */
880 
881 		l = strlen(prop) + 1;
882 		prop += l;
883 		plen -= l;
884 	}
885 
886 	return -1;
887 }
888 
static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
890 {
891 	int offs = 0;
892 
893 	while (1) {
894 		offs = fdt_next_node(dt->blob, offs, NULL);
895 		if (offs < 0)
896 			break;
897 		if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
898 			continue; /* already set */
899 		if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
900 			continue; /* no compatible */
901 		if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
902 			return -1;
903 		/* Need to restart scanning as offsets may have changed */
904 		offs = 0;
905 	}
906 	return 0;
907 }
908 
static int config_psci(struct dt_descriptor *dt)
910 {
911 	if (dt_add_psci_node(dt))
912 		return -1;
913 	return dt_add_psci_cpu_enable_methods(dt);
914 }
915 #else
static int config_psci(struct dt_descriptor *dt __unused)
917 {
918 	return 0;
919 }
920 #endif /*CFG_PSCI_ARM32*/
921 
static void set_dt_val(void *data, uint32_t cell_size, uint64_t val)
923 {
924 	if (cell_size == 1) {
925 		fdt32_t v = cpu_to_fdt32((uint32_t)val);
926 
927 		memcpy(data, &v, sizeof(v));
928 	} else {
929 		fdt64_t v = cpu_to_fdt64(val);
930 
931 		memcpy(data, &v, sizeof(v));
932 	}
933 }
934 
static int add_res_mem_dt_node(struct dt_descriptor *dt, const char *name,
936 			       paddr_t pa, size_t size)
937 {
938 	int offs = 0;
939 	int ret = 0;
940 	int addr_size = -1;
941 	int len_size = -1;
942 	bool found = true;
943 	char subnode_name[80] = { 0 };
944 
945 	offs = fdt_path_offset(dt->blob, "/reserved-memory");
946 
947 	if (offs < 0) {
948 		found = false;
949 		offs = 0;
950 	}
951 
952 	if (IS_ENABLED2(_CFG_USE_DTB_OVERLAY)) {
953 		len_size = sizeof(paddr_t) / sizeof(uint32_t);
954 		addr_size = sizeof(paddr_t) / sizeof(uint32_t);
955 	} else {
956 		len_size = fdt_size_cells(dt->blob, offs);
957 		if (len_size < 0)
958 			return -1;
959 		addr_size = fdt_address_cells(dt->blob, offs);
960 		if (addr_size < 0)
961 			return -1;
962 	}
963 
964 	if (!found) {
965 		offs = add_dt_path_subnode(dt, "/", "reserved-memory");
966 		if (offs < 0)
967 			return -1;
968 		ret = fdt_setprop_cell(dt->blob, offs, "#address-cells",
969 				       addr_size);
970 		if (ret < 0)
971 			return -1;
972 		ret = fdt_setprop_cell(dt->blob, offs, "#size-cells", len_size);
973 		if (ret < 0)
974 			return -1;
975 		ret = fdt_setprop(dt->blob, offs, "ranges", NULL, 0);
976 		if (ret < 0)
977 			return -1;
978 	}
979 
980 	ret = snprintf(subnode_name, sizeof(subnode_name),
981 		       "%s@%" PRIxPA, name, pa);
982 	if (ret < 0 || ret >= (int)sizeof(subnode_name))
983 		DMSG("truncated node \"%s@%" PRIxPA"\"", name, pa);
984 	offs = fdt_add_subnode(dt->blob, offs, subnode_name);
985 	if (offs >= 0) {
986 		uint32_t data[FDT_MAX_NCELLS * 2];
987 
988 		set_dt_val(data, addr_size, pa);
989 		set_dt_val(data + addr_size, len_size, size);
990 		ret = fdt_setprop(dt->blob, offs, "reg", data,
991 				  sizeof(uint32_t) * (addr_size + len_size));
992 		if (ret < 0)
993 			return -1;
994 		ret = fdt_setprop(dt->blob, offs, "no-map", NULL, 0);
995 		if (ret < 0)
996 			return -1;
997 	} else {
998 		return -1;
999 	}
1000 	return 0;
1001 }
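/*
 * For example, reserving TZDRAM typically ends up as something like the
 * following (sketch, addresses are hypothetical):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		optee_core@de000000 {
 *			reg = <0xde000000 0x02000000>;
 *			no-map;
 *		};
 *	};
 */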
1002 
1003 #ifdef CFG_CORE_DYN_SHM
static uint64_t get_dt_val_and_advance(const void *data, size_t *offs,
1005 				       uint32_t cell_size)
1006 {
1007 	uint64_t rv = 0;
1008 
1009 	if (cell_size == 1) {
1010 		uint32_t v;
1011 
1012 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
1013 		*offs += sizeof(v);
1014 		rv = fdt32_to_cpu(v);
1015 	} else {
1016 		uint64_t v;
1017 
1018 		memcpy(&v, (const uint8_t *)data + *offs, sizeof(v));
1019 		*offs += sizeof(v);
1020 		rv = fdt64_to_cpu(v);
1021 	}
1022 
1023 	return rv;
1024 }
1025 
1026 /*
1027  * Find all non-secure memory from DT. Memory marked inaccessible by Secure
1028  * World is ignored since it could not be mapped to be used as dynamic shared
1029  * memory.
1030  */
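/*
 * A matching node typically looks like this (sketch):
 *
 *	memory@40000000 {
 *		device_type = "memory";
 *		reg = <0x40000000 0x40000000>;
 *	};
 */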
static int get_nsec_memory_helper(void *fdt, struct core_mmu_phys_mem *mem)
1032 {
1033 	const uint8_t *prop = NULL;
1034 	uint64_t a = 0;
1035 	uint64_t l = 0;
1036 	size_t prop_offs = 0;
1037 	size_t prop_len = 0;
1038 	int elems_total = 0;
1039 	int addr_size = 0;
1040 	int len_size = 0;
1041 	int offs = 0;
1042 	size_t n = 0;
1043 	int len = 0;
1044 
1045 	addr_size = fdt_address_cells(fdt, 0);
1046 	if (addr_size < 0)
1047 		return 0;
1048 
1049 	len_size = fdt_size_cells(fdt, 0);
1050 	if (len_size < 0)
1051 		return 0;
1052 
1053 	while (true) {
1054 		offs = fdt_node_offset_by_prop_value(fdt, offs, "device_type",
1055 						     "memory",
1056 						     sizeof("memory"));
1057 		if (offs < 0)
1058 			break;
1059 
1060 		if (_fdt_get_status(fdt, offs) != (DT_STATUS_OK_NSEC |
1061 						   DT_STATUS_OK_SEC))
1062 			continue;
1063 
1064 		prop = fdt_getprop(fdt, offs, "reg", &len);
1065 		if (!prop)
1066 			continue;
1067 
1068 		prop_len = len;
1069 		for (n = 0, prop_offs = 0; prop_offs < prop_len; n++) {
1070 			a = get_dt_val_and_advance(prop, &prop_offs, addr_size);
1071 			if (prop_offs >= prop_len) {
1072 				n--;
1073 				break;
1074 			}
1075 
1076 			l = get_dt_val_and_advance(prop, &prop_offs, len_size);
1077 			if (mem) {
1078 				mem->type = MEM_AREA_DDR_OVERALL;
1079 				mem->addr = a;
1080 				mem->size = l;
1081 				mem++;
1082 			}
1083 		}
1084 
1085 		elems_total += n;
1086 	}
1087 
1088 	return elems_total;
1089 }
1090 
static struct core_mmu_phys_mem *get_nsec_memory(void *fdt, size_t *nelems)
1092 {
1093 	struct core_mmu_phys_mem *mem = NULL;
1094 	int elems_total = 0;
1095 
1096 	elems_total = get_nsec_memory_helper(fdt, NULL);
1097 	if (elems_total <= 0)
1098 		return NULL;
1099 
1100 	mem = nex_calloc(elems_total, sizeof(*mem));
1101 	if (!mem)
1102 		panic();
1103 
1104 	elems_total = get_nsec_memory_helper(fdt, mem);
1105 	assert(elems_total > 0);
1106 
1107 	*nelems = elems_total;
1108 
1109 	return mem;
1110 }
1111 #endif /*CFG_CORE_DYN_SHM*/
1112 
1113 #ifdef CFG_CORE_RESERVED_SHM
static int mark_static_shm_as_reserved(struct dt_descriptor *dt)
1115 {
1116 	vaddr_t shm_start;
1117 	vaddr_t shm_end;
1118 
1119 	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &shm_start, &shm_end);
1120 	if (shm_start != shm_end)
1121 		return add_res_mem_dt_node(dt, "optee_shm",
1122 					   virt_to_phys((void *)shm_start),
1123 					   shm_end - shm_start);
1124 
1125 	DMSG("No SHM configured");
1126 	return -1;
1127 }
1128 #endif /*CFG_CORE_RESERVED_SHM*/
1129 
static void init_external_dt(unsigned long phys_dt)
1131 {
1132 	struct dt_descriptor *dt = &external_dt;
1133 	void *fdt;
1134 	int ret;
1135 
1136 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
1137 		return;
1138 
1139 	if (!phys_dt) {
1140 		/*
1141 		 * No need to panic as we're not using the DT in OP-TEE
1142 		 * yet, we're only adding some nodes for normal world use.
1143 		 * This makes the switch to using DT easier as we can boot
1144 		 * a newer OP-TEE with older boot loaders. Once we start to
1145 		 * initialize devices based on DT we'll likely panic
1146 		 * instead of returning here.
1147 		 */
1148 		IMSG("No non-secure external DT");
1149 		return;
1150 	}
1151 
1152 	fdt = core_mmu_add_mapping(MEM_AREA_EXT_DT, phys_dt, CFG_DTB_MAX_SIZE);
1153 	if (!fdt)
1154 		panic("Failed to map external DTB");
1155 
1156 	dt->blob = fdt;
1157 
1158 	ret = init_dt_overlay(dt, CFG_DTB_MAX_SIZE);
1159 	if (ret < 0) {
1160 		EMSG("Device Tree Overlay init fail @ %#lx: error %d", phys_dt,
1161 		     ret);
1162 		panic();
1163 	}
1164 
1165 	ret = fdt_open_into(fdt, fdt, CFG_DTB_MAX_SIZE);
1166 	if (ret < 0) {
1167 		EMSG("Invalid Device Tree at %#lx: error %d", phys_dt, ret);
1168 		panic();
1169 	}
1170 
1171 	IMSG("Non-secure external DT found");
1172 }
1173 
static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
1175 {
1176 	return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
1177 				   CFG_TZDRAM_SIZE);
1178 }
1179 
static void update_external_dt(void)
1181 {
1182 	struct dt_descriptor *dt = &external_dt;
1183 
1184 	if (!IS_ENABLED(CFG_EXTERNAL_DT))
1185 		return;
1186 
1187 	if (!dt->blob)
1188 		return;
1189 
1190 	if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
1191 		panic("Failed to add OP-TEE Device Tree node");
1192 
1193 	if (config_psci(dt))
1194 		panic("Failed to config PSCI");
1195 
1196 #ifdef CFG_CORE_RESERVED_SHM
1197 	if (mark_static_shm_as_reserved(dt))
1198 		panic("Failed to config non-secure memory");
1199 #endif
1200 
1201 	if (mark_tzdram_as_reserved(dt))
1202 		panic("Failed to config secure memory");
1203 }
1204 #else /*CFG_DT*/
void *get_external_dt(void)
1206 {
1207 	return NULL;
1208 }
1209 
static void init_external_dt(unsigned long phys_dt __unused)
1211 {
1212 }
1213 
static void update_external_dt(void)
1215 {
1216 }
1217 
1218 #ifdef CFG_CORE_DYN_SHM
static struct core_mmu_phys_mem *get_nsec_memory(void *fdt __unused,
1220 						 size_t *nelems __unused)
1221 {
1222 	return NULL;
1223 }
1224 #endif /*CFG_CORE_DYN_SHM*/
1225 #endif /*!CFG_DT*/
1226 
1227 #ifdef CFG_CORE_DYN_SHM
static void discover_nsec_memory(void)
1229 {
1230 	struct core_mmu_phys_mem *mem;
1231 	const struct core_mmu_phys_mem *mem_begin = NULL;
1232 	const struct core_mmu_phys_mem *mem_end = NULL;
1233 	size_t nelems;
1234 	void *fdt = get_external_dt();
1235 
1236 	if (fdt) {
1237 		mem = get_nsec_memory(fdt, &nelems);
1238 		if (mem) {
1239 			core_mmu_set_discovered_nsec_ddr(mem, nelems);
1240 			return;
1241 		}
1242 
1243 		DMSG("No non-secure memory found in FDT");
1244 	}
1245 
1246 	mem_begin = phys_ddr_overall_begin;
1247 	mem_end = phys_ddr_overall_end;
1248 	nelems = mem_end - mem_begin;
1249 	if (nelems) {
1250 		/*
1251 		 * Platform cannot use both register_ddr() and the now
1252 		 * deprecated register_dynamic_shm().
1253 		 */
1254 		assert(phys_ddr_overall_compat_begin ==
1255 		       phys_ddr_overall_compat_end);
1256 	} else {
1257 		mem_begin = phys_ddr_overall_compat_begin;
1258 		mem_end = phys_ddr_overall_compat_end;
1259 		nelems = mem_end - mem_begin;
1260 		if (!nelems)
1261 			return;
1262 		DMSG("Warning register_dynamic_shm() is deprecated, please use register_ddr() instead");
1263 	}
1264 
1265 	mem = nex_calloc(nelems, sizeof(*mem));
1266 	if (!mem)
1267 		panic();
1268 
1269 	memcpy(mem, phys_ddr_overall_begin, sizeof(*mem) * nelems);
1270 	core_mmu_set_discovered_nsec_ddr(mem, nelems);
1271 }
1272 #else /*CFG_CORE_DYN_SHM*/
static void discover_nsec_memory(void)
1274 {
1275 }
1276 #endif /*!CFG_CORE_DYN_SHM*/
1277 
1278 #ifdef CFG_VIRTUALIZATION
static TEE_Result virt_init_heap(void)
1280 {
1281 	/* We need to initialize pool for every virtual guest partition */
1282 	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
1283 
1284 	return TEE_SUCCESS;
1285 }
1286 preinit_early(virt_init_heap);
1287 #endif
1288 
void init_tee_runtime(void)
1290 {
1291 #ifndef CFG_WITH_PAGER
1292 	/* Pager initializes TA RAM early */
1293 	core_mmu_init_ta_ram();
1294 #endif
1295 	/*
1296 	 * With virtualization we call this function when creating the
1297 	 * OP-TEE partition instead.
1298 	 */
1299 	if (!IS_ENABLED(CFG_VIRTUALIZATION))
1300 		call_preinitcalls();
1301 	call_initcalls();
1302 
1303 	/*
	 * These two functions use crypto_rng_read() to initialize the
1305 	 * pauth keys. Once call_initcalls() returns we're guaranteed that
1306 	 * crypto_rng_read() is ready to be used.
1307 	 */
1308 	thread_init_core_local_pauth_keys();
1309 	thread_init_thread_pauth_keys();
1310 }
1311 
static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
1313 {
1314 	thread_init_core_local_stacks();
1315 	/*
1316 	 * Mask asynchronous exceptions before switch to the thread vector
1317 	 * as the thread handler requires those to be masked while
1318 	 * executing with the temporary stack. The thread subsystem also
1319 	 * asserts that the foreign interrupts are blocked when using most of
1320 	 * its functions.
1321 	 */
1322 	thread_set_exceptions(THREAD_EXCP_ALL);
1323 	primary_save_cntfrq();
1324 	init_vfp_sec();
1325 	/*
1326 	 * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must
1327 	 * set a current thread right now to avoid a chicken-and-egg problem
1328 	 * (thread_init_boot_thread() sets the current thread but needs
1329 	 * things set by init_runtime()).
1330 	 */
1331 	thread_get_core_local()->curr_thread = 0;
1332 	init_runtime(pageable_part);
1333 
1334 	if (IS_ENABLED(CFG_VIRTUALIZATION)) {
1335 		/*
1336 		 * Virtualization: We can't initialize threads right now because
		 * threads belong to the "tee" part and will be initialized
		 * separately for each new virtual guest. So, we'll clear
1339 		 * "curr_thread" and call it done.
1340 		 */
1341 		thread_get_core_local()->curr_thread = -1;
1342 	} else {
1343 		thread_init_boot_thread();
1344 	}
1345 	thread_init_primary();
1346 	thread_init_per_cpu();
1347 	init_sec_mon(nsec_entry);
1348 }
1349 
static bool cpu_nmfi_enabled(void)
1351 {
1352 #if defined(ARM32)
1353 	return read_sctlr() & SCTLR_NMFI;
1354 #else
1355 	/* Note: ARM64 does not feature non-maskable FIQ support. */
1356 	return false;
1357 #endif
1358 }
1359 
1360 /*
1361  * Note: this function is weak just to make it possible to exclude it from
1362  * the unpaged area.
1363  */
void __weak boot_init_primary_late(unsigned long fdt)
1365 {
1366 	init_external_dt(fdt);
1367 	tpm_map_log_area(get_external_dt());
1368 	discover_nsec_memory();
1369 	update_external_dt();
1370 	configure_console_from_dt();
1371 
1372 	IMSG("OP-TEE version: %s", core_v_str);
1373 	if (IS_ENABLED(CFG_WARN_INSECURE)) {
1374 		IMSG("WARNING: This OP-TEE configuration might be insecure!");
1375 		IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
1376 	}
1377 	IMSG("Primary CPU initializing");
1378 #ifdef CFG_CORE_ASLR
1379 	DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
1380 	     (unsigned long)boot_mmu_config.load_offset, VCORE_START_VA);
1381 #endif
1382 	if (IS_ENABLED(CFG_MEMTAG))
1383 		DMSG("Memory tagging %s",
1384 		     memtag_is_enabled() ?  "enabled" : "disabled");
1385 
1386 	/* Check if platform needs NMFI workaround */
1387 	if (cpu_nmfi_enabled())	{
1388 		if (!IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1389 			IMSG("WARNING: This ARM core has NMFI enabled, please apply workaround!");
1390 	} else {
1391 		if (IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1392 			IMSG("WARNING: This ARM core does not have NMFI enabled, no need for workaround");
1393 	}
1394 
1395 	main_init_gic();
1396 	init_vfp_nsec();
1397 	if (IS_ENABLED(CFG_VIRTUALIZATION)) {
1398 		IMSG("Initializing virtualization support");
1399 		core_mmu_init_virtualization();
1400 	} else {
1401 		init_tee_runtime();
1402 	}
1403 	call_finalcalls();
1404 	IMSG("Primary CPU switching to normal world boot");
1405 }
1406 
static void init_secondary_helper(unsigned long nsec_entry)
1408 {
1409 	IMSG("Secondary CPU %zu initializing", get_core_pos());
1410 
1411 	/*
1412 	 * Mask asynchronous exceptions before switch to the thread vector
1413 	 * as the thread handler requires those to be masked while
1414 	 * executing with the temporary stack. The thread subsystem also
1415 	 * asserts that the foreign interrupts are blocked when using most of
1416 	 * its functions.
1417 	 */
1418 	thread_set_exceptions(THREAD_EXCP_ALL);
1419 
1420 	secondary_init_cntfrq();
1421 	thread_init_per_cpu();
1422 	init_sec_mon(nsec_entry);
1423 	main_secondary_init_gic();
1424 	init_vfp_sec();
1425 	init_vfp_nsec();
1426 
1427 	IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
1428 }
1429 
1430 /*
1431  * Note: this function is weak just to make it possible to exclude it from
1432  * the unpaged area so that it lies in the init area.
1433  */
void __weak boot_init_primary_early(unsigned long pageable_part,
1435 				    unsigned long nsec_entry __maybe_unused)
1436 {
1437 	unsigned long e = PADDR_INVALID;
1438 
1439 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
1440 	e = nsec_entry;
1441 #endif
1442 
1443 	init_primary(pageable_part, e);
1444 }
1445 
1446 #if defined(CFG_WITH_ARM_TRUSTED_FW)
unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
1448 				  unsigned long a1 __unused)
1449 {
1450 	init_secondary_helper(PADDR_INVALID);
1451 	return 0;
1452 }
1453 #else
void boot_init_secondary(unsigned long nsec_entry)
1455 {
1456 	init_secondary_helper(nsec_entry);
1457 }
1458 #endif
1459 
1460 #if defined(CFG_BOOT_SECONDARY_REQUEST)
void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
1462 			    uintptr_t context_id)
1463 {
1464 	ns_entry_contexts[core_idx].entry_point = entry;
1465 	ns_entry_contexts[core_idx].context_id = context_id;
1466 	dsb_ishst();
1467 }
1468 
int boot_core_release(size_t core_idx, paddr_t entry)
1470 {
1471 	if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
1472 		return -1;
1473 
1474 	ns_entry_contexts[core_idx].entry_point = entry;
1475 	dmb();
1476 	spin_table[core_idx] = 1;
1477 	dsb();
1478 	sev();
1479 
1480 	return 0;
1481 }
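/*
 * The barriers above pair with boot_core_hpen() below: the entry point must
 * be visible before spin_table[] is set (dmb), and the table update must
 * have completed (dsb) before sev() wakes a core waiting in wfe().
 */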
1482 
1483 /*
 * Spin until a secondary boot request arrives, then return with the
 * secondary core's entry address.
1486  */
struct ns_entry_context *boot_core_hpen(void)
1488 {
1489 #ifdef CFG_PSCI_ARM32
1490 	return &ns_entry_contexts[get_core_pos()];
1491 #else
1492 	do {
1493 		wfe();
1494 	} while (!spin_table[get_core_pos()]);
1495 	dmb();
1496 	return &ns_entry_contexts[get_core_pos()];
1497 #endif
1498 }
1499 #endif
1500 
1501 #if defined(CFG_CORE_ASLR)
1502 #if defined(CFG_DT)
unsigned long __weak get_aslr_seed(void *fdt)
1504 {
1505 	int rc = fdt_check_header(fdt);
1506 	const uint64_t *seed = NULL;
1507 	int offs = 0;
1508 	int len = 0;
1509 
1510 	if (rc) {
1511 		DMSG("Bad fdt: %d", rc);
1512 		goto err;
1513 	}
1514 
	offs = fdt_path_offset(fdt, "/secure-chosen");
1516 	if (offs < 0) {
1517 		DMSG("Cannot find /secure-chosen");
1518 		goto err;
1519 	}
1520 	seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
1521 	if (!seed || len != sizeof(*seed)) {
1522 		DMSG("Cannot find valid kaslr-seed");
1523 		goto err;
1524 	}
1525 
1526 	return fdt64_to_cpu(*seed);
1527 
1528 err:
1529 	/* Try platform implementation */
1530 	return plat_get_aslr_seed();
1531 }
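/*
 * The consumed property looks roughly like this (sketch, the value is
 * hypothetical):
 *
 *	secure-chosen {
 *		kaslr-seed = <0x12345678 0x9abcdef0>;
 *	};
 */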
1532 #else /*!CFG_DT*/
unsigned long __weak get_aslr_seed(void *fdt __unused)
1534 {
1535 	/* Try platform implementation */
1536 	return plat_get_aslr_seed();
1537 }
1538 #endif /*!CFG_DT*/
1539 #endif /*CFG_CORE_ASLR*/
1540