1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3 * Copyright (c) 2015-2023, Linaro Limited
4 * Copyright (c) 2023, Arm Limited
5 */
6
7 #include <arm.h>
8 #include <assert.h>
9 #include <compiler.h>
10 #include <config.h>
11 #include <console.h>
12 #include <crypto/crypto.h>
13 #include <drivers/gic.h>
14 #include <dt-bindings/interrupt-controller/arm-gic.h>
15 #include <ffa.h>
16 #include <initcall.h>
17 #include <inttypes.h>
18 #include <io.h>
19 #include <keep.h>
20 #include <kernel/asan.h>
21 #include <kernel/boot.h>
22 #include <kernel/dt.h>
23 #include <kernel/linker.h>
24 #include <kernel/misc.h>
25 #include <kernel/panic.h>
26 #include <kernel/tee_misc.h>
27 #include <kernel/thread.h>
28 #include <kernel/tpm.h>
29 #include <kernel/transfer_list.h>
30 #include <libfdt.h>
31 #include <malloc.h>
32 #include <memtag.h>
33 #include <mm/core_memprot.h>
34 #include <mm/core_mmu.h>
35 #include <mm/fobj.h>
36 #include <mm/phys_mem.h>
37 #include <mm/tee_mm.h>
38 #include <mm/tee_pager.h>
39 #include <sm/psci.h>
40 #include <trace.h>
41 #include <utee_defines.h>
42 #include <util.h>
43
44 #include <platform_config.h>
45
46 #if !defined(CFG_WITH_ARM_TRUSTED_FW)
47 #include <sm/sm.h>
48 #endif
49
50 #if defined(CFG_WITH_VFP)
51 #include <kernel/vfp.h>
52 #endif
53
54 /*
55 * In this file we're using unsigned long to represent physical pointers as
56 * they are received in a single register when OP-TEE is initially entered.
57 * This limits 32-bit systems to only make use of the lower 32 bits
58 * of a physical address for initial parameters.
59 *
60 * 64-bit systems on the other hand can use full 64-bit physical pointers.
61 */
62 #define PADDR_INVALID ULONG_MAX
63
64 #if defined(CFG_BOOT_SECONDARY_REQUEST)
65 struct ns_entry_context {
66 uintptr_t entry_point;
67 uintptr_t context_id;
68 };
69 struct ns_entry_context ns_entry_contexts[CFG_TEE_CORE_NB_CORE];
70 static uint32_t spin_table[CFG_TEE_CORE_NB_CORE];
71 #endif
72
73 #ifdef CFG_BOOT_SYNC_CPU
74 /*
75 * Array used during boot to synchronize the CPUs.
76 * When 0, the CPU has not started.
77 * When 1, it has started.
78 */
79 uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE];
80 DECLARE_KEEP_PAGER(sem_cpu_sync);
81 #endif
82
83 /*
84 * Must not be in .bss since it's initialized and used from assembly before
85 * .bss is cleared.
86 */
87 vaddr_t boot_cached_mem_end __nex_data = 1;
88
89 static unsigned long boot_arg_fdt __nex_bss;
90 static unsigned long boot_arg_nsec_entry __nex_bss;
91 static unsigned long boot_arg_pageable_part __nex_bss;
92 static unsigned long boot_arg_transfer_list __nex_bss;
93 static struct transfer_list_header *mapped_tl __nex_bss;
94
95 #ifdef CFG_SECONDARY_INIT_CNTFRQ
96 static uint32_t cntfrq;
97 #endif
98
99 /* May be overridden in plat-$(PLATFORM)/main.c */
100 __weak void plat_primary_init_early(void)
101 {
102 }
103 DECLARE_KEEP_PAGER(plat_primary_init_early);
104
105 /* May be overridden in plat-$(PLATFORM)/main.c */
106 __weak void boot_primary_init_intc(void)
107 {
108 }
109
110 /* May be overridden in plat-$(PLATFORM)/main.c */
111 __weak void boot_secondary_init_intc(void)
112 {
113 }
114
115 /* May be overridden in plat-$(PLATFORM)/main.c */
116 __weak unsigned long plat_get_aslr_seed(void)
117 {
118 DMSG("Warning: no ASLR seed");
119
120 return 0;
121 }
122
123 #if defined(_CFG_CORE_STACK_PROTECTOR) || defined(CFG_WITH_STACK_CANARIES)
124 /* Generate random stack canary values on boot */
125 __weak void plat_get_random_stack_canaries(void *buf, size_t ncan, size_t size)
126 {
127 TEE_Result ret = TEE_ERROR_GENERIC;
128 size_t i = 0;
129
130 assert(buf && ncan && size);
131
132 /*
133 * With virtualization the RNG is not initialized in the Nexus core.
134 * It needs to be overridden with a platform-specific implementation.
135 */
136 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
137 IMSG("WARNING: Using fixed value for stack canary");
138 memset(buf, 0xab, ncan * size);
139 goto out;
140 }
141
142 ret = crypto_rng_read(buf, ncan * size);
143 if (ret != TEE_SUCCESS)
144 panic("Failed to generate random stack canary");
145
146 out:
147 /* Leave a null byte in each canary to mitigate string-based exploits */
148 for (i = 0; i < ncan; i++)
149 *((uint8_t *)buf + size * i) = 0;
150 }
151 #endif /* _CFG_CORE_STACK_PROTECTOR || CFG_WITH_STACK_CANARIES */
152
153 /*
154 * This function is called as a guard after each SMC call which is not
155 * supposed to return.
156 */
157 void __panic_at_smc_return(void)
158 {
159 panic();
160 }
161
162 #if defined(CFG_WITH_ARM_TRUSTED_FW)
163 void init_sec_mon(unsigned long nsec_entry __maybe_unused)
164 {
165 assert(nsec_entry == PADDR_INVALID);
166 /* Do nothing as we don't have a secure monitor */
167 }
168 #else
169 /* May be overridden in plat-$(PLATFORM)/main.c */
170 __weak void init_sec_mon(unsigned long nsec_entry)
171 {
172 struct sm_nsec_ctx *nsec_ctx;
173
174 assert(nsec_entry != PADDR_INVALID);
175
176 /* Initialize secure monitor */
177 nsec_ctx = sm_get_nsec_ctx();
178 nsec_ctx->mon_lr = nsec_entry;
179 nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
180 if (nsec_entry & 1)
181 nsec_ctx->mon_spsr |= CPSR_T;
182 }
183 #endif
184
185 #if defined(CFG_WITH_ARM_TRUSTED_FW)
186 static void init_vfp_nsec(void)
187 {
188 }
189 #else
190 static void init_vfp_nsec(void)
191 {
192 /* Normal world can use CP10 and CP11 (SIMD/VFP) */
193 write_nsacr(read_nsacr() | NSACR_CP10 | NSACR_CP11);
194 }
195 #endif
196
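/*
 * Verify that every crypto extension enabled in the configuration is
 * actually implemented by the hardware, and panic if any is missing.
 */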
197 static void check_crypto_extensions(void)
198 {
199 bool ce_supported = true;
200
201 if (!feat_aes_implemented() &&
202 IS_ENABLED(CFG_CRYPTO_AES_ARM_CE)) {
203 EMSG("AES instructions are not supported");
204 ce_supported = false;
205 }
206
207 if (!feat_sha1_implemented() &&
208 IS_ENABLED(CFG_CRYPTO_SHA1_ARM_CE)) {
209 EMSG("SHA1 instructions are not supported");
210 ce_supported = false;
211 }
212
213 if (!feat_sha256_implemented() &&
214 IS_ENABLED(CFG_CRYPTO_SHA256_ARM_CE)) {
215 EMSG("SHA256 instructions are not supported");
216 ce_supported = false;
217 }
218
219 /* Check aarch64 specific instructions */
220 if (IS_ENABLED(CFG_ARM64_core)) {
221 if (!feat_sha512_implemented() &&
222 IS_ENABLED(CFG_CRYPTO_SHA512_ARM_CE)) {
223 EMSG("SHA512 instructions are not supported");
224 ce_supported = false;
225 }
226
227 if (!feat_sha3_implemented() &&
228 IS_ENABLED(CFG_CRYPTO_SHA3_ARM_CE)) {
229 EMSG("SHA3 instructions are not supported");
230 ce_supported = false;
231 }
232
233 if (!feat_sm3_implemented() &&
234 IS_ENABLED(CFG_CRYPTO_SM3_ARM_CE)) {
235 EMSG("SM3 instructions are not supported");
236 ce_supported = false;
237 }
238
239 if (!feat_sm4_implemented() &&
240 IS_ENABLED(CFG_CRYPTO_SM4_ARM_CE)) {
241 EMSG("SM4 instructions are not supported");
242 ce_supported = false;
243 }
244 }
245
246 if (!ce_supported)
247 panic("HW doesn't support CE instructions");
248 }
249
250 #if defined(CFG_WITH_VFP)
251
252 #ifdef ARM32
253 static void init_vfp_sec(void)
254 {
255 uint32_t cpacr = read_cpacr();
256
257 /*
258 * Enable Advanced SIMD functionality.
259 * Enable use of D16-D31 of the Floating-point Extension register
260 * file.
261 */
262 cpacr &= ~(CPACR_ASEDIS | CPACR_D32DIS);
263 /*
264 * Enable usage of CP10 and CP11 (SIMD/VFP) in both kernel and user
265 * mode.
266 */
267 cpacr |= CPACR_CP(10, CPACR_CP_ACCESS_FULL);
268 cpacr |= CPACR_CP(11, CPACR_CP_ACCESS_FULL);
269 write_cpacr(cpacr);
270 }
271 #endif /* ARM32 */
272
273 #ifdef ARM64
274 static void init_vfp_sec(void)
275 {
276 /* Not using VFP until thread_kernel_enable_vfp() */
277 vfp_disable();
278 }
279 #endif /* ARM64 */
280
281 #else /* CFG_WITH_VFP */
282
283 static void init_vfp_sec(void)
284 {
285 /* Not using VFP */
286 }
287 #endif
288
289 #ifdef CFG_SECONDARY_INIT_CNTFRQ
290 static void primary_save_cntfrq(void)
291 {
292 assert(cntfrq == 0);
293
294 /*
295 * CNTFRQ should be initialized on the primary CPU by a
296 * previous boot stage
297 */
298 cntfrq = read_cntfrq();
299 }
300
301 static void secondary_init_cntfrq(void)
302 {
303 assert(cntfrq != 0);
304 write_cntfrq(cntfrq);
305 }
306 #else /* CFG_SECONDARY_INIT_CNTFRQ */
307 static void primary_save_cntfrq(void)
308 {
309 }
310
311 static void secondary_init_cntfrq(void)
312 {
313 }
314 #endif
315
316 #ifdef CFG_CORE_SANITIZE_KADDRESS
317 static void init_run_constructors(void)
318 {
319 const vaddr_t *ctor;
320
321 for (ctor = &__ctor_list; ctor < &__ctor_end; ctor++)
322 ((void (*)(void))(*ctor))();
323 }
324
325 static void init_asan(void)
326 {
327
328 /*
329 * CFG_ASAN_SHADOW_OFFSET is also supplied as
330 * -fasan-shadow-offset=$(CFG_ASAN_SHADOW_OFFSET) to the compiler.
331 * Since all the values needed to calculate CFG_ASAN_SHADOW_OFFSET
332 * aren't available to make, we have to calculate it in advance and
333 * hard code it into the platform conf.mk. Here, where all the needed
334 * values are available, we double-check that the compiler was
335 * supplied the correct value.
336 */
337
338 #define __ASAN_SHADOW_START \
339 ROUNDUP(TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8, 8)
340 assert(__ASAN_SHADOW_START == (vaddr_t)&__asan_shadow_start);
341 #define __CFG_ASAN_SHADOW_OFFSET \
342 (__ASAN_SHADOW_START - (TEE_RAM_START / 8))
343 COMPILE_TIME_ASSERT(CFG_ASAN_SHADOW_OFFSET == __CFG_ASAN_SHADOW_OFFSET);
344 #undef __ASAN_SHADOW_START
345 #undef __CFG_ASAN_SHADOW_OFFSET
346
347 /*
348 * Mark the area covered by the shadow memory: everything from the
349 * load address up to the beginning of the shadow area.
350 */
351 asan_set_shadowed((void *)TEE_LOAD_ADDR, &__asan_shadow_start);
352
353 /*
354 * Add access to areas that aren't opened automatically by a
355 * constructor.
356 */
357 asan_tag_access(&__ctor_list, &__ctor_end);
358 asan_tag_access(__rodata_start, __rodata_end);
359 #ifdef CFG_WITH_PAGER
360 asan_tag_access(__pageable_start, __pageable_end);
361 #endif /*CFG_WITH_PAGER*/
362 asan_tag_access(__nozi_start, __nozi_end);
363 #ifdef ARM32
364 asan_tag_access(__exidx_start, __exidx_end);
365 asan_tag_access(__extab_start, __extab_end);
366 #endif
367
368 init_run_constructors();
369
370 /* Everything is tagged correctly, let's start address sanitizing. */
371 asan_start();
372 }
373 #else /*CFG_CORE_SANITIZE_KADDRESS*/
374 static void init_asan(void)
375 {
376 }
377 #endif /*CFG_CORE_SANITIZE_KADDRESS*/
378
379 #if defined(CFG_MEMTAG)
380 /* Called from entry_a64.S only when MEMTAG is configured */
381 void boot_init_memtag(void)
382 {
383 memtag_init_ops(feat_mte_implemented());
384 }
385
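/*
 * core_mmu_for_each_map() callback: reset the MTE tags of Nexus and
 * secure RAM regions so all memory starts with a known tag value.
 */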
386 static TEE_Result mmap_clear_memtag(struct tee_mmap_region *map,
387 void *ptr __unused)
388 {
389 switch (map->type) {
390 case MEM_AREA_NEX_RAM_RO:
391 case MEM_AREA_SEC_RAM_OVERALL:
392 DMSG("Clearing tags for VA %#"PRIxVA"..%#"PRIxVA,
393 map->va, map->va + map->size - 1);
394 memtag_set_tags((void *)map->va, map->size, 0);
395 break;
396 default:
397 break;
398 }
399
400 return TEE_SUCCESS;
401 }
402
403 /* Called from entry_a64.S only when MEMTAG is configured */
404 void boot_clear_memtag(void)
405 {
406 core_mmu_for_each_map(NULL, mmap_clear_memtag);
407 }
408 #endif
409
410 #ifdef CFG_WITH_PAGER
411
412 #ifdef CFG_CORE_SANITIZE_KADDRESS
413 static void carve_out_asan_mem(void)
414 {
415 nex_phys_mem_partial_carve_out(ASAN_MAP_PA, ASAN_MAP_SZ);
416 }
417 #else
418 static void carve_out_asan_mem(void)
419 {
420 }
421 #endif
422
423 static void print_pager_pool_size(void)
424 {
425 struct tee_pager_stats __maybe_unused stats;
426
427 tee_pager_get_stats(&stats);
428 IMSG("Pager pool size: %zukB",
429 stats.npages_all * SMALL_PAGE_SIZE / 1024);
430 }
431
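/*
 * Initialize the core virtual memory pool covering the TEE RAM virtual
 * address range, excluding the flat-mapped ASAN shadow when present.
 */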
432 static void init_virt_pool(tee_mm_pool_t *virt_pool)
433 {
434 const vaddr_t begin = VCORE_START_VA;
435 size_t size = TEE_RAM_VA_SIZE;
436
437 #ifdef CFG_CORE_SANITIZE_KADDRESS
438 /* Carve out the ASAN memory, flat mapped after core memory */
439 if (begin + size > ASAN_SHADOW_PA)
440 size = ASAN_MAP_PA - begin;
441 #endif
442
443 if (!tee_mm_init(virt_pool, begin, size, SMALL_PAGE_SHIFT,
444 TEE_MM_POOL_NO_FLAGS))
445 panic("core_virt_mem_pool init failed");
446 }
447
448 /*
449 * With CFG_CORE_ASLR=y the init part is relocated very early during boot.
450 * The init part is also paged just as the rest of the normal paged code, with
451 * the difference that it's preloaded during boot. When the backing store
452 * is configured the entire paged binary is copied in place and then also
453 * the init part. Since the init part has been relocated (references to
454 * addresses updated to compensate for the new load address) this has to be
455 * undone for the hashes of those pages to match with the original binary.
456 *
457 * If CFG_CORE_ASLR=n, nothing needs to be done as the code/ro pages are
458 * unchanged.
459 */
460 static void undo_init_relocation(uint8_t *paged_store __maybe_unused)
461 {
462 #ifdef CFG_CORE_ASLR
463 unsigned long *ptr = NULL;
464 const uint32_t *reloc = NULL;
465 const uint32_t *reloc_end = NULL;
466 unsigned long offs = boot_mmu_config.map_offset;
467 const struct boot_embdata *embdata = (const void *)__init_end;
468 vaddr_t addr_end = (vaddr_t)__init_end - offs - TEE_LOAD_ADDR;
469 vaddr_t addr_start = (vaddr_t)__init_start - offs - TEE_LOAD_ADDR;
470
471 reloc = (const void *)((vaddr_t)embdata + embdata->reloc_offset);
472 reloc_end = reloc + embdata->reloc_len / sizeof(*reloc);
473
474 for (; reloc < reloc_end; reloc++) {
475 if (*reloc < addr_start)
476 continue;
477 if (*reloc >= addr_end)
478 break;
479 ptr = (void *)(paged_store + *reloc - addr_start);
480 *ptr -= offs;
481 }
482 #endif
483 }
484
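/*
 * Allocate the read-only paged fobj backing the pageable area. With
 * CFG_CORE_ASLR the embedded relocation table is passed along so pages
 * can be re-relocated as they are paged in.
 */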
485 static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes,
486 void *store)
487 {
488 const unsigned int num_pages = tee_mm_get_bytes(mm) / SMALL_PAGE_SIZE;
489 #ifdef CFG_CORE_ASLR
490 unsigned int reloc_offs = (vaddr_t)__pageable_start - VCORE_START_VA;
491 const struct boot_embdata *embdata = (const void *)__init_end;
492 const void *reloc = __init_end + embdata->reloc_offset;
493
494 return fobj_ro_reloc_paged_alloc(num_pages, hashes, reloc_offs,
495 reloc, embdata->reloc_len, store);
496 #else
497 return fobj_ro_paged_alloc(num_pages, hashes, store);
498 #endif
499 }
500
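/*
 * Set up the pager backing store: copy the pageable parts into place,
 * verify their SHA-256 hashes against the embedded hash table and hand
 * the pages over to the pager.
 */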
501 static void init_pager_runtime(unsigned long pageable_part)
502 {
503 size_t n;
504 size_t init_size = (size_t)(__init_end - __init_start);
505 size_t pageable_start = (size_t)__pageable_start;
506 size_t pageable_end = (size_t)__pageable_end;
507 size_t pageable_size = pageable_end - pageable_start;
508 vaddr_t tzsram_end = TZSRAM_BASE + TZSRAM_SIZE - TEE_LOAD_ADDR +
509 VCORE_START_VA;
510 size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
511 TEE_SHA256_HASH_SIZE;
512 const struct boot_embdata *embdata = (const void *)__init_end;
513 const void *tmp_hashes = NULL;
514 tee_mm_entry_t *mm = NULL;
515 struct fobj *fobj = NULL;
516 uint8_t *paged_store = NULL;
517 uint8_t *hashes = NULL;
518
519 assert(pageable_size % SMALL_PAGE_SIZE == 0);
520 assert(embdata->total_len >= embdata->hashes_offset +
521 embdata->hashes_len);
522 assert(hash_size == embdata->hashes_len);
523
524 tmp_hashes = __init_end + embdata->hashes_offset;
525
526 /*
527 * This needs to be initialized early to support address lookup
528 * in MEM_AREA_TEE_RAM
529 */
530 tee_pager_early_init();
531
532 hashes = malloc(hash_size);
533 IMSG_RAW("\n");
534 IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
535 assert(hashes);
536 asan_memcpy_unchecked(hashes, tmp_hashes, hash_size);
537
538 /*
539 * The pager is about to be enabled below, so any temporary boot
540 * memory allocations must be released now.
541 */
542 boot_mem_release_tmp_alloc();
543
544 carve_out_asan_mem();
545
546 mm = nex_phys_mem_ta_alloc(pageable_size);
547 assert(mm);
548 paged_store = phys_to_virt(tee_mm_get_smem(mm),
549 MEM_AREA_SEC_RAM_OVERALL, pageable_size);
550 /*
551 * Load pageable part in the dedicated allocated area:
552 * - Move pageable non-init part into pageable area. Note the bootloader
553 * may have loaded it anywhere in TA RAM, hence the use of memmove().
554 * - Copy pageable init part from current location into pageable area.
555 */
556 memmove(paged_store + init_size,
557 phys_to_virt(pageable_part,
558 core_mmu_get_type_by_pa(pageable_part),
559 __pageable_part_end - __pageable_part_start),
560 __pageable_part_end - __pageable_part_start);
561 asan_memcpy_unchecked(paged_store, __init_start, init_size);
562 /*
563 * Undo any relocation of the init part so the hash checks
564 * can pass.
565 */
566 undo_init_relocation(paged_store);
567
568 /* Check that the hashes of what's in the pageable area are OK */
569 DMSG("Checking hashes of pageable area");
570 for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
571 const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
572 const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
573 TEE_Result res;
574
575 DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
576 res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
577 if (res != TEE_SUCCESS) {
578 EMSG("Hash failed for page %zu at %p: res 0x%x",
579 n, (void *)page, res);
580 panic();
581 }
582 }
583
584 /*
585 * Assert that the preloaded init sections are page aligned so that
586 * nothing trails uninitialized at the end of the premapped init area.
587 */
588 assert(!(init_size & SMALL_PAGE_MASK));
589
590 /*
591 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
592 * is supplied to tee_pager_init() below.
593 */
594 init_virt_pool(&core_virt_mem_pool);
595
596 /*
597 * Assign the alias area used by the pager at the end of the
598 * small-page block that the rest of the binary is loaded into. We're
599 * taking more than needed, but we're guaranteed not to need more
600 * than the physical amount of TZSRAM.
601 */
602 mm = tee_mm_alloc2(&core_virt_mem_pool,
603 (vaddr_t)core_virt_mem_pool.lo +
604 core_virt_mem_pool.size - TZSRAM_SIZE,
605 TZSRAM_SIZE);
606 assert(mm);
607 tee_pager_set_alias_area(mm);
608
609 /*
610 * Claim virtual memory which isn't paged.
611 * Linear memory (flat-mapped core memory) ends there.
612 */
613 mm = tee_mm_alloc2(&core_virt_mem_pool, VCORE_UNPG_RX_PA,
614 (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
615 assert(mm);
616
617 /*
618 * Allocate virtual memory for the pageable area and let the pager
619 * take charge of all the pages already assigned to that memory.
620 */
621 mm = tee_mm_alloc2(&core_virt_mem_pool, (vaddr_t)__pageable_start,
622 pageable_size);
623 assert(mm);
624 fobj = ro_paged_alloc(mm, hashes, paged_store);
625 assert(fobj);
626 tee_pager_add_core_region(tee_mm_get_smem(mm), PAGED_REGION_TYPE_RO,
627 fobj);
628 fobj_put(fobj);
629
630 tee_pager_add_pages(pageable_start, init_size / SMALL_PAGE_SIZE, false);
631 tee_pager_add_pages(pageable_start + init_size,
632 (pageable_size - init_size) / SMALL_PAGE_SIZE,
633 true);
634 if (pageable_end < tzsram_end)
635 tee_pager_add_pages(pageable_end, (tzsram_end - pageable_end) /
636 SMALL_PAGE_SIZE, true);
637
638 /*
639 * There may be physical pages in TZSRAM before the core load address.
640 * These pages can be added to the physical pages pool of the pager.
641 * This setup may happen when the secure bootloader runs in TZSRAM
642 * and its memory can be reused by OP-TEE once boot stages complete.
643 */
644 tee_pager_add_pages(core_virt_mem_pool.lo,
645 (VCORE_UNPG_RX_PA - core_virt_mem_pool.lo) /
646 SMALL_PAGE_SIZE,
647 true);
648
649 print_pager_pool_size();
650 }
651 #else /*!CFG_WITH_PAGER*/
652 static void init_pager_runtime(unsigned long pageable_part __unused)
653 {
654 }
655 #endif
656
657 #if defined(CFG_DT)
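/*
 * Add a /firmware/optee node to the non-secure device tree so the normal
 * world OP-TEE driver can probe. When an async notification interrupt is
 * configured, the resulting node is expected to look roughly like:
 *
 *	firmware {
 *		optee {
 *			compatible = "linaro,optee-tz";
 *			method = "smc";
 *			interrupts = <type id flags>;
 *		};
 *	};
 */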
658 static int add_optee_dt_node(struct dt_descriptor *dt)
659 {
660 int offs;
661 int ret;
662
663 if (fdt_path_offset(dt->blob, "/firmware/optee") >= 0) {
664 DMSG("OP-TEE Device Tree node already exists!");
665 return 0;
666 }
667
668 offs = fdt_path_offset(dt->blob, "/firmware");
669 if (offs < 0) {
670 offs = add_dt_path_subnode(dt, "/", "firmware");
671 if (offs < 0)
672 return -1;
673 }
674
675 offs = fdt_add_subnode(dt->blob, offs, "optee");
676 if (offs < 0)
677 return -1;
678
679 ret = fdt_setprop_string(dt->blob, offs, "compatible",
680 "linaro,optee-tz");
681 if (ret < 0)
682 return -1;
683 ret = fdt_setprop_string(dt->blob, offs, "method", "smc");
684 if (ret < 0)
685 return -1;
686
687 if (CFG_CORE_ASYNC_NOTIF_GIC_INTID) {
688 /*
689 * The format of the interrupt property is defined by the
690 * binding of the interrupt domain root. In this case it's
691 * one Arm GIC v1, v2 or v3 so we must be compatible with
692 * these.
693 *
694 * An SPI type of interrupt is indicated with a 0 in the
695 * first cell. A PPI type is indicated with value 1.
696 *
697 * The interrupt number goes in the second cell where
698 * SPIs range from 0 to 987 and PPIs range from 0 to 15.
699 *
700 * Flags are passed in the third cell.
701 */
702 uint32_t itr_trigger = 0;
703 uint32_t itr_type = 0;
704 uint32_t itr_id = 0;
705 uint32_t val[3] = { };
706
707 /* PPIs are visible only in the current CPU cluster */
708 static_assert(IS_ENABLED(CFG_CORE_FFA) ||
709 !CFG_CORE_ASYNC_NOTIF_GIC_INTID ||
710 (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
711 GIC_SPI_BASE) ||
712 ((CFG_TEE_CORE_NB_CORE <= 8) &&
713 (CFG_CORE_ASYNC_NOTIF_GIC_INTID >=
714 GIC_PPI_BASE)));
715
716 if (CFG_CORE_ASYNC_NOTIF_GIC_INTID >= GIC_SPI_BASE) {
717 itr_type = GIC_SPI;
718 itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_SPI_BASE;
719 itr_trigger = IRQ_TYPE_EDGE_RISING;
720 } else {
721 itr_type = GIC_PPI;
722 itr_id = CFG_CORE_ASYNC_NOTIF_GIC_INTID - GIC_PPI_BASE;
723 itr_trigger = IRQ_TYPE_EDGE_RISING |
724 GIC_CPU_MASK_SIMPLE(CFG_TEE_CORE_NB_CORE);
725 }
726
727 val[0] = TEE_U32_TO_BIG_ENDIAN(itr_type);
728 val[1] = TEE_U32_TO_BIG_ENDIAN(itr_id);
729 val[2] = TEE_U32_TO_BIG_ENDIAN(itr_trigger);
730
731 ret = fdt_setprop(dt->blob, offs, "interrupts", val,
732 sizeof(val));
733 if (ret < 0)
734 return -1;
735 }
736 return 0;
737 }
738
739 #ifdef CFG_PSCI_ARM32
740 static int append_psci_compatible(void *fdt, int offs, const char *str)
741 {
742 return fdt_appendprop(fdt, offs, "compatible", str, strlen(str) + 1);
743 }
744
745 static int dt_add_psci_node(struct dt_descriptor *dt)
746 {
747 int offs;
748
749 if (fdt_path_offset(dt->blob, "/psci") >= 0) {
750 DMSG("PSCI Device Tree node already exists!");
751 return 0;
752 }
753
754 offs = add_dt_path_subnode(dt, "/", "psci");
755 if (offs < 0)
756 return -1;
757 if (append_psci_compatible(dt->blob, offs, "arm,psci-1.0"))
758 return -1;
759 if (append_psci_compatible(dt->blob, offs, "arm,psci-0.2"))
760 return -1;
761 if (append_psci_compatible(dt->blob, offs, "arm,psci"))
762 return -1;
763 if (fdt_setprop_string(dt->blob, offs, "method", "smc"))
764 return -1;
765 if (fdt_setprop_u32(dt->blob, offs, "cpu_suspend", PSCI_CPU_SUSPEND))
766 return -1;
767 if (fdt_setprop_u32(dt->blob, offs, "cpu_off", PSCI_CPU_OFF))
768 return -1;
769 if (fdt_setprop_u32(dt->blob, offs, "cpu_on", PSCI_CPU_ON))
770 return -1;
771 if (fdt_setprop_u32(dt->blob, offs, "sys_poweroff", PSCI_SYSTEM_OFF))
772 return -1;
773 if (fdt_setprop_u32(dt->blob, offs, "sys_reset", PSCI_SYSTEM_RESET))
774 return -1;
775 return 0;
776 }
777
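/*
 * Return 0 if one of the strings in the node's "compatible" property
 * starts with the given prefix, -1 otherwise.
 */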
778 static int check_node_compat_prefix(struct dt_descriptor *dt, int offs,
779 const char *prefix)
780 {
781 const size_t prefix_len = strlen(prefix);
782 size_t l;
783 int plen;
784 const char *prop;
785
786 prop = fdt_getprop(dt->blob, offs, "compatible", &plen);
787 if (!prop)
788 return -1;
789
790 while (plen > 0) {
791 if (memcmp(prop, prefix, prefix_len) == 0)
792 return 0; /* match */
793
794 l = strlen(prop) + 1;
795 prop += l;
796 plen -= l;
797 }
798
799 return -1;
800 }
801
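/*
 * Set enable-method = "psci" on every "arm,cortex-a" compatible CPU node
 * that doesn't already define an enable-method.
 */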
802 static int dt_add_psci_cpu_enable_methods(struct dt_descriptor *dt)
803 {
804 int offs = 0;
805
806 while (1) {
807 offs = fdt_next_node(dt->blob, offs, NULL);
808 if (offs < 0)
809 break;
810 if (fdt_getprop(dt->blob, offs, "enable-method", NULL))
811 continue; /* already set */
812 if (check_node_compat_prefix(dt, offs, "arm,cortex-a"))
813 continue; /* no compatible */
814 if (fdt_setprop_string(dt->blob, offs, "enable-method", "psci"))
815 return -1;
816 /* Need to restart scanning as offsets may have changed */
817 offs = 0;
818 }
819 return 0;
820 }
821
822 static int config_psci(struct dt_descriptor *dt)
823 {
824 if (dt_add_psci_node(dt))
825 return -1;
826 return dt_add_psci_cpu_enable_methods(dt);
827 }
828 #else
829 static int config_psci(struct dt_descriptor *dt __unused)
830 {
831 return 0;
832 }
833 #endif /*CFG_PSCI_ARM32*/
834
835 static int mark_tzdram_as_reserved(struct dt_descriptor *dt)
836 {
837 return add_res_mem_dt_node(dt, "optee_core", CFG_TZDRAM_START,
838 CFG_TZDRAM_SIZE);
839 }
840
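/*
 * Amend the external (non-secure) device tree with the OP-TEE node, PSCI
 * configuration and reserved-memory entries before it is handed over to
 * the normal world.
 */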
841 static void update_external_dt(void)
842 {
843 struct dt_descriptor *dt = get_external_dt_desc();
844
845 if (!dt || !dt->blob)
846 return;
847
848 if (!IS_ENABLED(CFG_CORE_FFA) && add_optee_dt_node(dt))
849 panic("Failed to add OP-TEE Device Tree node");
850
851 if (config_psci(dt))
852 panic("Failed to config PSCI");
853
854 #ifdef CFG_CORE_RESERVED_SHM
855 if (mark_static_shm_as_reserved(dt))
856 panic("Failed to config non-secure memory");
857 #endif
858
859 if (mark_tzdram_as_reserved(dt))
860 panic("Failed to config secure memory");
861 }
862 #else /*CFG_DT*/
863 static void update_external_dt(void)
864 {
865 }
866 #endif /*!CFG_DT*/
867
868 void init_tee_runtime(void)
869 {
870 /*
871 * With virtualization we call this function when creating the
872 * OP-TEE partition instead.
873 */
874 if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
875 call_preinitcalls();
876 call_early_initcalls();
877 call_service_initcalls();
878
879 /*
880 * These two functions use crypto_rng_read() to initialize the
881 * pauth keys. Once call_initcalls() returns we're guaranteed that
882 * crypto_rng_read() is ready to be used.
883 */
884 thread_init_core_local_pauth_keys();
885 thread_init_thread_pauth_keys();
886
887 /*
888 * Reinitialize canaries around the stacks with crypto_rng_read().
889 *
890 * TODO: Updating canaries when CFG_NS_VIRTUALIZATION is enabled will
891 * require synchronization between thread_check_canaries() and
892 * thread_update_canaries().
893 */
894 if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
895 thread_update_canaries();
896 }
897
898 static void init_primary(unsigned long pageable_part, unsigned long nsec_entry)
899 {
900 vaddr_t va = 0;
901
902 thread_init_core_local_stacks();
903 /*
904 * Mask asynchronous exceptions before switching to the thread vector
905 * as the thread handler requires those to be masked while
906 * executing with the temporary stack. The thread subsystem also
907 * asserts that the foreign interrupts are blocked when using most of
908 * its functions.
909 */
910 thread_set_exceptions(THREAD_EXCP_ALL);
911 primary_save_cntfrq();
912 init_vfp_sec();
913
914 if (IS_ENABLED(CFG_CRYPTO_WITH_CE))
915 check_crypto_extensions();
916
917 init_asan();
918
919 /*
920 * By default the whole of OP-TEE uses malloc, so we need to initialize
921 * it early. But, when virtualization is enabled, malloc is used
922 * only by the TEE runtime, so malloc should be initialized later, for
923 * every virtual partition separately. Core code uses nex_malloc
924 * instead.
925 */
926 #ifdef CFG_WITH_PAGER
927 /* Add heap2 first as heap1 may be too small for the initial bget pool */
928 malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);
929 #endif
930 #ifdef CFG_NS_VIRTUALIZATION
931 nex_malloc_add_pool(__nex_heap_start, __nex_heap_end -
932 __nex_heap_start);
933 #else
934 malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
935 #endif
936 IMSG_RAW("\n");
937
938 core_mmu_save_mem_map();
939 core_mmu_init_phys_mem();
940 va = boot_mem_release_unused();
941 if (!IS_ENABLED(CFG_WITH_PAGER)) {
942 /*
943 * We must update boot_cached_mem_end to reflect the memory
944 * just unmapped by boot_mem_release_unused().
945 */
946 assert(va && va <= boot_cached_mem_end);
947 boot_cached_mem_end = va;
948 }
949
950 if (IS_ENABLED(CFG_WITH_PAGER)) {
951 /*
952 * Pager: init_runtime() calls thread_kernel_enable_vfp()
953 * so we must set a current thread right now to avoid a
954 * chicken-and-egg problem (thread_init_boot_thread() sets
955 * the current thread but needs things set by
956 * init_runtime()).
957 */
958 thread_get_core_local()->curr_thread = 0;
959 init_pager_runtime(pageable_part);
960 }
961
962 if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) {
963 /*
964 * Virtualization: We can't initialize threads right now because
965 * threads belong to the "tee" part and will be initialized
966 * separately for each new virtual guest. So, we'll clear
967 * "curr_thread" and call it done.
968 */
969 thread_get_core_local()->curr_thread = -1;
970 } else {
971 thread_init_boot_thread();
972 }
973 thread_init_primary();
974 thread_init_per_cpu();
975 init_sec_mon(nsec_entry);
976 }
977
978 static bool cpu_nmfi_enabled(void)
979 {
980 #if defined(ARM32)
981 return read_sctlr() & SCTLR_NMFI;
982 #else
983 /* Note: ARM64 does not feature non-maskable FIQ support. */
984 return false;
985 #endif
986 }
987
988 /*
989 * Note: this function is weak just to make it possible to exclude it from
990 * the unpaged area.
991 */
992 void __weak boot_init_primary_late(unsigned long fdt __unused,
993 unsigned long manifest __unused)
994 {
995 size_t fdt_size = CFG_DTB_MAX_SIZE;
996
997 if (IS_ENABLED(CFG_TRANSFER_LIST) && mapped_tl) {
998 struct transfer_list_entry *tl_e = NULL;
999
1000 tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
1001 if (tl_e)
1002 fdt_size = tl_e->data_size;
1003 }
1004
1005 init_external_dt(boot_arg_fdt, fdt_size);
1006 reinit_manifest_dt();
1007 #ifdef CFG_CORE_SEL1_SPMC
1008 tpm_map_log_area(get_manifest_dt());
1009 #else
1010 tpm_map_log_area(get_external_dt());
1011 #endif
1012 discover_nsec_memory();
1013 update_external_dt();
1014 configure_console_from_dt();
1015
1016 IMSG("OP-TEE version: %s", core_v_str);
1017 if (IS_ENABLED(CFG_INSECURE)) {
1018 IMSG("WARNING: This OP-TEE configuration might be insecure!");
1019 IMSG("WARNING: Please check https://optee.readthedocs.io/en/latest/architecture/porting_guidelines.html");
1020 }
1021 IMSG("Primary CPU initializing");
1022 #ifdef CFG_CORE_ASLR
1023 DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA,
1024 (unsigned long)boot_mmu_config.map_offset, VCORE_START_VA);
1025 #endif
1026 #ifdef CFG_NS_VIRTUALIZATION
1027 DMSG("NS-virtualization enabled, supporting %u guests",
1028 CFG_VIRT_GUEST_COUNT);
1029 #endif
1030 if (IS_ENABLED(CFG_MEMTAG))
1031 DMSG("Memory tagging %s",
1032 memtag_is_enabled() ? "enabled" : "disabled");
1033
1034 /* Check if platform needs NMFI workaround */
1035 if (cpu_nmfi_enabled()) {
1036 if (!IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1037 IMSG("WARNING: This ARM core has NMFI enabled, please apply workaround!");
1038 } else {
1039 if (IS_ENABLED(CFG_CORE_WORKAROUND_ARM_NMFI))
1040 IMSG("WARNING: This ARM core does not have NMFI enabled, no need for workaround");
1041 }
1042
1043 boot_primary_init_intc();
1044 init_vfp_nsec();
1045 if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
1046 init_tee_runtime();
1047 }
1048
1049 /*
1050 * Note: this function is weak just to make it possible to exclude it from
1051 * the unpaged area.
1052 */
1053 void __weak boot_init_primary_final(void)
1054 {
1055 if (!IS_ENABLED(CFG_WITH_PAGER))
1056 boot_mem_release_tmp_alloc();
1057
1058 if (!IS_ENABLED(CFG_NS_VIRTUALIZATION))
1059 call_driver_initcalls();
1060 call_finalcalls();
1061 IMSG("Primary CPU switching to normal world boot");
1062 }
1063
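/* Common per-CPU initialization done on each secondary core */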
1064 static void init_secondary_helper(unsigned long nsec_entry)
1065 {
1066 IMSG("Secondary CPU %zu initializing", get_core_pos());
1067
1068 /*
1069 * Mask asynchronous exceptions before switching to the thread vector
1070 * as the thread handler requires those to be masked while
1071 * executing with the temporary stack. The thread subsystem also
1072 * asserts that the foreign interrupts are blocked when using most of
1073 * its functions.
1074 */
1075 thread_set_exceptions(THREAD_EXCP_ALL);
1076
1077 secondary_init_cntfrq();
1078 thread_init_per_cpu();
1079 init_sec_mon(nsec_entry);
1080 boot_secondary_init_intc();
1081 init_vfp_sec();
1082 init_vfp_nsec();
1083
1084 IMSG("Secondary CPU %zu switching to normal world boot", get_core_pos());
1085 }
1086
1087 /*
1088 * Note: this function is weak just to make it possible to exclude it from
1089 * the unpaged area so that it lies in the init area.
1090 */
1091 void __weak boot_init_primary_early(void)
1092 {
1093 unsigned long pageable_part = 0;
1094 unsigned long e = PADDR_INVALID;
1095 struct transfer_list_entry *tl_e = NULL;
1096
1097 if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW))
1098 e = boot_arg_nsec_entry;
1099
1100 if (IS_ENABLED(CFG_TRANSFER_LIST) && boot_arg_transfer_list) {
1101 /* map and save the TL */
1102 mapped_tl = transfer_list_map(boot_arg_transfer_list);
1103 if (!mapped_tl)
1104 panic("Failed to map transfer list");
1105
1106 transfer_list_dump(mapped_tl);
1107 tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
1108 if (tl_e) {
1109 /*
1110 * Expand the data size of the DTB entry to the maximum
1111 * allocable mapped memory to reserve sufficient space
1112 * for inserting new nodes, avoiding potential corruption of
1113 * the following entries.
1114 */
1115 uint32_t dtb_max_sz = mapped_tl->max_size -
1116 mapped_tl->size + tl_e->data_size;
1117
1118 if (!transfer_list_set_data_size(mapped_tl, tl_e,
1119 dtb_max_sz)) {
1120 EMSG("Failed to extend DTB size to %#"PRIx32,
1121 dtb_max_sz);
1122 panic();
1123 }
1124 }
1125 tl_e = transfer_list_find(mapped_tl, TL_TAG_OPTEE_PAGABLE_PART);
1126 }
1127
1128 if (IS_ENABLED(CFG_WITH_PAGER)) {
1129 if (IS_ENABLED(CFG_TRANSFER_LIST) && tl_e)
1130 pageable_part =
1131 get_le64(transfer_list_entry_data(tl_e));
1132 else
1133 pageable_part = boot_arg_pageable_part;
1134 }
1135
1136 init_primary(pageable_part, e);
1137 }
1138
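/*
 * Validate the transfer list handed over by the previous boot stage and
 * record its address so it can be mapped in boot_init_primary_early().
 */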
1139 static void boot_save_transfer_list(unsigned long zero_reg,
1140 unsigned long transfer_list,
1141 unsigned long fdt)
1142 {
1143 struct transfer_list_header *tl = (void *)transfer_list;
1144 struct transfer_list_entry *tl_e = NULL;
1145
1146 if (zero_reg != 0)
1147 panic("Incorrect transfer list register convention");
1148
1149 if (!IS_ALIGNED_WITH_TYPE(transfer_list, struct transfer_list_header) ||
1150 !IS_ALIGNED(transfer_list, TL_ALIGNMENT_FROM_ORDER(tl->alignment)))
1151 panic("Transfer list base address is not aligned");
1152
1153 if (transfer_list_check_header(tl) == TL_OPS_NONE)
1154 panic("Invalid transfer list");
1155
1156 tl_e = transfer_list_find(tl, TL_TAG_FDT);
1157 if (fdt != (unsigned long)transfer_list_entry_data(tl_e))
1158 panic("DT does not match to the DT entry of the TL");
1159
1160 boot_arg_transfer_list = transfer_list;
1161 }
1162
1163 #if defined(CFG_WITH_ARM_TRUSTED_FW)
1164 unsigned long boot_cpu_on_handler(unsigned long a0 __maybe_unused,
1165 unsigned long a1 __unused)
1166 {
1167 init_secondary_helper(PADDR_INVALID);
1168 return 0;
1169 }
1170 #else
1171 void boot_init_secondary(unsigned long nsec_entry)
1172 {
1173 init_secondary_helper(nsec_entry);
1174 }
1175 #endif
1176
1177 #if defined(CFG_BOOT_SECONDARY_REQUEST)
1178 void boot_set_core_ns_entry(size_t core_idx, uintptr_t entry,
1179 uintptr_t context_id)
1180 {
1181 ns_entry_contexts[core_idx].entry_point = entry;
1182 ns_entry_contexts[core_idx].context_id = context_id;
1183 dsb_ishst();
1184 }
1185
1186 int boot_core_release(size_t core_idx, paddr_t entry)
1187 {
1188 if (!core_idx || core_idx >= CFG_TEE_CORE_NB_CORE)
1189 return -1;
1190
1191 ns_entry_contexts[core_idx].entry_point = entry;
1192 dmb();
1193 spin_table[core_idx] = 1;
1194 dsb();
1195 sev();
1196
1197 return 0;
1198 }
1199
1200 /*
1201 * Spin until a secondary boot request arrives, then return
1202 * the secondary core entry context.
1203 */
1204 struct ns_entry_context *boot_core_hpen(void)
1205 {
1206 #ifdef CFG_PSCI_ARM32
1207 return &ns_entry_contexts[get_core_pos()];
1208 #else
1209 do {
1210 wfe();
1211 } while (!spin_table[get_core_pos()]);
1212 dmb();
1213 return &ns_entry_contexts[get_core_pos()];
1214 #endif
1215 }
1216 #endif
1217
1218 #if defined(CFG_CORE_ASLR)
1219 #if defined(CFG_DT)
1220 unsigned long __weak get_aslr_seed(void)
1221 {
1222 void *fdt = NULL;
1223 int rc = 0;
1224 const uint64_t *seed = NULL;
1225 int offs = 0;
1226 int len = 0;
1227
1228 if (!IS_ENABLED(CFG_CORE_SEL2_SPMC))
1229 fdt = (void *)boot_arg_fdt;
1230
1231 if (!fdt) {
1232 DMSG("No fdt");
1233 goto err;
1234 }
1235
1236 rc = fdt_check_header(fdt);
1237 if (rc) {
1238 DMSG("Bad fdt: %d", rc);
1239 goto err;
1240 }
1241
1242 offs = fdt_path_offset(fdt, "/secure-chosen");
1243 if (offs < 0) {
1244 DMSG("Cannot find /secure-chosen");
1245 goto err;
1246 }
1247 seed = fdt_getprop(fdt, offs, "kaslr-seed", &len);
1248 if (!seed || len != sizeof(*seed)) {
1249 DMSG("Cannot find valid kaslr-seed");
1250 goto err;
1251 }
1252
1253 return fdt64_to_cpu(fdt64_ld(seed));
1254
1255 err:
1256 /* Try platform implementation */
1257 return plat_get_aslr_seed();
1258 }
1259 #else /*!CFG_DT*/
1260 unsigned long __weak get_aslr_seed(void)
1261 {
1262 /* Try platform implementation */
1263 return plat_get_aslr_seed();
1264 }
1265 #endif /*!CFG_DT*/
1266 #endif /*CFG_CORE_ASLR*/
1267
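/*
 * Extract the manifest DT address from the FF-A boot information blob,
 * panicking on any signature, version or format mismatch.
 */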
1268 static void *get_fdt_from_boot_info(struct ffa_boot_info_header_1_1 *hdr)
1269 {
1270 struct ffa_boot_info_1_1 *desc = NULL;
1271 uint8_t content_fmt = 0;
1272 uint8_t name_fmt = 0;
1273 void *fdt = NULL;
1274 int ret = 0;
1275
1276 if (hdr->signature != FFA_BOOT_INFO_SIGNATURE) {
1277 EMSG("Bad boot info signature %#"PRIx32, hdr->signature);
1278 panic();
1279 }
1280 if (hdr->version != FFA_BOOT_INFO_VERSION) {
1281 EMSG("Bad boot info version %#"PRIx32, hdr->version);
1282 panic();
1283 }
1284 if (hdr->desc_count != 1) {
1285 EMSG("Bad boot info descriptor count %#"PRIx32,
1286 hdr->desc_count);
1287 panic();
1288 }
1289 desc = (void *)((vaddr_t)hdr + hdr->desc_offset);
1290 name_fmt = desc->flags & FFA_BOOT_INFO_FLAG_NAME_FORMAT_MASK;
1291 if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_STRING)
1292 DMSG("Boot info descriptor name \"%16s\"", desc->name);
1293 else if (name_fmt == FFA_BOOT_INFO_FLAG_NAME_FORMAT_UUID)
1294 DMSG("Boot info descriptor UUID %pUl", (void *)desc->name);
1295 else
1296 DMSG("Boot info descriptor: unknown name format %"PRIu8,
1297 name_fmt);
1298
1299 content_fmt = (desc->flags & FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_MASK) >>
1300 FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_SHIFT;
1301 if (content_fmt != FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR) {
1302 EMSG("Bad boot info content format %"PRIu8", expected %u (address)",
1303 content_fmt, FFA_BOOT_INFO_FLAG_CONTENT_FORMAT_ADDR);
1304 panic();
1305 }
1306
1307 fdt = (void *)(vaddr_t)desc->contents;
1308 ret = fdt_check_full(fdt, desc->size);
1309 if (ret < 0) {
1310 EMSG("Invalid Device Tree at %p: error %d", fdt, ret);
1311 panic();
1312 }
1313 return fdt;
1314 }
1315
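/*
 * Read the secure memory base ("load-address") and size ("mem-size")
 * from the FF-A manifest.
 */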
1316 static void get_sec_mem_from_manifest(void *fdt, paddr_t *base, size_t *size)
1317 {
1318 int ret = 0;
1319 uint64_t num = 0;
1320
1321 ret = fdt_node_check_compatible(fdt, 0, "arm,ffa-manifest-1.0");
1322 if (ret < 0) {
1323 EMSG("Invalid FF-A manifest at %p: error %d", fdt, ret);
1324 panic();
1325 }
1326 ret = dt_getprop_as_number(fdt, 0, "load-address", &num);
1327 if (ret < 0) {
1328 EMSG("Can't read \"load-address\" from FF-A manifest at %p: error %d",
1329 fdt, ret);
1330 panic();
1331 }
1332 *base = num;
1333 /* "mem-size" is currently an undocumented extension to the spec. */
1334 ret = dt_getprop_as_number(fdt, 0, "mem-size", &num);
1335 if (ret < 0) {
1336 EMSG("Can't read \"mem-size\" from FF-A manifest at %p: error %d",
1337 fdt, ret);
1338 panic();
1339 }
1340 *size = num;
1341 }
1342
1343 void __weak boot_save_args(unsigned long a0, unsigned long a1,
1344 unsigned long a2, unsigned long a3,
1345 unsigned long a4 __maybe_unused)
1346 {
1347 /*
1348 * Register use:
1349 *
1350 * Scenario A: Default arguments
1351 * a0 - CFG_CORE_FFA=y && CFG_CORE_SEL2_SPMC=n:
1352 * if non-NULL holds the TOS FW config [1] address
1353 * - CFG_CORE_FFA=y &&
1354 * (CFG_CORE_SEL2_SPMC=y || CFG_CORE_EL3_SPMC=y):
1355 * address of FF-A Boot Information Blob
1356 * - CFG_CORE_FFA=n:
1357 * if non-NULL holds the pageable part address
1358 * a1 - CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
1359 * Armv7 standard bootarg #1 (kept track of in entry_a32.S)
1360 * a2 - CFG_CORE_SEL2_SPMC=n:
1361 * if non-NULL holds the system DTB address
1362 * - CFG_WITH_ARM_TRUSTED_FW=n (Armv7):
1363 * Armv7 standard bootarg #2 (system DTB address, kept track
1364 * of in entry_a32.S)
1365 * a3 - Not used
1366 * a4 - CFG_WITH_ARM_TRUSTED_FW=n:
1367 * Non-secure entry address
1368 *
1369 * [1] A TF-A concept: TOS_FW_CONFIG - Trusted OS Firmware
1370 * configuration file. Used by Trusted OS (BL32), that is, OP-TEE
1371 * here. This is also called Manifest DT, related to the Manifest DT
1372 * passed in the FF-A Boot Information Blob, but with a different
1373 * compatible string.
1374 *
1375 * Scenario B: FW Handoff via Transfer List
1376 * Note: FF-A and non-secure entry are not yet supported with
1377 * Transfer List
1378 * a0 - DTB address or 0 (AArch64)
1379 * - must be 0 (AArch32)
1380 * a1 - 1 << 32 | TRANSFER_LIST_SIGNATURE[0:31] (AArch64)
1381 * - 1 << 24 | TRANSFER_LIST_SIGNATURE[0:23] (AArch32)
1382 * a2 - must be 0 (AArch64)
1383 * - DTB address or 0 (AArch32)
1384 * a3 - Transfer list base address
1385 * a4 - Not used
1386 */
1387
1388 if (IS_ENABLED(CFG_TRANSFER_LIST)) {
1389 if (IS_ENABLED(CFG_ARM64_core) &&
1390 a1 == TL_HANDOFF_X1_VALUE(TL_REG_CONVENTION_VER)) {
1391 boot_save_transfer_list(a2, a3, a0);
1392 boot_arg_fdt = a0;
1393 } else if (IS_ENABLED(CFG_ARM32_core) &&
1394 a1 == TL_HANDOFF_R1_VALUE(TL_REG_CONVENTION_VER)) {
1395 boot_save_transfer_list(a0, a3, a2);
1396 boot_arg_fdt = a2;
1397 }
1398
1399 return;
1400 }
1401
1402 if (!IS_ENABLED(CFG_CORE_SEL2_SPMC)) {
1403 #if defined(CFG_DT_ADDR)
1404 boot_arg_fdt = CFG_DT_ADDR;
1405 #else
1406 boot_arg_fdt = a2;
1407 #endif
1408 }
1409
1410 if (IS_ENABLED(CFG_CORE_FFA)) {
1411 if (IS_ENABLED(CFG_CORE_SEL2_SPMC) ||
1412 IS_ENABLED(CFG_CORE_EL3_SPMC))
1413 init_manifest_dt(get_fdt_from_boot_info((void *)a0));
1414 else
1415 init_manifest_dt((void *)a0);
1416 if (IS_ENABLED(CFG_CORE_SEL2_SPMC) &&
1417 IS_ENABLED(CFG_CORE_PHYS_RELOCATABLE)) {
1418 paddr_t base = 0;
1419 size_t size = 0;
1420
1421 get_sec_mem_from_manifest(get_manifest_dt(),
1422 &base, &size);
1423 core_mmu_set_secure_memory(base, size);
1424 }
1425 } else {
1426 if (IS_ENABLED(CFG_WITH_PAGER)) {
1427 #if defined(CFG_PAGEABLE_ADDR)
1428 boot_arg_pageable_part = CFG_PAGEABLE_ADDR;
1429 #else
1430 boot_arg_pageable_part = a0;
1431 #endif
1432 }
1433 if (!IS_ENABLED(CFG_WITH_ARM_TRUSTED_FW)) {
1434 #if defined(CFG_NS_ENTRY_ADDR)
1435 boot_arg_nsec_entry = CFG_NS_ENTRY_ADDR;
1436 #else
1437 boot_arg_nsec_entry = a4;
1438 #endif
1439 }
1440 }
1441 }
1442
1443 #if defined(CFG_TRANSFER_LIST)
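/*
 * boot_final() hook: pack the updated DTB back into its transfer list
 * entry and unmap the transfer list before switching to the normal world.
 */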
1444 static TEE_Result release_transfer_list(void)
1445 {
1446 struct dt_descriptor *dt = get_external_dt_desc();
1447
1448 if (!mapped_tl)
1449 return TEE_SUCCESS;
1450
1451 if (dt) {
1452 int ret = 0;
1453 struct transfer_list_entry *tl_e = NULL;
1454
1455 /*
1456 * Pack the DTB and update the transfer list before un-mapping
1457 */
1458 ret = fdt_pack(dt->blob);
1459 if (ret < 0) {
1460 EMSG("Failed to pack Device Tree at 0x%" PRIxPA
1461 ": error %d", virt_to_phys(dt->blob), ret);
1462 panic();
1463 }
1464
1465 tl_e = transfer_list_find(mapped_tl, TL_TAG_FDT);
1466 assert(dt->blob == transfer_list_entry_data(tl_e));
1467 transfer_list_set_data_size(mapped_tl, tl_e,
1468 fdt_totalsize(dt->blob));
1469 dt->blob = NULL;
1470 }
1471
1472 transfer_list_unmap_sync(mapped_tl);
1473 mapped_tl = NULL;
1474
1475 return TEE_SUCCESS;
1476 }
1477
1478 boot_final(release_transfer_list);
1479 #endif
1480