/*
 * Copyright (c) 2024 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <string.h>

#include <zephyr/kernel.h>
#include <kernel_internal.h>
#include <zephyr/platform/hooks.h>
#include <zephyr/arch/cache.h>

extern FUNC_NORETURN void z_cstart(void);

/* Defined by the SoC when CONFIG_SOC_HAS_RUNTIME_NUM_CPUS=y. */
extern void soc_num_cpus_init(void);

/* Make sure the platform configuration matches what the toolchain
 * thinks the hardware is doing.
 */
#ifdef CONFIG_DCACHE_LINE_SIZE
BUILD_ASSERT(CONFIG_DCACHE_LINE_SIZE == XCHAL_DCACHE_LINESIZE);
#endif

/**
 * @brief Prepare for and run C code
 *
 * This routine sets up the state needed to run C code, then hands
 * control to the kernel via z_cstart().
 */
void z_prep_c(void)
{
#if defined(CONFIG_SOC_PREP_HOOK)
	soc_prep_hook();
#endif
#if CONFIG_SOC_HAS_RUNTIME_NUM_CPUS
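	/* Let the SoC discover the actual CPU count before any
	 * per-CPU state is set up.
	 */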
	soc_num_cpus_init();
#endif

	_cpu_t *cpu0 = &_kernel.cpus[0];

#ifdef CONFIG_KERNEL_COHERENCE
	/* Flush and invalidate the data cache so that no live lines
	 * remain for regions the boot firmware may have cached
	 * unexpectedly.
	 */
	sys_cache_data_flush_and_invd_all();

#if !defined(CONFIG_SCHED_CPU_MASK_PIN_ONLY)
	/* Our cache-top stash location might contain junk left over
	 * from a pre-boot environment.  It must be zero or valid!
	 */
	XTENSA_WSR(ZSR_FLUSH_STR, 0);
#endif
#endif

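	/* The boot CPU starts out with no interrupt/exception nesting. */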
	cpu0->nested = 0;

	/* The asm2 scheme keeps the kernel pointer in a scratch SR
	 * (see zsr.h for generation specifics) for easy access.  That
	 * saves the 4-byte immediate the legacy scheme needed to
	 * materialize the address.  But in SMP this record is a
	 * per-CPU thing, and having it already stored in an SR is a
	 * big win.
	 */
	XTENSA_WSR(ZSR_CPU_STR, cpu0);

#ifdef CONFIG_INIT_STACKS
	char *stack_start = K_KERNEL_STACK_BUFFER(z_interrupt_stacks[0]);
	size_t stack_sz = K_KERNEL_STACK_SIZEOF(z_interrupt_stacks[0]);
	char *stack_end = stack_start + stack_sz;

	uint32_t sp;

	__asm__ volatile("mov %0, sp" : "=a"(sp));

	/* Only clear the interrupt stack if the current stack pointer
	 * is not within it; otherwise we would be wiping the stack
	 * that is in use.
	 */
	if (((uintptr_t)sp < (uintptr_t)stack_start) ||
	    ((uintptr_t)sp >= (uintptr_t)stack_end)) {
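		/* 0xAA is the CONFIG_INIT_STACKS fill pattern; unused
		 * bytes still holding it are counted later to measure
		 * stack usage.
		 */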
		memset(stack_start, 0xAA, stack_sz);
	}
#endif
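	/* Bring the cache subsystem to a known state before the rest
	 * of the kernel starts using cacheable memory.
	 */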
#if CONFIG_ARCH_CACHE
	arch_cache_init();
#endif

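	/* Set up the kernel's initial MMU mappings before the kernel
	 * proper starts.
	 */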
#ifdef CONFIG_XTENSA_MMU
	xtensa_mmu_init();
#endif

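	/* Program the boot-time MPU regions. */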
#ifdef CONFIG_XTENSA_MPU
	xtensa_mpu_init();
#endif
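	/* Hand off to the common kernel bootstrap; z_cstart() does
	 * not return.
	 */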
	z_cstart();
	CODE_UNREACHABLE;
}