/*
 * Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/toolchain.h>
#include <zephyr/linker/sections.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/offsets.h>
#include <zephyr/devicetree.h>
#include "boot.h"
#include "macro_priv.inc"

_ASM_FILE_PROLOGUE

/*
 * Platform specific pre-C init code
 *
 * Note: - Stack is not yet available
 *       - x23, x24 and x25 must be preserved
 */

WTEXT(z_arm64_el3_plat_prep_c)
SECTION_FUNC(TEXT,z_arm64_el3_plat_prep_c)
	ret

WTEXT(z_arm64_el2_plat_prep_c)
SECTION_FUNC(TEXT,z_arm64_el2_plat_prep_c)
	ret

WTEXT(z_arm64_el1_plat_prep_c)
SECTION_FUNC(TEXT,z_arm64_el1_plat_prep_c)
	ret
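
/*
 * The three handlers above are weak, empty stubs: an SoC or board can
 * provide strong definitions to run platform-specific fixups before any
 * C code. Per the note above, such overrides execute without a stack
 * and must preserve x23, x24 and x25.
 */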

/*
 * Set the minimum necessary to safely call C code
 */
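/*
 * Concretely: x24 (loaded by the caller) becomes the initial stack pointer
 * for both SP_EL1 and SP_EL0 (SP_EL0 is selected on return), the frame
 * pointer is cleared, and alignment fault checking (SCTLR.A) is disabled
 * so compiler-generated code may perform unaligned accesses. The return
 * address is preserved in x23 across the helper calls below.
 */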

GTEXT(__reset_prep_c)
SECTION_SUBSEC_FUNC(TEXT,_reset_section,__reset_prep_c)
	/* return address: x23 */
	mov	x23, lr

	switch_el x0, 3f, 2f, 1f
3:
#if !defined(CONFIG_ARMV8_R)
	/* Reinitialize SCTLR from scratch in EL3 */
	ldr	w0, =(SCTLR_EL3_RES1 | SCTLR_I_BIT | SCTLR_SA_BIT)
	msr	sctlr_el3, x0
	isb

	/* Custom plat prep_c init */
	bl	z_arm64_el3_plat_prep_c

	/* Set SP_EL1 */
	msr     sp_el1, x24

	b	out
#endif /* CONFIG_ARMV8_R */
2:
	/* Disable alignment fault checking */
	mrs	x0, sctlr_el2
	bic	x0, x0, SCTLR_A_BIT
	msr	sctlr_el2, x0
	isb

	/* Custom plat prep_c init */
	bl	z_arm64_el2_plat_prep_c

	/* Set SP_EL1 */
	msr     sp_el1, x24

	b	out
1:
	/* Disable alignment fault checking */
	mrs	x0, sctlr_el1
	bic	x0, x0, SCTLR_A_BIT
	msr	sctlr_el1, x0
	isb

	/* Custom plat prep_c init */
	bl	z_arm64_el1_plat_prep_c

	/*
	 * Set SP_EL1. The sp_el1 register cannot be accessed from EL1 itself,
	 * so select SP_EL1 via SPSel and write sp directly.
	 */
	msr     SPSel, #1
	mov     sp, x24
out:
	isb

	/* Select SP_EL0 */
	msr	SPSel, #0

	/* Initialize stack */
	mov	sp, x24

	/* fp = NULL */
	mov	fp, xzr

	ret	x23

/*
 * Reset vector
 *
 * Run when the system comes out of reset. The processor executes at a
 * privileged exception level. At this point, neither SP_EL0 nor SP_ELx
 * points to a valid area in SRAM.
 */

GTEXT(__reset)
SECTION_SUBSEC_FUNC(TEXT,_reset_section,__reset)

GTEXT(__start)
SECTION_SUBSEC_FUNC(TEXT,_reset_section,__start)

#ifdef CONFIG_WAIT_AT_RESET_VECTOR
resetwait:
	wfe
	b       resetwait
#endif

	/* Mask all exceptions */
	msr	DAIFSet, #0xf

#if CONFIG_MP_MAX_NUM_CPUS > 1

	/*
	 * Multiple cores may boot simultaneously and race to become the
	 * primary core. A voting lock [1], which places only reasonable and
	 * minimal requirements on the memory system, is used to make sure
	 * exactly one core wins.
	 *
	 * [1] kernel.org/doc/html/next/arch/arm/vlocks.html
	 */
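	/*
	 * For reference, a rough C-level sketch of the voting sequence
	 * implemented below (informal field names, illustration only; the
	 * actual layout comes from the BOOT_PARAM_*_OFFSET definitions):
	 *
	 *	voting[id] = 1;                 // raise our hand
	 *	if (params.mpid != -1UL) {      // somebody already won
	 *		voting[id] = 0;
	 *		goto secondary_core;
	 *	}
	 *	params.mpid = my_mpid;          // suggest ourselves
	 *	voting[id] = 0;
	 *	dmb(ish);
	 *	for (i = 0; i < num_cpus; i++)  // wait for all voters
	 *		while (voting[i] != 0)
	 *			;
	 *	dmb(ish);
	 *	if (params.mpid == my_mpid)
	 *		goto primary_core;
	 *	goto secondary_core;
	 */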
	ldr	x0, =arm64_cpu_boot_params

	/*
	 * Get the "logic" id statically defined by cpu_node_list; the voting
	 * lock uses it for self-identification. Note that this is NOT the
	 * final logic id (arch_curr_cpu()->id).
	 */
	get_cpu_logic_id	x1, x2, x3, x4	//x1: MPID, x2: logic id

	add	x4, x0, #BOOT_PARAM_VOTING_OFFSET

	/* signal our desire to vote */
	mov	w5, #1
	strb	w5, [x4, x2]
	ldr	x3, [x0, #BOOT_PARAM_MPID_OFFSET]
	cmn	x3, #1
	beq	1f

	/* some core already won, release */
	strb	wzr, [x4, x2]
	b	secondary_core

	/* suggest current core then release */
1:	str	x1, [x0, #BOOT_PARAM_MPID_OFFSET]
	strb	wzr, [x4, x2]
	dmb	ish

	/* then wait until every other core is done voting */
	mov	x5, #0
2:	ldrb	w3, [x4, x5]
	tst	w3, #255
	/* wait */
	bne	2b
	add	x5, x5, #1
	cmp	x5, #DT_CHILD_NUM_STATUS_OKAY(DT_PATH(cpus))
	bne	2b

	/* check if the current core won */
	dmb	ish
	ldr	x3, [x0, #BOOT_PARAM_MPID_OFFSET]
	cmp	x3, x1
	beq	primary_core
	/* fall through to the secondary path */

	/* loop until our turn comes */
secondary_core:
	dmb	ish
	ldr	x2, [x0, #BOOT_PARAM_MPID_OFFSET]
	cmp	x1, x2
	bne	secondary_core

	/* we can now load our stack pointer value and move on */
	ldr	x24, [x0, #BOOT_PARAM_SP_OFFSET]
	ldr	x25, =z_arm64_secondary_prep_c
	b	boot

primary_core:
#endif
	/* load primary stack and entry point */
	ldr	x24, =(z_interrupt_stacks + __z_interrupt_stack_SIZEOF)
	ldr	x25, =z_prep_c
boot:
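	/*
	 * At this point x24 holds the initial stack pointer for this core and
	 * x25 the C entry point (z_prep_c for the primary core, or
	 * z_arm64_secondary_prep_c for a secondary one). __reset_prep_c
	 * consumes x24 and the final "ret x25" below jumps to the entry point.
	 */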
	/* Prepare for calling C code */
	bl	__reset_prep_c

	/*
	 * Initialize the interrupt stack with 0xaa so stack utilization
	 * can be measured. This needs to be done before using the stack
	 * so that we don't clobber any data.
	 */
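	/*
	 * Informal C equivalent of the fill below (illustration only): paint
	 * the CONFIG_ISR_STACK_SIZE bytes below the current SP, stopping one
	 * 64-bit word short of SP itself:
	 *
	 *	for (uint64_t *p = (uint64_t *)(sp - CONFIG_ISR_STACK_SIZE);
	 *	     p != (uint64_t *)(sp - 8); p++)
	 *		*p = 0xaaaaaaaaaaaaaaaaULL;
	 */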
#ifdef CONFIG_INIT_STACKS
	mov_imm	x0, CONFIG_ISR_STACK_SIZE
	sub	x0, sp, x0
	sub     x9, sp, #8
	mov     x10, 0xaaaaaaaaaaaaaaaa
stack_init_loop:
	cmp     x0, x9
	beq     stack_init_done
	str     x10, [x0], #8
	b       stack_init_loop
stack_init_done:
#endif

	/* Platform hook for highest EL */
	bl	z_arm64_el_highest_init

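/*
 * From here the boot path runs once per exception level on the way down:
 * if we entered at EL3, z_arm64_el3_init configures EL3 and the eret drops
 * us back to the switch_el label at the next lower EL; at EL2 we configure
 * EL2 and drop to EL1; at EL1 we finish initialization and jump to the C
 * entry point held in x25.
 */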
switch_el:
	switch_el x0, 3f, 2f, 1f

3:
#if !defined(CONFIG_ARMV8_R)
	/* EL3 init */
	bl	z_arm64_el3_init

	/*
	 * Set up the drop to the next lower EL: z_arm64_el3_get_next_el
	 * programs ELR/SPSR so that the eret below re-enters at switch_el
	 */
	adr	x0, switch_el
	bl	z_arm64_el3_get_next_el
	eret
#endif /* CONFIG_ARMV8_R */

2:
	/* EL2 init */
	bl	z_arm64_el2_init

	/* Move to EL1 (EL1t, i.e. EL1 using SP_EL0) with all exceptions masked */
	mov_imm	x0, (SPSR_DAIF_MASK | SPSR_MODE_EL1T)
	msr	spsr_el2, x0

	adr	x0, 1f
	msr	elr_el2, x0
	eret

1:
	/* EL1 init */
	bl	z_arm64_el1_init

	/* We want to use SP_ELx from now on */
	msr	SPSel, #1

	/* Enable SError interrupts */
	msr	DAIFClr, #(DAIFCLR_ABT_BIT)
	isb

	ret	x25  /* either z_prep_c or z_arm64_secondary_prep_c */