/*
 * Copyright (c) 2016-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef EL3_COMMON_MACROS_S
#define EL3_COMMON_MACROS_S

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <lib/xlat_tables/xlat_tables_defs.h>

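/* Mask used to align an address down to the start of its page (used by the
 * PIE fixup code below) */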
#define PAGE_START_MASK		~(PAGE_SIZE_MASK)

	/*
	 * Helper macro to initialise EL3 registers we care about.
	 */
	.macro el3_arch_init_common
	/* ---------------------------------------------------------------------
	 * SCTLR has already been initialised - read current value before
	 * modifying.
	 *
	 * SCTLR.I: Enable the instruction cache.
	 *
	 * SCTLR.A: Enable Alignment fault checking. All instructions that load
	 *  or store one or more registers have an alignment check that the
	 *  address being accessed is aligned to the size of the data element(s)
	 *  being accessed.
	 * ---------------------------------------------------------------------
	 */
	ldr	r1, =(SCTLR_I_BIT | SCTLR_A_BIT)
	ldcopr	r0, SCTLR
	orr	r0, r0, r1
	stcopr	r0, SCTLR
	isb

	/* ---------------------------------------------------------------------
	 * Initialise SCR, setting all fields rather than relying on the hw.
	 *
	 * SCR.SIF: Enabled so that Secure state instruction fetches from
	 *  Non-secure memory are not permitted.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =(SCR_RESET_VAL | SCR_SIF_BIT)
	stcopr	r0, SCR

	/* -----------------------------------------------------
	 * Enable asynchronous data aborts now that the
	 * exception vectors have been set up.
	 * -----------------------------------------------------
	 */
	cpsie   a
	isb

	/* ---------------------------------------------------------------------
	 * Initialise NSACR, setting all the fields, except for the
	 * IMPLEMENTATION DEFINED field, rather than relying on the hw. Some
	 * fields are architecturally UNKNOWN on reset.
	 *
	 * NSACR_ENABLE_FP_ACCESS: Represents NSACR.cp11 and NSACR.cp10. The
	 *  cp11 field is ignored, but is set to the same value as cp10. The
	 *  cp10 field is set to allow access to Advanced SIMD and floating
	 *  point features from both Security states.
	 *
	 * NSACR.NSTRCDIS: When system register trace is implemented, set to
	 *  one so that Non-secure System register accesses to all implemented
	 *  trace registers are disabled.
	 *  When system register trace is not implemented, this bit is RES0 and
	 *  hence set to zero.
	 * ---------------------------------------------------------------------
	 */
	ldcopr	r0, NSACR
	and	r0, r0, #NSACR_IMP_DEF_MASK
	orr	r0, r0, #(NSACR_RESET_VAL | NSACR_ENABLE_FP_ACCESS)
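	/* Set NSACR.NSTRCDIS only if ID_DFR0.CopTrc reports that system
	 * register trace is implemented */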
	ldcopr	r1, ID_DFR0
	ubfx	r1, r1, #ID_DFR0_COPTRC_SHIFT, #ID_DFR0_COPTRC_LENGTH
	cmp	r1, #ID_DFR0_COPTRC_SUPPORTED
	bne	1f
	orr	r0, r0, #NSTRCDIS_BIT
1:
	stcopr	r0, NSACR
	isb

	/* ---------------------------------------------------------------------
	 * Initialise CPACR, setting all fields rather than relying on hw. Some
	 * fields are architecturally UNKNOWN on reset.
	 *
	 * CPACR.TRCDIS: Trap control for PL0 and PL1 System register accesses
	 *  to trace registers. Set to zero to allow access.
	 *
	 * CPACR_ENABLE_FP_ACCESS: Represents CPACR.cp11 and CPACR.cp10. The
	 *  cp11 field is ignored, but is set to the same value as cp10. The
	 *  cp10 field is set to allow full access from PL0 and PL1 to
	 *  floating-point and Advanced SIMD features.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =((CPACR_RESET_VAL | CPACR_ENABLE_FP_ACCESS) & ~(TRCDIS_BIT))
	stcopr	r0, CPACR
	isb

	/* ---------------------------------------------------------------------
	 * Initialise FPEXC, setting all fields rather than relying on hw. Some
	 * fields are architecturally UNKNOWN on reset and are set to zero
	 * except for the field(s) listed below.
	 *
	 * FPEXC.EN: Enable access to Advanced SIMD and floating point features
	 *  from all exception levels.
	 *
	 * __SOFTFP__: Predefined macro exposed by a soft-float toolchain.
	 *  ARMv7 and Cortex-A32 (ARMv8/AArch32) have both soft-float and
	 *  hard-float toolchain variants; avoid compiling the code below with
	 *  a soft-float toolchain, as the "vmsr" instruction will not be
	 *  recognized.
	 * ---------------------------------------------------------------------
	 */
#if ((ARM_ARCH_MAJOR > 7) || defined(ARMV7_SUPPORTS_VFP)) && !(__SOFTFP__)
	ldr	r0, =(FPEXC_RESET_VAL | FPEXC_EN_BIT)
	vmsr	FPEXC, r0
	isb
#endif

#if (ARM_ARCH_MAJOR > 7)
	/* ---------------------------------------------------------------------
	 * Initialise SDCR, setting all the fields rather than relying on hw.
	 *
	 * SDCR.SPD: Disable AArch32 privileged debug. Debug exceptions from
	 *  Secure EL1 are disabled.
	 *
	 * SDCR.SCCD: Set to one so that cycle counting by PMCCNTR is prohibited
	 *  in Secure state. This bit is RES0 in versions of the architecture
	 *  earlier than ARMv8.5; setting it to 1 has no effect on them.
	 *
	 * SDCR.TTRF: Set to one so that, when trace filter control (FEAT_TRF)
	 *  is implemented, accesses to the trace filter control registers in
	 *  non-monitor mode generate a Monitor Trap exception, unless the
	 *  access generates a higher priority exception.
	 *  When FEAT_TRF is not implemented, this bit is RES0.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =((SDCR_RESET_VAL | SDCR_SPD(SDCR_SPD_DISABLE) | \
		      SDCR_SCCD_BIT) & ~SDCR_TTRF_BIT)
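	/* Set SDCR.TTRF only if ID_DFR0.TraceFilt reports that FEAT_TRF is
	 * implemented */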
	ldcopr	r1, ID_DFR0
	ubfx	r1, r1, #ID_DFR0_TRACEFILT_SHIFT, #ID_DFR0_TRACEFILT_LENGTH
	cmp	r1, #ID_DFR0_TRACEFILT_SUPPORTED
	bne	1f
	orr	r0, r0, #SDCR_TTRF_BIT
1:
	stcopr	r0, SDCR

	/* ---------------------------------------------------------------------
	 * Initialise PMCR, setting all fields rather than relying
	 * on hw. Some fields are architecturally UNKNOWN on reset.
	 *
	 * PMCR.LP: Set to one so that event counter overflow, which
	 *  is recorded in PMOVSCLR[0-30], occurs on the increment
	 *  that changes PMEVCNTR<n>[63] from 1 to 0, when ARMv8.5-PMU
	 *  is implemented. This bit is RES0 in versions of the architecture
	 *  earlier than ARMv8.5; setting it to 1 has no effect on them.
	 *  This bit is Reserved, UNK/SBZP in ARMv7.
	 *
	 * PMCR.LC: Set to one so that cycle counter overflow, which
	 *  is recorded in PMOVSCLR[31], occurs on the increment
	 *  that changes PMCCNTR[63] from 1 to 0.
	 *  This bit is Reserved, UNK/SBZP in ARMv7.
	 *
	 * PMCR.DP: Set to one to prohibit cycle counting whilst in Secure mode.
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =(PMCR_RESET_VAL | PMCR_DP_BIT | PMCR_LC_BIT | \
		      PMCR_LP_BIT)
#else
	ldr	r0, =(PMCR_RESET_VAL | PMCR_DP_BIT)
#endif
	stcopr	r0, PMCR

	/*
	 * If Data Independent Timing (DIT) functionality is implemented,
	 * always enable DIT in EL3.
	 */
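	/* CPSR.DIT is only written when ID_PFR0 reports that DIT is
	 * implemented */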
	ldcopr	r0, ID_PFR0
	and	r0, r0, #(ID_PFR0_DIT_MASK << ID_PFR0_DIT_SHIFT)
	cmp	r0, #ID_PFR0_DIT_SUPPORTED
	bne	1f
	mrs	r0, cpsr
	orr	r0, r0, #CPSR_DIT_BIT
	msr	cpsr_cxsf, r0
1:
	.endm

/* -----------------------------------------------------------------------------
 * This is the superset of actions that need to be performed during a cold boot
 * or a warm boot in EL3. This code is shared by BL1 and BL32 (SP_MIN).
 *
 * This macro will always perform reset handling, architectural initialisations
 * and stack setup. The rest of the actions are optional because they might not
 * be needed, depending on the context in which this macro is called. This is
 * why this macro is parameterised; each parameter allows an action to be
 * enabled or disabled.
 *
 *  _init_sctlr:
 *	Whether the macro needs to initialise the SCTLR register, including
 *	configuring the endianness of data accesses.
 *
 *  _warm_boot_mailbox:
 *	Whether the macro needs to detect the type of boot (cold/warm). The
 *	detection is based on the platform entrypoint address: if it is zero
 *	then it is a cold boot, otherwise it is a warm boot. In the latter case,
 *	this macro jumps to the platform entrypoint address.
 *
 *  _secondary_cold_boot:
 *	Whether the macro needs to identify the CPU that is calling it: primary
 *	CPU or secondary CPU. The primary CPU will be allowed to carry on with
 *	the platform initialisations, while the secondaries will be put in a
 *	platform-specific state in the meantime.
 *
 *	If the caller knows this macro will only be called by the primary CPU
 *	then this parameter can be defined to 0 to skip this step.
 *
 *  _init_memory:
 *	Whether the macro needs to initialise the memory.
 *
 *  _init_c_runtime:
 *	Whether the macro needs to initialise the C runtime environment.
 *
 *  _exception_vectors:
 *	Address of the exception vectors to program in the VBAR and MVBAR
 *	registers.
 *
 *  _pie_fixup_size:
 *	Size of the memory region over which to fix up the Global Descriptor
 *	Table (GDT).
 *
 *	A non-zero value is expected when the firmware needs the GDT to be
 *	fixed up.
 *
 * -----------------------------------------------------------------------------
 */
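/*
 * For illustration only: a BL image entrypoint would typically invoke this
 * macro along the lines sketched below. The vector table symbol
 * ("plat_exceptions") and the parameter values are hypothetical and depend on
 * the image and the platform.
 *
 *	el3_entrypoint_common					\
 *		_init_sctlr=1					\
 *		_warm_boot_mailbox=1				\
 *		_secondary_cold_boot=1				\
 *		_init_memory=1					\
 *		_init_c_runtime=1				\
 *		_exception_vectors=plat_exceptions		\
 *		_pie_fixup_size=0
 */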
	.macro el3_entrypoint_common					\
		_init_sctlr, _warm_boot_mailbox, _secondary_cold_boot,	\
		_init_memory, _init_c_runtime, _exception_vectors,	\
		_pie_fixup_size

	/* Make sure we are in Secure Mode */
#if ENABLE_ASSERTIONS
	ldcopr	r0, SCR
	tst	r0, #SCR_NS_BIT
	ASM_ASSERT(eq)
#endif

	.if \_init_sctlr
		/* -------------------------------------------------------------
		 * This is the initialisation of SCTLR and so must ensure that
		 * all fields are explicitly set rather than relying on hw. Some
		 * fields reset to an IMPLEMENTATION DEFINED value.
		 *
		 * SCTLR.TE: Set to zero so that exceptions to an Exception
		 *  Level executing at PL1 are taken to A32 state.
		 *
		 * SCTLR.EE: Set the CPU endianness before doing anything that
		 *  might involve memory reads or writes. Set to zero to select
		 *  Little Endian.
		 *
		 * SCTLR.V: Set to zero to select the normal exception vectors
		 *  with base address held in VBAR.
		 *
		 * SCTLR.DSSBS: Set to zero to disable speculation store bypass
		 *  safe behaviour upon exception entry to EL3.
		 * -------------------------------------------------------------
		 */
		ldr     r0, =(SCTLR_RESET_VAL & ~(SCTLR_TE_BIT | SCTLR_EE_BIT | \
				SCTLR_V_BIT | SCTLR_DSSBS_BIT))
		stcopr	r0, SCTLR
		isb
	.endif /* _init_sctlr */

	/* Switch to monitor mode */
	cps	#MODE32_mon
	isb

#if DISABLE_MTPMU
	bl	mtpmu_disable
#endif

	.if \_warm_boot_mailbox
		/* -------------------------------------------------------------
		 * This code will be executed for both warm and cold resets.
		 * Now is the time to distinguish between the two.
		 * Query the platform entrypoint address: if it is not zero
		 * then this is a warm boot, so jump to that address.
		 * -------------------------------------------------------------
		 */
		bl	plat_get_my_entrypoint
		cmp	r0, #0
		bxne	r0
	.endif /* _warm_boot_mailbox */

	.if \_pie_fixup_size
#if ENABLE_PIE
		/*
		 * ------------------------------------------------------------
		 * If PIE is enabled, fix up the Global Descriptor Table. This
		 * is done only once, during the primary core's cold boot path.
		 *
		 * The compile-time base address, required for the fixup, is
		 * calculated using the "pie_fixup" label, which is placed
		 * within the first page of the image.
		 * ------------------------------------------------------------
		 */
	pie_fixup:
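		/* r0 = page-aligned link-time base of the image,
		 * r1 = r0 + \_pie_fixup_size (end of the region to fix up) */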
		ldr	r0, =pie_fixup
		ldr	r1, =PAGE_START_MASK
		and	r0, r0, r1
		mov_imm	r1, \_pie_fixup_size
		add	r1, r1, r0
		bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */
	.endif /* _pie_fixup_size */

	/* ---------------------------------------------------------------------
	 * Set the exception vectors (VBAR/MVBAR).
	 * ---------------------------------------------------------------------
	 */
	ldr	r0, =\_exception_vectors
	stcopr	r0, VBAR
	stcopr	r0, MVBAR
	isb

	/* ---------------------------------------------------------------------
	 * It is a cold boot.
	 * Perform any processor specific actions upon reset e.g. cache, TLB
	 * invalidations etc.
	 * ---------------------------------------------------------------------
	 */
	bl	reset_handler

	el3_arch_init_common

	.if \_secondary_cold_boot
		/* -------------------------------------------------------------
		 * Check if this is a primary or secondary CPU cold boot.
		 * The primary CPU will set up the platform while the
		 * secondaries are placed in a platform-specific state until the
		 * primary CPU performs the necessary actions to bring them out
		 * of that state and allows entry into the OS.
		 * -------------------------------------------------------------
		 */
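		/* plat_is_my_cpu_primary() returns a non-zero value on the
		 * primary CPU */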
		bl	plat_is_my_cpu_primary
		cmp	r0, #0
		bne	do_primary_cold_boot

		/* This is a cold boot on a secondary CPU */
		bl	plat_secondary_cold_boot_setup
		/* plat_secondary_cold_boot_setup() is not supposed to return */
		no_ret	plat_panic_handler

	do_primary_cold_boot:
	.endif /* _secondary_cold_boot */

	/* ---------------------------------------------------------------------
	 * Initialize memory now. Secondary CPU initialization won't get to this
	 * point.
	 * ---------------------------------------------------------------------
	 */

	.if \_init_memory
		bl	platform_mem_init
	.endif /* _init_memory */

	/* ---------------------------------------------------------------------
	 * Init C runtime environment:
	 *   - Zero-initialise the NOBITS sections. There are 2 of them:
	 *       - the .bss section;
	 *       - the coherent memory section (if any).
	 *   - Relocate the data section from ROM to RAM, if required.
	 * ---------------------------------------------------------------------
	 */
	.if \_init_c_runtime
#if defined(IMAGE_BL32) || (defined(IMAGE_BL2) && BL2_AT_EL3)
		/* -----------------------------------------------------------------
		 * Invalidate the RW memory used by the image. This
		 * includes the data and NOBITS sections. This is done to
		 * safeguard against possible corruption of this memory by
		 * dirty cache lines in a system cache as a result of use by
		 * an earlier boot loader stage. If PIE is enabled however,
		 * RO sections including the GOT may be modified during
		 * pie fixup. Therefore, to be on the safe side, invalidate
		 * the entire image region if PIE is enabled.
		 * -----------------------------------------------------------------
		 */
#if ENABLE_PIE
#if SEPARATE_CODE_AND_RODATA
		ldr	r0, =__TEXT_START__
#else
		ldr	r0, =__RO_START__
#endif /* SEPARATE_CODE_AND_RODATA */
#else
		ldr	r0, =__RW_START__
#endif /* ENABLE_PIE */
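		/* inv_dcache_range() expects the base address in r0 and the
		 * size in bytes in r1 */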
		ldr	r1, =__RW_END__
		sub	r1, r1, r0
		bl	inv_dcache_range
#if defined(IMAGE_BL2) && SEPARATE_BL2_NOLOAD_REGION
		ldr	r0, =__BL2_NOLOAD_START__
		ldr	r1, =__BL2_NOLOAD_END__
		sub	r1, r1, r0
		bl	inv_dcache_range
#endif
#endif

		/*
		 * zeromem clobbers r12, which holds arg3 passed in by the
		 * previous BL stage, so preserve it in r7 across the call.
		 */
		mov	r7, r12
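		/* zeromem() expects the base address in r0 and the length in
		 * bytes in r1 */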
		ldr	r0, =__BSS_START__
		ldr	r1, =__BSS_END__
		sub	r1, r1, r0
		bl	zeromem

#if USE_COHERENT_MEM
		ldr	r0, =__COHERENT_RAM_START__
		ldr	r1, =__COHERENT_RAM_END_UNALIGNED__
		sub	r1, r1, r0
		bl	zeromem
#endif

		/* Restore r12 */
		mov	r12, r7

#if defined(IMAGE_BL1) || (defined(IMAGE_BL2) && BL2_AT_EL3 && BL2_IN_XIP_MEM)
		/* -----------------------------------------------------
		 * Copy data from ROM to RAM.
		 * -----------------------------------------------------
		 */
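		/* memcpy4() copies r2 bytes from the source address in r1 to
		 * the destination address in r0 */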
		ldr	r0, =__DATA_RAM_START__
		ldr	r1, =__DATA_ROM_START__
		ldr	r2, =__DATA_RAM_END__
		sub	r2, r2, r0
		bl	memcpy4
#endif
	.endif /* _init_c_runtime */

	/* ---------------------------------------------------------------------
	 * Allocate a stack whose memory will be marked as Normal-IS-WBWA when
	 * the MMU is enabled. There is no risk of reading stale stack memory
	 * after enabling the MMU as only the primary CPU is running at the
	 * moment.
	 * ---------------------------------------------------------------------
	 */
	bl	plat_set_my_stack

#if STACK_PROTECTOR_ENABLED
	.if \_init_c_runtime
	bl	update_stack_protector_canary
	.endif /* _init_c_runtime */
#endif
	.endm

#endif /* EL3_COMMON_MACROS_S */