1/*
2 * Copyright 2014, General Dynamics C4 Systems
3 * Copyright 2021, HENSOLDT Cyber
4 *
5 * SPDX-License-Identifier: GPL-2.0-only
6 */
7
8#include <config.h>
9#include <machine/assembler.h>
10#include <arch/machine/hardware.h>
11#include <arch/machine/registerset.h>
12#include <util.h>
13
/* Unless overridden by the build system, allow unaligned data accesses. */
#ifndef ALLOW_UNALIGNED_ACCESS
#define ALLOW_UNALIGNED_ACCESS 1
#endif

/* Pick which of the two SCTLR alignment-related bits to set and which to
 * clear: CONTROL_U (unaligned access support) vs CONTROL_A (strict alignment
 * fault checking). Exactly one of the pair is enabled; the other is cleared.
 * NOTE(review): CONTROL_* bit positions are defined in
 * arch/machine/registerset.h — confirm meanings there. */
#if ALLOW_UNALIGNED_ACCESS
#define CR_ALIGN_SET     BIT(CONTROL_U)
#define CR_ALIGN_CLEAR   BIT(CONTROL_A)
#else
#define CR_ALIGN_SET     BIT(CONTROL_A)
#define CR_ALIGN_CLEAR   BIT(CONTROL_U)
#endif

/* L1 instruction cache enable (CONTROL_I), unless a debug build disables it. */
#ifndef CONFIG_DEBUG_DISABLE_L1_ICACHE
    #define CR_L1_ICACHE_SET   BIT(CONTROL_I)
    #define CR_L1_ICACHE_CLEAR 0
#else
    #define CR_L1_ICACHE_SET   0
    #define CR_L1_ICACHE_CLEAR BIT(CONTROL_I)
#endif

/* L1 data cache enable (CONTROL_C), unless a debug build disables it. */
#ifndef CONFIG_DEBUG_DISABLE_L1_DCACHE
    #define CR_L1_DCACHE_SET   BIT(CONTROL_C)
    #define CR_L1_DCACHE_CLEAR 0
#else
    #define CR_L1_DCACHE_SET   0
    #define CR_L1_DCACHE_CLEAR BIT(CONTROL_C)
#endif

/* Branch prediction enable (CONTROL_Z), unless a debug build disables it. */
#ifndef CONFIG_DEBUG_DISABLE_BRANCH_PREDICTION
    #define CR_BRANCH_PREDICTION_SET   BIT(CONTROL_Z)
    #define CR_BRANCH_PREDICTION_CLEAR 0
#else
    #define CR_BRANCH_PREDICTION_SET   0
    #define CR_BRANCH_PREDICTION_CLEAR BIT(CONTROL_Z)
#endif

/* Bits OR-ed into the CP15 control register by _start: the cache/alignment/
 * branch-prediction choices made above, plus the MMU enable (CONTROL_M),
 * high exception vectors (CONTROL_V) and the extended (subpage-AP-disabled)
 * page-table format (CONTROL_XP). */
#define CR_BITS_SET    (CR_ALIGN_SET | \
                        CR_L1_ICACHE_SET | \
                        CR_L1_DCACHE_SET | \
                        BIT(CONTROL_M) | \
                        CR_BRANCH_PREDICTION_SET | \
                        BIT(CONTROL_V) | \
                        BIT(CONTROL_XP))

/* Bits cleared from the CP15 control register by _start: the complements of
 * the choices above, plus legacy/unwanted features — endianness and access-
 * permission legacy bits (CONTROL_B, CONTROL_S, CONTROL_R, CONTROL_AP),
 * vectored interrupts (CONTROL_VE), round-robin cache replacement
 * (CONTROL_RR), exception endianness (CONTROL_EE) and TEX remap
 * (CONTROL_TRE). NOTE(review): exact semantics per registerset.h — verify. */
#define CR_BITS_CLEAR  (CR_ALIGN_CLEAR | \
                        CR_L1_ICACHE_CLEAR | \
                        CR_L1_DCACHE_CLEAR | \
                        CR_BRANCH_PREDICTION_CLEAR | \
                        BIT(CONTROL_B) | \
                        BIT(CONTROL_S) | \
                        BIT(CONTROL_R) | \
                        BIT(CONTROL_VE) | \
                        BIT(CONTROL_RR) | \
                        BIT(CONTROL_EE) | \
                        BIT(CONTROL_TRE) | \
                        BIT(CONTROL_AP))
70
/*
 * Entry point of the kernel ELF image.
 * R0-R3 contain the first four parameters, which are passed through to
 * init_kernel() unchanged. Arguments 5 and 6 (the DTB address and size) are
 * popped from the elfloader stack into r7 and r8 below, and later pushed onto
 * the kernel stack, where init_kernel() takes them as stack arguments.
 */
76
.code 32
.section .boot.text, "ax"
BEGIN_FUNC(_start)
    /*
     * Get the dtb and dtb size from the elfloader stack. Do this first because
     * sp might change when we switch to supervisor mode.
     */
    pop {r7, r8}

    /* Supervisor/hypervisor mode, interrupts disabled */
    ldr r5, =CPSR_KERNEL
    msr cpsr_fc, r5

    /* Initialise CP15 control register: read-modify-write it so only the bits
     * named in CR_BITS_SET/CR_BITS_CLEAR change. With hypervisor support the
     * banked hyp-mode variant (HSCTLR, opc1=4) is used instead of SCTLR
     * (opc1=0). */
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    mrc p15, 4, r4, c1, c0, 0
#else
    mrc p15, 0, r4, c1, c0, 0
#endif
    ldr r5, =CR_BITS_SET
    ldr r6, =CR_BITS_CLEAR
    orr r4, r4, r5
    bic r4, r4, r6
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    mcr p15, 4, r4, c1, c0, 0

    /* Initialise vector base: point HVBAR at the kernel's exception vector
     * table so hyp-mode exceptions are delivered there. */
    ldr r4, =PPTR_VECTOR_TABLE
    mcr p15, 4, r4, c12, c0, 0
#else
    mcr p15, 0, r4, c1, c0, 0
#endif

#if defined(CONFIG_ARM_CORTEX_A9) && defined(CONFIG_ENABLE_A9_PREFETCHER)
    /* Set bit 2 in the ACTLR, which on the cortex-a9 is the l1 prefetch enable
     * bit. See section 4.3.10 of the Cortex-A9 Technical Reference Manual */
    mrc p15, 0, r4, c1, c0, 1
    ldr r5, =BIT(2)
    orr r4, r4, r5
    mcr p15, 0, r4, c1, c0, 1
#endif

#if defined(CONFIG_PLAT_HIKEY)
    /* Prefetcher configuration: clear the PREFETCHER_MASK field in the low
     * word of a 64-bit implementation-defined CP15 register and install the
     * PREFETCHER value; the high word (r5) is written back unchanged.
     * NOTE(review): PREFETCHER / PREFETCHER_MASK come from the HiKey platform
     * headers — semantics not visible in this file. */
   mrrc p15, 0, r4, r5, c15
   ldr r6, =PREFETCHER_MASK
   bic r4, r4, r6
   ldr r6, =PREFETCHER
   orr r4, r4, r6
   mcrr p15, 0, r4, r5, c15
#endif

     /* Load kernel stack pointer: the top of the first (or only) kernel stack.
      * On ARM SMP, kernel_stack_alloc is indexed by CPU ID
      * to get different stacks for each core
      */
    ldr sp, =kernel_stack_alloc + BIT(CONFIG_KERNEL_STACK_BITS)

#ifdef ENABLE_SMP_SUPPORT
    /*
     * Read MPIDR in r4
     * See ARM Reference Manual (ARMv7-A and ARMv7-R edition), Section B4.1.106
     * for more details about MPIDR register.
     */
    mrc p15, 0, r4, c0, c0, 5
    and r4, r4, #0xff               /* keep Aff0: the core number within the cluster */
    /* Set the sp for each core assuming linear indices:
     * sp += core_id * stack_size, on top of the base loaded above. */
    ldr     r5, =BIT(CONFIG_KERNEL_STACK_BITS)
    mul     r5, r4
    add     sp, sp, r5
#endif /* ENABLE_SMP_SUPPORT */

    /* Attempt to workaround any known ARM errata. Preserve the boot arguments
     * (r0-r3) and the DTB address/size (r7, r8) across the call. */
    push {r0-r3,r7-r8}
    blx arm_errata
    pop {r0-r3,r7-r8}

    /* Hyp kernel always run in Hyp mode. */
#ifndef CONFIG_ARM_HYPERVISOR_SUPPORT
    /* Initialise ABORT stack pointer: abort mode has its own banked sp, which
     * is pointed at a dedicated stack (presumably for breakpoint/debug-fault
     * handling, going by the symbol name — confirm in the linker script). */
    cps #PMODE_ABORT
    ldr sp, =_breakpoint_stack_top
    cps #PMODE_SUPERVISOR
#endif

    /* Put the DTB address back on the new stack for init_kernel. */
    push {r7, r8}

    /* Call bootstrapping implemented in C with parameters:
     *   r0: user image physical start address
     *   r1: user image physical end address
     *   r2: physical/virtual offset
     *   r3: user image virtual entry address
     *   sp[0]: DTB physical address (0 if there is none)
     *   sp[1]: DTB size (0 if there is none)
     */
    blx init_kernel

    /* Restore the initial thread. Note that the function restore_user_context()
     * could technically also be called at the end of init_kernel() directly,
     * there is no need to return to the assembly code here at all. However, for
     * verification things are a lot easier when init_kernel() is a normal C
     * function that returns. The function restore_user_context() is not a
     * normal C function and thus handled specially in verification, it does
     * highly architecture specific things to exit to user mode.
     */
    b restore_user_context

END_FUNC(_start)
186