1/*
2 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
3 * Copyright 2021, HENSOLDT Cyber
4 *
5 * SPDX-License-Identifier: GPL-2.0-only
6 */
7
8#include <config.h>
9#include <machine/assembler.h>
10#include <arch/machine/hardware.h>
11#include <arch/machine/registerset.h>
12#include <util.h>
13
/* Alignment checking: by default unaligned data accesses are permitted, i.e.
 * SCTLR.A ends up cleared. Building with ALLOW_UNALIGNED_ACCESS defined as 0
 * selects strict alignment checking (SCTLR.A set) instead. */
#ifndef ALLOW_UNALIGNED_ACCESS
#define ALLOW_UNALIGNED_ACCESS 1
#endif

#if ALLOW_UNALIGNED_ACCESS
#define CR_ALIGN_SET     0
#define CR_ALIGN_CLEAR   BIT(CONTROL_A)
#else
#define CR_ALIGN_SET     BIT(CONTROL_A)
#define CR_ALIGN_CLEAR   0
#endif

/* L1 instruction cache enable (SCTLR.I): on by default, forced off when the
 * debug option CONFIG_DEBUG_DISABLE_L1_ICACHE is set. */
#ifndef CONFIG_DEBUG_DISABLE_L1_ICACHE
    #define CR_L1_ICACHE_SET   BIT(CONTROL_I)
    #define CR_L1_ICACHE_CLEAR 0
#else
    #define CR_L1_ICACHE_SET   0
    #define CR_L1_ICACHE_CLEAR BIT(CONTROL_I)
#endif

/* L1 data cache enable (SCTLR.C): on by default, forced off when the debug
 * option CONFIG_DEBUG_DISABLE_L1_DCACHE is set. */
#ifndef CONFIG_DEBUG_DISABLE_L1_DCACHE
    #define CR_L1_DCACHE_SET   BIT(CONTROL_C)
    #define CR_L1_DCACHE_CLEAR 0
#else
    #define CR_L1_DCACHE_SET   0
    #define CR_L1_DCACHE_CLEAR BIT(CONTROL_C)
#endif

/* SCTLR bits to be set on entry: the cache/alignment selections above plus
 * CONTROL_M (per the ARM ARM, SCTLR.M is the MMU enable for this EL). */
#define CR_BITS_SET    (CR_ALIGN_SET | \
                        CR_L1_ICACHE_SET | \
                        CR_L1_DCACHE_SET | \
                        BIT(CONTROL_M))

/* SCTLR bits to be cleared on entry. Per the ARM ARM field names these are
 * SA0 (EL0 stack-pointer alignment check) and EE/E0E (data endianness at
 * this EL and EL0), i.e. little-endian operation and no EL0 SP alignment
 * trap — assuming the CONTROL_* constants map to the standard SCTLR fields;
 * see arch/machine/registerset.h. */
#define CR_BITS_CLEAR  (CR_ALIGN_CLEAR | \
                        CR_L1_ICACHE_CLEAR | \
                        CR_L1_DCACHE_CLEAR | \
                        BIT(CONTROL_SA0) | \
                        BIT(CONTROL_EE) | \
                        BIT(CONTROL_E0E))
53
54/*
55 * Entry point of the kernel ELF image.
56 * X0-X5 contain parameters that are passed to init_kernel().
57 *
 * Note that for the SMP kernel, tpidr_el1 is used to pass
 * the logical core ID.
60 */
61
/* With hypervisor support the kernel runs at EL2, so the system control
 * register to program is sctlr_el2; otherwise it is sctlr_el1. */
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
#define SCTLR   sctlr_el2
#else
#define SCTLR   sctlr_el1
#endif
67
.section .boot.text
BEGIN_FUNC(_start)
    /* Save x4 and x5 (DTB physical address and size, per the argument list
     * documented before the init_kernel call below) so we don't clobber them:
     * x4 is reused as a scratch register for the SCTLR setup. */
    mov     x7, x4
    mov     x8, x5

    /* Make sure interrupts are disabled */
    msr daifset, #DAIFSET_MASK

    /* Initialise the sctlr_el1 or sctlr_el2 register (see the SCTLR macro):
     * OR in CR_BITS_SET and clear CR_BITS_CLEAR, leaving all other bits as
     * the bootloader left them. */
    msr     spsel, #1               /* use SP_ELx, not SP_EL0, at this EL */
    mrs     x4, SCTLR
    ldr     x19, =CR_BITS_SET
    ldr     x20, =CR_BITS_CLEAR
    orr     x4, x4, x19
    bic     x4, x4, x20
    msr     SCTLR, x4

#ifdef ENABLE_SMP_SUPPORT
    /* tpidr_el1 has the logical ID of the core, starting from 0 */
    mrs     x6, tpidr_el1
    /* Set the sp for each core assuming linear indices: each core gets its
     * own BIT(CONFIG_KERNEL_STACK_BITS)-sized stack, and sp points at the
     * top of this core's stack. */
    ldr     x5, =BIT(CONFIG_KERNEL_STACK_BITS)
    mul     x5, x5, x6
    ldr     x4, =kernel_stack_alloc + BIT(CONFIG_KERNEL_STACK_BITS)
    add     x4, x4, x5
    mov     sp, x4
    /* the kernel stack must be 4-KiB aligned since we use the
       lowest 12 bits to store the logical core ID, packed into the
       stack-pointer value stored in the thread ID register below. */
    orr     x6, x6, x4
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
    msr     tpidr_el2, x6
#else
    msr     tpidr_el1, x6
#endif
#else
    /* Single-core: sp is simply the top of the one kernel stack. */
    ldr    x4, =kernel_stack_alloc + BIT(CONFIG_KERNEL_STACK_BITS)
    mov    sp, x4
#endif /* ENABLE_SMP_SUPPORT */

    /* Attempt to workaround any known ARM errata. arm_errata may clobber
     * caller-saved registers, so the six argument values are preserved on
     * the stack across the call. Note the deliberate asymmetry: x7/x8 (the
     * DTB arguments saved at entry) are popped into x4/x5, the registers
     * init_kernel expects them in. */
    stp     x0, x1, [sp, #-16]!
    stp     x2, x3, [sp, #-16]!
    stp     x7, x8, [sp, #-16]!
    bl arm_errata
    ldp     x4, x5, [sp], #16
    ldp     x2, x3, [sp], #16
    ldp     x0, x1, [sp], #16

    /* Call bootstrapping implemented in C with parameters:
     *  x0: user image physical start address
     *  x1: user image physical end address
     *  x2: physical/virtual offset
     *  x3: user image virtual entry address
     *  x4: DTB physical address (0 if there is none)
     *  x5: DTB size (0 if there is none)
     */
    bl      init_kernel

    /* Restore the initial thread. Note that the function restore_user_context()
     * could technically also be called at the end of init_kernel() directly,
     * there is no need to return to the assembly code here at all. However, for
     * verification things are a lot easier when init_kernel() is a normal C
     * function that returns. The function restore_user_context() is not a
     * normal C function and thus handled specially in verification, it does
     * highly architecture specific things to exit to user mode.
     */
    b restore_user_context

END_FUNC(_start)
138