/*
 * Copyright (c) 2014-2016 Travis Geiselbrecht
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#include <lk/debug.h>
#include <stdlib.h>
#include <arch.h>
#include <arch/atomic.h>
#include <arch/ops.h>
#include <arch/arm64.h>
#include <arch/arm64/mmu.h>
#include <arch/mp.h>
#include <kernel/thread.h>
#include <lk/init.h>
#include <lk/main.h>
#include <platform.h>
#include <lk/trace.h>

#define LOCAL_TRACE 0

#if WITH_SMP
/* smp boot lock */
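/* the lock starts out held (non-zero); the boot cpu releases it in arch_init()
 * once the secondary cpus are allowed to continue past arm64_secondary_entry() */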
static spin_lock_t arm_boot_cpu_lock = 1;
static volatile int secondaries_to_init = 0;
#endif

static void arm64_cpu_early_init(void) {
    /* set the vector base */
    ARM64_WRITE_SYSREG(VBAR_EL1, (uint64_t)&arm64_exception_base);

    /* switch to EL1 */
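    /* CurrentEL encodes the current exception level in bits [3:2], hence the shift */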
    unsigned int current_el = ARM64_READ_SYSREG(CURRENTEL) >> 2;
    if (current_el > 1) {
        arm64_el3_to_el1();
    }

    arch_enable_fiqs();
}

void arch_early_init(void) {
    arm64_cpu_early_init();
    platform_init_mmu_mappings();
}

void arch_stacktrace(uint64_t fp, uint64_t pc)
{
    struct arm64_stackframe frame;

    if (!fp) {
        frame.fp = (uint64_t)__builtin_frame_address(0);
        frame.pc = (uint64_t)arch_stacktrace;
    } else {
        frame.fp = fp;
        frame.pc = pc;
    }

    printf("stack trace:\n");
    while (frame.fp) {
        printf("0x%llx\n", frame.pc);

        /* stack frame pointer should be 16-byte aligned */
        if (frame.fp & 0xF)
            break;

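        /* per the AAPCS64 frame record layout, fp points at a saved pair of
         * { previous fp, lr }, so the return address lives at fp + 8 */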
        frame.pc = *((uint64_t *)(frame.fp + 8));
        frame.fp = *((uint64_t *)frame.fp);
    }
}

void arch_init(void) {
#if WITH_SMP
    arch_mp_init_percpu();

    LTRACEF("midr_el1 0x%llx\n", ARM64_READ_SYSREG(midr_el1));

    secondaries_to_init = SMP_MAX_CPUS - 1; /* TODO: get count from somewhere else, or add cpus as they boot */

    lk_init_secondary_cpus(secondaries_to_init);

    LTRACEF("releasing %d secondary cpus\n", secondaries_to_init);

    /* release the secondary cpus */
    spin_unlock(&arm_boot_cpu_lock);

    /* flush the release of the lock, since the secondary cpus are running without cache on */
    arch_clean_cache_range((addr_t)&arm_boot_cpu_lock, sizeof(arm_boot_cpu_lock));
#endif
}

void arch_quiesce(void) {
}

void arch_idle(void) {
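    /* wait for interrupt: idle the core in a low power state until an
     * interrupt or event wakes it */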
    __asm__ volatile("wfi");
}

void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3) {
    PANIC_UNIMPLEMENTED;
}

/* switch to user mode, set the user stack pointer to user_stack_top, and point
 * the kernel (EL1) stack pointer at the top of the current thread's kernel stack */
void arch_enter_uspace(vaddr_t entry_point, vaddr_t user_stack_top) {
    DEBUG_ASSERT(IS_ALIGNED(user_stack_top, 16));

    thread_t *ct = get_current_thread();

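    /* compute the top of this thread's kernel stack; it is loaded into sp below
     * and must stay 16-byte aligned, hence the round down */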
    vaddr_t kernel_stack_top = (uintptr_t)ct->stack + ct->stack_size;
    kernel_stack_top = ROUNDDOWN(kernel_stack_top, 16);

    /* set up a default spsr to get into 64bit user space:
     * zeroed NZCV
     * no SS, no IL, no D
     * all interrupts enabled
     * mode 0: EL0t
     */
    uint32_t spsr = 0;

    arch_disable_ints();

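    /* program the EL0 stack pointer, return address, and saved program state,
     * point sp at the kernel stack, then eret drops to EL0 at entry_point */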
    asm volatile(
        "mov    sp, %[kstack];"
        "msr    sp_el0, %[ustack];"
        "msr    elr_el1, %[entry];"
        "msr    spsr_el1, %[spsr];"
        "eret;"
        :
        : [ustack]"r"(user_stack_top),
        [kstack]"r"(kernel_stack_top),
        [entry]"r"(entry_point),
        [spsr]"r"(spsr)
        : "memory");
    __UNREACHABLE;
}

#if WITH_SMP
/* called from assembly */
void arm64_secondary_entry(ulong);
void arm64_secondary_entry(ulong asm_cpu_num) {
    uint cpu = arch_curr_cpu_num();
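    /* sanity check: the cpu number passed in from the assembly start code
     * should match what arch_curr_cpu_num() reports; bail if it does not */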
    if (cpu != asm_cpu_num)
        return;

    arm64_cpu_early_init();

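    /* wait here until the boot cpu releases arm_boot_cpu_lock in arch_init() */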
    spin_lock(&arm_boot_cpu_lock);
    spin_unlock(&arm_boot_cpu_lock);

    /* run early secondary cpu init routines up to the threading level */
    lk_init_level(LK_INIT_FLAG_SECONDARY_CPUS, LK_INIT_LEVEL_EARLIEST, LK_INIT_LEVEL_THREADING - 1);

    arch_mp_init_percpu();

    LTRACEF("cpu num %d\n", cpu);

    /* we're done, tell the main cpu we're up */
    atomic_add(&secondaries_to_init, -1);
    __asm__ volatile("sev");

    lk_secondary_cpu_entry();
}
#endif