/*
 * Copyright (c) 2014-2016 Travis Geiselbrecht
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#include <lk/debug.h>
#include <stdlib.h>
#include <arch.h>
#include <arch/atomic.h>
#include <arch/ops.h>
#include <arch/arm64.h>
#include <arch/arm64/mmu.h>
#include <arch/mp.h>
#include <kernel/thread.h>
#include <lk/init.h>
#include <lk/main.h>
#include <platform.h>
#include <lk/trace.h>

#define LOCAL_TRACE 0

#if WITH_SMP
/* smp boot lock, initialized held so secondary cpus spin until the boot cpu releases it in arch_init() */
static spin_lock_t arm_boot_cpu_lock = 1;
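/* number of secondary cpus left to check in; each secondary decrements it in arm64_secondary_entry() */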
static volatile int secondaries_to_init = 0;
#endif

static void arm64_cpu_early_init(void) {
    /* set the vector base */
    ARM64_WRITE_SYSREG(VBAR_EL1, (uint64_t)&arm64_exception_base);

    arch_enable_fiqs();
}

void arch_early_init(void) {
    arm64_cpu_early_init();
    platform_init_mmu_mappings();
}

void arch_stacktrace(uint64_t fp, uint64_t pc) {
    struct arm64_stackframe frame;

    if (!fp) {
        frame.fp = (uint64_t)__builtin_frame_address(0);
        frame.pc = (uint64_t)arch_stacktrace;
    } else {
        frame.fp = fp;
        frame.pc = pc;
    }

    printf("stack trace:\n");
    while (frame.fp) {
        printf("0x%llx\n", frame.pc);

        /* the stack frame pointer should be 16-byte aligned */
        if (frame.fp & 0xF)
            break;

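        /* AArch64 frame record layout: [fp] holds the caller's fp, [fp + 8] holds the saved lr (return address) */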
        frame.pc = *((uint64_t *)(frame.fp + 8));
        frame.fp = *((uint64_t *)frame.fp);
    }
}

void arch_init(void) {
#if WITH_SMP
    arch_mp_init_percpu();

    LTRACEF("midr_el1 0x%llx\n", ARM64_READ_SYSREG(midr_el1));

    secondaries_to_init = SMP_MAX_CPUS - 1; /* TODO: get count from somewhere else, or add cpus as they boot */

    lk_init_secondary_cpus(secondaries_to_init);

    LTRACEF("releasing %d secondary cpus\n", secondaries_to_init);

    /* release the secondary cpus */
    spin_unlock(&arm_boot_cpu_lock);

    /* flush the release of the lock, since the secondary cpus may still be running with their caches disabled */
    arch_clean_cache_range((addr_t)&arm_boot_cpu_lock, sizeof(arm_boot_cpu_lock));
#endif
}

void arch_quiesce(void) {
}

void arch_idle(void) {
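    /* wait-for-interrupt: the cpu idles in a low-power state until an interrupt or other wakeup event arrives */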
    __asm__ volatile("wfi");
}

void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3) {
    PANIC_UNIMPLEMENTED;
}

/* switch to user mode, set the user stack pointer to user_stack_top, and point the kernel (EL1) stack pointer at the top of the kernel stack */
void arch_enter_uspace(vaddr_t entry_point, vaddr_t user_stack_top) {
    DEBUG_ASSERT(IS_ALIGNED(user_stack_top, 16));

    thread_t *ct = get_current_thread();

    vaddr_t kernel_stack_top = (uintptr_t)ct->stack + ct->stack_size;
    kernel_stack_top = ROUNDDOWN(kernel_stack_top, 16);

    /* set up a default spsr to get into 64bit user space:
     * zeroed NZCV
     * no SS, no IL, no D
     * all interrupts enabled
     * mode 0: EL0t
     */
    uint32_t spsr = 0;

    arch_disable_ints();

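    /* switch sp to the kernel stack, program the user stack pointer, return address, and spsr, then eret into EL0 */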
    asm volatile(
        "mov    sp, %[kstack];"
        "msr    sp_el0, %[ustack];"
        "msr    elr_el1, %[entry];"
        "msr    spsr_el1, %[spsr];"
        "eret;"
        :
        : [ustack]"r"(user_stack_top),
        [kstack]"r"(kernel_stack_top),
        [entry]"r"(entry_point),
        [spsr]"r"(spsr)
        : "memory");
    __UNREACHABLE;
}

#if WITH_SMP
/* called from assembly */
void arm64_secondary_entry(ulong);
void arm64_secondary_entry(ulong asm_cpu_num) {
    uint cpu = arch_curr_cpu_num();
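    /* sanity check that the cpu number handed over from assembly matches what the architecture reports */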
    if (cpu != asm_cpu_num)
        return;

    arm64_cpu_early_init();

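    /* wait here until the boot cpu releases the boot lock in arch_init() */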
    spin_lock(&arm_boot_cpu_lock);
    spin_unlock(&arm_boot_cpu_lock);

    /* run early secondary cpu init routines up to the threading level */
    lk_init_level(LK_INIT_FLAG_SECONDARY_CPUS, LK_INIT_LEVEL_EARLIEST, LK_INIT_LEVEL_THREADING - 1);

    arch_mp_init_percpu();

    LTRACEF("cpu num %d\n", cpu);

    /* we're done, tell the main cpu we're up */
    atomic_add(&secondaries_to_init, -1);
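    /* sev wakes any cpu that may be spinning in wfe waiting for this count to change */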
    __asm__ volatile("sev");

    lk_secondary_cpu_entry();
}
#endif