// Copyright 2016 The Fuchsia Authors
// Copyright (c) 2009 Corey Tabaka
// Copyright (c) 2015 Intel Corporation
// Copyright (c) 2016 Travis Geiselbrecht
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <arch.h>
#include <arch/mmu.h>
#include <arch/mp.h>
#include <arch/ops.h>
#include <arch/x86.h>
#include <arch/x86/apic.h>
#include <arch/x86/descriptor.h>
#include <arch/x86/feature.h>
#include <arch/x86/mmu.h>
#include <arch/x86/mmu_mem_types.h>
#include <arch/x86/mp.h>
#include <arch/x86/proc_trace.h>
#include <arch/x86/tsc.h>
#include <assert.h>
#include <debug.h>
#include <err.h>
#include <inttypes.h>
#include <lib/console.h>
#include <lk/init.h>
#include <lk/main.h>
#include <platform.h>
#include <string.h>
#include <sys/types.h>
#include <trace.h>
#include <vm/vm.h>
#include <zircon/compiler.h>
#include <zircon/types.h>

#define LOCAL_TRACE 0

/* save a pointer to the bootdata (ZBI), if present */
void* _zbi_base;
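// (_zbi_base is expected to be filled in by the early assembly entry code,
// before any C code runs.)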

void arch_early_init(void) {
    x86_mmu_early_init();
}

void arch_init(void) {
    const struct x86_model_info* model = x86_get_model();
    printf("Processor Model Info: type %#x family %#x model %#x stepping %#x\n",
           model->processor_type, model->family, model->model, model->stepping);
    printf("\tdisplay_family %#x display_model %#x\n",
           model->display_family, model->display_model);

    x86_feature_debug();

    x86_mmu_init();

    gdt_setup();
    idt_setup_readonly();

    x86_processor_trace_init();
}

void arch_enter_uspace(uintptr_t entry_point, uintptr_t sp,
                       uintptr_t arg1, uintptr_t arg2) {
    LTRACEF("entry %#" PRIxPTR " user stack %#" PRIxPTR "\n", entry_point, sp);
    LTRACEF("kernel stack %#" PRIxPTR "\n", x86_get_percpu()->default_tss.rsp0);

    arch_disable_ints();
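    // Interrupts stay off from here through the final iretq in
    // x86_uspace_entry: once the swapgs there installs the user gs base,
    // taking an interrupt in kernel mode would leave the handler without a
    // valid percpu pointer.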

    /* default user space flags:
     * IOPL 0
     * Interrupts enabled
     */
    ulong flags = (0 << X86_FLAGS_IOPL_SHIFT) | X86_FLAGS_IF;

    /* check that we're probably still pointed at the kernel gs */
    DEBUG_ASSERT(is_kernel_address(read_msr(X86_MSR_IA32_GS_BASE)));

    /* check that the kernel stack is set properly */
    DEBUG_ASSERT(is_kernel_address(x86_get_percpu()->default_tss.rsp0));

    /* set up user's fs: gs: base */
    write_msr(X86_MSR_IA32_FS_BASE, 0);

    /* set the KERNEL_GS_BASE msr here, because we're going to swapgs below */
    write_msr(X86_MSR_IA32_KERNEL_GS_BASE, 0);
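    // swapgs exchanges GS_BASE and KERNEL_GS_BASE, so after the swap user
    // code runs with a zero gs base while the kernel percpu pointer sits in
    // KERNEL_GS_BASE, ready to be swapped back on the next kernel entry.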

    x86_uspace_entry(arg1, arg2, sp, entry_point, flags);
    __UNREACHABLE;
}

void arch_suspend(void) {
    DEBUG_ASSERT(arch_ints_disabled());
    apic_io_save();
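    // Record the current TSC adjustment; the saved value is presumably
    // re-applied when the CPU is brought back up so the time base stays
    // consistent across suspend.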
    x86_tsc_store_adjustment();
}

void arch_resume(void) {
    DEBUG_ASSERT(arch_ints_disabled());

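    // The resume path runs on the bootstrap processor, hence the hard-coded
    // cpu 0 here and in the PAT sync mask below.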
    x86_init_percpu(0);
    x86_mmu_percpu_init();
    x86_pat_sync(cpu_num_to_mask(0));

    apic_local_init();

    // Ensure the CPU that resumed was assigned the correct percpu object.
    DEBUG_ASSERT(apic_local_id() == x86_get_percpu()->apic_id);

    apic_io_restore();
}

[[ noreturn, gnu::noinline ]] static void finish_secondary_entry(
    volatile int* aps_still_booting, thread_t* thread, uint cpu_num) {

    // Signal that this CPU is initialized. It is important that after this
    // operation, we do not touch any resources associated with bootstrap
    // besides our thread_t and stack, since this is the checkpoint the
    // bootstrap process uses to identify completion.
    int old_val = atomic_and(aps_still_booting, ~(1U << cpu_num));
    if (old_val == 0) {
        // If the value is already zero, then booting this CPU timed out.
        goto fail;
    }

    // Defer configuring memory settings until after the atomic_and above.
    // This ensures that we were in no-fill cache mode for the duration of
    // early AP init.
    DEBUG_ASSERT(x86_get_cr0() & X86_CR0_CD);
    x86_mmu_percpu_init();

    // Load the appropriate PAT/MTRRs. This must happen after init_percpu, so
    // that this CPU is considered online.
    x86_pat_sync(1U << cpu_num);

    /* run early secondary cpu init routines up to the threading level */
    lk_init_level(LK_INIT_FLAG_SECONDARY_CPUS, LK_INIT_LEVEL_EARLIEST, LK_INIT_LEVEL_THREADING - 1);

    thread_secondary_cpu_init_early(thread);
    // The thread stacks and struct are from a single allocation; free it
    // when we exit into the scheduler.
    thread->flags |= THREAD_FLAG_FREE_STRUCT;

    lk_secondary_cpu_entry();

    // lk_secondary_cpu_entry only returns on an error; halt the core in this
    // case.
fail:
    arch_disable_ints();
    while (1) {
        x86_hlt();
    }
}

// This is called from assembly, before any other C code.
// The %gs.base is not set up yet, so we have to trust that
// this function is simple enough that the compiler won't
// want to generate stack-protector prologue/epilogue code,
// which would use %gs.
__NO_SAFESTACK __NO_RETURN void x86_secondary_entry(volatile int* aps_still_booting,
                                                    thread_t* thread) {
    // Would prefer this to be in init_percpu, but there is a dependency on a
    // page mapping existing, and the BP calls that before the VM subsystem is
    // initialized.
    apic_local_init();

    uint32_t local_apic_id = apic_local_id();
    int cpu_num = x86_apic_id_to_cpu_num(local_apic_id);
    if (cpu_num < 0) {
        // If we could not find our CPU number, do not proceed further.
        arch_disable_ints();
        while (1) {
            x86_hlt();
        }
    }

    DEBUG_ASSERT(cpu_num > 0);
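    // cpu 0 is the bootstrap processor, so any AP reaching this entry point
    // must have a nonzero cpu number; ap_percpus below is indexed by
    // cpu_num - 1 accordingly.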

    // Set %gs.base to our percpu struct. This has to be done before
    // calling x86_init_percpu, which initializes most of that struct, so
    // that x86_init_percpu can use safe-stack and/or stack-protector code.
    struct x86_percpu* const percpu = &ap_percpus[cpu_num - 1];
    write_msr(X86_MSR_IA32_GS_BASE, (uintptr_t)percpu);

    // Copy the stack-guard value from the boot CPU's percpu.
    percpu->stack_guard = bp_percpu.stack_guard;
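    // Every CPU must use the same guard value: a thread's stack-protector
    // epilogue check may run on a different CPU than the prologue that
    // stored the guard, e.g. after a migration.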

#if __has_feature(safe_stack)
    // Set up the initial unsafe stack pointer.
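    // (Rounded down to 16 bytes to satisfy the x86-64 ABI stack-alignment
    // requirement.)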
    x86_write_gs_offset64(
        ZX_TLS_UNSAFE_SP_OFFSET,
        ROUNDDOWN(thread->stack.unsafe_base + thread->stack.size, 16));
#endif

    x86_init_percpu((uint)cpu_num);

    // Now do the rest of the work, in a function that is free to
    // use %gs in its code.
    finish_secondary_entry(aps_still_booting, thread, cpu_num);
}

static int cmd_cpu(int argc, const cmd_args* argv, uint32_t flags) {
    if (argc < 2) {
        printf("not enough arguments\n");
    usage:
        printf("usage:\n");
        printf("%s features\n", argv[0].str);
        printf("%s unplug <cpu_id>\n", argv[0].str);
        printf("%s hotplug <cpu_id>\n", argv[0].str);
        return ZX_ERR_INTERNAL;
    }

    if (!strcmp(argv[1].str, "features")) {
        x86_feature_debug();
    } else if (!strcmp(argv[1].str, "unplug")) {
        if (argc < 3) {
            printf("specify a cpu_id\n");
            goto usage;
        }
        zx_status_t status = mp_unplug_cpu((uint)argv[2].u);
        printf("CPU %lu unplugged: %d\n", argv[2].u, status);
    } else if (!strcmp(argv[1].str, "hotplug")) {
        if (argc < 3) {
            printf("specify a cpu_id\n");
            goto usage;
        }
        zx_status_t status = mp_hotplug_cpu((uint)argv[2].u);
        printf("CPU %lu hotplugged: %d\n", argv[2].u, status);
    } else {
        printf("unknown command\n");
        goto usage;
    }

    return ZX_OK;
}

STATIC_COMMAND_START
#if LK_DEBUGLEVEL > 0
STATIC_COMMAND("cpu", "cpu test commands", &cmd_cpu)
#endif
STATIC_COMMAND_END(cpu);