1 /*
2  * Copyright (c) 2009 Corey Tabaka
3  * Copyright (c) 2015 Intel Corporation
4  *
5  * Use of this source code is governed by a MIT-style
6  * license that can be found in the LICENSE file or at
7  * https://opensource.org/licenses/MIT
8  */
9 
10 #include <lk/debug.h>
11 #include <arch.h>
12 #include <arch/ops.h>
13 #include <arch/x86.h>
14 #include <arch/x86/mmu.h>
15 #include <arch/x86/descriptor.h>
16 #include <arch/x86/feature.h>
17 #include <arch/fpu.h>
18 #include <arch/mmu.h>
19 #include <kernel/vm.h>
20 #include <platform.h>
21 #include <sys/types.h>
22 
/* Describe how start.S sets up the MMU.
 * These data structures are later used by vm routines to look up pointers
 * to physical pages based on physical addresses.
 *
 * The table is terminated by a zeroed entry.
 */
struct mmu_initial_mapping mmu_initial_mappings[] = {
    /* 1:1 ("physmap") mapping of the start of physical memory at the base
     * of the kernel address space. The mapped size is configuration
     * dependent (see PHYSMAP_SIZE below). */
    {
        .phys = MEMBASE,
        .virt = KERNEL_ASPACE_BASE,
        .size = PHYSMAP_SIZE, /* x86-64 maps first 64GB by default, 1GB on x86-32, 16MB in legacy mode */
        .flags = 0,
        .name = "physmap"
    },
#if ARCH_X86_64
    /* Another linear map of the first GB of memory where the kernel image
     * lives at the top of the address space. */
    {
        .phys = MEMBASE,
        .virt = KERNEL_BASE,
        .size = 1*GB,
        .flags = 0,
        .name = "kernel"
    },
#endif

    /* null entry to terminate the list */
    { 0 }
};
51 
/* early stack — presumably used as the boot stack by start.S before
 * threads exist; TODO confirm against start.S */
uint8_t _kstack[PAGE_SIZE] __ALIGNED(sizeof(unsigned long));

/* save a pointer to the multiboot information coming in from whoever called us */
/* make sure it lives in .data to avoid it being wiped out by bss clearing */
__SECTION(".data") uint32_t _multiboot_info;

/* main tss — a single TSS currently shared by all cpus; see the XXX in
 * x86_early_init_percpu() about moving to a per-cpu TSS */
static tss_t system_tss __ALIGNED(16);
61 
/* Early per-cpu initialization: enable caches, set up and load this cpu's
 * TSS, load the kernel IDT, then run per-cpu MMU and FPU init.
 * Must run on each cpu before it can safely take interrupts or exceptions.
 */
void x86_early_init_percpu(void) {
    // enable caches: clear the Cache-Disable and Not-Writethrough bits in CR0
    clear_in_cr0(X86_CR0_NW | X86_CR0_CD);

    // configure the system TSS
    // XXX move to a per cpu TSS in the percpu structure
#if ARCH_X86_32
    system_tss.esp0 = 0;            // ring 0 stack; zero here, presumably filled in later — confirm
    system_tss.ss0 = DATA_SELECTOR; // ring 0 stack segment
    system_tss.ss1 = 0;             // rings 1 and 2 are unused
    system_tss.ss2 = 0;
    system_tss.eflags = 0x00003002; // IOPL 3, plus the always-set reserved bit 1
    system_tss.bitmap = offsetof(tss_32_t, tss_bitmap); // offset of the io permission bitmap
    system_tss.trace = 1; // trap on hardware task switch
#elif ARCH_X86_64
    /* nothing to be done here, a fully zeroed TSS is a good starting point */
#endif
    /* each cpu gets its own GDT slot, 8 selector-bytes apart.
     * NOTE(review): on x86-64 a TSS descriptor occupies 16 bytes in the GDT;
     * confirm the 8-byte spacing is intended / handled by the GDT layout. */
    const uint selector = TSS_SELECTOR_BASE + 8 * arch_curr_cpu_num();
    x86_set_gdt_descriptor(selector, &system_tss, sizeof(system_tss), 1, 0, 0, SEG_TYPE_TSS, 0, 0);
    x86_ltr(selector); // load the task register with this cpu's TSS selector

    /* load the kernel's IDT */
    asm("lidt _idtr");

    x86_mmu_early_init_percpu();
#if X86_WITH_FPU
    x86_fpu_early_init_percpu();
#endif
}
91 
/* early initialization of the system, on the boot cpu, usually before any sort of
 * printf output is available.
 *
 * Ordering matters: feature detection runs first so the mmu/fpu init code
 * can key off detected cpu features.
 */
void arch_early_init(void) {
    x86_feature_early_init();
    x86_mmu_early_init();
#if X86_WITH_FPU
    x86_fpu_early_init();
#endif

    /* the boot cpu runs the per-cpu pass itself; secondary cpus presumably
     * invoke x86_early_init_percpu() from their own startup path — confirm */
    x86_early_init_percpu();
}
104 
/* later initialization pass, once the main kernel is initialized and scheduling has begun */
void arch_init(void) {
    /* second-stage counterparts of the *_early_init() calls in arch_early_init() */
    x86_feature_init();
    x86_mmu_init();

#if X86_WITH_FPU
    x86_fpu_init();
#endif
}
114 
/* Chain-load another image at 'entry', passing up to four arguments.
 * Not implemented on x86: always panics. */
void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3) {
    PANIC_UNIMPLEMENTED;
}
118 
/* Drop from kernel mode to user space, entering at entry_point with the
 * given user stack pointer. Not implemented on x86: always panics.
 *
 * The previous revision carried an #if 0'd arm64 implementation
 * (sp_el0/elr_el1/spsr_el1 + eret) evidently copied from the arm64 port;
 * it was dead code with no relevance to x86, so it has been removed.
 * A real implementation would build an iret (or sysret) frame here.
 */
void arch_enter_uspace(vaddr_t entry_point, vaddr_t user_stack_top) {
    /* silence unused-parameter warnings until this is implemented */
    (void)entry_point;
    (void)user_stack_top;

    PANIC_UNIMPLEMENTED;
}
154