/*
 * Copyright (c) 2009 Corey Tabaka
 * Copyright (c) 2014 Travis Geiselbrecht
 * Copyright (c) 2015 Intel Corporation
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#include <sys/types.h>
#include <string.h>
#include <stdlib.h>
#include <lk/debug.h>
#include <kernel/thread.h>
#include <kernel/spinlock.h>
#include <arch/x86.h>
#include <arch/x86/descriptor.h>
#include <arch/fpu.h>

#if !WITH_SMP
/* we're uniprocessor at this point for x86, so store a global pointer to the current thread */
struct thread *_current_thread;
#endif

static void initial_thread_func(void) __NO_RETURN;
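/*
 * For reference, a sketch of the context switch frame layouts assumed by
 * this file. The real struct definitions live in the arch headers included
 * above; the field names here are illustrative. The 32-bit layout follows
 * directly from the pusha/pushf/push sequence in arch_context_switch below:
 * pusha pushes eax, ecx, edx, ebx, esp, ebp, esi, edi, so edi lands at the
 * lowest address.
 *
 *   struct x86_32_context_switch_frame {
 *       uint32_t edi, esi, ebp, esp, ebx, edx, ecx, eax; // restored by popa (the esp slot is ignored)
 *       uint32_t eflags;                                 // restored by popf
 *       uint32_t eip;                                    // popped by ret
 *   };
 *
 * The 64-bit frame is assumed to hold the callee-saved registers plus
 * rflags and rip, with rip at the top of the frame so that
 * x86_64_context_switch can return through it (see the sketch at the end
 * of this file).
 */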
static void initial_thread_func(void) {
    /* release the thread lock that was implicitly held across the reschedule */
    spin_unlock(&thread_lock);
    arch_enable_ints();

    thread_t *ct = arch_get_current_thread();
    int ret = ct->entry(ct->arg);

    thread_exit(ret);
}

void arch_thread_initialize(thread_t *t) {
    // create a default stack frame on the stack
    vaddr_t stack_top = (vaddr_t)t->stack + t->stack_size;

#if ARCH_X86_32
    // make sure the top of the stack is 8 byte aligned for ABI compliance
    stack_top = ROUNDDOWN(stack_top, 8);
    struct x86_32_context_switch_frame *frame = (struct x86_32_context_switch_frame *)(stack_top);
#endif
#if ARCH_X86_64
    // make sure the top of the stack is 16 byte aligned for ABI compliance
    stack_top = ROUNDDOWN(stack_top, 16);

    // start the frame 8 bytes misaligned (relative to the 16 byte alignment) because the
    // context switch will pop the return address off the stack. After the first context
    // switch this leaves the stack misaligned by exactly 8 bytes, which is what the ABI
    // says a freshly called function should see on entry.
    stack_top -= 8;
    struct x86_64_context_switch_frame *frame = (struct x86_64_context_switch_frame *)(stack_top);
#endif

    // move down a frame size and zero it out
    frame--;
    memset(frame, 0, sizeof(*frame));

#if ARCH_X86_32
    frame->eip = (vaddr_t) &initial_thread_func;
    frame->eflags = 0x3002; // IF = 0, NT = 0, IOPL = 3
#endif

#if ARCH_X86_64
    frame->rip = (vaddr_t) &initial_thread_func;
    frame->rflags = 0x3002; /* IF = 0, NT = 0, IOPL = 3 */
#endif

#if X86_WITH_FPU
    // initialize the saved fpu state
    fpu_init_thread_states(t);
#endif

    // set the stack pointer
    t->arch.sp = (vaddr_t)frame;
}
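/*
 * Putting the pieces together: on the first switch into this thread,
 * arch_context_switch points the stack at the frame built above, pops the
 * zeroed saved registers, pops eflags/rflags = 0x3002 (so interrupts are
 * still off), and finally pops eip/rip, landing in initial_thread_func,
 * which drops the thread lock, enables interrupts, and calls the thread's
 * entry routine.
 */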

void arch_dump_thread(thread_t *t) {
    if (t->state != THREAD_RUNNING) {
        dprintf(INFO, "\tarch: ");
        dprintf(INFO, "sp 0x%lx\n", t->arch.sp);
    }
}

#if ARCH_X86_32

void arch_context_switch(thread_t *oldthread, thread_t *newthread) {
    //dprintf(DEBUG, "arch_context_switch: old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);

#if X86_WITH_FPU
    fpu_context_switch(oldthread, newthread);
#endif

    __asm__ __volatile__ (
        "pushl $1f          \n\t" // push the address the old thread resumes at
        "pushf              \n\t" // save the old thread's eflags
        "pusha              \n\t" // save the old thread's general purpose registers
        "movl %%esp,(%%edx) \n\t" // stash the old stack pointer in oldthread->arch.sp
        "movl %%eax,%%esp   \n\t" // switch to the new thread's stack
        "popa               \n\t" // restore the new thread's general purpose registers
        "popf               \n\t" // restore the new thread's eflags
        "ret                \n\t" // pop the new thread's eip and jump to it
        "1:                 \n\t" // the old thread continues here when switched back to

        :
        : "d" (&oldthread->arch.sp), "a" (newthread->arch.sp)
    );

    // unused alternate version that also saves and restores the segment registers and
    // returns via iret
    /*__asm__ __volatile__ (
        "pushf              \n\t"
        "pushl %%cs         \n\t"
        "pushl $1f          \n\t"
        "pushl %%gs         \n\t"
        "pushl %%fs         \n\t"
        "pushl %%es         \n\t"
        "pushl %%ds         \n\t"
        "pusha              \n\t"
        "movl %%esp,(%%edx) \n\t"
        "movl %%eax,%%esp   \n\t"
        "popa               \n\t"
        "popl %%ds          \n\t"
        "popl %%es          \n\t"
        "popl %%fs          \n\t"
        "popl %%gs          \n\t"
        "iret               \n\t"
        "1: "
        :
        : "d" (&oldthread->arch.sp), "a" (newthread->arch.sp)
    );*/
}
#endif

#if ARCH_X86_64

void arch_context_switch(thread_t *oldthread, thread_t *newthread) {
#if X86_WITH_FPU
    fpu_context_switch(oldthread, newthread);
#endif

    x86_64_context_switch(&oldthread->arch.sp, newthread->arch.sp);
}
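
/*
 * x86_64_context_switch is implemented in assembly elsewhere in the port.
 * A minimal sketch of what it is assumed to look like, consistent with the
 * frame built in arch_thread_initialize and the SysV AMD64 calling
 * convention (%rdi = &oldthread->arch.sp, %rsi = newthread->arch.sp):
 *
 *   x86_64_context_switch:
 *       pushf                   // save the old thread's rflags
 *       pushq %rbx              // save the callee-saved registers
 *       pushq %rbp
 *       pushq %r12
 *       pushq %r13
 *       pushq %r14
 *       pushq %r15
 *       movq  %rsp,(%rdi)       // stash the old stack pointer
 *       movq  %rsi,%rsp         // switch to the new thread's stack
 *       popq  %r15              // restore the new thread's registers
 *       popq  %r14
 *       popq  %r13
 *       popq  %r12
 *       popq  %rbp
 *       popq  %rbx
 *       popf                    // restore the new thread's rflags
 *       retq                    // pop the new rip and jump to it
 */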
#endif