/*
 * Copyright (c) 2012-2015 Travis Geiselbrecht
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#include <sys/types.h>
#include <stdlib.h>
#include <lk/debug.h>
#include <lk/trace.h>
#include <assert.h>
#include <kernel/thread.h>
#include <arch/arm/cm.h>

#define LOCAL_TRACE 0

/* macros for saving and restoring a context switch frame, depending on which
 * version of the architecture we are built for */
#if (__CORTEX_M >= 0x03)

/* cortex-m3 and above (armv7-m) */
#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)

#define SAVE_REGS \
    "tst lr, #0x10;" /* check if thread used the FPU */ \
    "it eq;" \
    "vpusheq { s16-s31 };" /* also triggers lazy stacking of s0-s15 */ \
    "push { r4-r11, lr };" /* note: saves 9 words */
#define RESTORE_REGS_PC \
    "pop { r4-r11, lr };" \
    "tst lr, #0x10;" \
    "it eq;" \
    "vpopeq { s16-s31 };" \
    "bx lr;"

#else

#define SAVE_REGS "push { r4-r11, lr };" /* note: saves 9 words */
#define RESTORE_REGS_PC "pop { r4-r11, pc };"

#endif

#else

/* cortex-m0 and cortex-m0+ (armv6-m) */
#define SAVE_REGS \
    "push { r4-r7, lr };" \
    "mov r4, r8;" \
    "mov r5, r9;" \
    "mov r6, r10;" \
    "mov r7, r11;" \
    "push { r4-r7 };" /* note: saves 9 words */
#define RESTORE_REGS_PC \
    "pop { r4-r7 };" \
    "mov r8, r4;" \
    "mov r9, r5;" \
    "mov r10, r6;" \
    "mov r11, r7;" \
    "pop { r4-r7, pc };"

#endif
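
/* On exception entry the hardware has already stacked r0-r3, r12, lr, pc and
 * xPSR (and reserves space for s0-s15 and FPSCR when an extended frame is in
 * use), so only the callee-saved registers r4-r11 and the EXC_RETURN value in
 * lr need to be saved by software. The armv6-m variant shuffles r8-r11 through
 * low registers because Thumb-1 push/pop can only encode r0-r7 and lr/pc. */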

/* since we're implicitly uniprocessor, store a pointer to the current thread here */
thread_t *_current_thread;

static thread_t *_prev_running_thread = NULL;

static void initial_thread_func(void) __NO_RETURN;
static void initial_thread_func(void) {
    int ret;

    LTRACEF("thread %p calling %p with arg %p\n", _current_thread, _current_thread->entry, _current_thread->arg);
#if LOCAL_TRACE
    dump_thread(_current_thread);
#endif

    ret = _current_thread->entry(_current_thread->arg);

    LTRACEF("thread %p exiting with %d\n", _current_thread, ret);

    thread_exit(ret);
}

void arch_thread_initialize(struct thread *t) {
    LTRACEF("thread %p, stack %p\n", t, t->stack);

    /* find the top of the stack and align it on an 8 byte boundary */
    uint32_t *sp = (void *)ROUNDDOWN((vaddr_t)t->stack + t->stack_size, 8);

    struct arm_cm_exception_frame *frame = (void *)sp;
    frame--;
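
    /* lay down a synthetic exception frame at the top of the stack; the first
     * time the scheduler switches to this thread, _pendsv() restores from this
     * frame and the exception return unstacks the remainder of it, dropping
     * the thread into initial_thread_func() with the pc and psr set below */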

    /* arrange for pc to point to our starting routine */
    frame->pc = (uint32_t)&initial_thread_func;
    /* set thumb mode bit */
    frame->psr = xPSR_T_Msk;
    /* set EXC_RETURN value to thread mode using MSP and no FP */
    frame->exc_return = 0xfffffff9;

    t->arch.sp = (addr_t)frame;
}

static vaddr_t pendsv_swap_sp(vaddr_t old_frame) {
    /* make sure the stack is 8 byte aligned */
    DEBUG_ASSERT(((uintptr_t)__GET_FRAME() & 0x7) == 0);

    DEBUG_ASSERT_MSG(!spin_lock_held(&thread_lock),
                     "PENDSV: thread lock was held when preempted! pc %#x\n", ((struct arm_cm_exception_frame *)old_frame)->pc);

    DEBUG_ASSERT(_prev_running_thread != NULL);
    DEBUG_ASSERT(_current_thread != NULL);
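
    /* clear the local exclusive monitor so an exclusive (ldrex/strex) sequence
     * started by the outgoing thread cannot complete a strex in the incoming one */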
#if (__CORTEX_M >= 0x03) || (__CORTEX_SC >= 300)
    __CLREX();
#endif

    _prev_running_thread->arch.sp = old_frame;
    _prev_running_thread = NULL;
    return _current_thread->arch.sp;
}

/*
 * raw pendsv exception handler, triggered by arch_context_switch()
 * to do the actual switch.
 */
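/*
 * On PendSV entry the hardware has already pushed the interrupted thread's
 * caller-saved frame onto its stack. The handler saves the callee-saved
 * registers on top of that, hands the resulting stack pointer to
 * pendsv_swap_sp() to trade it for the new thread's saved stack pointer,
 * then restores from the new frame; the final exception return unstacks the
 * rest of that frame and resumes the new thread. The sub sp, #4 keeps the
 * stack 8 byte aligned for the AAPCS while the C code runs.
 */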
__NAKED void _pendsv(void) {
    __asm__ volatile(
        SAVE_REGS
        "mov r0, sp;"
        "sub sp, #4;" /* adjust the stack to be 8 byte aligned */
        "cpsid i;"
        "bl %c0;"
        "cpsie i;"
        "mov sp, r0;"
        RESTORE_REGS_PC
        :: "i" (pendsv_swap_sp)
    );
    __UNREACHABLE;
}

/*
 * The raw context switch routine. Called by the scheduler when it decides to switch.
 * Called either in the context of a thread yielding or blocking (interrupts disabled,
 * on the system stack), or at the end of an interrupt handler via thread_preempt()
 * on a thread that is being preempted (interrupts disabled, in handler mode).
 */
void arch_context_switch(struct thread *oldthread, struct thread *newthread) {
#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
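    /* FPCCR.LSPACT indicates a lazy FP state preservation is still pending for
     * the frame pointed to by FPCAR; CONTROL.FPCA indicates the current context
     * has an active FP context */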
    LTRACEF("FPCCR.LSPACT %lu, FPCAR 0x%x, CONTROL.FPCA %lu\n",
            FPU->FPCCR & FPU_FPCCR_LSPACT_Msk, FPU->FPCAR, __get_CONTROL() & CONTROL_FPCA_Msk);
#endif

    DEBUG_ASSERT(arch_ints_disabled());
    DEBUG_ASSERT(spin_lock_held(&thread_lock));

    const bool in_interrupt_context = arch_in_int_handler();

    /*
     * An interrupt handler might preempt a pending context switch,
     * but this should never happen in thread mode.
     */
    DEBUG_ASSERT(in_interrupt_context || (_prev_running_thread == NULL));

    /*
     * Since interrupt handlers could preempt PendSV and trigger additional
     * switches before the first switch happens, we need to remember which
     * thread was last running to ensure the context is saved to the correct
     * thread struct.
     */
    if (_prev_running_thread == NULL) {
        _prev_running_thread = oldthread;

        /*
         * Only trigger preempt if a context switch was not already pending.
         * This prevents a race where another interrupt could preempt PendSV
         * after it has started running, but before it has done the sp swap,
         * and mark it pending again, which could lead to a tail chained
         * second call to _pendsv() with _prev_running_thread set to NULL.
         */
        arm_cm_trigger_preempt();
    }

    /*
     * Make sure pendsv is queued up, either via the previous if statement
     * or via a nested preemption.
     */
    DEBUG_ASSERT(arm_cm_is_preempt_triggered());

    if (!in_interrupt_context) {
        /* we're in thread context, so jump to PendSV immediately */

        /* drop the lock and enable interrupts so PendSV can run */
        spin_unlock(&thread_lock);
        arch_enable_ints();

        /* the pending PendSV fires here as soon as interrupts are enabled;
         * execution resumes at this point when this thread is next scheduled */

        arch_disable_ints();
        spin_lock(&thread_lock);
    } else {
        /*
         * If we're in interrupt context, then we've come through
         * thread_preempt() from arm_cm_irq_exit(). The switch will happen when
         * the current handler exits and tail-chains to PendSV.
         */
    }
}

void arch_dump_thread(thread_t *t) {
    if (t->state != THREAD_RUNNING) {
        dprintf(INFO, "\tarch: ");
        dprintf(INFO, "sp 0x%lx", t->arch.sp);
#if (__FPU_PRESENT == 1) && (__FPU_USED == 1)
        const struct arm_cm_exception_frame *frame = (struct arm_cm_exception_frame *)t->arch.sp;
        const bool fpused = (frame->exc_return & 0x10) == 0;
        dprintf(INFO, ", fpused %u", fpused);
#endif
        dprintf(INFO, "\n");
    }
}