/*
 * Copyright (c) 2015 Travis Geiselbrecht
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */
#include <assert.h>
#include <lk/debug.h>
#include <lk/trace.h>
#include <sys/types.h>
#include <string.h>
#include <stdlib.h>
#include <kernel/thread.h>
#include <arch/riscv.h>

#define LOCAL_TRACE 0

struct thread *_current_thread;

static void initial_thread_func(void) __NO_RETURN;
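/* trampoline that every new thread runs first: entered via the thread's initial
 * context switch frame, with interrupts disabled and the thread lock held.
 */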
static void initial_thread_func(void) {
    DEBUG_ASSERT(arch_ints_disabled());

    thread_t *ct = get_current_thread();

#if LOCAL_TRACE
    LTRACEF("thread %p calling %p with arg %p\n", ct, ct->entry, ct->arg);
    dump_thread(ct);
#endif

    /* release the thread lock that was implicitly held across the reschedule */
    spin_unlock(&thread_lock);
    arch_enable_ints();

    int ret = ct->entry(ct->arg);

    LTRACEF("thread %p exiting with %d\n", ct, ret);

    thread_exit(ret);
}

void arch_thread_initialize(thread_t *t) {
    /* zero out the thread context */
    memset(&t->arch.cs_frame, 0, sizeof(t->arch.cs_frame));

    /* if an FPU is implemented, the zeroed frame is also the correct default fpu state for the thread */

    /* make sure the top of the stack is 16 byte aligned */
    vaddr_t stack_top = ROUNDDOWN((vaddr_t)t->stack + t->stack_size, 16);

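    /* seed the context switch frame: sp points at the new stack and ra points at the
     * trampoline, so the first switch into this thread 'returns' into initial_thread_func.
     */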
    t->arch.cs_frame.sp = stack_top;
    t->arch.cs_frame.ra = (vaddr_t)&initial_thread_func;

    LTRACEF("t %p (%s) stack top %#lx entry %p arg %p\n", t, t->name, stack_top, t->entry, t->arg);
}

void arch_context_switch(thread_t *oldthread, thread_t *newthread) {
    DEBUG_ASSERT(arch_ints_disabled());

    LTRACEF("old %p (%s), new %p (%s)\n", oldthread, oldthread->name, newthread, newthread->name);

    /* floating point context switch */
#if RISCV_FPU
    /* based on a combination of current fpu dirty state in hardware and saved state
     * on the new thread, do a partial or full context switch
     */
    ulong status = riscv_csr_read(RISCV_CSR_XSTATUS);
    ulong hw_state = status & RISCV_CSR_XSTATUS_FS_MASK;
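    /* the FS field in xstatus tracks fpu register state: OFF, INITIAL (never touched),
     * CLEAN (loaded but unmodified), or DIRTY (modified since the last save).
     */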

    LTRACEF("old fpu dirty %d, new fpu dirty %d, status %#lx, sd %d\n", oldthread->arch.cs_frame.fpu_dirty,
            newthread->arch.cs_frame.fpu_dirty, hw_state >> RISCV_CSR_XSTATUS_FS_SHIFT,
            (status & RISCV_CSR_XSTATUS_SD) ? 1 : 0);

    /* if the hardware is currently in the dirty state, save the fpu regs
     * and mark the old thread as dirty.
     */
    switch (hw_state) {
        case RISCV_CSR_XSTATUS_FS_DIRTY:
            oldthread->arch.cs_frame.fpu_dirty = true;
            riscv_fpu_save(&oldthread->arch.cs_frame.fpu);
            break;
        case RISCV_CSR_XSTATUS_FS_INITIAL:
            oldthread->arch.cs_frame.fpu_dirty = false;
            break;
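        /* note: there is no case for RISCV_CSR_XSTATUS_FS_CLEAN; restored but unmodified
         * state still matches what is already saved, so fpu_dirty is left unchanged.
         */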
        case RISCV_CSR_XSTATUS_FS_OFF:
            // TODO: handle fpu being disabled
            PANIC_UNIMPLEMENTED;
    }

    if (newthread->arch.cs_frame.fpu_dirty) {
        /* if the new thread has dirty saved state, load it here and mark the cpu as in the
         * clean state, which will transition to dirty if any regs are modified
         */
        riscv_fpu_restore(&newthread->arch.cs_frame.fpu);

        /* at this point the FPU hardware should be in the dirty state because of the above routine */

        /* TODO: see if it's totally safe to reduce to a single instruction based on moving from DIRTY -> CLEAN */
        riscv_csr_clear(RISCV_CSR_XSTATUS, RISCV_CSR_XSTATUS_FS_MASK);
        riscv_csr_set(RISCV_CSR_XSTATUS, RISCV_CSR_XSTATUS_FS_CLEAN);
    } else {
        /* if the thread previously hadn't dirtied the state, zero out the fpu
         * state and mark hardware as initial.
         */
        riscv_fpu_zero();

        /* at this point the FPU hardware should be in the dirty state because of the above routine */

        /* TODO: see if it's totally safe to reduce to a single instruction based on moving from DIRTY -> INITIAL */
        riscv_csr_clear(RISCV_CSR_XSTATUS, RISCV_CSR_XSTATUS_FS_MASK);
        riscv_csr_set(RISCV_CSR_XSTATUS, RISCV_CSR_XSTATUS_FS_INITIAL);
    }
#endif

    /* integer context switch.
     * the stack is swapped as part of this routine, so it returns only when
     * the current thread's context is switched back to.
     */
    riscv_context_switch(&oldthread->arch.cs_frame, &newthread->arch.cs_frame);
}

void arch_dump_thread(thread_t *t) {
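    /* the context switch frame only holds meaningful values for threads that are
     * not currently running, since it is written at switch-out time.
     */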
    if (t->state != THREAD_RUNNING) {
        dprintf(INFO, "\tarch: ");
#if RISCV_FPU
        dprintf(INFO, "fpu dirty %u, ", t->arch.cs_frame.fpu_dirty);
#endif
        dprintf(INFO, "sp %#lx\n", t->arch.cs_frame.sp);
    }
}