1 /*
2 * Copyright (c) 2008-2015 Travis Geiselbrecht
3 *
4 * Use of this source code is governed by a MIT-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/MIT
7 */
8 #pragma once
9
10 #include <arch/defines.h>
11 #include <arch/ops.h>
12 #include <arch/thread.h>
13 #include <arch/arch_ops.h>
14 #include <kernel/spinlock.h>
15 #include <kernel/wait.h>
16 #include <lk/compiler.h>
17 #include <lk/debug.h>
18 #include <lk/list.h>
19 #include <sys/types.h>
20
21 #if WITH_KERNEL_VM
22 /* forward declaration */
23 typedef struct vmm_aspace vmm_aspace_t;
24 #endif
25
26 __BEGIN_CDECLS
27
/* debug-enable runtime checks: all on when the kernel is built at debug level > 1 */
#if LK_DEBUGLEVEL > 1
#define THREAD_STATS 1                /* keep per-cpu scheduler counters (struct thread_stats below) */
#define THREAD_STACK_HIGHWATER 1      /* track peak stack usage -- presumably via stack fill pattern; confirm in thread.c */
#define THREAD_STACK_BOUNDS_CHECK 1   /* check for stack overruns (see THREAD_FLAG_DEBUG_STACK_BOUNDS_CHECK) */
#ifndef THREAD_STACK_PADDING_SIZE
#define THREAD_STACK_PADDING_SIZE 256 /* bytes of guard padding per stack; overridable by the build */
#endif
#endif
37
/* scheduler states a thread moves through during its lifetime */
enum thread_state {
    THREAD_SUSPENDED = 0, /* created but not yet started (see thread_resume) */
    THREAD_READY,         /* runnable, waiting in a run queue for a cpu */
    THREAD_RUNNING,       /* currently executing on a cpu */
    THREAD_BLOCKED,       /* waiting on a wait_queue (blocking_wait_queue below) */
    THREAD_SLEEPING,      /* in a timed thread_sleep() */
    THREAD_DEATH,         /* has exited; waiting to be reaped or joined */
};
46
/* signature of a thread entry point; its return value becomes the thread's exit code */
typedef int (*thread_start_routine)(void *arg);

/* thread local storage: one uintptr_t slot per enabled library.
 * NOTE: slot order (and MAX_TLS_ENTRY) varies with the build configuration. */
enum thread_tls_list {
#ifdef WITH_LIB_CONSOLE
    TLS_ENTRY_CONSOLE, // current console
#endif
#ifdef WITH_LIB_UTHREAD
    TLS_ENTRY_UTHREAD,
#endif
#ifdef WITH_LIB_LKUSER
    TLS_ENTRY_LKUSER,
#endif
    MAX_TLS_ENTRY // slot count; sizes thread_t's tls[] array
};
62
/* bits for thread_t.flags */
#define THREAD_FLAG_DETACHED (1<<0)     /* no thread_join() expected; reaped automatically on exit */
#define THREAD_FLAG_FREE_STACK (1<<1)   /* stack was kernel-allocated; free it when the thread dies */
#define THREAD_FLAG_FREE_STRUCT (1<<2)  /* thread_t was kernel-allocated; free it when the thread dies */
#define THREAD_FLAG_REAL_TIME (1<<3)    /* set via thread_set_real_time() */
#define THREAD_FLAG_IDLE (1<<4)         /* this is a cpu's idle thread */
#define THREAD_FLAG_DEBUG_STACK_BOUNDS_CHECK (1<<5) /* stack overrun checking enabled for this thread */

#define THREAD_MAGIC (0x74687264) // 'thrd' -- stamped into thread_t.magic for validity checks
71
/* per-thread control block.
 * NOTE: field order matters -- arch context-switch code may reference fields
 * by offset; do not reorder without auditing arch/. */
typedef struct thread {
    int magic;                          /* THREAD_MAGIC while the structure is valid */
    struct list_node thread_list_node;  /* membership in the global thread list */

    /* active bits */
    struct list_node queue_node;        /* run queue / wait queue linkage */
    int priority;                       /* LOWEST_PRIORITY .. HIGHEST_PRIORITY */
    enum thread_state state;
    int remaining_quantum;              /* scheduler ticks left in the current time slice */
    unsigned int flags;                 /* THREAD_FLAG_* bits */
#if WITH_SMP
    int curr_cpu;                       /* cpu the thread is running (or last ran) on */
    int pinned_cpu; /* only run on pinned_cpu if >= 0 */
#endif
#if WITH_KERNEL_VM
    vmm_aspace_t *aspace;               /* address space the thread runs in, if any */
#endif

    /* if blocked, a pointer to the wait queue */
    struct wait_queue *blocking_wait_queue;
    status_t wait_queue_block_ret;      /* status returned to the thread when it is unblocked */

    /* architecture stuff: saved register context for context switch */
    struct arch_thread arch;

    /* stack stuff */
    void *stack;                        /* base of the kernel stack */
    size_t stack_size;                  /* size of the stack in bytes */

    /* entry point */
    thread_start_routine entry;
    void *arg;                          /* argument handed to entry() */

    /* return code */
    int retcode;                        /* value passed to thread_exit() */
    struct wait_queue retcode_wait_queue; /* presumably where thread_join() callers wait; confirm in thread.c */

    /* thread local storage */
    uintptr_t tls[MAX_TLS_ENTRY];

    char name[32];                      /* NUL-terminated debug name */
} thread_t;
114
/* accessors for the cpu affinity fields; on non-SMP builds they
 * collapse to constants (current cpu is always 0, nothing is pinned) */
#if WITH_SMP
#define thread_curr_cpu(t) ((t)->curr_cpu)
#define thread_pinned_cpu(t) ((t)->pinned_cpu)
#define thread_set_curr_cpu(t,c) ((t)->curr_cpu = (c))
#define thread_set_pinned_cpu(t, c) ((t)->pinned_cpu = (c))
#else
#define thread_curr_cpu(t) (0)
#define thread_pinned_cpu(t) (-1)   /* -1: not pinned */
#define thread_set_curr_cpu(t,c) do {} while(0)
#define thread_set_pinned_cpu(t, c) do {} while(0)
#endif
126
/* thread priority: 0 (lowest) .. NUM_PRIORITIES-1 (highest) */
#define NUM_PRIORITIES 32
#define LOWEST_PRIORITY 0
#define HIGHEST_PRIORITY (NUM_PRIORITIES - 1)
#define DPC_PRIORITY (NUM_PRIORITIES - 2)       /* deferred-procedure-call worker thread */
#define IDLE_PRIORITY LOWEST_PRIORITY
#define LOW_PRIORITY (NUM_PRIORITIES / 4)
#define DEFAULT_PRIORITY (NUM_PRIORITIES / 2)
#define HIGH_PRIORITY ((NUM_PRIORITIES / 4) * 3)

/* stack size: arch default unless the target overrides it */
#ifdef CUSTOM_DEFAULT_STACK_SIZE
#define DEFAULT_STACK_SIZE CUSTOM_DEFAULT_STACK_SIZE
#else
#define DEFAULT_STACK_SIZE ARCH_DEFAULT_STACK_SIZE
#endif
143
/* functions */
void thread_init_early(void);
void thread_init(void);
void thread_become_idle(void) __NO_RETURN;          /* turn the calling thread into this cpu's idle thread */
void thread_secondary_cpu_init_early(void);
void thread_secondary_cpu_entry(void) __NO_RETURN;
void thread_set_name(const char *name);             /* rename the current thread */
void thread_set_priority(int priority);             /* reprioritize the current thread */
/* allocate and initialize a thread; it starts suspended -- call thread_resume() to run it */
thread_t *thread_create(const char *name, thread_start_routine entry, void *arg, int priority, size_t stack_size);
/* as thread_create(), but the caller may supply the thread_t and/or stack storage */
thread_t *thread_create_etc(thread_t *t, const char *name, thread_start_routine entry, void *arg, int priority, void *stack, size_t stack_size);
status_t thread_resume(thread_t *);
void thread_exit(int retcode) __NO_RETURN;          /* terminate the calling thread with retcode */
void thread_sleep(lk_time_t delay);                 /* block the caller for 'delay' (lk_time_t units) */
status_t thread_detach(thread_t *t);                /* mark t detached: no join needed, auto-reaped */
status_t thread_join(thread_t *t, int *retcode, lk_time_t timeout); /* wait for t to exit; optionally collect retcode */
status_t thread_detach_and_resume(thread_t *t);
status_t thread_set_real_time(thread_t *t);         /* set THREAD_FLAG_REAL_TIME on t */

/* debug dump routines */
void dump_thread(thread_t *t);
void arch_dump_thread(thread_t *t);
void dump_all_threads(void);
void dump_all_threads_unlocked(void);
166
/* scheduler routines */
void thread_yield(void);    /* give up the cpu voluntarily */
void thread_preempt(void);  /* get preempted (inserted into head of run queue) */
void thread_block(void);    /* block on something and reschedule */
void thread_unblock(thread_t *t, bool resched); /* go back in the run queue; resched: switch immediately */

#ifdef WITH_LIB_UTHREAD
/* hook for the uthread library to participate in context switches */
void uthread_context_switch(thread_t *oldthread, thread_t *newthread);
#endif

/* called on every timer tick for the scheduler to do quantum expiration */
struct timer;
enum handler_return thread_timer_tick(struct timer *, lk_time_t now, void *arg);
180
181 /* the current thread */
get_current_thread(void)182 static inline thread_t *get_current_thread(void) {
183 return arch_get_current_thread();
184 }
185
/* record t as the thread executing on the calling cpu (delegates to the arch layer) */
static inline void set_current_thread(thread_t *t) {
    thread_t *incoming = t;
    arch_set_current_thread(incoming);
}
189
/* scheduler lock: global spinlock guarding thread state and the run queues */
extern spin_lock_t thread_lock;

/* acquire/release the scheduler lock with interrupts disabled.
 * NOTE: THREAD_LOCK declares a local spin_lock_saved_state_t named 'state'
 * in the current scope; pass the same identifier to THREAD_UNLOCK. */
#define THREAD_LOCK(state) spin_lock_saved_state_t state; spin_lock_irqsave(&thread_lock, state)
#define THREAD_UNLOCK(state) spin_unlock_irqrestore(&thread_lock, state)
195
thread_lock_held(void)196 static inline bool thread_lock_held(void) {
197 return spin_lock_held(&thread_lock);
198 }
199
200 /* thread local storage */
tls_get(uint entry)201 static inline __ALWAYS_INLINE uintptr_t tls_get(uint entry) {
202 return get_current_thread()->tls[entry];
203 }
204
__tls_set(uint entry,uintptr_t val)205 static inline __ALWAYS_INLINE uintptr_t __tls_set(uint entry, uintptr_t val) {
206 uintptr_t oldval = get_current_thread()->tls[entry];
207 get_current_thread()->tls[entry] = val;
208 return oldval;
209 }
210
/* checked front end for __tls_set(): rejects out-of-range slots at compile
 * time, so 'e' must be a compile-time constant. Uses a GCC/Clang statement
 * expression to forward __tls_set()'s return value (the previous slot value). */
#define tls_set(e,v) \
    ({ \
        STATIC_ASSERT((e) < MAX_TLS_ENTRY); \
        __tls_set(e, v); \
    })
216
/* thread level statistics */
#if THREAD_STATS
/* per-cpu scheduler counters; indexed by cpu number in the thread_stats[] array */
struct thread_stats {
    lk_bigtime_t idle_time;           /* accumulated time spent idle */
    lk_bigtime_t last_idle_timestamp; /* timestamp when the cpu last went idle */
    ulong reschedules;
    ulong context_switches;
    ulong preempts;
    ulong yields;
    ulong interrupts; /* platform code increment this */
    ulong timer_ints; /* timer code increment this */
    ulong timers; /* timer code increment this */

#if WITH_SMP
    ulong reschedule_ipis; /* cross-cpu reschedule interrupts */
#endif
};

extern struct thread_stats thread_stats[SMP_MAX_CPUS];

/* bump a named counter for the calling cpu; compiles to nothing when stats are off */
#define THREAD_STATS_INC(name) do { thread_stats[arch_curr_cpu_num()].name++; } while(0)

#else

#define THREAD_STATS_INC(name) do { } while (0)

#endif
244
245 __END_CDECLS
246