// © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
//
// SPDX-License-Identifier: BSD-3-Clause

#include <assert.h>
#include <hyptypes.h>
#if !defined(NDEBUG)
#include <string.h>
#endif

#include <atomic.h>
#include <compiler.h>
#include <object.h>
#include <panic.h>
#include <partition.h>
#include <preempt.h>
#include <scheduler.h>
#include <thread.h>
#include <trace.h>
#include <util.h>

#include <events/thread.h>

#include <asm/barrier.h>

#include "event_handlers.h"
#include "thread_arch.h"

// Externally visible for assembly
extern thread_t _Thread_local current_thread;

// The current thread
thread_t _Thread_local current_thread
	__attribute__((section(".tbss.current_thread")));

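// Object creation handler: record the thread's kind and parameters,
// allocate its kernel stack from the owning partition, and block the
// thread until it is activated.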
error_t
thread_standard_handle_object_create_thread(thread_create_t thread_create)
{
	error_t err = OK;
	thread_t *thread = thread_create.thread;

	assert(thread != NULL);

	thread->kind = thread_create.kind;
	thread->params = thread_create.params;

	size_t stack_size = (thread_create.stack_size != 0U)
				    ? thread_create.stack_size
				    : thread_stack_size_default;
	if (stack_size > THREAD_STACK_MAX_SIZE) {
		err = ERROR_ARGUMENT_SIZE;
		goto out;
	}

	if (!util_is_baligned(stack_size, thread_stack_alloc_align)) {
		err = ERROR_ARGUMENT_ALIGNMENT;
		goto out;
	}

	void_ptr_result_t stack = partition_alloc(
		thread->header.partition, stack_size, thread_stack_alloc_align);
	if (stack.e != OK) {
		err = stack.e;
		goto out;
	}

#if !defined(NDEBUG)
	// Fill the stack with a pattern so we can detect maximum stack
	// depth
	(void)memset_s(stack.r, stack_size, 0x57, stack_size);
#endif

	thread->stack_mem = (uintptr_t)stack.r;
	thread->stack_size = stack_size;

	scheduler_block_init(thread, SCHEDULER_BLOCK_THREAD_LIFECYCLE);

out:
	return err;
}

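// Creation unwind handler: free the kernel stack if it was allocated
// before creation failed.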
void
thread_standard_unwind_object_create_thread(error_t result,
					    thread_create_t create)
{
	thread_t *thread = create.thread;
	assert(thread != NULL);
	assert(result != OK);
	assert(atomic_load_relaxed(&thread->state) == THREAD_STATE_INIT);

	if (thread->stack_mem != 0U) {
		(void)partition_free(thread->header.partition,
				     (void *)thread->stack_mem,
				     thread->stack_size);
		thread->stack_mem = 0U;
	}
}

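// Object activation handler: map the kernel stack at its chosen virtual
// base, initialise the architecture-specific context, mark the thread
// READY with a reference to itself, and unblock it for scheduling.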
error_t
thread_standard_handle_object_activate_thread(thread_t *thread)
{
	error_t err = OK;

	assert(thread != NULL);

	// Get an appropriate address for the stack and map it there.
	thread->stack_base =
		trigger_thread_get_stack_base_event(thread->kind, thread);
	if (thread->stack_base == 0U) {
		err = ERROR_NOMEM;
		goto out;
	}

	assert(util_is_baligned(thread->stack_base, THREAD_STACK_MAP_ALIGN));

	err = thread_arch_map_stack(thread);
	if (err != OK) {
		thread->stack_base = 0U;
		goto out;
	}

	thread_arch_init_context(thread);

	// Put the thread into ready state and give it a reference to itself.
	// This reference is released in thread_exit(). At this point the
	// thread can only be deleted by another thread calling thread_kill().
	(void)object_get_thread_additional(thread);
	atomic_store_relaxed(&thread->state, THREAD_STATE_READY);

	// Remove the lifecycle block, which allows the thread to be scheduled
	// (assuming nothing else has blocked it).
	scheduler_lock(thread);
	if (scheduler_unblock(thread, SCHEDULER_BLOCK_THREAD_LIFECYCLE)) {
		scheduler_trigger();
	}
	scheduler_unlock(thread);
out:
	return err;
}

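// Object deactivation handler: unmap and free the kernel stack of a
// thread that either never started or has already exited.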
void
thread_standard_handle_object_deactivate_thread(thread_t *thread)
{
	assert(thread != NULL);

	thread_state_t state = atomic_load_relaxed(&thread->state);
	assert((state == THREAD_STATE_INIT) || (state == THREAD_STATE_EXITED));

	if (thread->stack_base != 0U) {
		thread_arch_unmap_stack(thread);
		thread->stack_base = 0U;
	}

	if (thread->stack_mem != 0U) {
		(void)partition_free(thread->header.partition,
				     (void *)thread->stack_mem,
				     thread->stack_size);
		thread->stack_mem = 0U;
	}
}

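// Default context-switch-pre handler; no work is needed here.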
error_t
thread_standard_handle_thread_context_switch_pre(void)
{
	return OK;
}

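// Return a pointer to the currently running thread.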
thread_t *
thread_get_self(void)
{
	return &current_thread;
}

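// Switch execution to the given thread. Must be called with preemption
// disabled; if the pre-switch event rejects the switch, the reference the
// caller passed for the target thread is released here.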
error_t
thread_switch_to(thread_t *thread, ticks_t schedtime)
{
	assert_preempt_disabled();

	thread_t *current = thread_get_self();
	assert(thread != current);

	TRACE_LOCAL(INFO, INFO, "thread: ctx switch from: {:#x} to: {:#x}",
		    (uintptr_t)current, (uintptr_t)thread);

	trigger_thread_save_state_event();
	error_t err =
		trigger_thread_context_switch_pre_event(thread, schedtime);
	if (compiler_unexpected(err != OK)) {
		object_put_thread(thread);
		goto out;
	}

	ticks_t prevticks = schedtime;
	thread_t *prev = thread_arch_switch_thread(thread, &schedtime);
	assert(prev != NULL);

	trigger_thread_context_switch_post_event(prev, schedtime, prevticks);
	object_put_thread(prev);

	trigger_thread_load_state_event(false);

	asm_context_sync_fence();
out:
	return err;
}

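// Mark a ready thread as killed and trigger the killed event exactly once.
// Killing an already killed or exited thread succeeds with no effect;
// killing a thread that has not been activated yet fails.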
error_t
thread_kill(thread_t *thread)
{
	assert(thread != NULL);

	error_t err;
	thread_state_t expected_state = THREAD_STATE_READY;
	if (atomic_compare_exchange_strong_explicit(
		    &thread->state, &expected_state, THREAD_STATE_KILLED,
		    memory_order_relaxed, memory_order_relaxed)) {
		trigger_thread_killed_event(thread);
		err = OK;
	} else if ((expected_state == THREAD_STATE_KILLED) ||
		   (expected_state == THREAD_STATE_EXITED)) {
		// Thread was already killed, or has exited
		err = OK;
	} else {
		// Thread had not started yet
		err = ERROR_OBJECT_STATE;
	}

	return err;
}

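// Return true if the thread has been killed but has not yet exited.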
bool
thread_is_dying(const thread_t *thread)
{
	assert(thread != NULL);
	return atomic_load_relaxed(&thread->state) == THREAD_STATE_KILLED;
}

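// Return true if the thread has finished running and exited.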
bool
thread_has_exited(const thread_t *thread)
{
	assert(thread != NULL);
	return atomic_load_relaxed(&thread->state) == THREAD_STATE_EXITED;
}

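// Terminate the calling thread: mark it exited, block it in the scheduler,
// drop its self-reference and yield. It should not run again.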
noreturn void
thread_exit(void)
{
	thread_t *thread = thread_get_self();
	assert(thread != NULL);
	preempt_disable();

	atomic_store_relaxed(&thread->state, THREAD_STATE_EXITED);

	scheduler_lock_nopreempt(thread);
	scheduler_block(thread, SCHEDULER_BLOCK_THREAD_LIFECYCLE);
	scheduler_unlock_nopreempt(thread);

	// TODO: wake up anyone waiting in thread_join()

	trigger_thread_exited_event();

	// Release the thread's reference to itself (note that the CPU still
	// holds a reference, so this won't delete it immediately). This
	// matches the get in thread_create_finished().
	object_put_thread(thread);

	scheduler_yield();

	// This thread should never run again, unless it is explicitly reset
	// (which will prevent a switch returning here).
	panic("Switched to an exited thread!");
}

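// Before returning to userspace, exit immediately if this thread has been
// killed.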
void
thread_standard_handle_thread_exit_to_user(void)
{
	thread_t *thread = thread_get_self();
	assert(thread != NULL);

	thread_state_t state = atomic_load_relaxed(&thread->state);
	if (compiler_unexpected(state == THREAD_STATE_KILLED)) {
		thread_exit();
	} else {
		assert(state == THREAD_STATE_READY);
	}
}