/*
 * Copyright (c) 2008-2015 Travis Geiselbrecht
 *
 * Use of this source code is governed by a MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT
 */

/**
 * @file
 * @brief Kernel threading
 *
 * This file is the core kernel threading interface.
 *
 * @defgroup thread Threads
 * @{
 */
#include <kernel/thread.h>

#include <assert.h>
#include <kernel/debug.h>
#include <kernel/mp.h>
#include <kernel/timer.h>
#include <lib/heap.h>
#include <lk/debug.h>
#include <lk/err.h>
#include <lk/list.h>
#include <malloc.h>
#include <platform.h>
#include <printf.h>
#include <string.h>
#include <target.h>
#if WITH_KERNEL_VM
#include <kernel/vm.h>
#endif

#if THREAD_STATS
struct thread_stats thread_stats[SMP_MAX_CPUS];
#endif

#define STACK_DEBUG_BYTE (0x99)
#define STACK_DEBUG_WORD (0x99999999)

#define DEBUG_THREAD_CONTEXT_SWITCH 0

/* global thread list */
static struct list_node thread_list;

/* master thread spinlock */
spin_lock_t thread_lock = SPIN_LOCK_INITIAL_VALUE;

/* the run queue */
static struct list_node run_queue[NUM_PRIORITIES];
static uint32_t run_queue_bitmap;

/* make sure the bitmap is large enough to cover our number of priorities */
STATIC_ASSERT(NUM_PRIORITIES <= sizeof(run_queue_bitmap) * 8);

/* the idle thread(s) (statically allocated) */
#if WITH_SMP
static thread_t _idle_threads[SMP_MAX_CPUS];
#define idle_thread(cpu) (&_idle_threads[cpu])
#else
static thread_t _idle_thread;
#define idle_thread(cpu) (&_idle_thread)
#endif

/* local routines */
static void thread_resched(void);
static void idle_thread_routine(void) __NO_RETURN;

#if PLATFORM_HAS_DYNAMIC_TIMER
/* preemption timer */
static timer_t preempt_timer[SMP_MAX_CPUS];
#endif

/* run queue manipulation */
static void insert_in_run_queue_head(thread_t *t) {
    DEBUG_ASSERT(t->magic == THREAD_MAGIC);
    DEBUG_ASSERT(t->state == THREAD_READY);
    DEBUG_ASSERT(!list_in_list(&t->queue_node));
    DEBUG_ASSERT(arch_ints_disabled());
    DEBUG_ASSERT(spin_lock_held(&thread_lock));

    list_add_head(&run_queue[t->priority], &t->queue_node);
    run_queue_bitmap |= (1<<t->priority);
}

static void insert_in_run_queue_tail(thread_t *t) {
    DEBUG_ASSERT(t->magic == THREAD_MAGIC);
    DEBUG_ASSERT(t->state == THREAD_READY);
    DEBUG_ASSERT(!list_in_list(&t->queue_node));
    DEBUG_ASSERT(arch_ints_disabled());
    DEBUG_ASSERT(spin_lock_held(&thread_lock));

    list_add_tail(&run_queue[t->priority], &t->queue_node);
    run_queue_bitmap |= (1<<t->priority);
}

static void wakeup_cpu_for_thread(thread_t *t)
{
    /* Wake up the core to which this thread is pinned
     * or wake up all if thread is unpinned */
    int pinned_cpu = thread_pinned_cpu(t);
    if (pinned_cpu < 0)
        mp_reschedule(MP_CPU_ALL_BUT_LOCAL, 0);
    else
        mp_reschedule(1U << pinned_cpu, 0);
}

static void init_thread_struct(thread_t *t, const char *name) {
    memset(t, 0, sizeof(thread_t));
    t->magic = THREAD_MAGIC;
    thread_set_pinned_cpu(t, -1);
    strlcpy(t->name, name, sizeof(t->name));
}

/**
 * @brief Create a new thread
 *
 * This function creates a new thread. The thread is initially suspended, so you
 * need to call thread_resume() to execute it.
 *
 * @param name Name of thread
 * @param entry Entry point of thread
 * @param arg Arbitrary argument passed to entry()
 * @param priority Execution priority for the thread.
 * @param stack_size Stack size for the thread.
 *
 * Thread priority is an integer from 0 (lowest) to 31 (highest). Some standard
 * priorities are defined in <kernel/thread.h>:
 *
 * HIGHEST_PRIORITY
 * DPC_PRIORITY
 * HIGH_PRIORITY
 * DEFAULT_PRIORITY
 * LOW_PRIORITY
 * IDLE_PRIORITY
 * LOWEST_PRIORITY
 *
 * Stack size is typically set to DEFAULT_STACK_SIZE
 *
 * @return Pointer to thread object, or NULL on failure.
 */
thread_t *thread_create_etc(thread_t *t, const char *name, thread_start_routine entry, void *arg, int priority, void *stack, size_t stack_size) {
    unsigned int flags = 0;

    if (!t) {
        t = malloc(sizeof(thread_t));
        if (!t)
            return NULL;
        flags |= THREAD_FLAG_FREE_STRUCT;
    }

    init_thread_struct(t, name);

    t->entry = entry;
    t->arg = arg;
    t->priority = priority;
    t->state = THREAD_SUSPENDED;
    t->blocking_wait_queue = NULL;
    t->wait_queue_block_ret = NO_ERROR;
    thread_set_curr_cpu(t, -1);

    t->retcode = 0;
    wait_queue_init(&t->retcode_wait_queue);

#if WITH_KERNEL_VM
    t->aspace = NULL;
#endif

    /* create the stack */
    if (!stack) {
#if THREAD_STACK_BOUNDS_CHECK
        stack_size += THREAD_STACK_PADDING_SIZE;
        flags |= THREAD_FLAG_DEBUG_STACK_BOUNDS_CHECK;
#endif
        t->stack = malloc(stack_size);
        if (!t->stack) {
            if (flags & THREAD_FLAG_FREE_STRUCT)
                free(t);
            return NULL;
        }
        flags |= THREAD_FLAG_FREE_STACK;
#if THREAD_STACK_BOUNDS_CHECK
        memset(t->stack, STACK_DEBUG_BYTE, THREAD_STACK_PADDING_SIZE);
#endif
    } else {
        t->stack = stack;
    }
#if THREAD_STACK_HIGHWATER
    if (flags & THREAD_FLAG_DEBUG_STACK_BOUNDS_CHECK) {
        memset(t->stack + THREAD_STACK_PADDING_SIZE, STACK_DEBUG_BYTE,
               stack_size - THREAD_STACK_PADDING_SIZE);
    } else {
        memset(t->stack, STACK_DEBUG_BYTE, stack_size);
    }
#endif

    t->stack_size = stack_size;

    /* save whether or not we need to free the thread struct and/or stack */
    t->flags = flags;

    /* inherit thread local storage from the parent */
    thread_t *current_thread = get_current_thread();
    int i;
    for (i=0; i < MAX_TLS_ENTRY; i++)
        t->tls[i] = current_thread->tls[i];

    /* set up the initial stack frame */
    arch_thread_initialize(t);

    /* add it to the global thread list */
    THREAD_LOCK(state);
    list_add_head(&thread_list, &t->thread_list_node);
    THREAD_UNLOCK(state);

    return t;
}

thread_t *thread_create(const char *name, thread_start_routine entry, void *arg, int priority, size_t stack_size) {
    return thread_create_etc(NULL, name, entry, arg, priority, NULL, stack_size);
}
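
/*
 * Illustrative usage sketch (not part of the original source): creating,
 * starting, and joining a thread with the API above. The worker entry
 * function my_worker and its argument are hypothetical names used only
 * for this example.
 *
 * @code
 *   static int my_worker(void *arg) {
 *       // ... do some work ...
 *       return 0;              // return code is collected by thread_join()
 *   }
 *
 *   thread_t *t = thread_create("worker", my_worker, NULL,
 *                               DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
 *   if (t) {
 *       thread_resume(t);      // threads are created suspended
 *       int retcode;
 *       thread_join(t, &retcode, INFINITE_TIME);
 *   }
 * @endcode
 */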

/**
 * @brief Flag a thread as real time
 *
 * @param t Thread to flag
 *
 * @return NO_ERROR on success
 */
status_t thread_set_real_time(thread_t *t) {
    if (!t)
        return ERR_INVALID_ARGS;

    DEBUG_ASSERT(t->magic == THREAD_MAGIC);

    THREAD_LOCK(state);
#if PLATFORM_HAS_DYNAMIC_TIMER
    if (t == get_current_thread()) {
        /* if we're currently running, cancel the preemption timer. */
        timer_cancel(&preempt_timer[arch_curr_cpu_num()]);
    }
#endif
    t->flags |= THREAD_FLAG_REAL_TIME;
    THREAD_UNLOCK(state);

    return NO_ERROR;
}
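
/*
 * Illustrative sketch (not part of the original source): a thread that must
 * not be interrupted by the periodic preemption tick can flag itself as real
 * time; thread_timer_tick() below does not force a reschedule for such
 * threads. The entry function irq_latency_worker is a hypothetical name used
 * only for this example.
 *
 * @code
 *   static int irq_latency_worker(void *arg) {
 *       thread_set_real_time(get_current_thread());
 *       // ... latency-sensitive work, no quantum-based preemption ...
 *       return 0;
 *   }
 * @endcode
 */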

static bool thread_is_realtime(thread_t *t) {
    return (t->flags & THREAD_FLAG_REAL_TIME) && t->priority > DEFAULT_PRIORITY;
}

static bool thread_is_idle(thread_t *t) {
    return !!(t->flags & THREAD_FLAG_IDLE);
}

static bool thread_is_real_time_or_idle(thread_t *t) {
    return !!(t->flags & (THREAD_FLAG_REAL_TIME | THREAD_FLAG_IDLE));
}

/**
 * @brief Make a suspended thread executable.
 *
 * This function is typically called to start a thread which has just been
 * created with thread_create()
 *
 * @param t Thread to resume
 *
 * @return NO_ERROR on success, ERR_NOT_SUSPENDED if thread was not suspended.
 */
status_t thread_resume(thread_t *t) {
    DEBUG_ASSERT(t->magic == THREAD_MAGIC);
    DEBUG_ASSERT(t->state != THREAD_DEATH);

    bool resched = false;
    bool ints_disabled = arch_ints_disabled();
    THREAD_LOCK(state);
    if (t->state == THREAD_SUSPENDED) {
        t->state = THREAD_READY;
        insert_in_run_queue_head(t);
        if (!ints_disabled) /* HACK: don't resched into the bootstrap thread before the idle thread is set up */
            resched = true;
    }

    wakeup_cpu_for_thread(t);

    THREAD_UNLOCK(state);

    if (resched)
        thread_yield();

    return NO_ERROR;
}

status_t thread_detach_and_resume(thread_t *t) {
    status_t err;
    err = thread_detach(t);
    if (err < 0)
        return err;
    return thread_resume(t);
}

status_t thread_join(thread_t *t, int *retcode, lk_time_t timeout) {
    DEBUG_ASSERT(t->magic == THREAD_MAGIC);

    THREAD_LOCK(state);

    if (t->flags & THREAD_FLAG_DETACHED) {
        /* the thread is detached, go ahead and exit */
        THREAD_UNLOCK(state);
        return ERR_THREAD_DETACHED;
    }

    /* wait for the thread to die */
    if (t->state != THREAD_DEATH) {
        status_t err = wait_queue_block(&t->retcode_wait_queue, timeout);
        if (err < 0) {
            THREAD_UNLOCK(state);
            return err;
        }
    }

    DEBUG_ASSERT(t->magic == THREAD_MAGIC);
    DEBUG_ASSERT(t->state == THREAD_DEATH);
    DEBUG_ASSERT(t->blocking_wait_queue == NULL);
    DEBUG_ASSERT(!list_in_list(&t->queue_node));

    /* save the return code */
    if (retcode)
        *retcode = t->retcode;

    /* remove it from the master thread list */
    list_delete(&t->thread_list_node);

    /* clear the structure's magic */
    t->magic = 0;

    THREAD_UNLOCK(state);

    /* free its stack and the thread structure itself */
    if (t->flags & THREAD_FLAG_FREE_STACK && t->stack)
        free(t->stack);

    if (t->flags & THREAD_FLAG_FREE_STRUCT)
        free(t);

    return NO_ERROR;
}

status_t thread_detach(thread_t *t) {
    DEBUG_ASSERT(t->magic == THREAD_MAGIC);

    THREAD_LOCK(state);

    /* if another thread is blocked inside thread_join() on this thread,
     * wake them up with a specific return code */
    wait_queue_wake_all(&t->retcode_wait_queue, false, ERR_THREAD_DETACHED);

    /* if it's already dead, then just do what join would have and exit */
    if (t->state == THREAD_DEATH) {
        t->flags &= ~THREAD_FLAG_DETACHED; /* makes sure thread_join continues */
        THREAD_UNLOCK(state);
        return thread_join(t, NULL, 0);
    } else {
        t->flags |= THREAD_FLAG_DETACHED;
        THREAD_UNLOCK(state);
        return NO_ERROR;
    }
}

/**
 * @brief Terminate the current thread
 *
 * Current thread exits with the specified return code.
 *
 * This function does not return.
 */
void thread_exit(int retcode) {
    thread_t *current_thread = get_current_thread();

    DEBUG_ASSERT(current_thread->magic == THREAD_MAGIC);
    DEBUG_ASSERT(current_thread->state == THREAD_RUNNING);
    DEBUG_ASSERT(!thread_is_idle(current_thread));

    // dprintf("thread_exit: current %p\n", current_thread);

    THREAD_LOCK(state);

    /* enter the dead state */
    current_thread->state = THREAD_DEATH;
    current_thread->retcode = retcode;

    /* if we're detached, then do our teardown here */
    if (current_thread->flags & THREAD_FLAG_DETACHED) {
        /* remove it from the master thread list */
        list_delete(&current_thread->thread_list_node);

        /* clear the structure's magic */
        current_thread->magic = 0;

        /* free its stack and the thread structure itself */
        if (current_thread->flags & THREAD_FLAG_FREE_STACK && current_thread->stack) {
            heap_delayed_free(current_thread->stack);

            /* make sure it's not going to get a bounds check performed on the half-freed stack */
            current_thread->flags &= ~THREAD_FLAG_DEBUG_STACK_BOUNDS_CHECK;
        }

        if (current_thread->flags & THREAD_FLAG_FREE_STRUCT)
            heap_delayed_free(current_thread);
    } else {
        /* signal if anyone is waiting */
        wait_queue_wake_all(&current_thread->retcode_wait_queue, false, 0);
    }

    /* reschedule */
    thread_resched();

    panic("somehow fell through thread_exit()\n");
}

static void idle_thread_routine(void) {
    for (;;)
        arch_idle();
}

static thread_t *get_top_thread(int cpu) {
    thread_t *newthread;
    uint32_t local_run_queue_bitmap = run_queue_bitmap;

    while (local_run_queue_bitmap) {
        /* find the first (remaining) queue with a thread in it */
        uint next_queue = sizeof(run_queue_bitmap) * 8 - 1 - __builtin_clz(local_run_queue_bitmap);

        list_for_every_entry(&run_queue[next_queue], newthread, thread_t, queue_node) {
#if WITH_SMP
            if (newthread->pinned_cpu < 0 || newthread->pinned_cpu == cpu)
#endif
            {
                list_delete(&newthread->queue_node);

                if (list_is_empty(&run_queue[next_queue]))
                    run_queue_bitmap &= ~(1<<next_queue);

                return newthread;
            }
        }

        local_run_queue_bitmap &= ~(1<<next_queue);
    }
    /* no threads to run, select the idle thread for this cpu */
    return idle_thread(cpu);
}
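
/*
 * Worked example (added note, not in the original source) for the priority
 * bitmap scan in get_top_thread() above: bit N of run_queue_bitmap is set
 * whenever run_queue[N] is non-empty. If the bitmap is 0x91 (threads ready
 * at priorities 0, 4 and 7), __builtin_clz(0x91) on the 32-bit word is 24,
 * so next_queue = 32 - 1 - 24 = 7: the highest-priority non-empty queue is
 * always examined first.
 */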

/**
 * @brief Cause another thread to be executed.
 *
 * Internal reschedule routine. The current thread needs to already be in whatever
 * state and queues it needs to be in. This routine simply picks the next thread and
 * switches to it.
 *
 * This is probably not the function you're looking for. See
 * thread_yield() instead.
 */
void thread_resched(void) {
    thread_t *oldthread;
    thread_t *newthread;

    thread_t *current_thread = get_current_thread();
    uint cpu = arch_curr_cpu_num();

    DEBUG_ASSERT(arch_ints_disabled());
    DEBUG_ASSERT(spin_lock_held(&thread_lock));
    DEBUG_ASSERT(current_thread->state != THREAD_RUNNING);

    THREAD_STATS_INC(reschedules);

    newthread = get_top_thread(cpu);

    DEBUG_ASSERT(newthread);

    newthread->state = THREAD_RUNNING;

    oldthread = current_thread;

    if (newthread == oldthread)
        return;

    /* set up quantum for the new thread if it was consumed */
    if (newthread->remaining_quantum <= 0) {
        newthread->remaining_quantum = 5; // XXX make this smarter
    }

    /* mark the cpu ownership of the threads */
    thread_set_curr_cpu(oldthread, -1);
    thread_set_curr_cpu(newthread, cpu);

#if WITH_SMP
    if (thread_is_idle(newthread)) {
        mp_set_cpu_idle(cpu);
    } else {
        mp_set_cpu_busy(cpu);
    }

    if (thread_is_realtime(newthread)) {
        mp_set_cpu_realtime(cpu);
    } else {
        mp_set_cpu_non_realtime(cpu);
    }
#endif

#if THREAD_STATS
    THREAD_STATS_INC(context_switches);

    if (thread_is_idle(oldthread)) {
        lk_bigtime_t now = current_time_hires();
        thread_stats[cpu].idle_time += now - thread_stats[cpu].last_idle_timestamp;
    }
    if (thread_is_idle(newthread)) {
        thread_stats[cpu].last_idle_timestamp = current_time_hires();
    }
#endif

    KEVLOG_THREAD_SWITCH(oldthread, newthread);

#if PLATFORM_HAS_DYNAMIC_TIMER
    if (thread_is_real_time_or_idle(newthread)) {
        if (!thread_is_real_time_or_idle(oldthread)) {
            /* if we're switching from a non real time to a real time, cancel
             * the preemption timer. */
#if DEBUG_THREAD_CONTEXT_SWITCH
            dprintf(ALWAYS, "arch_context_switch: stop preempt, cpu %d, old %p (%s), new %p (%s)\n",
                    cpu, oldthread, oldthread->name, newthread, newthread->name);
#endif
            timer_cancel(&preempt_timer[cpu]);
        }
    } else if (thread_is_real_time_or_idle(oldthread)) {
        /* if we're switching from a real time (or idle thread) to a regular one,
         * set up a periodic timer to run our preemption tick. */
#if DEBUG_THREAD_CONTEXT_SWITCH
        dprintf(ALWAYS, "arch_context_switch: start preempt, cpu %d, old %p (%s), new %p (%s)\n",
                cpu, oldthread, oldthread->name, newthread, newthread->name);
#endif
        timer_set_periodic(&preempt_timer[cpu], 10, thread_timer_tick, NULL);
    }
#endif

    /* set some optional target debug leds */
    target_set_debug_led(0, !thread_is_idle(newthread));

    /* do the switch */
    set_current_thread(newthread);

#if DEBUG_THREAD_CONTEXT_SWITCH
    dprintf(ALWAYS, "arch_context_switch: cpu %d, old %p (%s, pri %d, flags 0x%x), new %p (%s, pri %d, flags 0x%x)\n",
            cpu, oldthread, oldthread->name, oldthread->priority,
            oldthread->flags, newthread, newthread->name,
            newthread->priority, newthread->flags);
#endif

#if THREAD_STACK_BOUNDS_CHECK
    /* check that the old thread has not blown its stack just before pushing its context */
    if (oldthread->flags & THREAD_FLAG_DEBUG_STACK_BOUNDS_CHECK) {
        STATIC_ASSERT((THREAD_STACK_PADDING_SIZE % sizeof(uint32_t)) == 0);
        uint32_t *s = (uint32_t *)oldthread->stack;
        for (size_t i = 0; i < THREAD_STACK_PADDING_SIZE / sizeof(uint32_t); i++) {
            if (unlikely(s[i] != STACK_DEBUG_WORD)) {
                /* NOTE: will probably blow the stack harder here, but hopefully enough
                 * state exists to at least get some sort of debugging done.
                 */
                panic("stack overrun at %p: thread %p (%s), stack %p\n", &s[i],
                      oldthread, oldthread->name, oldthread->stack);
            }
        }
    }
#endif

#ifdef WITH_LIB_UTHREAD
    uthread_context_switch(oldthread, newthread);
#endif

#if WITH_KERNEL_VM
    /* see if we need to swap mmu context */
    if (newthread->aspace != oldthread->aspace) {
        vmm_context_switch(oldthread->aspace, newthread->aspace);
    }
#endif

    /* do the low level context switch */
    arch_context_switch(oldthread, newthread);
}

/**
 * @brief Yield the cpu to another thread
 *
 * This function places the current thread at the end of the run queue
 * and yields the cpu to another waiting thread (if any.)
 *
 * This function will return at some later time. Possibly immediately if
 * no other threads are waiting to execute.
 */
void thread_yield(void) {
    thread_t *current_thread = get_current_thread();

    DEBUG_ASSERT(current_thread->magic == THREAD_MAGIC);
    DEBUG_ASSERT(current_thread->state == THREAD_RUNNING);

    THREAD_LOCK(state);

    THREAD_STATS_INC(yields);

    /* we are yielding the cpu, so stick ourselves into the tail of the run queue and reschedule */
    current_thread->state = THREAD_READY;
    current_thread->remaining_quantum = 0;
    if (likely(!thread_is_idle(current_thread))) { /* idle thread doesn't go in the run queue */
        insert_in_run_queue_tail(current_thread);
    }
    thread_resched();

    THREAD_UNLOCK(state);
}

/**
 * @brief Briefly yield cpu to another thread
 *
 * This function is similar to thread_yield(), except that it will
 * restart more quickly.
 *
 * This function places the current thread at the head of the run
 * queue and then yields the cpu to another thread.
 *
 * Exception: If the time slice for this thread has expired, then
 * the thread goes to the end of the run queue.
 *
 * This function will return at some later time. Possibly immediately if
 * no other threads are waiting to execute.
 */
void thread_preempt(void) {
    thread_t *current_thread = get_current_thread();

    DEBUG_ASSERT(current_thread->magic == THREAD_MAGIC);
    DEBUG_ASSERT(current_thread->state == THREAD_RUNNING);

#if THREAD_STATS
    if (!thread_is_idle(current_thread))
        THREAD_STATS_INC(preempts); /* only track when a meaningful preempt happens */
#endif

    KEVLOG_THREAD_PREEMPT(current_thread);

    THREAD_LOCK(state);

    /* we are being preempted, so we get to go back into the front of the run queue if we have quantum left */
    current_thread->state = THREAD_READY;
    if (likely(!thread_is_idle(current_thread))) { /* idle thread doesn't go in the run queue */
        if (current_thread->remaining_quantum > 0)
            insert_in_run_queue_head(current_thread);
        else
            insert_in_run_queue_tail(current_thread); /* if we're out of quantum, go to the tail of the queue */
    }
    thread_resched();

    THREAD_UNLOCK(state);
}

/**
 * @brief Suspend thread until woken.
 *
 * This function schedules another thread to execute. This function does not
 * return until the thread is made runnable again by some other module.
 *
 * You probably don't want to call this function directly; it's meant to be called
 * from other modules, such as mutex, which will presumably set the thread's
 * state to blocked and add it to some queue or another.
 */
void thread_block(void) {
    __UNUSED thread_t *current_thread = get_current_thread();

    DEBUG_ASSERT(current_thread->magic == THREAD_MAGIC);
    DEBUG_ASSERT(current_thread->state == THREAD_BLOCKED);
    DEBUG_ASSERT(spin_lock_held(&thread_lock));
    DEBUG_ASSERT(!thread_is_idle(current_thread));

    /* we are blocking on something. the blocking code should have already stuck us on a queue */
    thread_resched();
}
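
/*
 * Illustrative calling pattern (added sketch, not in the original source):
 * a synchronization primitive typically queues the current thread, marks it
 * THREAD_BLOCKED, and then calls thread_block(), all while holding
 * thread_lock with interrupts disabled; within this file, wait_queue_block()
 * below is the canonical version of this pattern. some_object and its
 * waiters list are hypothetical names used only for this example.
 *
 * @code
 *   THREAD_LOCK(state);
 *   thread_t *ct = get_current_thread();
 *   list_add_tail(&some_object->waiters, &ct->queue_node);
 *   ct->state = THREAD_BLOCKED;
 *   thread_block();              // returns once another module unblocks us
 *   THREAD_UNLOCK(state);
 * @endcode
 */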

void thread_unblock(thread_t *t, bool resched) {
    DEBUG_ASSERT(t->magic == THREAD_MAGIC);
    DEBUG_ASSERT(t->state == THREAD_BLOCKED);
    DEBUG_ASSERT(spin_lock_held(&thread_lock));
    DEBUG_ASSERT(!thread_is_idle(t));

    t->state = THREAD_READY;
    insert_in_run_queue_head(t);
    wakeup_cpu_for_thread(t);

    if (resched)
        thread_resched();
}

enum handler_return thread_timer_tick(struct timer *t, lk_time_t now, void *arg) {
    thread_t *current_thread = get_current_thread();

    if (thread_is_real_time_or_idle(current_thread))
        return INT_NO_RESCHEDULE;

    current_thread->remaining_quantum--;
    if (current_thread->remaining_quantum <= 0) {
        return INT_RESCHEDULE;
    } else {
        return INT_NO_RESCHEDULE;
    }
}
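
/*
 * Added note (not in the original source): with the 10 ms periodic preemption
 * timer armed in thread_resched() and the default quantum of 5 assigned there,
 * a non-real-time thread is forced to reschedule after roughly 5 ticks, i.e.
 * about 50 ms of continuous execution, assuming it does not yield or block
 * first.
 */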

/* timer callback to wake up a sleeping thread */
static enum handler_return thread_sleep_handler(timer_t *timer, lk_time_t now, void *arg) {
    thread_t *t = (thread_t *)arg;

    DEBUG_ASSERT(t->magic == THREAD_MAGIC);
    DEBUG_ASSERT(t->state == THREAD_SLEEPING);

    THREAD_LOCK(state);

    t->state = THREAD_READY;
    insert_in_run_queue_head(t);

    THREAD_UNLOCK(state);

    return INT_RESCHEDULE;
}

/**
 * @brief Put thread to sleep; delay specified in ms
 *
 * This function puts the current thread to sleep until the specified
 * delay in ms has expired.
 *
 * Note that this function could sleep for longer than the specified delay if
 * other threads are running. When the timer expires, this thread will
 * be placed at the head of the run queue.
 */
void thread_sleep(lk_time_t delay) {
    timer_t timer;

    thread_t *current_thread = get_current_thread();

    DEBUG_ASSERT(current_thread->magic == THREAD_MAGIC);
    DEBUG_ASSERT(current_thread->state == THREAD_RUNNING);
    DEBUG_ASSERT(!thread_is_idle(current_thread));

    timer_initialize(&timer);

    THREAD_LOCK(state);
    timer_set_oneshot(&timer, delay, thread_sleep_handler, (void *)current_thread);
    current_thread->state = THREAD_SLEEPING;
    thread_resched();
    THREAD_UNLOCK(state);
}
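
/*
 * Illustrative usage (added sketch, not in the original source): a worker
 * polling a device roughly every 100 ms. The delay is a lower bound; the
 * thread may sleep longer if higher-priority threads are runnable. The
 * poll_hardware() helper is a hypothetical name used only for this example.
 *
 * @code
 *   for (;;) {
 *       poll_hardware();
 *       thread_sleep(100);    // at least 100 ms
 *   }
 * @endcode
 */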

/**
 * @brief Initialize threading system
 *
 * This function is called once, from kmain()
 */
void thread_init_early(void) {
    int i;

    DEBUG_ASSERT(arch_curr_cpu_num() == 0);

    /* initialize the run queues */
    for (i=0; i < NUM_PRIORITIES; i++)
        list_initialize(&run_queue[i]);

    /* initialize the thread list */
    list_initialize(&thread_list);

    /* create a thread to cover the current running state */
    thread_t *t = idle_thread(0);
    init_thread_struct(t, "bootstrap");

    /* half construct this thread, since we're already running */
    t->priority = HIGHEST_PRIORITY;
    t->state = THREAD_RUNNING;
    t->flags = THREAD_FLAG_DETACHED;
    thread_set_curr_cpu(t, 0);
    thread_set_pinned_cpu(t, 0);
    wait_queue_init(&t->retcode_wait_queue);
    list_add_head(&thread_list, &t->thread_list_node);
    set_current_thread(t);
}

/**
 * @brief Complete thread initialization
 *
 * This function is called once at boot time
 */
void thread_init(void) {
#if PLATFORM_HAS_DYNAMIC_TIMER
    for (uint i = 0; i < SMP_MAX_CPUS; i++) {
        timer_initialize(&preempt_timer[i]);
    }
#endif
}

/**
 * @brief Change name of current thread
 */
void thread_set_name(const char *name) {
    thread_t *current_thread = get_current_thread();
    strlcpy(current_thread->name, name, sizeof(current_thread->name));
}

/**
 * @brief Change priority of current thread
 *
 * See thread_create() for a discussion of priority values.
 */
void thread_set_priority(int priority) {
    thread_t *current_thread = get_current_thread();

    THREAD_LOCK(state);

    if (priority <= IDLE_PRIORITY)
        priority = IDLE_PRIORITY + 1;
    if (priority > HIGHEST_PRIORITY)
        priority = HIGHEST_PRIORITY;
    current_thread->priority = priority;

    current_thread->state = THREAD_READY;
    insert_in_run_queue_head(current_thread);
    thread_resched();

    THREAD_UNLOCK(state);
}

/**
 * @brief Become an idle thread
 *
 * This function marks the current thread as the idle thread -- the one which
 * executes when there is nothing else to do. This function does not return.
 * This function is called once at boot time.
 */
void thread_become_idle(void) {
    DEBUG_ASSERT(arch_ints_disabled());

    thread_t *t = get_current_thread();

#if WITH_SMP
    char name[16];
    snprintf(name, sizeof(name), "idle %d", arch_curr_cpu_num());
    thread_set_name(name);
#else
    thread_set_name("idle");
#endif

    /* mark ourself as idle */
    t->priority = IDLE_PRIORITY;
    t->flags |= THREAD_FLAG_IDLE;
    thread_set_pinned_cpu(t, arch_curr_cpu_num());

    mp_set_curr_cpu_active(true);
    mp_set_cpu_idle(arch_curr_cpu_num());

    /* enable interrupts and start the scheduler */
    arch_enable_ints();
    thread_yield();

    idle_thread_routine();
}

/* create an idle thread for the cpu we're on, and start scheduling */

void thread_secondary_cpu_init_early(void) {
    DEBUG_ASSERT(arch_ints_disabled());

    /* construct an idle thread to cover our cpu */
    uint cpu = arch_curr_cpu_num();
    thread_t *t = idle_thread(cpu);

    char name[16];
    snprintf(name, sizeof(name), "idle %u", cpu);
    init_thread_struct(t, name);
    thread_set_pinned_cpu(t, cpu);

    /* half construct this thread, since we're already running */
    t->priority = HIGHEST_PRIORITY;
    t->state = THREAD_RUNNING;
    t->flags = THREAD_FLAG_DETACHED | THREAD_FLAG_IDLE;
    thread_set_curr_cpu(t, cpu);
    thread_set_pinned_cpu(t, cpu);
    wait_queue_init(&t->retcode_wait_queue);

    THREAD_LOCK(state);

    list_add_head(&thread_list, &t->thread_list_node);
    set_current_thread(t);

    THREAD_UNLOCK(state);
}

void thread_secondary_cpu_entry(void) {
    uint cpu = arch_curr_cpu_num();
    thread_t *t = get_current_thread();
    t->priority = IDLE_PRIORITY;

    mp_set_curr_cpu_active(true);
    mp_set_cpu_idle(cpu);

    /* enable interrupts and start the scheduler on this cpu */
    arch_enable_ints();
    thread_yield();

    idle_thread_routine();
}

static const char *thread_state_to_str(enum thread_state state) {
    switch (state) {
        case THREAD_SUSPENDED:
            return "susp";
        case THREAD_READY:
            return "rdy";
        case THREAD_RUNNING:
            return "run";
        case THREAD_BLOCKED:
            return "blok";
        case THREAD_SLEEPING:
            return "slep";
        case THREAD_DEATH:
            return "deth";
        default:
            return "unkn";
    }
}

static size_t thread_stack_used(thread_t *t) {
#ifdef THREAD_STACK_HIGHWATER
    uint8_t *stack_base;
    size_t stack_size;
    size_t i;

    stack_base = t->stack;
    stack_size = t->stack_size;

    for (i = 0; i < stack_size; i++) {
        if (stack_base[i] != STACK_DEBUG_BYTE)
            break;
    }
    return stack_size - i;
#else
    return 0;
#endif
}
/**
 * @brief Dump debugging info about the specified thread.
 */
void dump_thread(thread_t *t) {
    dprintf(INFO, "dump_thread: t %p (%s)\n", t, t->name);
#if WITH_SMP
    dprintf(INFO, "\tstate %s, curr_cpu %d, pinned_cpu %d, priority %d, remaining quantum %d\n",
            thread_state_to_str(t->state), t->curr_cpu, t->pinned_cpu, t->priority, t->remaining_quantum);
#else
    dprintf(INFO, "\tstate %s, priority %d, remaining quantum %d\n",
            thread_state_to_str(t->state), t->priority, t->remaining_quantum);
#endif
#ifdef THREAD_STACK_HIGHWATER
    dprintf(INFO, "\tstack %p, stack_size %zd, stack_used %zd\n",
            t->stack, t->stack_size, thread_stack_used(t));
#else
    dprintf(INFO, "\tstack %p, stack_size %zd\n", t->stack, t->stack_size);
#endif
    dprintf(INFO, "\tentry %p, arg %p, flags 0x%x\n", t->entry, t->arg, t->flags);
    dprintf(INFO, "\twait queue %p, wait queue ret %d\n", t->blocking_wait_queue, t->wait_queue_block_ret);
#if WITH_KERNEL_VM
    dprintf(INFO, "\taspace %p\n", t->aspace);
#endif
#if (MAX_TLS_ENTRY > 0)
    dprintf(INFO, "\ttls:");
    int i;
    for (i=0; i < MAX_TLS_ENTRY; i++) {
        dprintf(INFO, " 0x%lx", t->tls[i]);
    }
    dprintf(INFO, "\n");
#endif
    arch_dump_thread(t);
}

void dump_all_threads_unlocked(void) {
    thread_t *t;
    list_for_every_entry(&thread_list, t, thread_t, thread_list_node) {
        if (t->magic != THREAD_MAGIC) {
            dprintf(INFO, "bad magic on thread struct %p, aborting.\n", t);
            hexdump(t, sizeof(thread_t));
            break;
        }
        dump_thread(t);
    }
}

/**
 * @brief Dump debugging info about all threads
 */
void dump_all_threads(void) {
    THREAD_LOCK(state);
    dump_all_threads_unlocked();
    THREAD_UNLOCK(state);
}

/** @} */


/**
 * @defgroup wait Wait Queue
 * @{
 */
void wait_queue_init(wait_queue_t *wait) {
    *wait = (wait_queue_t)WAIT_QUEUE_INITIAL_VALUE(*wait);
}

static enum handler_return wait_queue_timeout_handler(timer_t *timer, lk_time_t now, void *arg) {
    thread_t *thread = (thread_t *)arg;

    DEBUG_ASSERT(thread->magic == THREAD_MAGIC);

    spin_lock(&thread_lock);

    enum handler_return ret = INT_NO_RESCHEDULE;
    if (thread_unblock_from_wait_queue(thread, ERR_TIMED_OUT) >= NO_ERROR) {
        ret = INT_RESCHEDULE;
    }

    spin_unlock(&thread_lock);

    return ret;
}

/**
 * @brief Block until a wait queue is notified.
 *
 * This function puts the current thread at the end of a wait
 * queue and then blocks until some other thread wakes the queue
 * up again.
 *
 * @param wait The wait queue to enter
 * @param timeout The maximum time, in ms, to wait
 *
 * If the timeout is zero, this function returns immediately with
 * ERR_TIMED_OUT. If the timeout is INFINITE_TIME, this function
 * waits indefinitely. Otherwise, this function returns with
 * ERR_TIMED_OUT at the end of the timeout period.
 *
 * @return ERR_TIMED_OUT on timeout, else returns the return
 * value specified when the queue was woken by wait_queue_wake_one().
 */
status_t wait_queue_block(wait_queue_t *wait, lk_time_t timeout) {
    timer_t timer;

    thread_t *current_thread = get_current_thread();

    DEBUG_ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
    DEBUG_ASSERT(current_thread->state == THREAD_RUNNING);
    DEBUG_ASSERT(arch_ints_disabled());
    DEBUG_ASSERT(spin_lock_held(&thread_lock));

    if (timeout == 0)
        return ERR_TIMED_OUT;

    list_add_tail(&wait->list, &current_thread->queue_node);
    wait->count++;
    current_thread->state = THREAD_BLOCKED;
    current_thread->blocking_wait_queue = wait;
    current_thread->wait_queue_block_ret = NO_ERROR;

    /* if the timeout is not infinite, set a callback to yank us out of the queue */
    if (timeout != INFINITE_TIME) {
        timer_initialize(&timer);
        timer_set_oneshot(&timer, timeout, wait_queue_timeout_handler, (void *)current_thread);
    }

    thread_resched();

    /* we don't really know if the timer fired or not, so to be safe, try to cancel it */
    if (timeout != INFINITE_TIME) {
        timer_cancel(&timer);
    }

    return current_thread->wait_queue_block_ret;
}
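
/*
 * Illustrative sketch (not part of the original source): a minimal blocking
 * "event" built on the wait queue API in this file. The event_t type and its
 * fields are hypothetical names used only for this example.
 *
 * @code
 *   typedef struct {
 *       bool signaled;
 *       wait_queue_t wq;
 *   } event_t;
 *
 *   static void event_init(event_t *e) {
 *       e->signaled = false;
 *       wait_queue_init(&e->wq);
 *   }
 *
 *   static void event_wait(event_t *e) {
 *       THREAD_LOCK(state);
 *       while (!e->signaled)                         // re-check after every wakeup
 *           wait_queue_block(&e->wq, INFINITE_TIME); // thread_lock held, as required
 *       THREAD_UNLOCK(state);
 *   }
 *
 *   static void event_signal(event_t *e) {
 *       THREAD_LOCK(state);
 *       e->signaled = true;
 *       wait_queue_wake_all(&e->wq, false, NO_ERROR);
 *       THREAD_UNLOCK(state);
 *   }
 * @endcode
 */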

/**
 * @brief Wake up one thread sleeping on a wait queue
 *
 * This function removes one thread (if any) from the head of the wait queue and
 * makes it executable. The new thread will be placed at the head of the
 * run queue.
 *
 * @param wait The wait queue to wake
 * @param reschedule If true, the newly-woken thread will run immediately.
 * @param wait_queue_error The return value which the new thread will receive
 * from wait_queue_block().
 *
 * @return The number of threads woken (zero or one)
 */
int wait_queue_wake_one(wait_queue_t *wait, bool reschedule, status_t wait_queue_error) {
    thread_t *t;
    int ret = 0;

    thread_t *current_thread = get_current_thread();

    DEBUG_ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
    DEBUG_ASSERT(arch_ints_disabled());
    DEBUG_ASSERT(spin_lock_held(&thread_lock));

    t = list_remove_head_type(&wait->list, thread_t, queue_node);
    if (t) {
        wait->count--;
        DEBUG_ASSERT(t->state == THREAD_BLOCKED);
        t->state = THREAD_READY;
        t->wait_queue_block_ret = wait_queue_error;
        t->blocking_wait_queue = NULL;

        /* if we're instructed to reschedule, stick the current thread on the head
         * of the run queue first, so that the newly awakened thread gets a chance to run
         * before the current one, but the current one doesn't get unnecessarily punished.
         */
        if (reschedule) {
            current_thread->state = THREAD_READY;
            insert_in_run_queue_head(current_thread);
        }
        insert_in_run_queue_head(t);
        wakeup_cpu_for_thread(t);
        if (reschedule) {
            thread_resched();
        }
        ret = 1;

    }

    return ret;
}


/**
 * @brief Wake all threads sleeping on a wait queue
 *
 * This function removes all threads (if any) from the wait queue and
 * makes them executable. The new threads will be placed at the head of the
 * run queue.
 *
 * @param wait The wait queue to wake
 * @param reschedule If true, the newly-woken threads will run immediately.
 * @param wait_queue_error The return value which the new thread will receive
 * from wait_queue_block().
 *
 * @return The number of threads woken
 */
int wait_queue_wake_all(wait_queue_t *wait, bool reschedule, status_t wait_queue_error) {
    thread_t *t;
    int ret = 0;
    uint32_t cpu_mask = 0;

    thread_t *current_thread = get_current_thread();

    DEBUG_ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
    DEBUG_ASSERT(arch_ints_disabled());
    DEBUG_ASSERT(spin_lock_held(&thread_lock));

    if (reschedule && wait->count > 0) {
        /* if we're instructed to reschedule, stick the current thread on the head
         * of the run queue first, so that the newly awakened threads get a chance to run
         * before the current one, but the current one doesn't get unnecessarily punished.
         */
        current_thread->state = THREAD_READY;
        insert_in_run_queue_head(current_thread);
    }

    /* pop all the threads off the wait queue into the run queue */
    while ((t = list_remove_head_type(&wait->list, thread_t, queue_node))) {
        wait->count--;
        DEBUG_ASSERT(t->state == THREAD_BLOCKED);
        t->state = THREAD_READY;
        t->wait_queue_block_ret = wait_queue_error;
        t->blocking_wait_queue = NULL;
        int pinned_cpu = thread_pinned_cpu(t);
        if (pinned_cpu < 0) {
            /* assumes MP_CPU_ALL_BUT_LOCAL is defined as all bits on */
            cpu_mask = MP_CPU_ALL_BUT_LOCAL;
        } else {
            cpu_mask |= (1U << pinned_cpu);
        }
        insert_in_run_queue_head(t);
        ret++;
    }

    DEBUG_ASSERT(wait->count == 0);

    if (ret > 0) {
        mp_reschedule(cpu_mask, 0);
        if (reschedule) {
            thread_resched();
        }
    }

    return ret;
}

/**
 * @brief Free all resources allocated in wait_queue_init()
 *
 * If any threads were waiting on this queue, they are all woken.
 */
void wait_queue_destroy(wait_queue_t *wait, bool reschedule) {
    DEBUG_ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
    DEBUG_ASSERT(arch_ints_disabled());
    DEBUG_ASSERT(spin_lock_held(&thread_lock));

    wait_queue_wake_all(wait, reschedule, ERR_OBJECT_DESTROYED);
    wait->magic = 0;
}

/**
 * @brief Wake a specific thread in a wait queue
 *
 * This function extracts a specific thread from a wait queue, wakes it, and
 * puts it at the head of the run queue.
 *
 * @param t The thread to wake
 * @param wait_queue_error The return value which the new thread will receive
 * from wait_queue_block().
 *
 * @return ERR_NOT_BLOCKED if thread was not in any wait queue.
 */
status_t thread_unblock_from_wait_queue(thread_t *t, status_t wait_queue_error) {
    DEBUG_ASSERT(t->magic == THREAD_MAGIC);
    DEBUG_ASSERT(arch_ints_disabled());
    DEBUG_ASSERT(spin_lock_held(&thread_lock));

    if (t->state != THREAD_BLOCKED)
        return ERR_NOT_BLOCKED;

    DEBUG_ASSERT(t->blocking_wait_queue != NULL);
    DEBUG_ASSERT(t->blocking_wait_queue->magic == WAIT_QUEUE_MAGIC);
    DEBUG_ASSERT(list_in_list(&t->queue_node));

    list_delete(&t->queue_node);
    t->blocking_wait_queue->count--;
    t->blocking_wait_queue = NULL;
    t->state = THREAD_READY;
    t->wait_queue_block_ret = wait_queue_error;
    insert_in_run_queue_head(t);
    wakeup_cpu_for_thread(t);

    return NO_ERROR;
}

#if defined(WITH_DEBUGGER_INFO)
// This is, by necessity, arch-specific, and arm-m specific right now,
// but lives here due to thread_list being static.
//
// It contains sufficient information for a remote debugger to walk
// the thread list without needing the symbols and debug sections in
// the elf binary for lk or the ability to parse them.
const struct __debugger_info__ {
    u32 version; // flags:16 major:8 minor:8
    void *thread_list_ptr;
    void *current_thread_ptr;
    u8 off_list_node;
    u8 off_state;
    u8 off_saved_sp;
    u8 off_was_preempted;
    u8 off_name;
    u8 off_waitq;
} _debugger_info = {
    .version = 0x0100,
    .thread_list_ptr = &thread_list,
    .current_thread_ptr = &_current_thread,
    .off_list_node = __builtin_offsetof(thread_t, thread_list_node),
    .off_state = __builtin_offsetof(thread_t, state),
    .off_saved_sp = __builtin_offsetof(thread_t, arch.sp),
    .off_was_preempted = __builtin_offsetof(thread_t, arch.was_preempted),
    .off_name = __builtin_offsetof(thread_t, name),
    .off_waitq = __builtin_offsetof(thread_t, blocking_wait_queue),
};
#endif