1 /*
2 * Copyright (c) 2008-2015 Travis Geiselbrecht
3 *
4 * Use of this source code is governed by a MIT-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/MIT
7 */
8
9 /**
10 * @file
11 * @brief Kernel threading
12 *
13 * This file is the core kernel threading interface.
14 *
15 * @defgroup thread Threads
16 * @{
17 */
18 #include <kernel/thread.h>
19
20 #include <assert.h>
21 #include <kernel/debug.h>
22 #include <kernel/mp.h>
23 #include <kernel/timer.h>
24 #include <lib/heap.h>
25 #include <lk/debug.h>
26 #include <lk/err.h>
27 #include <lk/list.h>
28 #include <lk/trace.h>
29 #include <malloc.h>
30 #include <platform.h>
31 #include <printf.h>
32 #include <string.h>
33 #include <target.h>
34 #if WITH_KERNEL_VM
35 #include <kernel/vm.h>
36 #endif
37
38 #if THREAD_STATS
39 struct thread_stats thread_stats[SMP_MAX_CPUS];
40 #endif
41
42 #define STACK_DEBUG_BYTE (0x99)
43 #define STACK_DEBUG_WORD (0x99999999)
44
45 #define DEBUG_THREAD_CONTEXT_SWITCH 0
46
47 /* global thread list */
48 struct list_node thread_list;
49
50 /* master thread spinlock */
51 spin_lock_t thread_lock = SPIN_LOCK_INITIAL_VALUE;
52
53 /* the run queue */
54 static struct list_node run_queue[NUM_PRIORITIES];
55 static uint32_t run_queue_bitmap;
56
57 /* make sure the bitmap is large enough to cover our number of priorities */
58 STATIC_ASSERT(NUM_PRIORITIES <= sizeof(run_queue_bitmap) * 8);
59
60 /* the idle thread(s) (statically allocated) */
61 #if WITH_SMP
62 static thread_t _idle_threads[SMP_MAX_CPUS];
63 #define idle_thread(cpu) (&_idle_threads[cpu])
64 #else
65 static thread_t _idle_thread;
66 #define idle_thread(cpu) (&_idle_thread)
67 #endif
68
69 /* local routines */
70 static void thread_resched(void);
71 static void idle_thread_routine(void) __NO_RETURN;
72
73 #if PLATFORM_HAS_DYNAMIC_TIMER
74 /* preemption timer */
75 static timer_t preempt_timer[SMP_MAX_CPUS];
76 #endif
77
78 /* run queue manipulation */
79 static void insert_in_run_queue_head(thread_t *t) {
80 DEBUG_ASSERT(t->magic == THREAD_MAGIC);
81 DEBUG_ASSERT(t->state == THREAD_READY);
82 DEBUG_ASSERT(!list_in_list(&t->queue_node));
83 DEBUG_ASSERT(arch_ints_disabled());
84 DEBUG_ASSERT(spin_lock_held(&thread_lock));
85
86 list_add_head(&run_queue[t->priority], &t->queue_node);
87 run_queue_bitmap |= (1<<t->priority);
88 }
89
90 static void insert_in_run_queue_tail(thread_t *t) {
91 DEBUG_ASSERT(t->magic == THREAD_MAGIC);
92 DEBUG_ASSERT(t->state == THREAD_READY);
93 DEBUG_ASSERT(!list_in_list(&t->queue_node));
94 DEBUG_ASSERT(arch_ints_disabled());
95 DEBUG_ASSERT(spin_lock_held(&thread_lock));
96
97 list_add_tail(&run_queue[t->priority], &t->queue_node);
98 run_queue_bitmap |= (1<<t->priority);
99 }
100
101 static void wakeup_cpu_for_thread(thread_t *t)
102 {
103 /* Wake up the core to which this thread is pinned
104 * or wake up all cores if the thread is unpinned */
105 int pinned_cpu = thread_pinned_cpu(t);
106 if (pinned_cpu < 0)
107 mp_reschedule(MP_CPU_ALL_BUT_LOCAL, 0);
108 else
109 mp_reschedule(1U << pinned_cpu, 0);
110 }
111
112 static void init_thread_struct(thread_t *t, const char *name) {
113 memset(t, 0, sizeof(thread_t));
114 t->magic = THREAD_MAGIC;
115 thread_set_pinned_cpu(t, -1);
116 strlcpy(t->name, name, sizeof(t->name));
117 }
118
119 /**
120 * @brief Create a new thread
121 *
122 * This function creates a new thread. The thread is initially suspended, so you
123 * need to call thread_resume() to execute it.
124 *
125 * @param name Name of thread
126 * @param entry Entry point of thread
127 * @param arg Arbitrary argument passed to entry()
128 * @param priority Execution priority for the thread.
129 * @param stack_size Stack size for the thread.
130 *
131 * Thread priority is an integer from 0 (lowest) to 31 (highest). Some standard
132 * priorities are defined in <kernel/thread.h>:
133 *
134 * HIGHEST_PRIORITY
135 * DPC_PRIORITY
136 * HIGH_PRIORITY
137 * DEFAULT_PRIORITY
138 * LOW_PRIORITY
139 * IDLE_PRIORITY
140 * LOWEST_PRIORITY
141 *
142 * Stack size is typically set to DEFAULT_STACK_SIZE
143 *
144 * @return Pointer to thread object, or NULL on failure.
145 */
146 thread_t *thread_create_etc(thread_t *t, const char *name, thread_start_routine entry, void *arg, int priority, void *stack, size_t stack_size) {
147 unsigned int flags = 0;
148
149 if (!t) {
150 t = malloc(sizeof(thread_t));
151 if (!t)
152 return NULL;
153 flags |= THREAD_FLAG_FREE_STRUCT;
154 }
155
156 init_thread_struct(t, name);
157
158 t->entry = entry;
159 t->arg = arg;
160 t->priority = priority;
161 t->state = THREAD_SUSPENDED;
162 t->blocking_wait_queue = NULL;
163 t->wait_queue_block_ret = NO_ERROR;
164 thread_set_curr_cpu(t, -1);
165
166 t->retcode = 0;
167 wait_queue_init(&t->retcode_wait_queue);
168
169 #if WITH_KERNEL_VM
170 t->aspace = NULL;
171 #endif
172
173 /* create the stack */
174 if (!stack) {
175 #if THREAD_STACK_BOUNDS_CHECK
176 stack_size += THREAD_STACK_PADDING_SIZE;
177 flags |= THREAD_FLAG_DEBUG_STACK_BOUNDS_CHECK;
178 #endif
179 t->stack = malloc(stack_size);
180 if (!t->stack) {
181 if (flags & THREAD_FLAG_FREE_STRUCT)
182 free(t);
183 return NULL;
184 }
185 flags |= THREAD_FLAG_FREE_STACK;
186 #if THREAD_STACK_BOUNDS_CHECK
187 memset(t->stack, STACK_DEBUG_BYTE, THREAD_STACK_PADDING_SIZE);
188 #endif
189 } else {
190 t->stack = stack;
191 }
192 #if THREAD_STACK_HIGHWATER
193 if (flags & THREAD_FLAG_DEBUG_STACK_BOUNDS_CHECK) {
194 memset(t->stack + THREAD_STACK_PADDING_SIZE, STACK_DEBUG_BYTE,
195 stack_size - THREAD_STACK_PADDING_SIZE);
196 } else {
197 memset(t->stack, STACK_DEBUG_BYTE, stack_size);
198 }
199 #endif
200
201 t->stack_size = stack_size;
202
203 /* save whether or not we need to free the thread struct and/or stack */
204 t->flags = flags;
205
206 /* inherit thread local storage from the parent */
207 thread_t *current_thread = get_current_thread();
208 int i;
209 for (i=0; i < MAX_TLS_ENTRY; i++) {
210 t->tls[i] = current_thread->tls[i];
211 }
212 t->tls[TLS_ENTRY_ERRNO] = 0; /* clear errno */
213
214 /* set up the initial stack frame */
215 arch_thread_initialize(t);
216
217 /* add it to the global thread list */
218 THREAD_LOCK(state);
219 list_add_head(&thread_list, &t->thread_list_node);
220 THREAD_UNLOCK(state);
221
222 return t;
223 }
224
225 thread_t *thread_create(const char *name, thread_start_routine entry, void *arg, int priority, size_t stack_size) {
226 return thread_create_etc(NULL, name, entry, arg, priority, NULL, stack_size);
227 }
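
/*
 * Example (illustrative sketch, not part of this file): threads come out of
 * thread_create() suspended, so the usual pattern is to create and then
 * explicitly resume. The worker function and its name here are hypothetical.
 *
 *   static int worker(void *arg) {
 *       // ... do some work ...
 *       return 0;
 *   }
 *
 *   thread_t *t = thread_create("worker", worker, NULL, DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
 *   if (t)
 *       thread_resume(t);
 */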
228
229 /**
230 * @brief Flag a thread as real time
231 *
232 * @param t Thread to flag
233 *
234 * @return NO_ERROR on success
235 */
236 status_t thread_set_real_time(thread_t *t) {
237 if (!t)
238 return ERR_INVALID_ARGS;
239
240 DEBUG_ASSERT(t->magic == THREAD_MAGIC);
241
242 THREAD_LOCK(state);
243 #if PLATFORM_HAS_DYNAMIC_TIMER
244 if (t == get_current_thread()) {
245 /* if we're currently running, cancel the preemption timer. */
246 timer_cancel(&preempt_timer[arch_curr_cpu_num()]);
247 }
248 #endif
249 t->flags |= THREAD_FLAG_REAL_TIME;
250 THREAD_UNLOCK(state);
251
252 return NO_ERROR;
253 }
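
/*
 * Illustrative use (thread name and entry point hypothetical): real-time
 * threads above DEFAULT_PRIORITY are exempted from the preemption tick, so
 * flag the thread before resuming it.
 *
 *   thread_t *t = thread_create("audio", audio_worker, NULL, HIGH_PRIORITY, DEFAULT_STACK_SIZE);
 *   if (t) {
 *       thread_set_real_time(t);
 *       thread_resume(t);
 *   }
 */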
254
255 static bool thread_is_realtime(thread_t *t) {
256 return (t->flags & THREAD_FLAG_REAL_TIME) && t->priority > DEFAULT_PRIORITY;
257 }
258
259 static bool thread_is_idle(thread_t *t) {
260 return !!(t->flags & THREAD_FLAG_IDLE);
261 }
262
263 static bool thread_is_real_time_or_idle(thread_t *t) {
264 return !!(t->flags & (THREAD_FLAG_REAL_TIME | THREAD_FLAG_IDLE));
265 }
266
267 /**
268 * @brief Make a suspended thread executable.
269 *
270 * This function is typically called to start a thread which has just been
271 * created with thread_create().
272 *
273 * @param t Thread to resume
274 *
275 * @return NO_ERROR on success, ERR_NOT_SUSPENDED if thread was not suspended.
276 */
277 status_t thread_resume(thread_t *t) {
278 DEBUG_ASSERT(t->magic == THREAD_MAGIC);
279 DEBUG_ASSERT(t->state != THREAD_DEATH);
280
281 bool resched = false;
282 bool ints_disabled = arch_ints_disabled();
283 THREAD_LOCK(state);
284 if (t->state == THREAD_SUSPENDED) {
285 t->state = THREAD_READY;
286 insert_in_run_queue_head(t);
287 if (!ints_disabled) /* HACK, don't resched into the bootstrap thread before the idle thread is set up */
288 resched = true;
289 }
290
291 wakeup_cpu_for_thread(t);
292
293 THREAD_UNLOCK(state);
294
295 if (resched)
296 thread_yield();
297
298 return NO_ERROR;
299 }
300
301 status_t thread_detach_and_resume(thread_t *t) {
302 status_t err;
303 err = thread_detach(t);
304 if (err < 0)
305 return err;
306 return thread_resume(t);
307 }
308
309 status_t thread_join(thread_t *t, int *retcode, lk_time_t timeout) {
310 DEBUG_ASSERT(t->magic == THREAD_MAGIC);
311
312 THREAD_LOCK(state);
313
314 if (t->flags & THREAD_FLAG_DETACHED) {
315 /* the thread is detached, go ahead and exit */
316 THREAD_UNLOCK(state);
317 return ERR_THREAD_DETACHED;
318 }
319
320 /* wait for the thread to die */
321 if (t->state != THREAD_DEATH) {
322 status_t err = wait_queue_block(&t->retcode_wait_queue, timeout);
323 if (err < 0) {
324 THREAD_UNLOCK(state);
325 return err;
326 }
327 }
328
329 DEBUG_ASSERT(t->magic == THREAD_MAGIC);
330 DEBUG_ASSERT(t->state == THREAD_DEATH);
331 DEBUG_ASSERT(t->blocking_wait_queue == NULL);
332 DEBUG_ASSERT(!list_in_list(&t->queue_node));
333
334 /* save the return code */
335 if (retcode)
336 *retcode = t->retcode;
337
338 /* remove it from the master thread list */
339 list_delete(&t->thread_list_node);
340
341 /* clear the structure's magic */
342 t->magic = 0;
343
344 THREAD_UNLOCK(state);
345
346 /* free its stack and the thread structure itself */
347 if (t->flags & THREAD_FLAG_FREE_STACK && t->stack)
348 free(t->stack);
349
350 if (t->flags & THREAD_FLAG_FREE_STRUCT)
351 free(t);
352
353 return NO_ERROR;
354 }
355
356 status_t thread_detach(thread_t *t) {
357 DEBUG_ASSERT(t->magic == THREAD_MAGIC);
358
359 THREAD_LOCK(state);
360
361 /* if another thread is blocked inside thread_join() on this thread,
362 * wake them up with a specific return code */
363 wait_queue_wake_all(&t->retcode_wait_queue, false, ERR_THREAD_DETACHED);
364
365 /* if it's already dead, then just do what join would have and exit */
366 if (t->state == THREAD_DEATH) {
367 t->flags &= ~THREAD_FLAG_DETACHED; /* makes sure thread_join continues */
368 THREAD_UNLOCK(state);
369 return thread_join(t, NULL, 0);
370 } else {
371 t->flags |= THREAD_FLAG_DETACHED;
372 THREAD_UNLOCK(state);
373 return NO_ERROR;
374 }
375 }
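
/*
 * Sketch of the two lifecycle patterns (names hypothetical): a joined thread
 * is reaped by the thread that calls thread_join(), while a detached thread
 * tears itself down in thread_exit().
 *
 *   // joined: collect the return code, thread_join() frees the thread
 *   thread_t *t = thread_create("calc", calc_fn, NULL, DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
 *   thread_resume(t);
 *   int ret;
 *   thread_join(t, &ret, INFINITE_TIME);
 *
 *   // detached: fire and forget, joining afterwards returns ERR_THREAD_DETACHED
 *   thread_t *d = thread_create("logger", logger_fn, NULL, LOW_PRIORITY, DEFAULT_STACK_SIZE);
 *   thread_detach_and_resume(d);
 */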
376
377 /**
378 * @brief Terminate the current thread
379 *
380 * Current thread exits with the specified return code.
381 *
382 * This function does not return.
383 */
384 void thread_exit(int retcode) {
385 thread_t *current_thread = get_current_thread();
386
387 DEBUG_ASSERT(current_thread->magic == THREAD_MAGIC);
388 DEBUG_ASSERT(current_thread->state == THREAD_RUNNING);
389 DEBUG_ASSERT(!thread_is_idle(current_thread));
390
391 // dprintf("thread_exit: current %p\n", current_thread);
392
393 THREAD_LOCK(state);
394
395 /* enter the dead state */
396 current_thread->state = THREAD_DEATH;
397 current_thread->retcode = retcode;
398
399 /* if we're detached, then do our teardown here */
400 if (current_thread->flags & THREAD_FLAG_DETACHED) {
401 /* remove it from the master thread list */
402 list_delete(&current_thread->thread_list_node);
403
404 /* clear the structure's magic */
405 current_thread->magic = 0;
406
407 /* free its stack and the thread structure itself */
408 if (current_thread->flags & THREAD_FLAG_FREE_STACK && current_thread->stack) {
409 heap_delayed_free(current_thread->stack);
410
411 /* make sure it's not going to get a bounds check performed on the half-freed stack */
412 current_thread->flags &= ~THREAD_FLAG_DEBUG_STACK_BOUNDS_CHECK;
413 }
414
415 if (current_thread->flags & THREAD_FLAG_FREE_STRUCT)
416 heap_delayed_free(current_thread);
417 } else {
418 /* signal if anyone is waiting */
419 wait_queue_wake_all(&current_thread->retcode_wait_queue, false, 0);
420 }
421
422 /* reschedule */
423 thread_resched();
424
425 panic("somehow fell through thread_exit()\n");
426 }
427
428 static void idle_thread_routine(void) {
429 for (;;)
430 arch_idle();
431 }
432
433 static thread_t *get_top_thread(int cpu) {
434 thread_t *newthread;
435 uint32_t local_run_queue_bitmap = run_queue_bitmap;
436
437 while (local_run_queue_bitmap) {
438 /* find the first (remaining) queue with a thread in it */
439 uint next_queue = sizeof(run_queue_bitmap) * 8 - 1 - __builtin_clz(local_run_queue_bitmap);
440
441 list_for_every_entry(&run_queue[next_queue], newthread, thread_t, queue_node) {
442 #if WITH_SMP
443 if (newthread->pinned_cpu < 0 || newthread->pinned_cpu == cpu)
444 #endif
445 {
446 list_delete(&newthread->queue_node);
447
448 if (list_is_empty(&run_queue[next_queue]))
449 run_queue_bitmap &= ~(1<<next_queue);
450
451 return newthread;
452 }
453 }
454
455 local_run_queue_bitmap &= ~(1<<next_queue);
456 }
457 /* no threads to run, select the idle thread for this cpu */
458 return idle_thread(cpu);
459 }
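
/*
 * Note on the bitmap scan above: bit N of run_queue_bitmap is set whenever
 * run_queue[N] is non-empty, so the highest runnable priority falls out of a
 * single count-leading-zeros. A minimal sketch of that computation:
 *
 *   uint32_t bitmap = run_queue_bitmap;      // e.g. 0x90: bits 4 and 7 set
 *   uint top = 31 - __builtin_clz(bitmap);   // -> 7, the highest non-empty queue
 *
 * get_top_thread() additionally walks the selected queue to skip threads
 * pinned to other cpus, falling back to lower-priority queues if needed.
 */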
460
461 /**
462 * @brief Cause another thread to be executed.
463 *
464 * Internal reschedule routine. The current thread must already have been placed in
465 * whatever state and queue it belongs in; this routine simply picks the next thread
466 * and switches to it.
467 *
468 * This is probably not the function you're looking for. See
469 * thread_yield() instead.
470 */
471 void thread_resched(void) {
472 thread_t *oldthread;
473 thread_t *newthread;
474
475 thread_t *current_thread = get_current_thread();
476 uint cpu = arch_curr_cpu_num();
477
478 DEBUG_ASSERT(arch_ints_disabled());
479 DEBUG_ASSERT(spin_lock_held(&thread_lock));
480 DEBUG_ASSERT(current_thread->state != THREAD_RUNNING);
481
482 THREAD_STATS_INC(reschedules);
483
484 newthread = get_top_thread(cpu);
485
486 DEBUG_ASSERT(newthread);
487
488 newthread->state = THREAD_RUNNING;
489
490 oldthread = current_thread;
491
492 if (newthread == oldthread)
493 return;
494
495 /* set up quantum for the new thread if it was consumed */
496 if (newthread->remaining_quantum <= 0) {
497 newthread->remaining_quantum = 5; // XXX make this smarter
498 }
499
500 /* mark the cpu ownership of the threads */
501 thread_set_curr_cpu(oldthread, -1);
502 thread_set_curr_cpu(newthread, cpu);
503
504 #if WITH_SMP
505 if (thread_is_idle(newthread)) {
506 mp_set_cpu_idle(cpu);
507 } else {
508 mp_set_cpu_busy(cpu);
509 }
510
511 if (thread_is_realtime(newthread)) {
512 mp_set_cpu_realtime(cpu);
513 } else {
514 mp_set_cpu_non_realtime(cpu);
515 }
516 #endif
517
518 #if THREAD_STATS
519 THREAD_STATS_INC(context_switches);
520
521 lk_bigtime_t now = current_time_hires();
522 if (thread_is_idle(oldthread)) {
523 thread_stats[cpu].idle_time += now - thread_stats[cpu].last_idle_timestamp;
524 } else {
525 oldthread->stats.total_run_time += now - oldthread->stats.last_run_timestamp;
526 }
527 if (thread_is_idle(newthread)) {
528 thread_stats[cpu].last_idle_timestamp = now;
529 } else {
530 newthread->stats.last_run_timestamp = now;
531 newthread->stats.schedules++;
532 }
533 #endif
534
535 KEVLOG_THREAD_SWITCH(oldthread, newthread);
536
537 #if PLATFORM_HAS_DYNAMIC_TIMER
538 if (thread_is_real_time_or_idle(newthread)) {
539 if (!thread_is_real_time_or_idle(oldthread)) {
540 /* if we're switching from a non real time to a real time, cancel
541 * the preemption timer. */
542 #if DEBUG_THREAD_CONTEXT_SWITCH
543 dprintf(ALWAYS, "arch_context_switch: stop preempt, cpu %d, old %p (%s), new %p (%s)\n",
544 cpu, oldthread, oldthread->name, newthread, newthread->name);
545 #endif
546 timer_cancel(&preempt_timer[cpu]);
547 }
548 } else if (thread_is_real_time_or_idle(oldthread)) {
549 /* if we're switching from a real time (or idle thread) to a regular one,
550 * set up a periodic timer to run our preemption tick. */
551 #if DEBUG_THREAD_CONTEXT_SWITCH
552 dprintf(ALWAYS, "arch_context_switch: start preempt, cpu %d, old %p (%s), new %p (%s)\n",
553 cpu, oldthread, oldthread->name, newthread, newthread->name);
554 #endif
555 timer_set_periodic(&preempt_timer[cpu], 10, thread_timer_tick, NULL);
556 }
557 #endif
558
559 /* set some optional target debug leds */
560 target_set_debug_led(0, !thread_is_idle(newthread));
561
562 /* do the switch */
563 set_current_thread(newthread);
564
565 #if DEBUG_THREAD_CONTEXT_SWITCH
566 dprintf(ALWAYS, "arch_context_switch: cpu %d, old %p (%s, pri %d, flags 0x%x), new %p (%s, pri %d, flags 0x%x)\n",
567 cpu, oldthread, oldthread->name, oldthread->priority,
568 oldthread->flags, newthread, newthread->name,
569 newthread->priority, newthread->flags);
570 #endif
571
572 #if THREAD_STACK_BOUNDS_CHECK
573 /* check that the old thread has not blown its stack just before pushing its context */
574 if (oldthread->flags & THREAD_FLAG_DEBUG_STACK_BOUNDS_CHECK) {
575 STATIC_ASSERT((THREAD_STACK_PADDING_SIZE % sizeof(uint32_t)) == 0);
576 uint32_t *s = (uint32_t *)oldthread->stack;
577 for (size_t i = 0; i < THREAD_STACK_PADDING_SIZE / sizeof(uint32_t); i++) {
578 if (unlikely(s[i] != STACK_DEBUG_WORD)) {
579 /* NOTE: will probably blow the stack harder here, but hopefully enough
580 * state exists to at least get some sort of debugging done.
581 */
582 panic("stack overrun at %p: thread %p (%s), stack %p\n", &s[i],
583 oldthread, oldthread->name, oldthread->stack);
584 }
585 }
586 }
587 #endif
588
589 #ifdef WITH_LIB_UTHREAD
590 uthread_context_switch(oldthread, newthread);
591 #endif
592
593 #if WITH_KERNEL_VM
594 /* see if we need to swap mmu context */
595 if (newthread->aspace != oldthread->aspace) {
596 vmm_context_switch(oldthread->aspace, newthread->aspace);
597 }
598 #endif
599
600 /* do the low level context switch */
601 arch_context_switch(oldthread, newthread);
602 }
603
604 /**
605 * @brief Yield the cpu to another thread
606 *
607 * This function places the current thread at the end of the run queue
608 * and yields the cpu to another waiting thread (if any.)
609 *
610 * This function will return at some later time, possibly immediately if
611 * no other threads are waiting to execute.
612 */
613 void thread_yield(void) {
614 thread_t *current_thread = get_current_thread();
615
616 DEBUG_ASSERT(current_thread->magic == THREAD_MAGIC);
617 DEBUG_ASSERT(current_thread->state == THREAD_RUNNING);
618
619 THREAD_LOCK(state);
620
621 THREAD_STATS_INC(yields);
622
623 /* we are yielding the cpu, so stick ourselves into the tail of the run queue and reschedule */
624 current_thread->state = THREAD_READY;
625 current_thread->remaining_quantum = 0;
626 if (likely(!thread_is_idle(current_thread))) { /* idle thread doesn't go in the run queue */
627 insert_in_run_queue_tail(current_thread);
628 }
629 thread_resched();
630
631 THREAD_UNLOCK(state);
632 }
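
/*
 * Illustrative use (loop body hypothetical): a cpu-bound worker can call
 * thread_yield() between chunks of work so equal-priority threads get a turn
 * without waiting for the preemption tick.
 *
 *   while (more_work_to_do()) {
 *       do_a_chunk_of_work();
 *       thread_yield();
 *   }
 */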
633
634 /**
635 * @brief Briefly yield cpu to another thread
636 *
637 * This function is similar to thread_yield(), except that it will
638 * restart more quickly.
639 *
640 * This function places the current thread at the head of the run
641 * queue and then yields the cpu to another thread.
642 *
643 * Exception: If the time slice for this thread has expired, then
644 * the thread goes to the end of the run queue.
645 *
646 * This function will return at some later time, possibly immediately if
647 * no other threads are waiting to execute.
648 */
649 void thread_preempt(void) {
650 thread_t *current_thread = get_current_thread();
651
652 DEBUG_ASSERT(current_thread->magic == THREAD_MAGIC);
653 DEBUG_ASSERT(current_thread->state == THREAD_RUNNING);
654
655 #if THREAD_STATS
656 if (!thread_is_idle(current_thread))
657 THREAD_STATS_INC(preempts); /* only track when a meaningful preempt happens */
658 #endif
659
660 KEVLOG_THREAD_PREEMPT(current_thread);
661
662 THREAD_LOCK(state);
663
664 /* we are being preempted, so we get to go back into the front of the run queue if we have quantum left */
665 current_thread->state = THREAD_READY;
666 if (likely(!thread_is_idle(current_thread))) { /* idle thread doesn't go in the run queue */
667 if (current_thread->remaining_quantum > 0)
668 insert_in_run_queue_head(current_thread);
669 else
670 insert_in_run_queue_tail(current_thread); /* if we're out of quantum, go to the tail of the queue */
671 }
672 thread_resched();
673
674 THREAD_UNLOCK(state);
675 }
676
677 /**
678 * @brief Suspend thread until woken.
679 *
680 * This function schedules another thread to execute. This function does not
681 * return until the thread is made runnable again by some other module.
682 *
683 * You probably don't want to call this function directly; it's meant to be called
684 * from other modules, such as mutex, which will presumably set the thread's
685 * state to blocked and add it to some queue or another.
686 */
687 void thread_block(void) {
688 __UNUSED thread_t *current_thread = get_current_thread();
689
690 DEBUG_ASSERT(current_thread->magic == THREAD_MAGIC);
691 DEBUG_ASSERT(current_thread->state == THREAD_BLOCKED);
692 DEBUG_ASSERT(spin_lock_held(&thread_lock));
693 DEBUG_ASSERT(!thread_is_idle(current_thread));
694
695 /* we are blocking on something. the blocking code should have already stuck us on a queue */
696 thread_resched();
697 }
698
699 void thread_unblock(thread_t *t, bool resched) {
700 DEBUG_ASSERT(t->magic == THREAD_MAGIC);
701 DEBUG_ASSERT(t->state == THREAD_BLOCKED);
702 DEBUG_ASSERT(spin_lock_held(&thread_lock));
703 DEBUG_ASSERT(!thread_is_idle(t));
704
705 t->state = THREAD_READY;
706 insert_in_run_queue_head(t);
707 wakeup_cpu_for_thread(t);
708
709 if (resched)
710 thread_resched();
711 }
712
713 enum handler_return thread_timer_tick(struct timer *t, lk_time_t now, void *arg) {
714 thread_t *current_thread = get_current_thread();
715
716 if (thread_is_real_time_or_idle(current_thread))
717 return INT_NO_RESCHEDULE;
718
719 current_thread->remaining_quantum--;
720 if (current_thread->remaining_quantum <= 0) {
721 return INT_RESCHEDULE;
722 } else {
723 return INT_NO_RESCHEDULE;
724 }
725 }
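
/*
 * Quantum arithmetic as wired up above (rough figures): thread_resched()
 * grants a fresh quantum of 5 ticks and the preemption timer fires every
 * 10 ms, so a cpu-bound thread runs for roughly 5 * 10 ms = 50 ms before
 * being rotated to the tail of its run queue.
 */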
726
727 /* timer callback to wake up a sleeping thread */
728 static enum handler_return thread_sleep_handler(timer_t *timer, lk_time_t now, void *arg) {
729 thread_t *t = (thread_t *)arg;
730
731 DEBUG_ASSERT(t->magic == THREAD_MAGIC);
732 DEBUG_ASSERT(t->state == THREAD_SLEEPING);
733
734 THREAD_LOCK(state);
735
736 t->state = THREAD_READY;
737 insert_in_run_queue_head(t);
738
739 THREAD_UNLOCK(state);
740
741 return INT_RESCHEDULE;
742 }
743
744 /**
745 * @brief Put thread to sleep; delay specified in ms
746 *
747 * This function puts the current thread to sleep until the specified
748 * delay in ms has expired.
749 *
750 * Note that this function could sleep for longer than the specified delay if
751 * other threads are running. When the timer expires, this thread will
752 * be placed at the head of the run queue.
753 */
754 void thread_sleep(lk_time_t delay) {
755 timer_t timer;
756
757 thread_t *current_thread = get_current_thread();
758
759 DEBUG_ASSERT(current_thread->magic == THREAD_MAGIC);
760 DEBUG_ASSERT(current_thread->state == THREAD_RUNNING);
761 DEBUG_ASSERT(!thread_is_idle(current_thread));
762
763 timer_initialize(&timer);
764
765 THREAD_LOCK(state);
766 timer_set_oneshot(&timer, delay, thread_sleep_handler, (void *)current_thread);
767 current_thread->state = THREAD_SLEEPING;
768 thread_resched();
769 THREAD_UNLOCK(state);
770 }
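
/*
 * Illustrative use (helper name hypothetical): a simple periodic worker built
 * on thread_sleep(). The actual period can be longer than requested, since the
 * thread only becomes READY again when the one-shot timer fires.
 *
 *   static int blink_thread(void *arg) {
 *       for (;;) {
 *           toggle_led();
 *           thread_sleep(500);   // ~500 ms
 *       }
 *       return 0;
 *   }
 */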
771
772 /**
773 * @brief Initialize threading system
774 *
775 * This function is called once, from lk_main()
776 */
777 void thread_init_early(void) {
778 int i;
779
780 DEBUG_ASSERT(arch_curr_cpu_num() == 0);
781
782 /* initialize the run queues */
783 for (i=0; i < NUM_PRIORITIES; i++)
784 list_initialize(&run_queue[i]);
785
786 /* initialize the thread list */
787 list_initialize(&thread_list);
788
789 /* create a thread to cover the current running state */
790 thread_t *t = idle_thread(0);
791 init_thread_struct(t, "bootstrap");
792
793 /* half construct this thread, since we're already running */
794 t->priority = HIGHEST_PRIORITY;
795 t->state = THREAD_RUNNING;
796 t->flags = THREAD_FLAG_DETACHED;
797 thread_set_curr_cpu(t, 0);
798 thread_set_pinned_cpu(t, 0);
799 wait_queue_init(&t->retcode_wait_queue);
800 list_add_head(&thread_list, &t->thread_list_node);
801 set_current_thread(t);
802 }
803
804 /**
805 * @brief Complete thread initialization
806 *
807 * This function is called once at boot time
808 */
809 void thread_init(void) {
810 #if PLATFORM_HAS_DYNAMIC_TIMER
811 for (uint i = 0; i < SMP_MAX_CPUS; i++) {
812 timer_initialize(&preempt_timer[i]);
813 }
814 #endif
815 }
816
817 /**
818 * @brief Change name of current thread
819 */
820 void thread_set_name(const char *name) {
821 thread_t *current_thread = get_current_thread();
822 strlcpy(current_thread->name, name, sizeof(current_thread->name));
823 }
824
825 /**
826 * @brief Change priority of current thread
827 *
828 * See thread_create() for a discussion of priority values.
829 */
830 void thread_set_priority(int priority) {
831 thread_t *current_thread = get_current_thread();
832
833 THREAD_LOCK(state);
834
835 if (priority <= IDLE_PRIORITY)
836 priority = IDLE_PRIORITY + 1;
837 if (priority > HIGHEST_PRIORITY)
838 priority = HIGHEST_PRIORITY;
839 current_thread->priority = priority;
840
841 current_thread->state = THREAD_READY;
842 insert_in_run_queue_head(current_thread);
843 thread_resched();
844
845 THREAD_UNLOCK(state);
846 }
847
848 /**
849 * @brief Become an idle thread
850 *
851 * This function marks the current thread as the idle thread -- the one which
852 * executes when there is nothing else to do. This function does not return.
853 * This function is called once at boot time.
854 */
855 void thread_become_idle(void) {
856 DEBUG_ASSERT(arch_ints_disabled());
857
858 thread_t *t = get_current_thread();
859
860 #if WITH_SMP
861 char name[16];
862 snprintf(name, sizeof(name), "idle %d", arch_curr_cpu_num());
863 thread_set_name(name);
864 #else
865 thread_set_name("idle");
866 #endif
867
868 /* mark ourself as idle */
869 t->priority = IDLE_PRIORITY;
870 t->flags |= THREAD_FLAG_IDLE;
871 thread_set_pinned_cpu(t, arch_curr_cpu_num());
872
873 mp_set_curr_cpu_active(true);
874 mp_set_cpu_idle(arch_curr_cpu_num());
875
876 /* enable interrupts and start the scheduler */
877 arch_enable_ints();
878 thread_yield();
879
880 idle_thread_routine();
881 }
882
883 /* create an idle thread for the cpu we're on, and start scheduling */
884
885 void thread_secondary_cpu_init_early(void) {
886 DEBUG_ASSERT(arch_ints_disabled());
887
888 /* construct an idle thread to cover our cpu */
889 uint cpu = arch_curr_cpu_num();
890 thread_t *t = idle_thread(cpu);
891
892 char name[16];
893 snprintf(name, sizeof(name), "idle %u", cpu);
894 init_thread_struct(t, name);
895 thread_set_pinned_cpu(t, cpu);
896
897 /* half construct this thread, since we're already running */
898 t->priority = HIGHEST_PRIORITY;
899 t->state = THREAD_RUNNING;
900 t->flags = THREAD_FLAG_DETACHED | THREAD_FLAG_IDLE;
901 thread_set_curr_cpu(t, cpu);
902 thread_set_pinned_cpu(t, cpu);
903 wait_queue_init(&t->retcode_wait_queue);
904
905 THREAD_LOCK(state);
906
907 list_add_head(&thread_list, &t->thread_list_node);
908 set_current_thread(t);
909
910 THREAD_UNLOCK(state);
911 }
912
913 void thread_secondary_cpu_entry(void) {
914 uint cpu = arch_curr_cpu_num();
915 thread_t *t = get_current_thread();
916 t->priority = IDLE_PRIORITY;
917
918 mp_set_curr_cpu_active(true);
919 mp_set_cpu_idle(cpu);
920
921 /* enable interrupts and start the scheduler on this cpu */
922 arch_enable_ints();
923 thread_yield();
924
925 idle_thread_routine();
926 }
927
928 static const char *thread_state_to_str(enum thread_state state) {
929 switch (state) {
930 case THREAD_SUSPENDED:
931 return "susp";
932 case THREAD_READY:
933 return "rdy";
934 case THREAD_RUNNING:
935 return "run";
936 case THREAD_BLOCKED:
937 return "blok";
938 case THREAD_SLEEPING:
939 return "slep";
940 case THREAD_DEATH:
941 return "deth";
942 default:
943 return "unkn";
944 }
945 }
946
947 static size_t thread_stack_used(thread_t *t) {
948 #ifdef THREAD_STACK_HIGHWATER
949 uint8_t *stack_base;
950 size_t stack_size;
951 size_t i;
952
953 stack_base = t->stack;
954 stack_size = t->stack_size;
955
956 for (i = 0; i < stack_size; i++) {
957 if (stack_base[i] != STACK_DEBUG_BYTE)
958 break;
959 }
960 return stack_size - i;
961 #else
962 return 0;
963 #endif
964 }
965 /**
966 * @brief Dump debugging info about the specified thread.
967 */
968 void dump_thread(thread_t *t) {
969 dprintf(INFO, "dump_thread: t %p (%s)\n", t, t->name);
970 #if WITH_SMP
971 dprintf(INFO, "\tstate %s, curr_cpu %d, pinned_cpu %d, priority %d, remaining quantum %d\n",
972 thread_state_to_str(t->state), t->curr_cpu, t->pinned_cpu, t->priority, t->remaining_quantum);
973 #else
974 dprintf(INFO, "\tstate %s, priority %d, remaining quantum %d\n",
975 thread_state_to_str(t->state), t->priority, t->remaining_quantum);
976 #endif
977 #ifdef THREAD_STACK_HIGHWATER
978 dprintf(INFO, "\tstack %p, stack_size %zd, stack_used %zd\n",
979 t->stack, t->stack_size, thread_stack_used(t));
980 #else
981 dprintf(INFO, "\tstack %p, stack_size %zd\n", t->stack, t->stack_size);
982 #endif
983 dprintf(INFO, "\tentry %p, arg %p, flags 0x%x\n", t->entry, t->arg, t->flags);
984 dprintf(INFO, "\twait queue %p, wait queue ret %d\n", t->blocking_wait_queue, t->wait_queue_block_ret);
985 #if WITH_KERNEL_VM
986 dprintf(INFO, "\taspace %p\n", t->aspace);
987 #endif
988 #if (MAX_TLS_ENTRY > 0)
989 dprintf(INFO, "\ttls:");
990 int i;
991 for (i=0; i < MAX_TLS_ENTRY; i++) {
992 dprintf(INFO, " 0x%lx", t->tls[i]);
993 }
994 dprintf(INFO, "\n");
995 #endif
996 arch_dump_thread(t);
997 }
998
999 void dump_all_threads_unlocked(void) {
1000 thread_t *t;
1001 list_for_every_entry(&thread_list, t, thread_t, thread_list_node) {
1002 if (t->magic != THREAD_MAGIC) {
1003 dprintf(INFO, "bad magic on thread struct %p, aborting.\n", t);
1004 hexdump(t, sizeof(thread_t));
1005 break;
1006 }
1007 dump_thread(t);
1008 }
1009 }
1010
1011 /**
1012 * @brief Dump debugging info about all threads
1013 */
1014 void dump_all_threads(void) {
1015 THREAD_LOCK(state);
1016 dump_all_threads_unlocked();
1017 THREAD_UNLOCK(state);
1018 }
1019
1020 #if THREAD_STATS
1021 void dump_threads_stats(void) {
1022 thread_t *t;
1023
1024 THREAD_LOCK(state);
1025 list_for_every_entry (&thread_list, t, thread_t, thread_list_node) {
1026 if (t->magic != THREAD_MAGIC) {
1027 dprintf(INFO, "bad magic on thread struct %p, aborting.\n", t);
1028 hexdump(t, sizeof(thread_t));
1029 break;
1030 }
1031 if (thread_is_idle(t)) {
1032 continue;
1033 }
1034 // thread specific stats
1035 dprintf(INFO, "\t(%s):\n", t->name);
1036 dprintf(INFO, "\t\tScheduled: %ld\n", t->stats.schedules);
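/* express run time in hundredths of a percent of total uptime */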
1037 uint percent = (t->stats.total_run_time * 10000) / current_time_hires();
1038 dprintf(INFO, "\t\tTotal run time: %lld, %u.%02u%%\n", t->stats.total_run_time,
1039 percent / 100, percent % 100);
1040 dprintf(INFO, "\t\tLast time run: %lld\n", t->stats.last_run_timestamp);
1041 }
1042 THREAD_UNLOCK(state);
1043 }
1044 #endif
1045
1046 /** @} */
1047
1048
1049 /**
1050 * @defgroup wait Wait Queue
1051 * @{
1052 */
1053 void wait_queue_init(wait_queue_t *wait) {
1054 *wait = (wait_queue_t)WAIT_QUEUE_INITIAL_VALUE(*wait);
1055 }
1056
1057 static enum handler_return wait_queue_timeout_handler(timer_t *timer, lk_time_t now, void *arg) {
1058 thread_t *thread = (thread_t *)arg;
1059
1060 DEBUG_ASSERT(thread->magic == THREAD_MAGIC);
1061
1062 spin_lock(&thread_lock);
1063
1064 enum handler_return ret = INT_NO_RESCHEDULE;
1065 if (thread_unblock_from_wait_queue(thread, ERR_TIMED_OUT) >= NO_ERROR) {
1066 ret = INT_RESCHEDULE;
1067 }
1068
1069 spin_unlock(&thread_lock);
1070
1071 return ret;
1072 }
1073
1074 /**
1075 * @brief Block until a wait queue is notified.
1076 *
1077 * This function puts the current thread at the end of a wait
1078 * queue and then blocks until some other thread wakes the queue
1079 * up again.
1080 *
1081 * @param wait The wait queue to enter
1082 * @param timeout The maximum time, in ms, to wait
1083 *
1084 * If the timeout is zero, this function returns immediately with
1085 * ERR_TIMED_OUT. If the timeout is INFINITE_TIME, this function
1086 * waits indefinitely. Otherwise, this function returns with
1087 * ERR_TIMED_OUT at the end of the timeout period.
1088 *
1089 * @return ERR_TIMED_OUT on timeout, else returns the return
1090 * value specified when the queue was woken by wait_queue_wake_one().
1091 */
1092 status_t wait_queue_block(wait_queue_t *wait, lk_time_t timeout) {
1093 timer_t timer;
1094
1095 thread_t *current_thread = get_current_thread();
1096
1097 DEBUG_ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
1098 DEBUG_ASSERT(current_thread->state == THREAD_RUNNING);
1099 DEBUG_ASSERT(arch_ints_disabled());
1100 DEBUG_ASSERT(spin_lock_held(&thread_lock));
1101
1102 if (timeout == 0)
1103 return ERR_TIMED_OUT;
1104
1105 list_add_tail(&wait->list, &current_thread->queue_node);
1106 wait->count++;
1107 current_thread->state = THREAD_BLOCKED;
1108 current_thread->blocking_wait_queue = wait;
1109 current_thread->wait_queue_block_ret = NO_ERROR;
1110
1111 /* if the timeout is not infinite, set a callback to yank us out of the queue */
1112 if (timeout != INFINITE_TIME) {
1113 timer_initialize(&timer);
1114 timer_set_oneshot(&timer, timeout, wait_queue_timeout_handler, (void *)current_thread);
1115 }
1116
1117 thread_resched();
1118
1119 /* we don't really know whether the timer fired, so to be safe, try to cancel it */
1120 if (timeout != INFINITE_TIME) {
1121 timer_cancel(&timer);
1122 }
1123
1124 return current_thread->wait_queue_block_ret;
1125 }
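
/*
 * Sketch of a minimal event built on this wait queue API (all names here are
 * hypothetical). Waiters must hold the thread lock around wait_queue_block();
 * the signaler wakes everyone with the status the waiters should observe.
 *
 *   static wait_queue_t event_wq = WAIT_QUEUE_INITIAL_VALUE(event_wq);
 *   static bool event_signaled;
 *
 *   status_t event_wait(lk_time_t timeout) {
 *       THREAD_LOCK(state);
 *       status_t err = event_signaled ? NO_ERROR
 *                                     : wait_queue_block(&event_wq, timeout);
 *       THREAD_UNLOCK(state);
 *       return err;
 *   }
 *
 *   void event_signal(void) {
 *       THREAD_LOCK(state);
 *       event_signaled = true;
 *       wait_queue_wake_all(&event_wq, false, NO_ERROR);
 *       THREAD_UNLOCK(state);
 *   }
 */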
1126
1127 /**
1128 * @brief Wake up one thread sleeping on a wait queue
1129 *
1130 * This function removes one thread (if any) from the head of the wait queue and
1131 * makes it executable. The new thread will be placed at the head of the
1132 * run queue.
1133 *
1134 * @param wait The wait queue to wake
1135 * @param reschedule If true, the newly-woken thread will run immediately.
1136 * @param wait_queue_error The return value which the new thread will receive
1137 * from wait_queue_block().
1138 *
1139 * @return The number of threads woken (zero or one)
1140 */
1141 int wait_queue_wake_one(wait_queue_t *wait, bool reschedule, status_t wait_queue_error) {
1142 thread_t *t;
1143 int ret = 0;
1144
1145 thread_t *current_thread = get_current_thread();
1146
1147 DEBUG_ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
1148 DEBUG_ASSERT(arch_ints_disabled());
1149 DEBUG_ASSERT(spin_lock_held(&thread_lock));
1150
1151 t = list_remove_head_type(&wait->list, thread_t, queue_node);
1152 if (t) {
1153 wait->count--;
1154 DEBUG_ASSERT(t->state == THREAD_BLOCKED);
1155 t->state = THREAD_READY;
1156 t->wait_queue_block_ret = wait_queue_error;
1157 t->blocking_wait_queue = NULL;
1158
1159 /* if we're instructed to reschedule, stick the current thread on the head
1160 * of the run queue first, so that the newly awakened thread gets a chance to run
1161 * before the current one, but the current one doesn't get unnecessarily punished.
1162 */
1163 if (reschedule) {
1164 current_thread->state = THREAD_READY;
1165 insert_in_run_queue_head(current_thread);
1166 }
1167 insert_in_run_queue_head(t);
1168 wakeup_cpu_for_thread(t);
1169 if (reschedule) {
1170 thread_resched();
1171 }
1172 ret = 1;
1173
1174 }
1175
1176 return ret;
1177 }
1178
1179
1180 /**
1181 * @brief Wake all threads sleeping on a wait queue
1182 *
1183 * This function removes all threads (if any) from the wait queue and
1184 * makes them executable. The new threads will be placed at the head of the
1185 * run queue.
1186 *
1187 * @param wait The wait queue to wake
1188 * @param reschedule If true, the newly-woken threads will run immediately.
1189 * @param wait_queue_error The return value which the new thread will receive
1190 * from wait_queue_block().
1191 *
1192 * @return The number of threads woken
1193 */
1194 int wait_queue_wake_all(wait_queue_t *wait, bool reschedule, status_t wait_queue_error) {
1195 thread_t *t;
1196 int ret = 0;
1197 uint32_t cpu_mask = 0;
1198
1199 thread_t *current_thread = get_current_thread();
1200
1201 DEBUG_ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
1202 DEBUG_ASSERT(arch_ints_disabled());
1203 DEBUG_ASSERT(spin_lock_held(&thread_lock));
1204
1205 if (reschedule && wait->count > 0) {
1206 /* if we're instructed to reschedule, stick the current thread on the head
1207 * of the run queue first, so that the newly awakened threads get a chance to run
1208 * before the current one, but the current one doesn't get unnecessarily punished.
1209 */
1210 current_thread->state = THREAD_READY;
1211 insert_in_run_queue_head(current_thread);
1212 }
1213
1214 /* pop all the threads off the wait queue into the run queue */
1215 while ((t = list_remove_head_type(&wait->list, thread_t, queue_node))) {
1216 wait->count--;
1217 DEBUG_ASSERT(t->state == THREAD_BLOCKED);
1218 t->state = THREAD_READY;
1219 t->wait_queue_block_ret = wait_queue_error;
1220 t->blocking_wait_queue = NULL;
1221 int pinned_cpu = thread_pinned_cpu(t);
1222 if (pinned_cpu < 0) {
1223 /* assumes MP_CPU_ALL_BUT_LOCAL is defined as all bits on */
1224 cpu_mask = MP_CPU_ALL_BUT_LOCAL;
1225 } else {
1226 cpu_mask |= (1U << pinned_cpu);
1227 }
1228 insert_in_run_queue_head(t);
1229 ret++;
1230 }
1231
1232 DEBUG_ASSERT(wait->count == 0);
1233
1234 if (ret > 0) {
1235 mp_reschedule(cpu_mask, 0);
1236 if (reschedule) {
1237 thread_resched();
1238 }
1239 }
1240
1241 return ret;
1242 }
1243
1244 /**
1245 * @brief Free all resources allocated in wait_queue_init()
1246 *
1247 * If any threads were waiting on this queue, they are all woken.
1248 */
1249 void wait_queue_destroy(wait_queue_t *wait, bool reschedule) {
1250 DEBUG_ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
1251 DEBUG_ASSERT(arch_ints_disabled());
1252 DEBUG_ASSERT(spin_lock_held(&thread_lock));
1253
1254 wait_queue_wake_all(wait, reschedule, ERR_OBJECT_DESTROYED);
1255 wait->magic = 0;
1256 }
1257
1258 /**
1259 * @brief Wake a specific thread in a wait queue
1260 *
1261 * This function extracts a specific thread from a wait queue, wakes it, and
1262 * puts it at the head of the run queue.
1263 *
1264 * @param t The thread to wake
1265 * @param wait_queue_error The return value which the new thread will receive
1266 * from wait_queue_block().
1267 *
1268 * @return ERR_NOT_BLOCKED if thread was not in any wait queue.
1269 */
1270 status_t thread_unblock_from_wait_queue(thread_t *t, status_t wait_queue_error) {
1271 DEBUG_ASSERT(t->magic == THREAD_MAGIC);
1272 DEBUG_ASSERT(arch_ints_disabled());
1273 DEBUG_ASSERT(spin_lock_held(&thread_lock));
1274
1275 if (t->state != THREAD_BLOCKED)
1276 return ERR_NOT_BLOCKED;
1277
1278 DEBUG_ASSERT(t->blocking_wait_queue != NULL);
1279 DEBUG_ASSERT(t->blocking_wait_queue->magic == WAIT_QUEUE_MAGIC);
1280 DEBUG_ASSERT(list_in_list(&t->queue_node));
1281
1282 list_delete(&t->queue_node);
1283 t->blocking_wait_queue->count--;
1284 t->blocking_wait_queue = NULL;
1285 t->state = THREAD_READY;
1286 t->wait_queue_block_ret = wait_queue_error;
1287 insert_in_run_queue_head(t);
1288 wakeup_cpu_for_thread(t);
1289
1290 return NO_ERROR;
1291 }
1292