1 /*
2  * Copyright (c) 2018 Intel Corporation
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 #include <zephyr/kernel.h>
7 #include <ksched.h>
8 #include <zephyr/spinlock.h>
9 #include <wait_q.h>
10 #include <kthread.h>
11 #include <priority_q.h>
12 #include <kswap.h>
13 #include <ipi.h>
14 #include <kernel_arch_func.h>
15 #include <zephyr/internal/syscall_handler.h>
16 #include <zephyr/drivers/timer/system_timer.h>
17 #include <stdbool.h>
18 #include <kernel_internal.h>
19 #include <zephyr/logging/log.h>
20 #include <zephyr/sys/atomic.h>
21 #include <zephyr/sys/math_extras.h>
22 #include <zephyr/timing/timing.h>
23 #include <zephyr/sys/util.h>
24 
25 LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);
26 
27 #if defined(CONFIG_SWAP_NONATOMIC) && defined(CONFIG_TIMESLICING)
28 extern struct k_thread *pending_current;
29 #endif
30 
31 struct k_spinlock _sched_spinlock;
32 
33 /* Storage to "complete" the context switch from an invalid/incomplete thread
34  * context (ex: exiting an ISR that aborted _current)
35  */
36 __incoherent struct k_thread _thread_dummy;
37 
38 static ALWAYS_INLINE void update_cache(int preempt_ok);
39 static ALWAYS_INLINE void halt_thread(struct k_thread *thread, uint8_t new_state);
40 static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q);
41 
42 
43 BUILD_ASSERT(CONFIG_NUM_COOP_PRIORITIES >= CONFIG_NUM_METAIRQ_PRIORITIES,
44 	     "You need to provide at least as many CONFIG_NUM_COOP_PRIORITIES as "
45 	     "CONFIG_NUM_METAIRQ_PRIORITIES as Meta IRQs are just a special class of cooperative "
46 	     "threads.");
47 
static ALWAYS_INLINE void *thread_runq(struct k_thread *thread)
49 {
50 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
51 	int cpu, m = thread->base.cpu_mask;
52 
53 	/* Edge case: it's legal per the API to "make runnable" a
54 	 * thread with all CPUs masked off (i.e. one that isn't
55 	 * actually runnable!).  Sort of a wart in the API and maybe
56 	 * we should address this in docs/assertions instead to avoid
57 	 * the extra test.
58 	 */
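	/* Pick the lowest-numbered CPU allowed by the mask; with
	 * pin-only scheduling each CPU maintains its own run queue.
	 */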
59 	cpu = m == 0 ? 0 : u32_count_trailing_zeros(m);
60 
61 	return &_kernel.cpus[cpu].ready_q.runq;
62 #else
63 	ARG_UNUSED(thread);
64 	return &_kernel.ready_q.runq;
65 #endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
66 }
67 
static ALWAYS_INLINE void *curr_cpu_runq(void)
69 {
70 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
71 	return &arch_curr_cpu()->ready_q.runq;
72 #else
73 	return &_kernel.ready_q.runq;
74 #endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
75 }
76 
static ALWAYS_INLINE void runq_add(struct k_thread *thread)
78 {
79 	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
80 
81 	_priq_run_add(thread_runq(thread), thread);
82 }
83 
static ALWAYS_INLINE void runq_remove(struct k_thread *thread)
85 {
86 	__ASSERT_NO_MSG(!z_is_idle_thread_object(thread));
87 
88 	_priq_run_remove(thread_runq(thread), thread);
89 }
90 
static ALWAYS_INLINE void runq_yield(void)
92 {
93 	_priq_run_yield(curr_cpu_runq());
94 }
95 
static ALWAYS_INLINE struct k_thread *runq_best(void)
97 {
98 	return _priq_run_best(curr_cpu_runq());
99 }
100 
101 /* _current is never in the run queue until context switch on
102  * SMP configurations, see z_requeue_current()
103  */
static inline bool should_queue_thread(struct k_thread *thread)
105 {
106 	return !IS_ENABLED(CONFIG_SMP) || (thread != _current);
107 }
108 
static ALWAYS_INLINE void queue_thread(struct k_thread *thread)
110 {
111 	z_mark_thread_as_queued(thread);
112 	if (should_queue_thread(thread)) {
113 		runq_add(thread);
114 	}
115 #ifdef CONFIG_SMP
116 	if (thread == _current) {
		/* Adding _current to the end of the queue means "yield" */
118 		_current_cpu->swap_ok = true;
119 	}
120 #endif /* CONFIG_SMP */
121 }
122 
static ALWAYS_INLINE void dequeue_thread(struct k_thread *thread)
124 {
125 	z_mark_thread_as_not_queued(thread);
126 	if (should_queue_thread(thread)) {
127 		runq_remove(thread);
128 	}
129 }
130 
131 /* Called out of z_swap() when CONFIG_SMP.  The current thread can
132  * never live in the run queue until we are inexorably on the context
133  * switch path on SMP, otherwise there is a deadlock condition where a
134  * set of CPUs pick a cycle of threads to run and wait for them all to
135  * context switch forever.
136  */
void z_requeue_current(struct k_thread *thread)
138 {
139 	if (z_is_thread_queued(thread)) {
140 		runq_add(thread);
141 	}
142 	signal_pending_ipi();
143 }
144 
145 /* Return true if the thread is aborting, else false */
static inline bool is_aborting(struct k_thread *thread)
147 {
148 	return (thread->base.thread_state & _THREAD_ABORTING) != 0U;
149 }
150 
151 /* Return true if the thread is aborting or suspending, else false */
static inline bool is_halting(struct k_thread *thread)
153 {
154 	return (thread->base.thread_state &
155 		(_THREAD_ABORTING | _THREAD_SUSPENDING)) != 0U;
156 }
157 
158 /* Clear the halting bits (_THREAD_ABORTING and _THREAD_SUSPENDING) */
static inline void clear_halting(struct k_thread *thread)
160 {
161 	if (IS_ENABLED(CONFIG_SMP) && (CONFIG_MP_MAX_NUM_CPUS > 1)) {
162 		barrier_dmem_fence_full(); /* Other cpus spin on this locklessly! */
163 		thread->base.thread_state &= ~(_THREAD_ABORTING | _THREAD_SUSPENDING);
164 	}
165 }
166 
static ALWAYS_INLINE struct k_thread *next_up(void)
168 {
169 #ifdef CONFIG_SMP
170 	if (is_halting(_current)) {
171 		halt_thread(_current, is_aborting(_current) ?
172 				      _THREAD_DEAD : _THREAD_SUSPENDED);
173 	}
174 #endif /* CONFIG_SMP */
175 
176 	struct k_thread *thread = runq_best();
177 
178 #if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) &&                                                         \
179 	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
180 	/* MetaIRQs must always attempt to return back to a
181 	 * cooperative thread they preempted and not whatever happens
182 	 * to be highest priority now. The cooperative thread was
183 	 * promised it wouldn't be preempted (by non-metairq threads)!
184 	 */
185 	struct k_thread *mirqp = _current_cpu->metairq_preempted;
186 
187 	if (mirqp != NULL && (thread == NULL || !thread_is_metairq(thread))) {
188 		if (!z_is_thread_prevented_from_running(mirqp)) {
189 			thread = mirqp;
190 		} else {
191 			_current_cpu->metairq_preempted = NULL;
192 		}
193 	}
194 #endif
195 /* CONFIG_NUM_METAIRQ_PRIORITIES > 0 &&
196  * CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES
197  */
198 
199 #ifndef CONFIG_SMP
200 	/* In uniprocessor mode, we can leave the current thread in
201 	 * the queue (actually we have to, otherwise the assembly
202 	 * context switch code for all architectures would be
203 	 * responsible for putting it back in z_swap and ISR return!),
204 	 * which makes this choice simple.
205 	 */
206 	return (thread != NULL) ? thread : _current_cpu->idle_thread;
207 #else
208 	/* Under SMP, the "cache" mechanism for selecting the next
209 	 * thread doesn't work, so we have more work to do to test
210 	 * _current against the best choice from the queue.  Here, the
211 	 * thread selected above represents "the best thread that is
212 	 * not current".
213 	 *
214 	 * Subtle note on "queued": in SMP mode, _current does not
215 	 * live in the queue, so this isn't exactly the same thing as
216 	 * "ready", it means "is _current already added back to the
217 	 * queue such that we don't want to re-add it".
218 	 */
219 	bool queued = z_is_thread_queued(_current);
220 	bool active = !z_is_thread_prevented_from_running(_current);
221 
222 	if (thread == NULL) {
223 		thread = _current_cpu->idle_thread;
224 	}
225 
226 	if (active) {
227 		int32_t cmp = z_sched_prio_cmp(_current, thread);
228 
229 		/* Ties only switch if state says we yielded */
230 		if ((cmp > 0) || ((cmp == 0) && !_current_cpu->swap_ok)) {
231 			thread = _current;
232 		}
233 
234 		if (!should_preempt(thread, _current_cpu->swap_ok)) {
235 			thread = _current;
236 		}
237 	}
238 
239 	/* Put _current back into the queue */
240 	if ((thread != _current) && active &&
241 		!z_is_idle_thread_object(_current) && !queued) {
242 		queue_thread(_current);
243 	}
244 
245 	/* Take the new _current out of the queue */
246 	if (z_is_thread_queued(thread)) {
247 		dequeue_thread(thread);
248 	}
249 
250 	_current_cpu->swap_ok = false;
251 	return thread;
252 #endif /* CONFIG_SMP */
253 }
254 
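/* Requeue a thread at the tail of its priority level; expects
 * _sched_spinlock to be held.  z_move_thread_to_end_of_prio_q() below
 * is the locking wrapper.
 */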
void move_thread_to_end_of_prio_q(struct k_thread *thread)
256 {
257 	if (z_is_thread_queued(thread)) {
258 		dequeue_thread(thread);
259 	}
260 	queue_thread(thread);
261 	update_cache(thread == _current);
262 }
263 
264 /* Track cooperative threads preempted by metairqs so we can return to
265  * them specifically.  Called at the moment a new thread has been
266  * selected to run.
267  */
static void update_metairq_preempt(struct k_thread *thread)
269 {
270 #if (CONFIG_NUM_METAIRQ_PRIORITIES > 0) &&                                                         \
271 	(CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES)
272 	if (thread_is_metairq(thread) && !thread_is_metairq(_current) &&
273 	    !thread_is_preemptible(_current)) {
274 		/* Record new preemption */
275 		_current_cpu->metairq_preempted = _current;
276 	} else if (!thread_is_metairq(thread)) {
277 		/* Returning from existing preemption */
278 		_current_cpu->metairq_preempted = NULL;
279 	}
280 #else
281 	ARG_UNUSED(thread);
282 #endif
283 /* CONFIG_NUM_METAIRQ_PRIORITIES > 0 &&
284  * CONFIG_NUM_COOP_PRIORITIES > CONFIG_NUM_METAIRQ_PRIORITIES
285  */
286 }
287 
static ALWAYS_INLINE void update_cache(int preempt_ok)
289 {
290 #ifndef CONFIG_SMP
291 	struct k_thread *thread = next_up();
292 
293 	if (should_preempt(thread, preempt_ok)) {
294 #ifdef CONFIG_TIMESLICING
295 		if (thread != _current) {
296 			z_reset_time_slice(thread);
297 		}
298 #endif /* CONFIG_TIMESLICING */
299 		update_metairq_preempt(thread);
300 		_kernel.ready_q.cache = thread;
301 	} else {
302 		_kernel.ready_q.cache = _current;
303 	}
304 
305 #else
306 	/* The way this works is that the CPU record keeps its
307 	 * "cooperative swapping is OK" flag until the next reschedule
308 	 * call or context switch.  It doesn't need to be tracked per
309 	 * thread because if the thread gets preempted for whatever
310 	 * reason the scheduler will make the same decision anyway.
311 	 */
312 	_current_cpu->swap_ok = preempt_ok;
313 #endif /* CONFIG_SMP */
314 }
315 
static struct _cpu *thread_active_elsewhere(struct k_thread *thread)
317 {
318 	/* Returns pointer to _cpu if the thread is currently running on
319 	 * another CPU. There are more scalable designs to answer this
320 	 * question in constant time, but this is fine for now.
321 	 */
322 #ifdef CONFIG_SMP
323 	int currcpu = _current_cpu->id;
324 
325 	unsigned int num_cpus = arch_num_cpus();
326 
327 	for (int i = 0; i < num_cpus; i++) {
328 		if ((i != currcpu) &&
329 		    (_kernel.cpus[i].current == thread)) {
330 			return &_kernel.cpus[i];
331 		}
332 	}
333 #endif /* CONFIG_SMP */
334 	ARG_UNUSED(thread);
335 	return NULL;
336 }
337 
static void ready_thread(struct k_thread *thread)
339 {
340 #ifdef CONFIG_KERNEL_COHERENCE
341 	__ASSERT_NO_MSG(arch_mem_coherent(thread));
342 #endif /* CONFIG_KERNEL_COHERENCE */
343 
	/* If the thread is already queued, do not try to add it to the
	 * run queue again
346 	 */
347 	if (!z_is_thread_queued(thread) && z_is_thread_ready(thread)) {
348 		SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_ready, thread);
349 
350 		queue_thread(thread);
351 		update_cache(0);
352 
353 		flag_ipi(ipi_mask_create(thread));
354 	}
355 }
356 
void z_ready_thread(struct k_thread *thread)
358 {
359 	K_SPINLOCK(&_sched_spinlock) {
360 		if (thread_active_elsewhere(thread) == NULL) {
361 			ready_thread(thread);
362 		}
363 	}
364 }
365 
void z_move_thread_to_end_of_prio_q(struct k_thread *thread)
367 {
368 	K_SPINLOCK(&_sched_spinlock) {
369 		move_thread_to_end_of_prio_q(thread);
370 	}
371 }
372 
373 /* Spins in ISR context, waiting for a thread known to be running on
374  * another CPU to catch the IPI we sent and halt.  Note that we check
375  * for ourselves being asynchronously halted first to prevent simple
376  * deadlocks (but not complex ones involving cycles of 3+ threads!).
377  * Acts to release the provided lock before returning.
378  */
static void thread_halt_spin(struct k_thread *thread, k_spinlock_key_t key)
380 {
381 	if (is_halting(_current)) {
382 		halt_thread(_current,
383 			    is_aborting(_current) ? _THREAD_DEAD : _THREAD_SUSPENDED);
384 	}
385 	k_spin_unlock(&_sched_spinlock, key);
386 	while (is_halting(thread)) {
387 		unsigned int k = arch_irq_lock();
388 
389 		arch_spin_relax(); /* Requires interrupts be masked */
390 		arch_irq_unlock(k);
391 	}
392 }
393 
/* Shared handler for k_thread_{suspend,abort}().  Called with the
 * scheduler lock held and the caller's spinlock key passed in.  The
 * key may be released and reacquired internally, and it will have been
 * released before any eventual return (aborting _current will not
 * return, obviously), which may happen after a context switch.
 */
static ALWAYS_INLINE void z_thread_halt(struct k_thread *thread, k_spinlock_key_t key,
401 					bool terminate)
402 {
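	/* Choose the wait queue the caller will pend on: aborts are
	 * waited out on join_queue, suspends (SMP only) on halt_queue.
	 */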
403 	_wait_q_t *wq = &thread->join_queue;
404 #ifdef CONFIG_SMP
405 	wq = terminate ? wq : &thread->halt_queue;
406 #endif
407 
	/* If the target is a thread running on another CPU, flag it and
	 * poke that CPU (we might spin to wait, so a true synchronous
	 * IPI is needed here, not a deferred one); the thread will halt
	 * itself from the IPI handler.  Otherwise it is not scheduled
	 * anywhere, so we can clean it up directly.
	 */
414 
415 	struct _cpu *cpu = thread_active_elsewhere(thread);
416 
417 	if (cpu != NULL) {
418 		thread->base.thread_state |= (terminate ? _THREAD_ABORTING
419 					      : _THREAD_SUSPENDING);
420 #if defined(CONFIG_SMP) && defined(CONFIG_SCHED_IPI_SUPPORTED)
421 #ifdef CONFIG_ARCH_HAS_DIRECTED_IPIS
422 		arch_sched_directed_ipi(IPI_CPU_MASK(cpu->id));
423 #else
424 		arch_sched_broadcast_ipi();
425 #endif
426 #endif
427 		if (arch_is_in_isr()) {
428 			thread_halt_spin(thread, key);
429 		} else  {
430 			add_to_waitq_locked(_current, wq);
431 			z_swap(&_sched_spinlock, key);
432 		}
433 	} else {
434 		halt_thread(thread, terminate ? _THREAD_DEAD : _THREAD_SUSPENDED);
435 		if ((thread == _current) && !arch_is_in_isr()) {
436 			if (z_is_thread_essential(thread)) {
437 				k_spin_unlock(&_sched_spinlock, key);
438 				k_panic();
439 				key = k_spin_lock(&_sched_spinlock);
440 			}
441 			z_swap(&_sched_spinlock, key);
442 			__ASSERT(!terminate, "aborted _current back from dead");
443 		} else {
444 			k_spin_unlock(&_sched_spinlock, key);
445 		}
446 	}
447 	/* NOTE: the scheduler lock has been released.  Don't put
448 	 * logic here, it's likely to be racy/deadlocky even if you
449 	 * re-take the lock!
450 	 */
451 }
452 
453 
void z_impl_k_thread_suspend(k_tid_t thread)
455 {
456 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, suspend, thread);
457 
458 	/* Special case "suspend the current thread" as it doesn't
459 	 * need the async complexity below.
460 	 */
461 	if (!IS_ENABLED(CONFIG_SMP) && (thread == _current) && !arch_is_in_isr()) {
462 		k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
463 
464 		z_mark_thread_as_suspended(thread);
465 		dequeue_thread(thread);
466 		update_cache(1);
467 		z_swap(&_sched_spinlock, key);
468 		return;
469 	}
470 
471 	k_spinlock_key_t  key = k_spin_lock(&_sched_spinlock);
472 
473 	if (unlikely(z_is_thread_suspended(thread))) {
474 
475 		/* The target thread is already suspended. Nothing to do. */
476 
477 		k_spin_unlock(&_sched_spinlock, key);
478 		return;
479 	}
480 
481 	z_thread_halt(thread, key, false);
482 
483 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, suspend, thread);
484 }
485 
486 #ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_suspend(k_tid_t thread)
488 {
489 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
490 	z_impl_k_thread_suspend(thread);
491 }
492 #include <zephyr/syscalls/k_thread_suspend_mrsh.c>
493 #endif /* CONFIG_USERSPACE */
494 
void z_impl_k_thread_resume(k_tid_t thread)
496 {
497 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, resume, thread);
498 
499 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
500 
501 	/* Do not try to resume a thread that was not suspended */
502 	if (unlikely(!z_is_thread_suspended(thread))) {
503 		k_spin_unlock(&_sched_spinlock, key);
504 		return;
505 	}
506 
507 	z_mark_thread_as_not_suspended(thread);
508 	ready_thread(thread);
509 
510 	z_reschedule(&_sched_spinlock, key);
511 
512 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, resume, thread);
513 }
514 
515 #ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_resume(k_tid_t thread)
517 {
518 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
519 	z_impl_k_thread_resume(thread);
520 }
521 #include <zephyr/syscalls/k_thread_resume_mrsh.c>
522 #endif /* CONFIG_USERSPACE */
523 
static void unready_thread(struct k_thread *thread)
525 {
526 	if (z_is_thread_queued(thread)) {
527 		dequeue_thread(thread);
528 	}
529 	update_cache(thread == _current);
530 }
531 
532 /* _sched_spinlock must be held */
static void add_to_waitq_locked(struct k_thread *thread, _wait_q_t *wait_q)
534 {
535 	unready_thread(thread);
536 	z_mark_thread_as_pending(thread);
537 
538 	SYS_PORT_TRACING_FUNC(k_thread, sched_pend, thread);
539 
540 	if (wait_q != NULL) {
541 		thread->base.pended_on = wait_q;
542 		_priq_wait_add(&wait_q->waitq, thread);
543 	}
544 }
545 
static void add_thread_timeout(struct k_thread *thread, k_timeout_t timeout)
547 {
548 	if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
549 		z_add_thread_timeout(thread, timeout);
550 	}
551 }
552 
static void pend_locked(struct k_thread *thread, _wait_q_t *wait_q,
554 			k_timeout_t timeout)
555 {
556 #ifdef CONFIG_KERNEL_COHERENCE
557 	__ASSERT_NO_MSG(wait_q == NULL || arch_mem_coherent(wait_q));
558 #endif /* CONFIG_KERNEL_COHERENCE */
559 	add_to_waitq_locked(thread, wait_q);
560 	add_thread_timeout(thread, timeout);
561 }
562 
void z_pend_thread(struct k_thread *thread, _wait_q_t *wait_q,
564 		   k_timeout_t timeout)
565 {
566 	__ASSERT_NO_MSG(thread == _current || is_thread_dummy(thread));
567 	K_SPINLOCK(&_sched_spinlock) {
568 		pend_locked(thread, wait_q, timeout);
569 	}
570 }
571 
void z_unpend_thread_no_timeout(struct k_thread *thread)
573 {
574 	K_SPINLOCK(&_sched_spinlock) {
575 		if (thread->base.pended_on != NULL) {
576 			unpend_thread_no_timeout(thread);
577 		}
578 	}
579 }
580 
void z_sched_wake_thread(struct k_thread *thread, bool is_timeout)
582 {
583 	K_SPINLOCK(&_sched_spinlock) {
584 		bool killed = (thread->base.thread_state &
585 				(_THREAD_DEAD | _THREAD_ABORTING));
586 
587 #ifdef CONFIG_EVENTS
588 		bool do_nothing = thread->no_wake_on_timeout && is_timeout;
589 
590 		thread->no_wake_on_timeout = false;
591 
592 		if (do_nothing) {
593 			continue;
594 		}
595 #endif /* CONFIG_EVENTS */
596 
597 		if (!killed) {
598 			/* The thread is not being killed */
599 			if (thread->base.pended_on != NULL) {
600 				unpend_thread_no_timeout(thread);
601 			}
602 			z_mark_thread_as_not_sleeping(thread);
603 			ready_thread(thread);
604 		}
605 	}
606 
607 }
608 
609 #ifdef CONFIG_SYS_CLOCK_EXISTS
610 /* Timeout handler for *_thread_timeout() APIs */
void z_thread_timeout(struct _timeout *timeout)
612 {
613 	struct k_thread *thread = CONTAINER_OF(timeout,
614 					       struct k_thread, base.timeout);
615 
616 	z_sched_wake_thread(thread, true);
617 }
618 #endif /* CONFIG_SYS_CLOCK_EXISTS */
619 
int z_pend_curr(struct k_spinlock *lock, k_spinlock_key_t key,
621 	       _wait_q_t *wait_q, k_timeout_t timeout)
622 {
623 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
624 	pending_current = _current;
625 #endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
626 	__ASSERT_NO_MSG(sizeof(_sched_spinlock) == 0 || lock != &_sched_spinlock);
627 
628 	/* We do a "lock swap" prior to calling z_swap(), such that
629 	 * the caller's lock gets released as desired.  But we ensure
630 	 * that we hold the scheduler lock and leave local interrupts
631 	 * masked until we reach the context switch.  z_swap() itself
632 	 * has similar code; the duplication is because it's a legacy
633 	 * API that doesn't expect to be called with scheduler lock
634 	 * held.
635 	 */
636 	(void) k_spin_lock(&_sched_spinlock);
637 	pend_locked(_current, wait_q, timeout);
638 	k_spin_release(lock);
639 	return z_swap(&_sched_spinlock, key);
640 }
641 
struct k_thread *z_unpend1_no_timeout(_wait_q_t *wait_q)
643 {
644 	struct k_thread *thread = NULL;
645 
646 	K_SPINLOCK(&_sched_spinlock) {
647 		thread = _priq_wait_best(&wait_q->waitq);
648 
649 		if (thread != NULL) {
650 			unpend_thread_no_timeout(thread);
651 		}
652 	}
653 
654 	return thread;
655 }
656 
void z_unpend_thread(struct k_thread *thread)
658 {
659 	z_unpend_thread_no_timeout(thread);
660 	z_abort_thread_timeout(thread);
661 }
662 
663 /* Priority set utility that does no rescheduling, it just changes the
664  * run queue state, returning true if a reschedule is needed later.
665  */
bool z_thread_prio_set(struct k_thread *thread, int prio)
667 {
	bool need_sched = false;
669 	int old_prio = thread->base.prio;
670 
671 	K_SPINLOCK(&_sched_spinlock) {
672 		need_sched = z_is_thread_ready(thread);
673 
674 		if (need_sched) {
675 			if (!IS_ENABLED(CONFIG_SMP) || z_is_thread_queued(thread)) {
676 				dequeue_thread(thread);
677 				thread->base.prio = prio;
678 				queue_thread(thread);
679 
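				/* Lower numeric value means higher priority,
				 * so this is a priority boost: another CPU may
				 * now need to pick this thread up.
				 */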
680 				if (old_prio > prio) {
681 					flag_ipi(ipi_mask_create(thread));
682 				}
683 			} else {
684 				/*
685 				 * This is a running thread on SMP. Update its
686 				 * priority, but do not requeue it. An IPI is
687 				 * needed if the priority is both being lowered
688 				 * and it is running on another CPU.
689 				 */
690 
691 				thread->base.prio = prio;
692 
693 				struct _cpu *cpu;
694 
695 				cpu = thread_active_elsewhere(thread);
696 				if ((cpu != NULL) && (old_prio < prio)) {
697 					flag_ipi(IPI_CPU_MASK(cpu->id));
698 				}
699 			}
700 
701 			update_cache(1);
702 		} else if (z_is_thread_pending(thread)) {
703 			/* Thread is pending, remove it from the waitq
704 			 * and reinsert it with the new priority to avoid
705 			 * violating waitq ordering and rb assumptions.
706 			 */
707 			_wait_q_t *wait_q = pended_on_thread(thread);
708 
709 			_priq_wait_remove(&wait_q->waitq, thread);
710 			thread->base.prio = prio;
711 			_priq_wait_add(&wait_q->waitq, thread);
712 		} else {
713 			thread->base.prio = prio;
714 		}
715 	}
716 
717 	SYS_PORT_TRACING_OBJ_FUNC(k_thread, sched_priority_set, thread, prio);
718 
719 	return need_sched;
720 }
721 
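/* Decide whether a context switch is legal right now: the interrupt
 * key must show interrupts were enabled at the outermost lock level
 * and we must not be in ISR context.  On SMP this also consumes any
 * pending "cooperative swap is OK" hint.
 */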
static inline bool resched(uint32_t key)
723 {
724 #ifdef CONFIG_SMP
725 	_current_cpu->swap_ok = 0;
726 #endif /* CONFIG_SMP */
727 
728 	return arch_irq_unlocked(key) && !arch_is_in_isr();
729 }
730 
731 /*
732  * Check if the next ready thread is the same as the current thread
733  * and save the trip if true.
734  */
static inline bool need_swap(void)
736 {
737 	/* the SMP case will be handled in C based z_swap() */
738 #ifdef CONFIG_SMP
739 	return true;
740 #else
741 	struct k_thread *new_thread;
742 
743 	/* Check if the next ready thread is the same as the current thread */
744 	new_thread = _kernel.ready_q.cache;
745 	return new_thread != _current;
746 #endif /* CONFIG_SMP */
747 }
748 
void z_reschedule(struct k_spinlock *lock, k_spinlock_key_t key)
750 {
751 	if (resched(key.key) && need_swap()) {
752 		z_swap(lock, key);
753 	} else {
754 		k_spin_unlock(lock, key);
755 		signal_pending_ipi();
756 	}
757 }
758 
void z_reschedule_irqlock(uint32_t key)
760 {
761 	if (resched(key) && need_swap()) {
762 		z_swap_irqlock(key);
763 	} else {
764 		irq_unlock(key);
765 		signal_pending_ipi();
766 	}
767 }
768 
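/* Scheduler locking is per-thread and counts *down*: sched_locked is
 * zero when unlocked, and each k_sched_lock() decrements it (the
 * assertion below catches wraparound from excessive nesting).
 */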
void k_sched_lock(void)
770 {
771 	K_SPINLOCK(&_sched_spinlock) {
772 		SYS_PORT_TRACING_FUNC(k_thread, sched_lock);
773 
774 		__ASSERT(!arch_is_in_isr(), "");
775 		__ASSERT(_current->base.sched_locked != 1U, "");
776 
777 		--_current->base.sched_locked;
778 
779 		compiler_barrier();
780 	}
781 }
782 
void k_sched_unlock(void)
784 {
785 	K_SPINLOCK(&_sched_spinlock) {
786 		__ASSERT(_current->base.sched_locked != 0U, "");
787 		__ASSERT(!arch_is_in_isr(), "");
788 
789 		++_current->base.sched_locked;
790 		update_cache(0);
791 	}
792 
793 	LOG_DBG("scheduler unlocked (%p:%d)",
794 		_current, _current->base.sched_locked);
795 
796 	SYS_PORT_TRACING_FUNC(k_thread, sched_unlock);
797 
798 	z_reschedule_unlocked();
799 }
800 
struct k_thread *z_swap_next_thread(void)
802 {
803 #ifdef CONFIG_SMP
804 	struct k_thread *ret = next_up();
805 
806 	if (ret == _current) {
807 		/* When not swapping, have to signal IPIs here.  In
808 		 * the context switch case it must happen later, after
809 		 * _current gets requeued.
810 		 */
811 		signal_pending_ipi();
812 	}
813 	return ret;
814 #else
815 	return _kernel.ready_q.cache;
816 #endif /* CONFIG_SMP */
817 }
818 
819 #ifdef CONFIG_USE_SWITCH
820 /* Just a wrapper around z_current_thread_set(xxx) with tracing */
static inline void set_current(struct k_thread *new_thread)
822 {
823 	/* If the new thread is the same as the current thread, we
824 	 * don't need to do anything.
825 	 */
826 	if (IS_ENABLED(CONFIG_INSTRUMENT_THREAD_SWITCHING) && new_thread != _current) {
827 		z_thread_mark_switched_out();
828 	}
829 	z_current_thread_set(new_thread);
830 }
831 
832 /**
833  * @brief Determine next thread to execute upon completion of an interrupt
834  *
835  * Thread preemption is performed by context switching after the completion
836  * of a non-recursed interrupt. This function determines which thread to
837  * switch to if any. This function accepts as @p interrupted either:
838  *
839  * - The handle for the interrupted thread in which case the thread's context
840  *   must already be fully saved and ready to be picked up by a different CPU.
841  *
842  * - NULL if more work is required to fully save the thread's state after
843  *   it is known that a new thread is to be scheduled. It is up to the caller
844  *   to store the handle resulting from the thread that is being switched out
845  *   in that thread's "switch_handle" field after its
846  *   context has fully been saved, following the same requirements as with
847  *   the @ref arch_switch() function.
848  *
849  * If a new thread needs to be scheduled then its handle is returned.
850  * Otherwise the same value provided as @p interrupted is returned back.
851  * Those handles are the same opaque types used by the @ref arch_switch()
852  * function.
853  *
854  * @warning
855  * The _current value may have changed after this call and not refer
856  * to the interrupted thread anymore. It might be necessary to make a local
857  * copy before calling this function.
858  *
859  * @param interrupted Handle for the thread that was interrupted or NULL.
860  * @retval Handle for the next thread to execute, or @p interrupted when
861  *         no new thread is to be scheduled.
862  */
void *z_get_next_switch_handle(void *interrupted)
864 {
865 	z_check_stack_sentinel();
866 
867 #ifdef CONFIG_SMP
868 	void *ret = NULL;
869 
870 	K_SPINLOCK(&_sched_spinlock) {
871 		struct k_thread *old_thread = _current, *new_thread;
872 
873 		if (IS_ENABLED(CONFIG_SMP)) {
874 			old_thread->switch_handle = NULL;
875 		}
876 		new_thread = next_up();
877 
878 		z_sched_usage_switch(new_thread);
879 
880 		if (old_thread != new_thread) {
881 			uint8_t  cpu_id;
882 
883 			update_metairq_preempt(new_thread);
884 			z_sched_switch_spin(new_thread);
885 			arch_cohere_stacks(old_thread, interrupted, new_thread);
886 
887 			_current_cpu->swap_ok = 0;
888 			cpu_id = arch_curr_cpu()->id;
889 			new_thread->base.cpu = cpu_id;
890 			set_current(new_thread);
891 
892 #ifdef CONFIG_TIMESLICING
893 			z_reset_time_slice(new_thread);
894 #endif /* CONFIG_TIMESLICING */
895 
896 #ifdef CONFIG_SPIN_VALIDATE
897 			/* Changed _current!  Update the spinlock
898 			 * bookkeeping so the validation doesn't get
899 			 * confused when the "wrong" thread tries to
900 			 * release the lock.
901 			 */
902 			z_spin_lock_set_owner(&_sched_spinlock);
903 #endif /* CONFIG_SPIN_VALIDATE */
904 
905 			/* A queued (runnable) old/current thread
906 			 * needs to be added back to the run queue
907 			 * here, and atomically with its switch handle
908 			 * being set below.  This is safe now, as we
909 			 * will not return into it.
910 			 */
911 			if (z_is_thread_queued(old_thread)) {
912 #ifdef CONFIG_SCHED_IPI_CASCADE
913 				if ((new_thread->base.cpu_mask != -1) &&
914 				    (old_thread->base.cpu_mask != BIT(cpu_id))) {
915 					flag_ipi(ipi_mask_create(old_thread));
916 				}
917 #endif
918 				runq_add(old_thread);
919 			}
920 		}
921 		old_thread->switch_handle = interrupted;
922 		ret = new_thread->switch_handle;
923 		if (IS_ENABLED(CONFIG_SMP)) {
924 			/* Active threads MUST have a null here */
925 			new_thread->switch_handle = NULL;
926 		}
927 	}
928 	signal_pending_ipi();
929 	return ret;
930 #else
931 	z_sched_usage_switch(_kernel.ready_q.cache);
932 	_current->switch_handle = interrupted;
933 	set_current(_kernel.ready_q.cache);
934 	return _current->switch_handle;
935 #endif /* CONFIG_SMP */
936 }
937 #endif /* CONFIG_USE_SWITCH */
938 
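/* Wake every thread pending on wait_q and make it ready.  Returns
 * nonzero if at least one thread was woken, i.e. the caller should
 * reschedule.
 */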
int z_unpend_all(_wait_q_t *wait_q)
940 {
941 	int need_sched = 0;
942 	struct k_thread *thread;
943 
944 	for (thread = z_waitq_head(wait_q); thread != NULL; thread = z_waitq_head(wait_q)) {
945 		z_unpend_thread(thread);
946 		z_ready_thread(thread);
947 		need_sched = 1;
948 	}
949 
950 	return need_sched;
951 }
952 
void init_ready_q(struct _ready_q *ready_q)
954 {
955 	_priq_run_init(&ready_q->runq);
956 }
957 
void z_sched_init(void)
959 {
960 #ifdef CONFIG_SCHED_CPU_MASK_PIN_ONLY
961 	for (int i = 0; i < CONFIG_MP_MAX_NUM_CPUS; i++) {
962 		init_ready_q(&_kernel.cpus[i].ready_q);
963 	}
964 #else
965 	init_ready_q(&_kernel.ready_q);
966 #endif /* CONFIG_SCHED_CPU_MASK_PIN_ONLY */
967 }
968 
void z_impl_k_thread_priority_set(k_tid_t thread, int prio)
970 {
971 	/*
972 	 * Use NULL, since we cannot know what the entry point is (we do not
973 	 * keep track of it) and idle cannot change its priority.
974 	 */
975 	Z_ASSERT_VALID_PRIO(prio, NULL);
976 
977 	bool need_sched = z_thread_prio_set((struct k_thread *)thread, prio);
978 
979 	if ((need_sched) && (IS_ENABLED(CONFIG_SMP) ||
980 			     (_current->base.sched_locked == 0U))) {
981 		z_reschedule_unlocked();
982 	}
983 }
984 
985 #ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_priority_set(k_tid_t thread, int prio)
987 {
988 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
989 	K_OOPS(K_SYSCALL_VERIFY_MSG(_is_valid_prio(prio, NULL),
990 				    "invalid thread priority %d", prio));
991 #ifndef CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY
992 	K_OOPS(K_SYSCALL_VERIFY_MSG((int8_t)prio >= thread->base.prio,
993 				    "thread priority may only be downgraded (%d < %d)",
994 				    prio, thread->base.prio));
995 #endif /* CONFIG_USERSPACE_THREAD_MAY_RAISE_PRIORITY */
996 	z_impl_k_thread_priority_set(thread, prio);
997 }
998 #include <zephyr/syscalls/k_thread_priority_set_mrsh.c>
999 #endif /* CONFIG_USERSPACE */
1000 
1001 #ifdef CONFIG_SCHED_DEADLINE
void z_impl_k_thread_deadline_set(k_tid_t tid, int deadline)
1003 {
1004 
1005 	deadline = CLAMP(deadline, 0, INT_MAX);
1006 
1007 	struct k_thread *thread = tid;
1008 	int32_t newdl = k_cycle_get_32() + deadline;
1009 
1010 	/* The prio_deadline field changes the sorting order, so can't
1011 	 * change it while the thread is in the run queue (dlists
1012 	 * actually are benign as long as we requeue it before we
1013 	 * release the lock, but an rbtree will blow up if we break
1014 	 * sorting!)
1015 	 */
1016 	K_SPINLOCK(&_sched_spinlock) {
1017 		if (z_is_thread_queued(thread)) {
1018 			dequeue_thread(thread);
1019 			thread->base.prio_deadline = newdl;
1020 			queue_thread(thread);
1021 		} else {
1022 			thread->base.prio_deadline = newdl;
1023 		}
1024 	}
1025 }
1026 
1027 #ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_thread_deadline_set(k_tid_t tid, int deadline)
1029 {
1030 	struct k_thread *thread = tid;
1031 
1032 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1033 	K_OOPS(K_SYSCALL_VERIFY_MSG(deadline > 0,
1034 				    "invalid thread deadline %d",
1035 				    (int)deadline));
1036 
1037 	z_impl_k_thread_deadline_set((k_tid_t)thread, deadline);
1038 }
1039 #include <zephyr/syscalls/k_thread_deadline_set_mrsh.c>
1040 #endif /* CONFIG_USERSPACE */
1041 #endif /* CONFIG_SCHED_DEADLINE */
1042 
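/* Explicitly re-evaluate the scheduling decision on this CPU: refresh
 * the ready queue cache and swap if a different thread should now run.
 */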
void z_impl_k_reschedule(void)
1044 {
1045 	k_spinlock_key_t key;
1046 
1047 	key = k_spin_lock(&_sched_spinlock);
1048 
1049 	update_cache(0);
1050 
1051 	z_reschedule(&_sched_spinlock, key);
1052 }
1053 
1054 #ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_reschedule(void)
1056 {
1057 	z_impl_k_reschedule();
1058 }
1059 #include <zephyr/syscalls/k_reschedule_mrsh.c>
1060 #endif /* CONFIG_USERSPACE */
1061 
bool k_can_yield(void)
1063 {
1064 	return !(k_is_pre_kernel() || k_is_in_isr() ||
1065 		 z_is_idle_thread_object(_current));
1066 }
1067 
void z_impl_k_yield(void)
1069 {
1070 	__ASSERT(!arch_is_in_isr(), "");
1071 
1072 	SYS_PORT_TRACING_FUNC(k_thread, yield);
1073 
1074 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
1075 
1076 	runq_yield();
1077 
1078 	update_cache(1);
1079 	z_swap(&_sched_spinlock, key);
1080 }
1081 
1082 #ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_yield(void)
1084 {
1085 	z_impl_k_yield();
1086 }
1087 #include <zephyr/syscalls/k_yield_mrsh.c>
1088 #endif /* CONFIG_USERSPACE */
1089 
static int32_t z_tick_sleep(k_timeout_t timeout)
1091 {
1092 	uint32_t expected_wakeup_ticks;
1093 
1094 	__ASSERT(!arch_is_in_isr(), "");
1095 
1096 	LOG_DBG("thread %p for %lu ticks", _current, (unsigned long)timeout.ticks);
1097 
1098 	/* K_NO_WAIT is treated as a 'yield' */
1099 	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
1100 		k_yield();
1101 		return 0;
1102 	}
1103 
1104 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
1105 
1106 #if defined(CONFIG_TIMESLICING) && defined(CONFIG_SWAP_NONATOMIC)
1107 	pending_current = _current;
1108 #endif /* CONFIG_TIMESLICING && CONFIG_SWAP_NONATOMIC */
1109 	unready_thread(_current);
1110 	expected_wakeup_ticks = (uint32_t)z_add_thread_timeout(_current, timeout);
1111 	z_mark_thread_as_sleeping(_current);
1112 
1113 	(void)z_swap(&_sched_spinlock, key);
1114 
1115 	if (!z_is_aborted_thread_timeout(_current)) {
1116 		return 0;
1117 	}
1118 
	/* Use a 32-bit unsigned subtraction so tick counter wraparound is handled correctly */
1120 	uint32_t left_ticks = expected_wakeup_ticks - sys_clock_tick_get_32();
1121 
	/* Cast to signed 32-bit so a negative remaining time is handled correctly */
1123 	k_ticks_t ticks = (k_ticks_t)(int32_t)left_ticks;
1124 
1125 	if (ticks > 0) {
1126 		return ticks;
1127 	}
1128 
1129 	return 0;
1130 }
1131 
int32_t z_impl_k_sleep(k_timeout_t timeout)
1133 {
1134 	k_ticks_t ticks;
1135 
1136 	__ASSERT(!arch_is_in_isr(), "");
1137 
1138 	SYS_PORT_TRACING_FUNC_ENTER(k_thread, sleep, timeout);
1139 
1140 	ticks = z_tick_sleep(timeout);
1141 
1142 	/* k_sleep() still returns 32 bit milliseconds for compatibility */
1143 	int64_t ms = K_TIMEOUT_EQ(timeout, K_FOREVER) ? K_TICKS_FOREVER :
1144 		CLAMP(k_ticks_to_ms_ceil64(ticks), 0, INT_MAX);
1145 
1146 	SYS_PORT_TRACING_FUNC_EXIT(k_thread, sleep, timeout, ms);
1147 	return (int32_t) ms;
1148 }
1149 
1150 #ifdef CONFIG_USERSPACE
static inline int32_t z_vrfy_k_sleep(k_timeout_t timeout)
1152 {
1153 	return z_impl_k_sleep(timeout);
1154 }
1155 #include <zephyr/syscalls/k_sleep_mrsh.c>
1156 #endif /* CONFIG_USERSPACE */
1157 
int32_t z_impl_k_usleep(int32_t us)
1159 {
1160 	int32_t ticks;
1161 
1162 	SYS_PORT_TRACING_FUNC_ENTER(k_thread, usleep, us);
1163 
1164 	ticks = k_us_to_ticks_ceil64(us);
1165 	ticks = z_tick_sleep(Z_TIMEOUT_TICKS(ticks));
1166 
1167 	int32_t ret = k_ticks_to_us_ceil64(ticks);
1168 
1169 	SYS_PORT_TRACING_FUNC_EXIT(k_thread, usleep, us, ret);
1170 
1171 	return ret;
1172 }
1173 
1174 #ifdef CONFIG_USERSPACE
static inline int32_t z_vrfy_k_usleep(int32_t us)
1176 {
1177 	return z_impl_k_usleep(us);
1178 }
1179 #include <zephyr/syscalls/k_usleep_mrsh.c>
1180 #endif /* CONFIG_USERSPACE */
1181 
void z_impl_k_wakeup(k_tid_t thread)
1183 {
1184 	SYS_PORT_TRACING_OBJ_FUNC(k_thread, wakeup, thread);
1185 
1186 	k_spinlock_key_t  key = k_spin_lock(&_sched_spinlock);
1187 
1188 	if (z_is_thread_sleeping(thread)) {
1189 		z_abort_thread_timeout(thread);
1190 		z_mark_thread_as_not_sleeping(thread);
1191 		ready_thread(thread);
1192 		z_reschedule(&_sched_spinlock, key);
1193 	} else {
1194 		k_spin_unlock(&_sched_spinlock, key);
1195 	}
1196 }
1197 
1198 #ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_wakeup(k_tid_t thread)
1200 {
1201 	K_OOPS(K_SYSCALL_OBJ(thread, K_OBJ_THREAD));
1202 	z_impl_k_wakeup(thread);
1203 }
1204 #include <zephyr/syscalls/k_wakeup_mrsh.c>
1205 #endif /* CONFIG_USERSPACE */
1206 
k_tid_t z_impl_k_sched_current_thread_query(void)
1208 {
1209 	return _current;
1210 }
1211 
1212 #ifdef CONFIG_USERSPACE
static inline k_tid_t z_vrfy_k_sched_current_thread_query(void)
1214 {
1215 	return z_impl_k_sched_current_thread_query();
1216 }
1217 #include <zephyr/syscalls/k_sched_current_thread_query_mrsh.c>
1218 #endif /* CONFIG_USERSPACE */
1219 
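/* Like z_unpend_all(), but also aborts each thread's timeout and sets
 * its swap return value to 0.  Used when tearing down a dying thread's
 * wait queues; expects _sched_spinlock to be held.
 */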
static inline void unpend_all(_wait_q_t *wait_q)
1221 {
1222 	struct k_thread *thread;
1223 
1224 	for (thread = z_waitq_head(wait_q); thread != NULL; thread = z_waitq_head(wait_q)) {
1225 		unpend_thread_no_timeout(thread);
1226 		z_abort_thread_timeout(thread);
1227 		arch_thread_return_value_set(thread, 0);
1228 		ready_thread(thread);
1229 	}
1230 }
1231 
1232 #ifdef CONFIG_THREAD_ABORT_HOOK
1233 extern void thread_abort_hook(struct k_thread *thread);
1234 #endif /* CONFIG_THREAD_ABORT_HOOK */
1235 
1236 /**
1237  * @brief Dequeues the specified thread
1238  *
1239  * Dequeues the specified thread and move it into the specified new state.
1240  *
1241  * @param thread Identify the thread to halt
1242  * @param new_state New thread state (_THREAD_DEAD or _THREAD_SUSPENDED)
1243  */
static ALWAYS_INLINE void halt_thread(struct k_thread *thread, uint8_t new_state)
1245 {
1246 	bool dummify = false;
1247 
1248 	/* We hold the lock, and the thread is known not to be running
1249 	 * anywhere.
1250 	 */
1251 	if ((thread->base.thread_state & new_state) == 0U) {
1252 		thread->base.thread_state |= new_state;
1253 		if (z_is_thread_queued(thread)) {
1254 			dequeue_thread(thread);
1255 		}
1256 
1257 		if (new_state == _THREAD_DEAD) {
1258 			if (thread->base.pended_on != NULL) {
1259 				unpend_thread_no_timeout(thread);
1260 			}
1261 			z_abort_thread_timeout(thread);
1262 			unpend_all(&thread->join_queue);
1263 
1264 			/* Edge case: aborting _current from within an
1265 			 * ISR that preempted it requires clearing the
1266 			 * _current pointer so the upcoming context
1267 			 * switch doesn't clobber the now-freed
1268 			 * memory
1269 			 */
1270 			if (thread == _current && arch_is_in_isr()) {
1271 				dummify = true;
1272 			}
1273 		}
1274 #ifdef CONFIG_SMP
1275 		unpend_all(&thread->halt_queue);
1276 #endif /* CONFIG_SMP */
1277 		update_cache(1);
1278 
1279 		if (new_state == _THREAD_SUSPENDED) {
1280 			clear_halting(thread);
1281 			return;
1282 		}
1283 
1284 		arch_coprocessors_disable(thread);
1285 
1286 		SYS_PORT_TRACING_FUNC(k_thread, sched_abort, thread);
1287 
1288 		z_thread_monitor_exit(thread);
1289 #ifdef CONFIG_THREAD_ABORT_HOOK
1290 		thread_abort_hook(thread);
1291 #endif /* CONFIG_THREAD_ABORT_HOOK */
1292 
1293 #ifdef CONFIG_OBJ_CORE_THREAD
1294 #ifdef CONFIG_OBJ_CORE_STATS_THREAD
1295 		k_obj_core_stats_deregister(K_OBJ_CORE(thread));
1296 #endif /* CONFIG_OBJ_CORE_STATS_THREAD */
1297 		k_obj_core_unlink(K_OBJ_CORE(thread));
1298 #endif /* CONFIG_OBJ_CORE_THREAD */
1299 
1300 #ifdef CONFIG_USERSPACE
1301 		z_mem_domain_exit_thread(thread);
1302 		k_thread_perms_all_clear(thread);
1303 		k_object_uninit(thread->stack_obj);
1304 		k_object_uninit(thread);
1305 #endif /* CONFIG_USERSPACE */
1306 
1307 #ifdef CONFIG_THREAD_ABORT_NEED_CLEANUP
1308 		k_thread_abort_cleanup(thread);
1309 #endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */
1310 
1311 		/* Do this "set _current to dummy" step last so that
1312 		 * subsystems above can rely on _current being
1313 		 * unchanged.  Disabled for posix as that arch
1314 		 * continues to use the _current pointer in its swap
1315 		 * code.  Note that we must leave a non-null switch
1316 		 * handle for any threads spinning in join() (this can
1317 		 * never be used, as our thread is flagged dead, but
1318 		 * it must not be NULL otherwise join can deadlock).
1319 		 */
1320 		if (dummify && !IS_ENABLED(CONFIG_ARCH_POSIX)) {
1321 #ifdef CONFIG_USE_SWITCH
1322 			_current->switch_handle = _current;
1323 #endif
1324 			z_dummy_thread_init(&_thread_dummy);
1325 
1326 		}
1327 
1328 		/* Finally update the halting thread state, on which
1329 		 * other CPUs might be spinning (see
1330 		 * thread_halt_spin()).
1331 		 */
1332 		clear_halting(thread);
1333 	}
1334 }
1335 
void z_thread_abort(struct k_thread *thread)
1337 {
1338 	bool essential = z_is_thread_essential(thread);
1339 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
1340 
1341 	if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
1342 		k_spin_unlock(&_sched_spinlock, key);
1343 		return;
1344 	}
1345 
1346 	z_thread_halt(thread, key, true);
1347 
1348 	if (essential) {
1349 		__ASSERT(!essential, "aborted essential thread %p", thread);
1350 		k_panic();
1351 	}
1352 }
1353 
1354 #if !defined(CONFIG_ARCH_HAS_THREAD_ABORT)
void z_impl_k_thread_abort(k_tid_t thread)
1356 {
1357 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, abort, thread);
1358 
1359 	z_thread_abort(thread);
1360 
1361 	__ASSERT_NO_MSG((thread->base.thread_state & _THREAD_DEAD) != 0);
1362 
1363 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, abort, thread);
1364 }
1365 #endif /* !CONFIG_ARCH_HAS_THREAD_ABORT */
1366 
int z_impl_k_thread_join(struct k_thread *thread, k_timeout_t timeout)
1368 {
1369 	k_spinlock_key_t key = k_spin_lock(&_sched_spinlock);
1370 	int ret;
1371 
1372 	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_thread, join, thread, timeout);
1373 
1374 	if ((thread->base.thread_state & _THREAD_DEAD) != 0U) {
1375 		z_sched_switch_spin(thread);
1376 		ret = 0;
1377 	} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
1378 		ret = -EBUSY;
1379 	} else if ((thread == _current) ||
1380 		   (thread->base.pended_on == &_current->join_queue)) {
1381 		ret = -EDEADLK;
1382 	} else {
1383 		__ASSERT(!arch_is_in_isr(), "cannot join in ISR");
1384 		add_to_waitq_locked(_current, &thread->join_queue);
1385 		add_thread_timeout(_current, timeout);
1386 
1387 		SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_thread, join, thread, timeout);
1388 		ret = z_swap(&_sched_spinlock, key);
1389 		SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);
1390 
1391 		return ret;
1392 	}
1393 
1394 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_thread, join, thread, timeout, ret);
1395 
1396 	k_spin_unlock(&_sched_spinlock, key);
1397 	return ret;
1398 }
1399 
1400 #ifdef CONFIG_USERSPACE
1401 /* Special case: don't oops if the thread is uninitialized.  This is because
 * the initialization bit does double-duty for thread objects; if it is false, it means
1403  * the thread object is truly uninitialized, or the thread ran and exited for
1404  * some reason.
1405  *
1406  * Return true in this case indicating we should just do nothing and return
1407  * success to the caller.
1408  */
static bool thread_obj_validate(struct k_thread *thread)
1410 {
1411 	struct k_object *ko = k_object_find(thread);
1412 	int ret = k_object_validate(ko, K_OBJ_THREAD, _OBJ_INIT_TRUE);
1413 
1414 	switch (ret) {
1415 	case 0:
1416 		return false;
1417 	case -EINVAL:
1418 		return true;
1419 	default:
1420 #ifdef CONFIG_LOG
1421 		k_object_dump_error(ret, thread, ko, K_OBJ_THREAD);
1422 #endif /* CONFIG_LOG */
1423 		K_OOPS(K_SYSCALL_VERIFY_MSG(ret, "access denied"));
1424 	}
1425 	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
1426 }
1427 
static inline int z_vrfy_k_thread_join(struct k_thread *thread,
1429 				       k_timeout_t timeout)
1430 {
1431 	if (thread_obj_validate(thread)) {
1432 		return 0;
1433 	}
1434 
1435 	return z_impl_k_thread_join(thread, timeout);
1436 }
1437 #include <zephyr/syscalls/k_thread_join_mrsh.c>
1438 
static inline void z_vrfy_k_thread_abort(k_tid_t thread)
1440 {
1441 	if (thread_obj_validate(thread)) {
1442 		return;
1443 	}
1444 
1445 	K_OOPS(K_SYSCALL_VERIFY_MSG(!z_is_thread_essential(thread),
1446 				    "aborting essential thread %p", thread));
1447 
1448 	z_impl_k_thread_abort((struct k_thread *)thread);
1449 }
1450 #include <zephyr/syscalls/k_thread_abort_mrsh.c>
1451 #endif /* CONFIG_USERSPACE */
1452 
1453 /*
1454  * future scheduler.h API implementations
1455  */
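/* Wake the best-priority thread pending on wait_q, if any, handing it
 * the given swap return value and data.  Returns true if a thread was
 * woken.
 */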
bool z_sched_wake(_wait_q_t *wait_q, int swap_retval, void *swap_data)
1457 {
1458 	struct k_thread *thread;
1459 	bool ret = false;
1460 
1461 	K_SPINLOCK(&_sched_spinlock) {
1462 		thread = _priq_wait_best(&wait_q->waitq);
1463 
1464 		if (thread != NULL) {
1465 			z_thread_return_value_set_with_data(thread,
1466 							    swap_retval,
1467 							    swap_data);
1468 			unpend_thread_no_timeout(thread);
1469 			z_abort_thread_timeout(thread);
1470 			ready_thread(thread);
1471 			ret = true;
1472 		}
1473 	}
1474 
1475 	return ret;
1476 }
1477 
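/* Pend _current on wait_q (releasing the caller's lock, as with
 * z_pend_curr()) and, after wakeup, optionally return the swap_data
 * provided by the waker through *data.
 */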
int z_sched_wait(struct k_spinlock *lock, k_spinlock_key_t key,
1479 		 _wait_q_t *wait_q, k_timeout_t timeout, void **data)
1480 {
1481 	int ret = z_pend_curr(lock, key, wait_q, timeout);
1482 
1483 	if (data != NULL) {
1484 		*data = _current->base.swap_data;
1485 	}
1486 	return ret;
1487 }
1488 
int z_sched_waitq_walk(_wait_q_t  *wait_q,
1490 		       int (*func)(struct k_thread *, void *), void *data)
1491 {
1492 	struct k_thread *thread;
1493 	int  status = 0;
1494 
1495 	K_SPINLOCK(&_sched_spinlock) {
1496 		_WAIT_Q_FOR_EACH(wait_q, thread) {
1497 
1498 			/*
1499 			 * Invoke the callback function on each waiting thread
1500 			 * for as long as there are both waiting threads AND
1501 			 * it returns 0.
1502 			 */
1503 
1504 			status = func(thread, data);
1505 			if (status != 0) {
1506 				break;
1507 			}
1508 		}
1509 	}
1510 
1511 	return status;
1512 }
1513 
1514 /* This routine exists for benchmarking purposes. It is not used in
1515  * general production code.
1516  */
void z_unready_thread(struct k_thread *thread)
1518 {
1519 	K_SPINLOCK(&_sched_spinlock) {
1520 		unready_thread(thread);
1521 	}
1522 }
1523