/*
 * Copyright (c) 2010-2014 Wind River Systems, Inc.
 * Copyright (c) 2024 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <kthread.h>

struct k_spinlock z_thread_monitor_lock;
/*
 * Remove a thread from the kernel's list of active threads.
 */
void z_thread_monitor_exit(struct k_thread *thread)
{
	k_spinlock_key_t key = k_spin_lock(&z_thread_monitor_lock);

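	/*
	 * Unlink the thread: it is either the head of the list or an
	 * interior node whose predecessor must be located first.
	 */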
	if (thread == _kernel.threads) {
		_kernel.threads = _kernel.threads->next_thread;
	} else {
		struct k_thread *prev_thread;

		prev_thread = _kernel.threads;
		while ((prev_thread != NULL) &&
			(thread != prev_thread->next_thread)) {
			prev_thread = prev_thread->next_thread;
		}
		if (prev_thread != NULL) {
			prev_thread->next_thread = thread->next_thread;
		}
	}

	k_spin_unlock(&z_thread_monitor_lock, key);
}

/*
 * Helper to iterate over all threads, optionally filtering by CPU and
 * optionally releasing the monitor lock around each callback invocation.
 */
static void thread_foreach_helper(k_thread_user_cb_t user_cb, void *user_data,
		bool unlocked, bool filter_by_cpu, unsigned int cpu)
{
	struct k_thread *thread;
	k_spinlock_key_t key;

	__ASSERT(user_cb != NULL, "user_cb can not be NULL");

	if (filter_by_cpu) {
		__ASSERT(cpu < CONFIG_MP_MAX_NUM_CPUS, "cpu filter out of bounds");
	}

	key = k_spin_lock(&z_thread_monitor_lock);

	for (thread = _kernel.threads; thread; thread = thread->next_thread) {
		/* thread->base.cpu is only available when CONFIG_SMP=y */
#ifdef CONFIG_SMP
		bool on_cpu = (thread->base.cpu == cpu);
#else
		bool on_cpu = false;
#endif
		if (filter_by_cpu && !on_cpu) {
			continue;
		}

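		/*
		 * For the unlocked variant, release the lock around the
		 * callback so it may block; the thread list can change
		 * while the lock is dropped.
		 */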
		if (unlocked) {
			k_spin_unlock(&z_thread_monitor_lock, key);
			user_cb(thread, user_data);
			key = k_spin_lock(&z_thread_monitor_lock);
		} else {
			user_cb(thread, user_data);
		}
	}

	k_spin_unlock(&z_thread_monitor_lock, key);
}

/*
 * Public API functions using the helper.
 */
void k_thread_foreach(k_thread_user_cb_t user_cb, void *user_data)
{
	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach);
	thread_foreach_helper(user_cb, user_data, false, false, 0);
	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach);
}

void k_thread_foreach_unlocked(k_thread_user_cb_t user_cb, void *user_data)
{
	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach_unlocked);
	thread_foreach_helper(user_cb, user_data, true, false, 0);
	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach_unlocked);
}

#ifdef CONFIG_SMP
void k_thread_foreach_filter_by_cpu(unsigned int cpu, k_thread_user_cb_t user_cb,
		void *user_data)
{
	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach);
	thread_foreach_helper(user_cb, user_data, false, true, cpu);
	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach);
}

void k_thread_foreach_unlocked_filter_by_cpu(unsigned int cpu, k_thread_user_cb_t user_cb,
		void *user_data)
{
	SYS_PORT_TRACING_FUNC_ENTER(k_thread, foreach_unlocked);
	thread_foreach_helper(user_cb, user_data, true, true, cpu);
	SYS_PORT_TRACING_FUNC_EXIT(k_thread, foreach_unlocked);
}
#endif /* CONFIG_SMP */
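
/*
 * Illustrative sketch only, kept disabled: one way application code might
 * use k_thread_foreach() from this file. The helper names below are
 * hypothetical. The callback runs with z_thread_monitor_lock held, so it
 * must not block or re-enter the thread-list APIs; callbacks that need to
 * block should use k_thread_foreach_unlocked() instead.
 */
#if 0
static void count_threads_cb(const struct k_thread *thread, void *user_data)
{
	unsigned int *count = user_data;

	ARG_UNUSED(thread);
	(*count)++;
}

static unsigned int count_active_threads(void)
{
	unsigned int count = 0;

	k_thread_foreach(count_threads_cb, &count);
	return count;
}
#endif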