/*
 * Copyright (c) 2006-2025 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * (scheduler_comm.c) Common API of scheduling routines.
 *
 * Change Logs:
 * Date           Author       Notes
 * 2024-01-18     Shell        Separate scheduling related codes from thread.c, scheduler_.*
 */

#define DBG_TAG           "kernel.sched"
#define DBG_LVL           DBG_INFO
#include <rtdbg.h>

#include <rtthread.h>

/**
 * @brief Initialize thread scheduling context
 *
 * @param thread The thread to be initialized
 * @param tick Initial time slice value for the thread
 * @param priority Initial priority of the thread
 *
 * @details This function performs the following initialization:
 *   - Sets thread status to INIT
 *   - For SMP systems:
 *     * Sets bind CPU to none (RT_CPUS_NR)
 *     * Marks CPU as detached (RT_CPU_DETACHED)
 *   - Calls rt_sched_thread_init_priv() for private scheduling data initialization
 */
void rt_sched_thread_init_ctx(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority)
{
    /* setup thread status */
    RT_SCHED_CTX(thread).stat  = RT_THREAD_INIT;

#ifdef RT_USING_SMP
    /* not bound to any CPU */
    RT_SCHED_CTX(thread).bind_cpu = RT_CPUS_NR;
    RT_SCHED_CTX(thread).oncpu = RT_CPU_DETACHED;
#endif /* RT_USING_SMP */

    rt_sched_thread_init_priv(thread, tick, priority);
}
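
/*
 * Illustrative sketch, not part of this file: the thread setup path
 * (e.g. the internal init routine behind rt_thread_init(); its exact
 * body is an assumption here) is expected to call this exactly once per
 * thread, before the thread is ever scheduled:
 *
 *     rt_sched_thread_init_ctx(thread, tick, priority);
 *     // from here on the thread is in RT_THREAD_INIT and, on SMP,
 *     // not bound to any CPU until startup/binding decides otherwise
 */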

/**
 * @brief Start the thread timer for scheduling
 *
 * @param thread The thread whose timer needs to be started
 *
 * @return rt_err_t Always returns RT_EOK
 *
 * @details This function:
 *   - Requires scheduler lock to be held
 *   - Sets the thread's timer flag (sched_flag_ttmr_set) to indicate the timer is active
 */
rt_err_t rt_sched_thread_timer_start(struct rt_thread *thread)
{
    RT_SCHED_DEBUG_IS_LOCKED;
    RT_SCHED_CTX(thread).sched_flag_ttmr_set = 1;
    return RT_EOK;
}

/**
 * @brief Stop the thread timer for scheduling
 *
 * @param thread The thread whose timer needs to be stopped
 *
 * @return rt_err_t
 *   - RT_EOK if the timer was successfully stopped or was not active
 *   - Other error codes from rt_timer_stop() if the stop operation failed
 */
rt_err_t rt_sched_thread_timer_stop(struct rt_thread *thread)
{
    rt_err_t error;
    RT_SCHED_DEBUG_IS_LOCKED;

    if (RT_SCHED_CTX(thread).sched_flag_ttmr_set)
    {
        error = rt_timer_stop(&thread->thread_timer);

        /* clear the timer flag whether or not the stop succeeded */
        RT_SCHED_CTX(thread).sched_flag_ttmr_set = 0;
    }
    else
    {
        error = RT_EOK;
    }
    return error;
}
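
/*
 * Illustrative sketch, not part of this file: a suspend-with-timeout
 * path (rt_thread_sleep() in thread.c is the real-world analogue; the
 * framing below is a simplified assumption) arms the thread timer and
 * records it under the scheduler lock so a later wakeup can undo it:
 *
 *     rt_sched_lock_level_t slvl;
 *     rt_sched_lock(&slvl);
 *     // ... suspend the current thread ...
 *     rt_timer_control(&thread->thread_timer, RT_TIMER_CTRL_SET_TIME, &tick);
 *     rt_timer_start(&thread->thread_timer);
 *     rt_sched_thread_timer_start(thread);   // sets sched_flag_ttmr_set
 *     rt_sched_unlock_n_resched(slvl);
 *
 * rt_sched_thread_timer_stop() is then the counterpart used by wakeup
 * paths such as rt_sched_thread_ready() below.
 */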

/**
 * @brief Get the current status of a thread
 *
 * @param thread The thread to get status from
 *
 * @return rt_uint8_t The thread status masked with RT_THREAD_STAT_MASK
 *
 * @details This function:
 *   - Requires scheduler lock to be held (RT_SCHED_DEBUG_IS_LOCKED)
 *   - Returns the thread's status field masked with RT_THREAD_STAT_MASK
 */
rt_uint8_t rt_sched_thread_get_stat(struct rt_thread *thread)
{
    RT_SCHED_DEBUG_IS_LOCKED;
    return RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK;
}

/**
 * @brief Get the current priority of a thread
 *
 * @param thread The thread to get priority from
 *
 * @return rt_uint8_t The current priority value of the thread
 *
 * @details This function:
 *   - Requires scheduler lock to be held (RT_SCHED_DEBUG_IS_LOCKED)
 *   - Returns the thread's current priority field from its private scheduling data
 */
rt_uint8_t rt_sched_thread_get_curr_prio(struct rt_thread *thread)
{
    RT_SCHED_DEBUG_IS_LOCKED;
    return RT_SCHED_PRIV(thread).current_priority;
}

/**
 * @brief Get the initial priority of a thread
 *
 * @param thread The thread to get priority from
 *
 * @return rt_uint8_t The initial priority value of the thread
 *
 * @details This function:
 *   - Returns the thread's initial priority field from its private scheduling data
 *   - Does not require the scheduler lock, as it accesses a read-only field
 */
rt_uint8_t rt_sched_thread_get_init_prio(struct rt_thread *thread)
{
    /* read-only field, so the lock is unnecessary */
    return RT_SCHED_PRIV(thread).init_priority;
}

/**
 * @brief Check if a thread is in suspended state
 *
 * @param thread The thread to check
 *
 * @return rt_uint8_t
 *   - 1 if the thread is suspended (matches RT_THREAD_SUSPEND_MASK)
 *   - 0 otherwise
 *
 * @details This function:
 *   - Requires scheduler lock to be held (RT_SCHED_DEBUG_IS_LOCKED)
 *   - Checks the thread's status field against RT_THREAD_SUSPEND_MASK
 *
 * @note Caller must hold the scheduler lock before calling this function
 */
rt_uint8_t rt_sched_thread_is_suspended(struct rt_thread *thread)
{
    RT_SCHED_DEBUG_IS_LOCKED;
    return (RT_SCHED_CTX(thread).stat & RT_THREAD_SUSPEND_MASK) == RT_THREAD_SUSPEND_MASK;
}
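
/*
 * Illustrative sketch, not part of this file: the queries above that are
 * annotated with RT_SCHED_DEBUG_IS_LOCKED are only meaningful inside a
 * scheduler-lock critical section, e.g. (my_prio is a hypothetical local):
 *
 *     rt_sched_lock_level_t slvl;
 *     rt_sched_lock(&slvl);
 *     if (rt_sched_thread_is_suspended(thread) &&
 *         rt_sched_thread_get_curr_prio(thread) < my_prio)
 *     {
 *         // stat and priority cannot change while the lock is held
 *     }
 *     rt_sched_unlock(slvl);
 */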

/**
 * @brief Close a thread by setting its status to CLOSED
 *
 * @param thread The thread to be closed
 * @return rt_err_t Always returns RT_EOK
 *
 * @details This function:
 *   - Requires scheduler lock to be held (RT_SCHED_DEBUG_IS_LOCKED)
 *   - Sets the thread's status to RT_THREAD_CLOSE
 *
 * @note Must be called with scheduler lock held
 */
rt_err_t rt_sched_thread_close(struct rt_thread *thread)
{
    RT_SCHED_DEBUG_IS_LOCKED;
    RT_SCHED_CTX(thread).stat = RT_THREAD_CLOSE;
    return RT_EOK;
}

/**
 * @brief Yield the current thread's remaining time slice
 *
 * @param thread The thread to yield
 * @return rt_err_t Always returns RT_EOK
 *
 * @details This function:
 *   - Requires scheduler lock to be held (RT_SCHED_DEBUG_IS_LOCKED)
 *   - Resets the thread's remaining tick count to its initial value
 *   - Sets the thread's status to YIELD state
 *
 * @note Must be called with scheduler lock held
 */
rt_err_t rt_sched_thread_yield(struct rt_thread *thread)
{
    RT_SCHED_DEBUG_IS_LOCKED;

    RT_SCHED_PRIV(thread).remaining_tick = RT_SCHED_PRIV(thread).init_tick;
    RT_SCHED_CTX(thread).stat |= RT_THREAD_STAT_YIELD;

    return RT_EOK;
}
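
/*
 * Illustrative sketch, not part of this file: the typical caller is a
 * yield wrapper for the current thread (rt_thread_yield() in thread.c;
 * its exact body is assumed here), which pairs this call with a
 * reschedule request:
 *
 *     rt_sched_lock_level_t slvl;
 *     rt_sched_lock(&slvl);
 *     rt_sched_thread_yield(rt_thread_self());
 *     rt_sched_unlock_n_resched(slvl);   // pick the next ready thread
 */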

/**
 * @brief Make a suspended thread ready for scheduling
 *
 * @param thread The thread to be made ready
 *
 * @return rt_err_t
 *   - RT_EOK if the operation succeeded
 *   - -RT_EINVAL if the thread is not suspended
 *   - Other error codes from rt_sched_thread_timer_stop() if the timer stop failed
 *
 * @details This function:
 *   - Requires scheduler lock to be held (RT_SCHED_DEBUG_IS_LOCKED)
 *   - Checks if the thread is suspended (returns -RT_EINVAL if not)
 *   - Stops the thread timer if active
 *   - Removes the thread from the suspend list
 *   - Clears the wakeup handler (if RT_USING_SMART is defined)
 *   - Inserts the thread into the ready queue
 *
 * @note Must be called with scheduler lock held.
 *       May fail due to race conditions with a timeout ISR.
 */
rt_err_t rt_sched_thread_ready(struct rt_thread *thread)
{
    rt_err_t error;

    RT_SCHED_DEBUG_IS_LOCKED;

    if (!rt_sched_thread_is_suspended(thread))
    {
        /* failed to proceed, possibly due to a race condition */
        error = -RT_EINVAL;
    }
    else
    {
        if (RT_SCHED_CTX(thread).sched_flag_ttmr_set)
        {
            /**
             * Stop the timeout timer first if it is set, and don't continue
             * if that fails: it probably means a timeout ISR is racing to
             * resume the thread before us.
             */
            error = rt_sched_thread_timer_stop(thread);
        }
        else
        {
            error = RT_EOK;
        }

        if (!error)
        {
            /* remove from suspend list */
            rt_list_remove(&RT_THREAD_LIST_NODE(thread));

        #ifdef RT_USING_SMART
            thread->wakeup_handle.func = RT_NULL;
        #endif

            /* insert into the schedule ready queue */
            rt_sched_insert_thread(thread);
        }
    }

    return error;
}
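
/*
 * Illustrative sketch, not part of this file: a wakeup path such as
 * rt_thread_resume() (exact body assumed) holds the scheduler lock
 * around this call and simply propagates the race documented above:
 *
 *     rt_err_t ret;
 *     rt_sched_lock_level_t slvl;
 *
 *     rt_sched_lock(&slvl);
 *     ret = rt_sched_thread_ready(thread);
 *     rt_sched_unlock(slvl);
 *     // ret == -RT_EINVAL means someone else woke the thread first
 */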

/**
 * @brief Increase the system tick and update the current thread's remaining time slice
 *
 * @param tick The number of ticks to increase
 * @return rt_err_t Always returns RT_EOK
 *
 * @details This function:
 *   - Gets the current thread
 *   - Locks the scheduler
 *   - Decreases the thread's remaining tick count by the specified amount
 *   - If the remaining ticks reach zero:
 *     * Calls rt_sched_thread_yield() to yield the thread
 *     * Requests a reschedule with rt_sched_unlock_n_resched()
 *   - Otherwise simply unlocks the scheduler
 *
 * @note This function is typically called from timer interrupt context.
 *       It handles both SMP and non-SMP cases.
 */
rt_err_t rt_sched_tick_increase(rt_tick_t tick)
{
    struct rt_thread *thread;
    rt_sched_lock_level_t slvl;

    thread = rt_thread_self();

    rt_sched_lock(&slvl);

    if (RT_SCHED_PRIV(thread).remaining_tick > tick)
    {
        RT_SCHED_PRIV(thread).remaining_tick -= tick;
    }
    else
    {
        RT_SCHED_PRIV(thread).remaining_tick = 0;
    }

    if (RT_SCHED_PRIV(thread).remaining_tick)
    {
        rt_sched_unlock(slvl);
    }
    else
    {
        rt_sched_thread_yield(thread);

        /* request a rescheduling even though we are probably in an ISR */
        rt_sched_unlock_n_resched(slvl);
    }

    return RT_EOK;
}
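
/*
 * Illustrative sketch, not part of this file: this function is normally
 * reached from the system tick ISR through rt_tick_increase() (clock.c);
 * the handler name below is a hypothetical Cortex-M example:
 *
 *     void SysTick_Handler(void)
 *     {
 *         rt_interrupt_enter();
 *         rt_tick_increase();   // expected to end up in rt_sched_tick_increase()
 *         rt_interrupt_leave();
 *     }
 */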

/**
 * @brief Update thread priority and adjust scheduling attributes
 *
 * @param thread The thread to update priority for
 * @param priority New priority value to set
 * @param update_init_prio Flag to determine if the initial priority should also be updated
 * @return rt_err_t Always returns RT_EOK
 *
 * @details This function:
 *   - Requires scheduler lock to be held (RT_SCHED_DEBUG_IS_LOCKED)
 *   - For ready threads:
 *     * Removes from ready queue
 *     * Updates priority values
 *     * Recalculates priority attributes (number, mask, etc.)
 *     * Reinserts into ready queue with new priority
 *   - For non-ready threads:
 *     * Only updates priority values and attributes
 *   - Handles both 32-bit and >32-bit priority systems
 *
 * @note Must be called with scheduler lock held.
 *       Thread status must be valid before calling.
 */
static rt_err_t _rt_sched_update_priority(struct rt_thread *thread, rt_uint8_t priority, rt_bool_t update_init_prio)
{
    RT_ASSERT(priority < RT_THREAD_PRIORITY_MAX);
    RT_SCHED_DEBUG_IS_LOCKED;

    /* for a ready thread, change its queue; otherwise simply update the priority */
    if ((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_READY)
    {
        /* remove thread from schedule queue first */
        rt_sched_remove_thread(thread);

        /* change thread priority */
        if (update_init_prio)
        {
            RT_SCHED_PRIV(thread).init_priority = priority;
        }
        RT_SCHED_PRIV(thread).current_priority = priority;

        /* recalculate priority attribute */
#if RT_THREAD_PRIORITY_MAX > 32
        RT_SCHED_PRIV(thread).number = RT_SCHED_PRIV(thread).current_priority >> 3;               /* 5 bits */
        RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).number;
        RT_SCHED_PRIV(thread).high_mask = 1 << (RT_SCHED_PRIV(thread).current_priority & 0x07);   /* 3 bits */
#else
        RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).current_priority;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
        RT_SCHED_CTX(thread).stat = RT_THREAD_INIT;

        /* insert thread to schedule queue again */
        rt_sched_insert_thread(thread);
    }
    else
    {
        if (update_init_prio)
        {
            RT_SCHED_PRIV(thread).init_priority = priority;
        }
        RT_SCHED_PRIV(thread).current_priority = priority;

        /* recalculate priority attribute */
#if RT_THREAD_PRIORITY_MAX > 32
        RT_SCHED_PRIV(thread).number = RT_SCHED_PRIV(thread).current_priority >> 3;               /* 5 bits */
        RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).number;
        RT_SCHED_PRIV(thread).high_mask = 1 << (RT_SCHED_PRIV(thread).current_priority & 0x07);   /* 3 bits */
#else
        RT_SCHED_PRIV(thread).number_mask = 1 << RT_SCHED_PRIV(thread).current_priority;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
    }

    return RT_EOK;
}

/**
 * @brief Update the current priority of the target thread; the initial
 *        priority (init_priority) is left unchanged
 */
rt_err_t rt_sched_thread_change_priority(struct rt_thread *thread, rt_uint8_t priority)
{
    return _rt_sched_update_priority(thread, priority, RT_FALSE);
}

/**
 * @brief Reset the priority of the target thread, updating both its
 *        initial and current priority
 */
rt_err_t rt_sched_thread_reset_priority(struct rt_thread *thread, rt_uint8_t priority)
{
    return _rt_sched_update_priority(thread, priority, RT_TRUE);
}
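
/*
 * Illustrative sketch, not part of this file: a priority-change request
 * (rt_thread_control() with RT_THREAD_CTRL_CHANGE_PRIORITY is the usual
 * entry point; the framing below is an assumption) wraps these calls in
 * the scheduler lock, since _rt_sched_update_priority() may move the
 * thread between ready queues:
 *
 *     rt_sched_lock_level_t slvl;
 *     rt_sched_lock(&slvl);
 *     rt_sched_thread_change_priority(thread, new_prio);  // new_prio: hypothetical
 *     rt_sched_unlock(slvl);
 */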

#ifdef RT_USING_OVERFLOW_CHECK
/**
 * @brief Check thread stack for overflow or near-overflow conditions
 *
 * @param thread The thread to check stack for
 *
 * @details This function performs the following checks:
 *   - For SMART mode without MMU: skips the check if SP is in the user data section
 *   - Without hardware stack guard:
 *     * For upward-growing stacks: checks the magic number at the top and the SP range
 *     * For downward-growing stacks: checks the magic number at the bottom and the SP range
 *     * Triggers an error and an infinite loop on overflow
 *   - Additional warnings when the stack pointer is near the boundaries
 */
void rt_scheduler_stack_check(struct rt_thread *thread)
{
    RT_ASSERT(thread != RT_NULL);

#ifdef RT_USING_SMART
#ifndef ARCH_MM_MMU
    struct rt_lwp *lwp = thread ? (struct rt_lwp *)thread->lwp : 0;

    /* if the stack pointer is located in the user data section, skip the stack check */
    if (lwp && ((rt_uint32_t)thread->sp > (rt_uint32_t)lwp->data_entry &&
    (rt_uint32_t)thread->sp <= (rt_uint32_t)lwp->data_entry + (rt_uint32_t)lwp->data_size))
    {
        return;
    }
#endif /* not defined ARCH_MM_MMU */
#endif /* RT_USING_SMART */

#ifndef RT_USING_HW_STACK_GUARD
#ifdef ARCH_CPU_STACK_GROWS_UPWARD
    if (*((rt_uint8_t *)((rt_uintptr_t)thread->stack_addr + thread->stack_size - 1)) != '#' ||
#else
    if (*((rt_uint8_t *)thread->stack_addr) != '#' ||
#endif /* ARCH_CPU_STACK_GROWS_UPWARD */
        (rt_uintptr_t)thread->sp <= (rt_uintptr_t)thread->stack_addr ||
        (rt_uintptr_t)thread->sp >
        (rt_uintptr_t)thread->stack_addr + (rt_uintptr_t)thread->stack_size)
    {
        rt_base_t dummy = 1;

        LOG_E("thread:%s stack overflow\n", thread->parent.name);

        while (dummy);
    }
#endif /* RT_USING_HW_STACK_GUARD */
#ifdef ARCH_CPU_STACK_GROWS_UPWARD
#ifndef RT_USING_HW_STACK_GUARD
    else if ((rt_uintptr_t)thread->sp > ((rt_uintptr_t)thread->stack_addr + thread->stack_size))
#else
    if ((rt_uintptr_t)thread->sp > ((rt_uintptr_t)thread->stack_addr + thread->stack_size))
#endif
    {
        LOG_W("warning: %s stack pointer is close to the top of the stack.\n",
                   thread->parent.name);
    }
#else
#ifndef RT_USING_HW_STACK_GUARD
    else if ((rt_uintptr_t)thread->sp <= ((rt_uintptr_t)thread->stack_addr + 32))
#else
    if ((rt_uintptr_t)thread->sp <= ((rt_uintptr_t)thread->stack_addr + 32))
#endif
    {
        LOG_W("warning: %s stack pointer is close to the end of the stack.\n",
                   thread->parent.name);
    }
#endif /* ARCH_CPU_STACK_GROWS_UPWARD */
}

#endif /* RT_USING_OVERFLOW_CHECK */
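
/*
 * Illustrative note, an assumption about the wider kernel rather than this
 * file: the '#' magic byte tested above only detects overflows if thread
 * stacks are pre-filled with '#' at creation time, along the lines of:
 *
 *     rt_memset(thread->stack_addr, '#', thread->stack_size);
 *
 * so a clobbered boundary byte reliably indicates that the stack limit
 * was crossed at some point.
 */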