1 /*
2  * Copyright (c) 2006-2025 RT-Thread Development Team
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  *
6  * Change Logs:
7  * Date           Author       Notes
8  * 2006-03-17     Bernard      the first version
9  * 2006-04-28     Bernard      fix the scheduler algorithm
10  * 2006-04-30     Bernard      add SCHEDULER_DEBUG
11  * 2006-05-27     Bernard      fix the scheduler algorithm for same priority
12  *                             thread schedule
13  * 2006-06-04     Bernard      rewrite the scheduler algorithm
14  * 2006-08-03     Bernard      add hook support
15  * 2006-09-05     Bernard      add 32 priority level support
16  * 2006-09-24     Bernard      add rt_system_scheduler_start function
17  * 2009-09-16     Bernard      fix _rt_scheduler_stack_check
18  * 2010-04-11     yi.qiu       add module feature
19  * 2010-07-13     Bernard      fix the maximal number of rt_scheduler_lock_nest
20  *                             issue found by kuronca
21  * 2010-12-13     Bernard      add defunct list initialization even if heap is not used.
22  * 2011-05-10     Bernard      clean scheduler debug log.
23  * 2013-12-21     Grissiom     add rt_critical_level
24  * 2018-11-22     Jesven       remove the current task from ready queue
25  *                             add per cpu ready queue
26  *                             add _scheduler_get_highest_priority_thread to find highest priority task
27  *                             rt_schedule_insert_thread won't insert current task to ready queue
28  *                             in smp version, rt_hw_context_switch_interrupt may switch to
29  *                             new task directly
30  * 2022-01-07     Gabriel      Moving __on_rt_xxxxx_hook to scheduler.c
31  * 2023-03-27     rose_man     Split into scheduler_up.c and scheduler_mp.c
32  * 2023-09-15     xqyjlj       perf rt_hw_interrupt_disable/enable
33  * 2023-12-10     xqyjlj       use rt_hw_spinlock
34  * 2024-01-05     Shell        Fixup of data racing in rt_critical_level
35  * 2024-01-18     Shell        support rt_sched_thread of scheduling status for better mt protection
36  * 2024-01-18     Shell        support rt_hw_thread_self to improve overall performance
37  */
38 
39 #include <rtthread.h>
40 #include <rthw.h>
41 
42 #define DBG_TAG           "kernel.scheduler"
43 #define DBG_LVL           DBG_INFO
44 #include <rtdbg.h>
45 
46 rt_list_t rt_thread_priority_table[RT_THREAD_PRIORITY_MAX];
47 static struct rt_spinlock _mp_scheduler_lock;
48 
49 #define SCHEDULER_LOCK_FLAG(percpu) ((percpu)->sched_lock_flag)
50 
51 #define SCHEDULER_ENTER_CRITICAL(curthr)                    \
52     do                                                      \
53     {                                                       \
54         if (curthr) RT_SCHED_CTX(curthr).critical_lock_nest++; \
55     } while (0)
56 
57 #define SCHEDULER_EXIT_CRITICAL(curthr)                     \
58     do                                                      \
59     {                                                       \
60         if (curthr) RT_SCHED_CTX(curthr).critical_lock_nest--; \
61     } while (0)
62 
63 #define SCHEDULER_CONTEXT_LOCK(percpu)               \
64     do                                               \
65     {                                                \
66         RT_ASSERT(SCHEDULER_LOCK_FLAG(percpu) == 0); \
67         _fast_spin_lock(&_mp_scheduler_lock);        \
68         SCHEDULER_LOCK_FLAG(percpu) = 1;             \
69     } while (0)
70 
71 #define SCHEDULER_CONTEXT_UNLOCK(percpu)             \
72     do                                               \
73     {                                                \
74         RT_ASSERT(SCHEDULER_LOCK_FLAG(percpu) == 1); \
75         SCHEDULER_LOCK_FLAG(percpu) = 0;             \
76         _fast_spin_unlock(&_mp_scheduler_lock);      \
77     } while (0)
78 
79 #define SCHEDULER_LOCK(level)              \
80     do                                     \
81     {                                      \
82         rt_thread_t _curthr;               \
83         struct rt_cpu *_percpu;            \
84         level = rt_hw_local_irq_disable(); \
85         _percpu = rt_cpu_self();           \
86         _curthr = _percpu->current_thread; \
87         SCHEDULER_ENTER_CRITICAL(_curthr); \
88         SCHEDULER_CONTEXT_LOCK(_percpu);   \
89     } while (0)
90 
91 #define SCHEDULER_UNLOCK(level)            \
92     do                                     \
93     {                                      \
94         rt_thread_t _curthr;               \
95         struct rt_cpu *_percpu;            \
96         _percpu = rt_cpu_self();           \
97         _curthr = _percpu->current_thread; \
98         SCHEDULER_CONTEXT_UNLOCK(_percpu); \
99         SCHEDULER_EXIT_CRITICAL(_curthr);  \
100         rt_hw_local_irq_enable(level);     \
101     } while (0)
102 
103 #ifdef ARCH_USING_HW_THREAD_SELF
104 #define IS_CRITICAL_SWITCH_PEND(pcpu, curthr)  (RT_SCHED_CTX(curthr).critical_switch_flag)
105 #define SET_CRITICAL_SWITCH_FLAG(pcpu, curthr) (RT_SCHED_CTX(curthr).critical_switch_flag = 1)
106 #define CLR_CRITICAL_SWITCH_FLAG(pcpu, curthr) (RT_SCHED_CTX(curthr).critical_switch_flag = 0)
107 
108 #else /* !ARCH_USING_HW_THREAD_SELF */
109 #define IS_CRITICAL_SWITCH_PEND(pcpu, curthr)  ((pcpu)->critical_switch_flag)
110 #define SET_CRITICAL_SWITCH_FLAG(pcpu, curthr) ((pcpu)->critical_switch_flag = 1)
111 #define CLR_CRITICAL_SWITCH_FLAG(pcpu, curthr) ((pcpu)->critical_switch_flag = 0)
112 
113 #endif /* ARCH_USING_HW_THREAD_SELF */
114 
115 static rt_uint32_t rt_thread_ready_priority_group;
116 #if RT_THREAD_PRIORITY_MAX > 32
117 /* Maximum priority level, 256 */
118 static rt_uint8_t rt_thread_ready_table[32];
119 #endif /* RT_THREAD_PRIORITY_MAX > 32 */
120 
121 /**
122  * Used only inside the scheduler to optimize control flow, where the critical
123  * region is already guaranteed.
124  */
125 rt_inline void _fast_spin_lock(struct rt_spinlock *lock)
126 {
127     rt_hw_spin_lock(&lock->lock);
128 
129     RT_SPIN_LOCK_DEBUG(lock);
130 }
131 
132 rt_inline void _fast_spin_unlock(struct rt_spinlock *lock)
133 {
134     rt_base_t critical_level;
135     RT_SPIN_UNLOCK_DEBUG(lock, critical_level);
136 
137     /* in the scheduler path, we don't check the critical level */
138     RT_UNUSED(critical_level);
139 
140     rt_hw_spin_unlock(&lock->lock);
141 }
142 
143 #if defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR)
144 static void (*rt_scheduler_hook)(struct rt_thread *from, struct rt_thread *to);
145 static void (*rt_scheduler_switch_hook)(struct rt_thread *tid);
146 
147 /**
148  * @addtogroup group_hook
149  */
150 
151 /**@{*/
152 
153 /**
154  * @brief This function will set a hook function, which will be invoked when thread
155  *        switch happens.
156  *
157  * @param hook is the hook function.
158  */
159 void rt_scheduler_sethook(void (*hook)(struct rt_thread *from, struct rt_thread *to))
160 {
161     rt_scheduler_hook = hook;
162 }
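
/*
 * Example (illustrative sketch, not part of the original sources): a minimal
 * trace hook. The hook runs on the scheduler's critical path, so it must stay
 * short and must not block.
 *
 *     static void trace_switch(struct rt_thread *from, struct rt_thread *to)
 *     {
 *         rt_kprintf("switch: %s -> %s\n", from->parent.name, to->parent.name);
 *     }
 *
 *     rt_scheduler_sethook(trace_switch);
 */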
163 
164 /**
165  * @brief This function will set a hook function, which will be invoked when context
166  *        switch happens.
167  *
168  * @param hook is the hook function.
169  */
170 void rt_scheduler_switch_sethook(void (*hook)(struct rt_thread *tid))
171 {
172     rt_scheduler_switch_hook = hook;
173 }
174 
175 /**@}*/
176 #endif /* RT_USING_HOOK */
177 
178 #if RT_THREAD_PRIORITY_MAX > 32
179 
180 rt_inline rt_base_t _get_global_highest_ready_prio(void)
181 {
182     rt_ubase_t number;
183     rt_ubase_t highest_ready_priority;
184 
185     number = __rt_ffs(rt_thread_ready_priority_group) - 1;
186     if (number != -1)
187     {
188         highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1;
189     }
190     else
191     {
192         highest_ready_priority = -1;
193     }
194     return highest_ready_priority;
195 }
196 
197 rt_inline rt_base_t _get_local_highest_ready_prio(struct rt_cpu* pcpu)
198 {
199     rt_ubase_t number;
200     rt_ubase_t local_highest_ready_priority;
201 
202     number = __rt_ffs(pcpu->priority_group) - 1;
203     if (number != -1)
204     {
205         local_highest_ready_priority = (number << 3) + __rt_ffs(pcpu->ready_table[number]) - 1;
206     }
207     else
208     {
209         local_highest_ready_priority = -1;
210     }
211     return local_highest_ready_priority;
212 }
213 
214 #else /* if RT_THREAD_PRIORITY_MAX <= 32 */
215 
216 rt_inline rt_base_t _get_global_highest_ready_prio(void)
217 {
218     return __rt_ffs(rt_thread_ready_priority_group) - 1;
219 }
220 
221 rt_inline rt_base_t _get_local_highest_ready_prio(struct rt_cpu* pcpu)
222 {
223     return __rt_ffs(pcpu->priority_group) - 1;
224 }
225 
226 #endif /* RT_THREAD_PRIORITY_MAX > 32 */
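
/*
 * Worked example (illustrative): with RT_THREAD_PRIORITY_MAX > 32, a ready
 * thread at priority 37 sets bit (37 >> 3) == 4 in the priority group and bit
 * (37 & 0x07) == 5 in ready_table[4]. The helpers above recover the priority
 * as ((__rt_ffs(group) - 1) << 3) + __rt_ffs(ready_table[4]) - 1
 * == (4 << 3) + 6 - 1 == 37.
 */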
227 
228 /*
229  * get the highest priority thread in ready queue
230  */
231 static struct rt_thread* _scheduler_get_highest_priority_thread(rt_ubase_t *highest_prio)
232 {
233     struct rt_thread *highest_priority_thread;
234     rt_ubase_t highest_ready_priority, local_highest_ready_priority;
235     struct rt_cpu* pcpu = rt_cpu_self();
236 
237     highest_ready_priority = _get_global_highest_ready_prio();
238     local_highest_ready_priority = _get_local_highest_ready_prio(pcpu);
239 
240     /* get highest ready priority thread */
241     if (highest_ready_priority < local_highest_ready_priority)
242     {
243         *highest_prio = highest_ready_priority;
244 
245         highest_priority_thread = RT_THREAD_LIST_NODE_ENTRY(
246             rt_thread_priority_table[highest_ready_priority].next);
247     }
248     else
249     {
250         *highest_prio = local_highest_ready_priority;
251         if (local_highest_ready_priority != -1)
252         {
253             highest_priority_thread = RT_THREAD_LIST_NODE_ENTRY(
254                 pcpu->priority_table[local_highest_ready_priority].next);
255         }
256         else
257         {
258             highest_priority_thread = RT_NULL;
259         }
260     }
261 
262     RT_ASSERT(!highest_priority_thread ||
263               rt_object_get_type(&highest_priority_thread->parent) == RT_Object_Class_Thread);
264     return highest_priority_thread;
265 }
266 
267 /**
268  * @brief   set READY and insert thread to ready queue
269  *
270  * @note    caller must hold the `_mp_scheduler_lock` lock
271  */
272 static void _sched_insert_thread_locked(struct rt_thread *thread)
273 {
274     int cpu_id;
275     int bind_cpu;
276     rt_uint32_t cpu_mask;
277 
278     if ((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_READY)
279     {
280         /* already in ready queue */
281         return ;
282     }
283     else if (RT_SCHED_CTX(thread).oncpu != RT_CPU_DETACHED)
284     {
285         /**
286      * only YIELD -> READY and SUSPEND -> READY transitions are allowed by this
287      * API. However, this is a RUNNING thread, so here we reset its status and let it go.
288          */
289         RT_SCHED_CTX(thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
290         return ;
291     }
292 
293     /* READY thread, insert to ready queue */
294     RT_SCHED_CTX(thread).stat = RT_THREAD_READY | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
295 
296     cpu_id   = rt_hw_cpu_id();
297     bind_cpu = RT_SCHED_CTX(thread).bind_cpu;
298 
299     /* insert thread to ready list */
300     if (bind_cpu == RT_CPUS_NR)
301     {
302 #if RT_THREAD_PRIORITY_MAX > 32
303         rt_thread_ready_table[RT_SCHED_PRIV(thread).number] |= RT_SCHED_PRIV(thread).high_mask;
304 #endif /* RT_THREAD_PRIORITY_MAX > 32 */
305         rt_thread_ready_priority_group |= RT_SCHED_PRIV(thread).number_mask;
306 
307         /* no time slice left (YIELD): insert the thread at the tail of the ready list */
308         if((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
309         {
310             rt_list_insert_before(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
311                                   &RT_THREAD_LIST_NODE(thread));
312         }
313         /* time slices remain: insert the thread at the head of the ready list so it is scheduled first next time */
314         else
315         {
316             rt_list_insert_after(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
317                                  &RT_THREAD_LIST_NODE(thread));
318         }
319 
320         cpu_mask = RT_CPU_MASK ^ (1 << cpu_id);
321         rt_hw_ipi_send(RT_SCHEDULE_IPI, cpu_mask);
322     }
323     else
324     {
325         struct rt_cpu *pcpu = rt_cpu_index(bind_cpu);
326 
327 #if RT_THREAD_PRIORITY_MAX > 32
328         pcpu->ready_table[RT_SCHED_PRIV(thread).number] |= RT_SCHED_PRIV(thread).high_mask;
329 #endif /* RT_THREAD_PRIORITY_MAX > 32 */
330         pcpu->priority_group |= RT_SCHED_PRIV(thread).number_mask;
331 
332         /* no time slice left (YIELD): insert the thread at the tail of the ready list */
333         if((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
334         {
335             rt_list_insert_before(&(rt_cpu_index(bind_cpu)->priority_table[RT_SCHED_PRIV(thread).current_priority]),
336                                   &RT_THREAD_LIST_NODE(thread));
337         }
338         /* time slices remain: insert the thread at the head of the ready list so it is scheduled first next time */
339         else
340         {
341             rt_list_insert_after(&(rt_cpu_index(bind_cpu)->priority_table[RT_SCHED_PRIV(thread).current_priority]),
342                                  &RT_THREAD_LIST_NODE(thread));
343         }
344 
345         if (cpu_id != bind_cpu)
346         {
347             cpu_mask = 1 << bind_cpu;
348             rt_hw_ipi_send(RT_SCHEDULE_IPI, cpu_mask);
349         }
350     }
351 
352     LOG_D("insert thread[%.*s], the priority: %d",
353           RT_NAME_MAX, thread->parent.name, RT_SCHED_PRIV(thread).current_priority);
354 }
355 
356 /* remove thread from ready queue */
357 static void _sched_remove_thread_locked(struct rt_thread *thread)
358 {
359     LOG_D("%s [%.*s], the priority: %d", __func__,
360           RT_NAME_MAX, thread->parent.name,
361           RT_SCHED_PRIV(thread).current_priority);
362 
363     /* remove thread from ready list */
364     rt_list_remove(&RT_THREAD_LIST_NODE(thread));
365 
366     if (RT_SCHED_CTX(thread).bind_cpu == RT_CPUS_NR)
367     {
368         if (rt_list_isempty(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority])))
369         {
370 #if RT_THREAD_PRIORITY_MAX > 32
371             rt_thread_ready_table[RT_SCHED_PRIV(thread).number] &= ~RT_SCHED_PRIV(thread).high_mask;
372             if (rt_thread_ready_table[RT_SCHED_PRIV(thread).number] == 0)
373             {
374                 rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
375             }
376 #else
377             rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
378 #endif /* RT_THREAD_PRIORITY_MAX > 32 */
379         }
380     }
381     else
382     {
383         struct rt_cpu *pcpu = rt_cpu_index(RT_SCHED_CTX(thread).bind_cpu);
384 
385         if (rt_list_isempty(&(pcpu->priority_table[RT_SCHED_PRIV(thread).current_priority])))
386         {
387 #if RT_THREAD_PRIORITY_MAX > 32
388             pcpu->ready_table[RT_SCHED_PRIV(thread).number] &= ~RT_SCHED_PRIV(thread).high_mask;
389             if (pcpu->ready_table[RT_SCHED_PRIV(thread).number] == 0)
390             {
391                 pcpu->priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
392             }
393 #else
394             pcpu->priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
395 #endif /* RT_THREAD_PRIORITY_MAX > 32 */
396         }
397     }
398 }
399 
400 /**
401  * @brief Initialize the system scheduler.
402  *
403  * @details This function performs the following initialization tasks:
404  *          - Initializes the global scheduler spinlock for multi-core synchronization.
405  *          - Initializes the global priority table for saving ready threads of all priority levels.
406  *          - For each CPU core:
407  *            * Initializes per-CPU priority tables
408  *            * Sets initial CPU state (irq_switch_flag, current_priority, etc.)
409  *            * Initializes per-CPU ready tables (if priority > 32)
410  *            * Initializes per-CPU spinlock (if RT_USING_SMART is defined)
411  *          - Initializes the global ready priority group and tables (if priority > 32) as bitmaps for all priorities.
412  *
413  * @note This function must be called before any thread scheduling can occur.
414  *       It prepares the scheduler data structures for multi-core operation.
415  */
416 void rt_system_scheduler_init(void)
417 {
418     int cpu;
419     rt_base_t offset;
420 
421     LOG_D("start scheduler: max priority 0x%02x",
422           RT_THREAD_PRIORITY_MAX);
423 
424     rt_spin_lock_init(&_mp_scheduler_lock);
425 
426     for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
427     {
428         rt_list_init(&rt_thread_priority_table[offset]);
429     }
430 
431     for (cpu = 0; cpu < RT_CPUS_NR; cpu++)
432     {
433         struct rt_cpu *pcpu =  rt_cpu_index(cpu);
434         for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
435         {
436             rt_list_init(&pcpu->priority_table[offset]);
437         }
438 
439         pcpu->irq_switch_flag = 0;
440         pcpu->current_priority = RT_THREAD_PRIORITY_MAX - 1;
441         pcpu->current_thread = RT_NULL;
442         pcpu->priority_group = 0;
443 
444 #if RT_THREAD_PRIORITY_MAX > 32
445         rt_memset(pcpu->ready_table, 0, sizeof(pcpu->ready_table));
446 #endif /* RT_THREAD_PRIORITY_MAX > 32 */
447 
448 #ifdef RT_USING_SMART
449         rt_spin_lock_init(&(pcpu->spinlock));
450 #endif
451     }
452 
453     /* initialize ready priority group */
454     rt_thread_ready_priority_group = 0;
455 
456 #if RT_THREAD_PRIORITY_MAX > 32
457     /* initialize ready table */
458     rt_memset(rt_thread_ready_table, 0, sizeof(rt_thread_ready_table));
459 #endif /* RT_THREAD_PRIORITY_MAX > 32 */
460 }
461 
462 /**
463  * @brief Start the system scheduler and switch to the highest priority thread.
464  *
465  * @details This function performs the following operations:
466  *          - Releases legacy CPU lock (if any)
467  *          - Disables interrupts to ensure atomic operation
468  *          - Acquires scheduler lock for thread safety
469  *          - Selects the highest priority thread from ready queue
470  *          - Removes the selected thread from ready queue
471  *          - Assigns current CPU core to the selected thread
472  *          - Performs context switch to the selected thread
473  *
474  * @note This function will not return after successful execution.
475  *       It performs the initial thread switch during system startup.
476  *       The scheduler must be initialized before calling this function.
477  */
478 void rt_system_scheduler_start(void)
479 {
480     struct rt_thread *to_thread;
481     rt_ubase_t highest_ready_priority;
482 
483     /**
484      * legacy rt_cpus_lock. Some BSP code still uses it for its critical
485      * regions. Since the scheduler never touches it, we just release it
486      * on entry.
487      */
488     rt_hw_spin_unlock(&_cpus_lock);
489 
490     /* an ISR would corrupt the coherency of the running frame */
491     rt_hw_local_irq_disable();
492 
493     /**
494      * for accessing the scheduler context. Note that we don't have a
495      * current_thread at this point
496      */
497     _fast_spin_lock(&_mp_scheduler_lock);
498 
499     /* get the thread scheduling to */
500     to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
501     RT_ASSERT(to_thread);
502 
503     /* to_thread is picked to run on the current core, so remove it from the ready queue */
504     _sched_remove_thread_locked(to_thread);
505 
506     /* dedicate the current core to `to_thread` */
507     RT_SCHED_CTX(to_thread).oncpu = rt_hw_cpu_id();
508     RT_SCHED_CTX(to_thread).stat = RT_THREAD_RUNNING;
509 
510     LOG_D("[cpu#%d] switch to priority#%d thread:%.*s(sp:0x%08x)",
511           rt_hw_cpu_id(), RT_SCHED_PRIV(to_thread).current_priority,
512           RT_NAME_MAX, to_thread->parent.name, to_thread->sp);
513 
514     _fast_spin_unlock(&_mp_scheduler_lock);
515 
516     /* switch to new thread */
517     rt_hw_context_switch_to((rt_ubase_t)&to_thread->sp, to_thread);
518 
519     /* never come back */
520 }
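
/*
 * Illustrative sketch of the expected call order during kernel boot (the exact
 * sequence lives in the startup code, e.g. rtthread_startup(); shown here only
 * for orientation):
 *
 *     rt_system_scheduler_init();   // prepare ready queues and the scheduler lock
 *     // ... create and start the init threads (main, idle, timer) ...
 *     rt_system_scheduler_start();  // switch to the highest-priority thread; never returns
 */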
521 
522 /**
523  * @addtogroup group_thread_management
524  * @cond
525  */
526 
527 /**@{*/
528 
529 /**
530  * @brief This function will handle IPI interrupt and do a scheduling in system.
531  *
532  * @param vector is the number of IPI interrupt for system scheduling.
533  *
534  * @param param is not used, and can be set to RT_NULL.
535  *
536  * @note this function should be invoked or registered as an ISR in the BSP.
537  */
538 void rt_scheduler_ipi_handler(int vector, void *param)
539 {
540     rt_schedule();
541 }
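
/*
 * Example (illustrative sketch): an SMP BSP typically wires this handler up
 * while bringing up the secondary cores, assuming the interrupt layer provides
 * rt_hw_ipi_handler_install() and rt_hw_interrupt_umask():
 *
 *     rt_hw_ipi_handler_install(RT_SCHEDULE_IPI, rt_scheduler_ipi_handler);
 *     rt_hw_interrupt_umask(RT_SCHEDULE_IPI);
 */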
542 
543 /**
544  * @brief Lock the system scheduler
545  *
546  * @param plvl pointer to the variable where the lock level is stored
547  *
548  * @return rt_err_t RT_EOK
549  */
550 rt_err_t rt_sched_lock(rt_sched_lock_level_t *plvl)
551 {
552     rt_base_t level;
553     if (!plvl)
554         return -RT_EINVAL;
555 
556     SCHEDULER_LOCK(level);
557     *plvl = level;
558 
559     return RT_EOK;
560 }
561 
562 /**
563  * @brief Unlock the system scheduler
564  * @note this will not cause the scheduler to do a reschedule
565  *
566  * @param level the lock level of previous call to rt_sched_lock()
567  *
568  * @return rt_err_t RT_EOK
569  */
570 rt_err_t rt_sched_unlock(rt_sched_lock_level_t level)
571 {
572     SCHEDULER_UNLOCK(level);
573 
574     return RT_EOK;
575 }
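
/*
 * Example (illustrative sketch): rt_sched_lock()/rt_sched_unlock() are used in
 * pairs to sample or update scheduler-protected state without triggering a
 * reschedule on release ("thread" and "stat" below are placeholders):
 *
 *     rt_sched_lock_level_t slvl;
 *     rt_uint8_t stat;
 *
 *     rt_sched_lock(&slvl);
 *     stat = rt_sched_thread_get_stat(thread);  // consistent snapshot
 *     rt_sched_unlock(slvl);
 */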
576 
577 /**
578  * @brief Check if the scheduler is currently locked
579  *
580  * @return rt_bool_t Returns RT_TRUE if scheduler is locked, RT_FALSE otherwise
581  *
582  * @note This function checks the scheduler lock status in a thread-safe manner
583  *       by temporarily disabling interrupts to get consistent state.
584  */
585 rt_bool_t rt_sched_is_locked(void)
586 {
587     rt_bool_t rc;
588     rt_base_t level;
589     struct rt_cpu *pcpu;
590 
591     level = rt_hw_local_irq_disable();
592     pcpu = rt_cpu_self();
593 
594     /* get lock stat which is a boolean value */
595     rc = pcpu->sched_lock_flag;
596 
597     rt_hw_local_irq_enable(level);
598     return rc;
599 }
600 
601 /**
602  * @brief Pick the highest runnable thread, and pass the control to it
603  *
604  * @note caller should hold the scheduler context lock; it is released after the
605  *       context switch completes, or by the caller if no switch happens
606  */
607 static rt_thread_t _prepare_context_switch_locked(int cpu_id,
608                                                   struct rt_cpu *pcpu,
609                                                   rt_thread_t current_thread)
610 {
611     rt_thread_t to_thread = RT_NULL;
612     rt_ubase_t highest_ready_priority;
613 
614     /* quickly check whether any other ready threads are queuing */
615     if (rt_thread_ready_priority_group != 0 || pcpu->priority_group != 0)
616     {
617         /* pick the highest ready thread */
618         to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);
619 
620         /* detach current thread from percpu scheduling context */
621         RT_SCHED_CTX(current_thread).oncpu = RT_CPU_DETACHED;
622 
623         /* check whether current thread should be put back to the ready queue, or scheduled again */
624         if ((RT_SCHED_CTX(current_thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
625         {
626             /* check if current thread can be running on current core again */
627             if (RT_SCHED_CTX(current_thread).bind_cpu == RT_CPUS_NR
628                 || RT_SCHED_CTX(current_thread).bind_cpu == cpu_id)
629             {
630                 /* if current_thread is the highest runnable thread */
631                 if (RT_SCHED_PRIV(current_thread).current_priority < highest_ready_priority)
632                 {
633                     to_thread = current_thread;
634                 }
635                 /* or no higher-priority thread exists and it still has remaining ticks */
636                 else if (RT_SCHED_PRIV(current_thread).current_priority == highest_ready_priority &&
637                          (RT_SCHED_CTX(current_thread).stat & RT_THREAD_STAT_YIELD_MASK) == 0)
638                 {
639                     to_thread = current_thread;
640                 }
641                 /* otherwise give out the core */
642                 else
643                 {
644                     _sched_insert_thread_locked(current_thread);
645                 }
646             }
647             else
648             {
649                 /* put current_thread to ready queue of another core */
650                 _sched_insert_thread_locked(current_thread);
651             }
652 
653             /* consume the yield flags after scheduling */
654             RT_SCHED_CTX(current_thread).stat &= ~RT_THREAD_STAT_YIELD_MASK;
655         }
656 
657         /**
658          * Now the destination thread is determined and the core is passed to
659          * it. Though the percpu scheduling context is not updated here, the
660          * cpu stays locked until all the scheduling work is done, so it is
661          * safe for any observer that synchronizes properly (takes the
662          * SCHEDULER_LOCK) to regard this thread as the running thread on
663          * this core.
664          */
665         RT_SCHED_CTX(to_thread).oncpu = cpu_id;
666 
667         /* check if context switch is required */
668         if (to_thread != current_thread)
669         {
670             pcpu->current_priority = (rt_uint8_t)highest_ready_priority;
671 
672             RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (current_thread, to_thread));
673 
674             /* remove to_thread from ready queue and update its status to RUNNING */
675             _sched_remove_thread_locked(to_thread);
676             RT_SCHED_CTX(to_thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(to_thread).stat & ~RT_THREAD_STAT_MASK);
677 
678             RT_SCHEDULER_STACK_CHECK(to_thread);
679 
680             RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (current_thread));
681         }
682         else
683         {
684             /* current thread is still the best runnable thread */
685             to_thread = RT_NULL;
686         }
687     }
688     else
689     {
690         /* no ready threads */
691         to_thread = RT_NULL;
692     }
693 
694     return to_thread;
695 }
696 
697 #ifdef RT_USING_SIGNALS
698 /**
699  * @brief Preprocess pending signals for a suspended thread
700  *
701  * @param current_thread The thread to check for pending signals
702  *
703  * @details This function checks if the specified thread is suspended and has pending signals.
704  *          If both conditions are met, it will wake up/resume the thread to process the signals.
705  */
706 static void _sched_thread_preprocess_signal(struct rt_thread *current_thread)
707 {
708     /* should process signal? */
709     if (rt_sched_thread_is_suspended(current_thread))
710     {
711         /* if current_thread signal is in pending */
712         if ((RT_SCHED_CTX(current_thread).stat & RT_THREAD_STAT_SIGNAL_MASK) & RT_THREAD_STAT_SIGNAL_PENDING)
713         {
714 #ifdef RT_USING_SMART
715             rt_thread_wakeup(current_thread);
716 #else
717             rt_thread_resume(current_thread);
718 #endif
719         }
720     }
721 }
722 
723 /**
724  * @brief Process pending signals for the current thread
725  *
726  * @param current_thread The thread to process signals for
727  *
728  * @details This function:
729  *          - Locks the scheduler to ensure thread safety
730  *          - Checks if the thread has pending signals
731  *          - If signals are pending:
732  *            * Clears the pending flag
733  *            * Unlocks the scheduler
734  *            * Calls signal handler to process the signals
735  *          - If no signals pending, simply unlocks the scheduler
736  */
737 static void _sched_thread_process_signal(struct rt_thread *current_thread)
738 {
739     rt_base_t level;
740     SCHEDULER_LOCK(level);
741 
742     /* check stat of thread for signal */
743     if (RT_SCHED_CTX(current_thread).stat & RT_THREAD_STAT_SIGNAL_PENDING)
744     {
745         extern void rt_thread_handle_sig(rt_bool_t clean_state);
746 
747         RT_SCHED_CTX(current_thread).stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;
748 
749         SCHEDULER_UNLOCK(level);
750 
751         /* check signal status */
752         rt_thread_handle_sig(RT_TRUE);
753     }
754     else
755     {
756         SCHEDULER_UNLOCK(level);
757     }
758 
759     /* lock is released above */
760 }
761 
762 #define SCHED_THREAD_PREPROCESS_SIGNAL(pcpu, curthr)    \
763     do                                            \
764     {                                             \
765         SCHEDULER_CONTEXT_LOCK(pcpu);   \
766         _sched_thread_preprocess_signal(curthr);  \
767         SCHEDULER_CONTEXT_UNLOCK(pcpu); \
768     } while (0)
769 #define SCHED_THREAD_PREPROCESS_SIGNAL_LOCKED(curthr) \
770     _sched_thread_preprocess_signal(curthr)
771 #define SCHED_THREAD_PROCESS_SIGNAL(curthr) _sched_thread_process_signal(curthr)
772 
773 #else /* ! RT_USING_SIGNALS */
774 
775 #define SCHED_THREAD_PREPROCESS_SIGNAL(pcpu, curthr)
776 #define SCHED_THREAD_PREPROCESS_SIGNAL_LOCKED(curthr)
777 #define SCHED_THREAD_PROCESS_SIGNAL(curthr)
778 #endif /* RT_USING_SIGNALS */
779 
780 /**
781  * @brief Unlock scheduler and perform rescheduling if needed
782  *
783  * @param level The scheduler lock level obtained from rt_sched_lock()
784  *
785  * @return rt_err_t
786  *   - RT_EOK: Success
787  *   - -RT_EBUSY: Scheduler not available
788  *   - -RT_ESCHEDISR: Called in interrupt context
789  *   - -RT_ESCHEDLOCKED: Scheduler still locked by others
790  *
791  * @details This function:
792  *   - Releases scheduler lock at specified level
793  *   - Checks if rescheduling is needed
794  *   - If needed, finds highest priority thread and switches to it
795  *   - Processes pending signals for current thread
796  *   - Handles various error conditions
797  *
798  * @note Must be called in pair with rt_sched_lock()
799  *       May trigger context switch if conditions met
800  */
801 rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level)
802 {
803     struct rt_thread *to_thread;
804     struct rt_thread *current_thread;
805     struct rt_cpu    *pcpu;
806     int              cpu_id;
807     rt_err_t         error = RT_EOK;
808 
809     cpu_id = rt_hw_cpu_id();
810     pcpu   = rt_cpu_index(cpu_id);
811     current_thread = pcpu->current_thread;
812 
813     if (!current_thread)
814     {
815         /* scheduler is unavailable yet */
816         SCHEDULER_CONTEXT_UNLOCK(pcpu);
817         SCHEDULER_EXIT_CRITICAL(current_thread);
818         rt_hw_local_irq_enable(level);
819         return -RT_EBUSY;
820     }
821 
822     /* whether to do the switch in interrupt context */
823     if (rt_atomic_load(&(pcpu->irq_nest)))
824     {
825         pcpu->irq_switch_flag = 1;
826         SCHEDULER_CONTEXT_UNLOCK(pcpu);
827         SCHEDULER_EXIT_CRITICAL(current_thread);
828         rt_hw_local_irq_enable(level);
829         return -RT_ESCHEDISR;
830     }
831 
832     /* prepare current_thread for processing if signals existed */
833     SCHED_THREAD_PREPROCESS_SIGNAL_LOCKED(current_thread);
834 
835     /* whether caller had locked the local scheduler already */
836     if (RT_SCHED_CTX(current_thread).critical_lock_nest > 1)
837     {
838         /* leaving critical region of global context since we can't schedule */
839         SCHEDULER_CONTEXT_UNLOCK(pcpu);
840 
841         SET_CRITICAL_SWITCH_FLAG(pcpu, current_thread);
842         error = -RT_ESCHEDLOCKED;
843 
844         SCHEDULER_EXIT_CRITICAL(current_thread);
845     }
846     else
847     {
848         /* flush critical switch flag since a scheduling is done */
849         CLR_CRITICAL_SWITCH_FLAG(pcpu, current_thread);
850 
851         /* pick the highest runnable thread, and pass the control to it */
852         to_thread = _prepare_context_switch_locked(cpu_id, pcpu, current_thread);
853         if (to_thread)
854         {
855             /* switch to new thread */
856             LOG_D("[cpu#%d] UNLOCK switch to priority#%d "
857                   "thread:%.*s(sp:0x%08x), "
858                   "from thread:%.*s(sp: 0x%08x)",
859                   cpu_id, RT_SCHED_PRIV(to_thread).current_priority,
860                   RT_NAME_MAX, to_thread->parent.name, to_thread->sp,
861                   RT_NAME_MAX, current_thread->parent.name, current_thread->sp);
862 
863             rt_hw_context_switch((rt_ubase_t)&current_thread->sp,
864                                  (rt_ubase_t)&to_thread->sp, to_thread);
865         }
866         else
867         {
868             SCHEDULER_CONTEXT_UNLOCK(pcpu);
869             SCHEDULER_EXIT_CRITICAL(current_thread);
870         }
871     }
872 
873     /* leaving critical region of percpu scheduling context */
874     rt_hw_local_irq_enable(level);
875 
876     /* process signals on thread if any existed */
877     SCHED_THREAD_PROCESS_SIGNAL(current_thread);
878 
879     return error;
880 }
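
/*
 * Example (illustrative sketch): the usual in-kernel pattern is to take the
 * scheduler lock, change a thread's scheduling state, then let the unlock
 * decide whether a switch is required ("thread" below is a placeholder):
 *
 *     rt_sched_lock_level_t slvl;
 *
 *     rt_sched_lock(&slvl);
 *     rt_sched_insert_thread(thread);      // make the thread READY
 *     rt_sched_unlock_n_resched(slvl);     // reschedule if it should preempt us
 */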
881 
882 /**
883  * @brief This function will perform one scheduling. It will select one thread
884  *        with the highest priority level in global ready queue or local ready queue,
885  *        then switch to it.
886  *
887  * @details This function performs the following operations:
888  *   - Disables interrupts to enter critical section
889  *   - Gets current CPU and thread context
890  *   - Checks if called from interrupt context
891  *   - Finds highest priority ready thread
892  *   - Performs context switch if needed
893  *   - Processes pending signals
894  *   - Restores interrupt state
895  */
896 void rt_schedule(void)
897 {
898     rt_base_t level;
899     struct rt_thread *to_thread;
900     struct rt_thread *current_thread;
901     struct rt_cpu    *pcpu;
902     int              cpu_id;
903 
904     /* enter critical region of percpu scheduling context */
905     level = rt_hw_local_irq_disable();
906 
907     /* get percpu scheduling context */
908     cpu_id = rt_hw_cpu_id();
909     pcpu   = rt_cpu_index(cpu_id);
910     current_thread = pcpu->current_thread;
911 
912     /* whether to do the switch in interrupt context */
913     if (rt_atomic_load(&(pcpu->irq_nest)))
914     {
915         pcpu->irq_switch_flag = 1;
916         rt_hw_local_irq_enable(level);
917         return ; /* -RT_ESCHEDISR */
918     }
919 
920     /* forbid any recursive entries of schedule() */
921     SCHEDULER_ENTER_CRITICAL(current_thread);
922 
923     /* prepare current_thread for processing if signals existed */
924     SCHED_THREAD_PREPROCESS_SIGNAL(pcpu, current_thread);
925 
926     /* whether caller had locked the local scheduler already */
927     if (RT_SCHED_CTX(current_thread).critical_lock_nest > 1)
928     {
929         SET_CRITICAL_SWITCH_FLAG(pcpu, current_thread);
930 
931         SCHEDULER_EXIT_CRITICAL(current_thread);
932 
933         /* -RT_ESCHEDLOCKED */
934     }
935     else
936     {
937         /* flush critical switch flag since a scheduling is done */
938         CLR_CRITICAL_SWITCH_FLAG(pcpu, current_thread);
939         pcpu->irq_switch_flag = 0;
940 
941         /**
942          * take the context lock before we do the real scheduling work. The lock
943          * is released after the switch completes, or just below if no switch is needed.
944          */
945         SCHEDULER_CONTEXT_LOCK(pcpu);
946 
947         /* pick the highest runnable thread, and pass the control to it */
948         to_thread = _prepare_context_switch_locked(cpu_id, pcpu, current_thread);
949 
950         if (to_thread)
951         {
952             LOG_D("[cpu#%d] switch to priority#%d "
953                   "thread:%.*s(sp:0x%08x), "
954                   "from thread:%.*s(sp: 0x%08x)",
955                   cpu_id, RT_SCHED_PRIV(to_thread).current_priority,
956                   RT_NAME_MAX, to_thread->parent.name, to_thread->sp,
957                   RT_NAME_MAX, current_thread->parent.name, current_thread->sp);
958 
959             rt_hw_context_switch((rt_ubase_t)&current_thread->sp,
960                                  (rt_ubase_t)&to_thread->sp, to_thread);
961         }
962         else
963         {
964             /* current thread continue to take the core */
965             SCHEDULER_CONTEXT_UNLOCK(pcpu);
966             SCHEDULER_EXIT_CRITICAL(current_thread);
967         }
968     }
969 
970     /* leaving critical region of percpu scheduling context */
971     rt_hw_local_irq_enable(level);
972 
973     /* process signals on thread if any existed */
974     SCHED_THREAD_PROCESS_SIGNAL(current_thread);
975 }
976 
977 /**
978  * @brief Perform thread scheduling after an interrupt context switch
979  *
980  * @param context The interrupt context pointer
981  *
982  * @details This function handles scheduling when returning from interrupt context:
983  *   - Checks if scheduling is needed (irq_switch_flag set)
984  *   - If needed, finds highest priority ready thread
985  *   - Performs context switch to new thread if available
986  *   - Handles cases where scheduler is locked or still in interrupt context
987  *   - Processes pending signals before scheduling
988  */
989 void rt_scheduler_do_irq_switch(void *context)
990 {
991     int              cpu_id;
992     rt_base_t        level;
993     struct rt_cpu    *pcpu;
994     struct rt_thread *to_thread;
995     struct rt_thread *current_thread;
996 
997     level = rt_hw_local_irq_disable();
998 
999     cpu_id = rt_hw_cpu_id();
1000     pcpu   = rt_cpu_index(cpu_id);
1001     current_thread = pcpu->current_thread;
1002 
1003     /* forbid any recursive entries of schedule() */
1004     SCHEDULER_ENTER_CRITICAL(current_thread);
1005 
1006     SCHED_THREAD_PREPROCESS_SIGNAL(pcpu, current_thread);
1007 
1008     /* any pending scheduling existed? */
1009     if (pcpu->irq_switch_flag == 0)
1010     {
1011         /* if no, just continue execution of current_thread */
1012         SCHEDULER_EXIT_CRITICAL(current_thread);
1013         rt_hw_local_irq_enable(level);
1014         return;
1015     }
1016 
1017     /* whether caller had locked the local scheduler already */
1018     if (RT_SCHED_CTX(current_thread).critical_lock_nest > 1)
1019     {
1020         SET_CRITICAL_SWITCH_FLAG(pcpu, current_thread);
1021         SCHEDULER_EXIT_CRITICAL(current_thread);
1022     }
1023     else if (rt_atomic_load(&(pcpu->irq_nest)) == 0)
1024     {
1025         /* flush critical & irq switch flag since a scheduling is done */
1026         CLR_CRITICAL_SWITCH_FLAG(pcpu, current_thread);
1027         pcpu->irq_switch_flag = 0;
1028 
1029         SCHEDULER_CONTEXT_LOCK(pcpu);
1030 
1031         /* pick the highest runnable thread, and pass the control to it */
1032         to_thread = _prepare_context_switch_locked(cpu_id, pcpu, current_thread);
1033         if (to_thread)
1034         {
1035             LOG_D("[cpu#%d] IRQ switch to priority#%d "
1036                   "thread:%.*s(sp:0x%08x), "
1037                   "from thread:%.*s(sp: 0x%08x)",
1038                   cpu_id, RT_SCHED_PRIV(to_thread).current_priority,
1039                   RT_NAME_MAX, to_thread->parent.name, to_thread->sp,
1040                   RT_NAME_MAX, current_thread->parent.name, current_thread->sp);
1041 
1042             rt_hw_context_switch_interrupt(context, (rt_ubase_t)&current_thread->sp,
1043                                            (rt_ubase_t)&to_thread->sp, to_thread);
1044         }
1045         else
1046         {
1047             /* current thread continue to take the core */
1048             SCHEDULER_CONTEXT_UNLOCK(pcpu);
1049             SCHEDULER_EXIT_CRITICAL(current_thread);
1050         }
1051     }
1052     else
1053     {
1054         SCHEDULER_EXIT_CRITICAL(current_thread);
1055     }
1056 
1057     /* leaving critical region of percpu scheduling context */
1058     rt_hw_local_irq_enable(level);
1059 }
1060 
1061 /**
1062  * @brief This function will insert a thread to the system ready queue. The state of
1063  *        thread will be set as READY and the thread will be removed from suspend queue.
1064  *
1065  * @param thread is the thread to be inserted.
1066  *
1067  * @note  Please do not invoke this function in user applications.
1068  *        Caller must hold the scheduler lock
1069  */
1070 void rt_sched_insert_thread(struct rt_thread *thread)
1071 {
1072     RT_ASSERT(thread != RT_NULL);
1073     RT_SCHED_DEBUG_IS_LOCKED;
1074 
1075     /* set READY and insert thread to ready queue */
1076     _sched_insert_thread_locked(thread);
1077 }
1078 
1079 /**
1080  * @brief This function will remove a thread from system ready queue.
1081  *
1082  * @param thread is the thread to be removed.
1083  *
1084  * @note  Please do not invoke this function in user applications.
1085  */
1086 void rt_sched_remove_thread(struct rt_thread *thread)
1087 {
1088     RT_ASSERT(thread != RT_NULL);
1089     RT_SCHED_DEBUG_IS_LOCKED;
1090 
1091     /* remove thread from scheduler ready list  */
1092     _sched_remove_thread_locked(thread);
1093 
1094     RT_SCHED_CTX(thread).stat = RT_THREAD_SUSPEND_UNINTERRUPTIBLE;
1095 }
1096 
1097 /**
1098  * @brief Initialize thread's scheduling private data
1099  *
1100  * @param thread The thread to be initialized
1101  * @param tick Initial time slice value for the thread
1102  * @param priority Initial priority of the thread
1103  *
1104  * @details This function performs the following initialization:
1105  *   - Initializes thread's ready list node
1106  *   - Sets initial and current priority (must be < RT_THREAD_PRIORITY_MAX)
1107  *   - Initializes priority bitmasks (handles >32 priorities if needed)
1108  *   - Sets initial time slice values
1109  *   - For SMP systems, initializes critical section nesting counter
1110  */
1111 void rt_sched_thread_init_priv(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority)
1112 {
1113     rt_list_init(&RT_THREAD_LIST_NODE(thread));
1114 
1115     /* priority init */
1116     RT_ASSERT(priority < RT_THREAD_PRIORITY_MAX);
1117     RT_SCHED_PRIV(thread).init_priority    = priority;
1118     RT_SCHED_PRIV(thread).current_priority = priority;
1119 
1120     /* don't add to scheduler queue as init thread */
1121     RT_SCHED_PRIV(thread).number_mask = 0;
1122 #if RT_THREAD_PRIORITY_MAX > 32
1123     RT_SCHED_PRIV(thread).number = 0;
1124     RT_SCHED_PRIV(thread).high_mask = 0;
1125 #endif /* RT_THREAD_PRIORITY_MAX > 32 */
1126 
1127     /* tick init */
1128     RT_SCHED_PRIV(thread).init_tick = tick;
1129     RT_SCHED_PRIV(thread).remaining_tick = tick;
1130 
1131 #ifdef RT_USING_SMP
1132 
1133     /* lock init */
1134     RT_SCHED_CTX(thread).critical_lock_nest = 0;
1135 #endif /* RT_USING_SMP */
1136 
1137 }
1138 
1139 /**
1140  * @brief Initialize thread scheduling attributes for startup
1141  *
1142  * @param thread The thread to be initialized
1143  *
1144  * @details This function:
1145  *   - For systems with >32 priorities:
1146  *     * Sets priority number (5 bits)
1147  *     * Initializes number_mask and high_mask (3 bits)
1148  *   - For systems with <=32 priorities:
1149  *     * Initializes number_mask
1150  *   - Sets thread state to SUSPEND
1151  *
1152  * @note This is a lockless operation as it's called during thread creation
1153  *       when no concurrent access is possible
1154  */
1155 void rt_sched_thread_startup(struct rt_thread *thread)
1156 {
1157 #if RT_THREAD_PRIORITY_MAX > 32
1158     RT_SCHED_PRIV(thread).number = RT_SCHED_PRIV(thread).current_priority >> 3;            /* 5bit */
1159     RT_SCHED_PRIV(thread).number_mask = 1L << RT_SCHED_PRIV(thread).number;
1160     RT_SCHED_PRIV(thread).high_mask = 1L << (RT_SCHED_PRIV(thread).current_priority & 0x07);  /* 3bit */
1161 #else
1162     RT_SCHED_PRIV(thread).number_mask = 1L << RT_SCHED_PRIV(thread).current_priority;
1163 #endif /* RT_THREAD_PRIORITY_MAX > 32 */
1164 
1165     /* change thread stat, so we can resume it */
1166     RT_SCHED_CTX(thread).stat = RT_THREAD_SUSPEND;
1167 }
1168 
1169 /**
1170  * @brief Update thread scheduling status after context switch
1171  *
1172  * @param thread The thread that will be running after the context switch
1173  *
1174  * @details This function performs critical post-context-switch operations:
1175  *   - Verifies interrupts are disabled (RT_ASSERT)
1176  *   - Releases scheduler lock from previous thread if exists
1177  *   - Updates current thread pointer for the CPU
1178  *   - Must be called with interrupts disabled
1179  *
1180  * @note this operation is treated as atomic together with the update of the SP.
1181  *       Since the local irq is disabled, it's okay to assume that the stack
1182  *       will not be modified meanwhile.
1183  */
1184 void rt_sched_post_ctx_switch(struct rt_thread *thread)
1185 {
1186     struct rt_cpu* pcpu = rt_cpu_self();
1187     rt_thread_t from_thread = pcpu->current_thread;
1188 
1189     RT_ASSERT(rt_hw_interrupt_is_disabled());
1190 
1191     if (from_thread)
1192     {
1193         RT_ASSERT(RT_SCHED_CTX(from_thread).critical_lock_nest == 1);
1194 
1195         /* release the scheduler lock since we are done with critical region */
1196         RT_SCHED_CTX(from_thread).critical_lock_nest = 0;
1197         SCHEDULER_CONTEXT_UNLOCK(pcpu);
1198     }
1199     /* safe to access since irq is masked out */
1200     pcpu->current_thread = thread;
1201 }
1202 
1203 #ifdef RT_DEBUGING_CRITICAL
1204 
1205 static volatile int _critical_error_occurred = 0;
1206 
1207 /**
1208  * @brief Safely exit critical section with level checking
1209  *
1210  * @param critical_level The expected critical level to match
1211  *
1212  * @details This function provides a safe way to exit critical sections by:
1213  *   - Verifying the current critical level matches the expected level
1214  *   - If mismatch detected (debug build only):
1215  *     * Prints error message with current and expected levels
1216  *     * Triggers backtrace for debugging
1217  *     * Enters infinite loop to halt execution
1218  *   - Always calls rt_exit_critical() to ensure critical section is exited
1219  *
1220  * @note This is primarily used for debugging critical section mismatches.
1221  */
1222 void rt_exit_critical_safe(rt_base_t critical_level)
1223 {
1224     struct rt_cpu *pcpu = rt_cpu_self();
1225     rt_thread_t current_thread = pcpu->current_thread;
1226     if (current_thread && !_critical_error_occurred)
1227     {
1228         if (critical_level != RT_SCHED_CTX(current_thread).critical_lock_nest)
1229         {
1230             int dummy = 1;
1231             _critical_error_occurred = 1;
1232 
1233             rt_kprintf("%s: un-compatible critical level\n" \
1234                        "\tCurrent %d\n\tCaller %d\n",
1235                        __func__, RT_SCHED_CTX(current_thread).critical_lock_nest,
1236                        critical_level);
1237             rt_backtrace();
1238 
1239             while (dummy) ;
1240         }
1241     }
1242     rt_exit_critical();
1243 }
1244 
1245 #else /* !RT_DEBUGING_CRITICAL */
1246 
1247 /**
1248  * @brief Safely exit critical section (non-debug version)
1249  *
1250  * @param critical_level The expected critical level (unused in non-debug build)
1251  *
1252  * @details This is the non-debug version that simply calls rt_exit_critical().
1253  *          The critical_level parameter is ignored in this implementation.
1254  */
1255 void rt_exit_critical_safe(rt_base_t critical_level)
1256 {
1257     RT_UNUSED(critical_level);
1258     return rt_exit_critical();
1259 }
1260 
1261 #endif /* RT_DEBUGING_CRITICAL */
1262 RTM_EXPORT(rt_exit_critical_safe);
1263 
1264 #ifdef ARCH_USING_HW_THREAD_SELF
1265 #define FREE_THREAD_SELF(lvl)
1266 
1267 #else /* !ARCH_USING_HW_THREAD_SELF */
1268 #define FREE_THREAD_SELF(lvl)        \
1269     do                               \
1270     {                                \
1271         rt_hw_local_irq_enable(lvl); \
1272     } while (0)
1273 
1274 #endif /* ARCH_USING_HW_THREAD_SELF */
1275 
1276 /**
1277  * @brief Enter a critical section and lock the scheduler
1278  *
1279  * @return The critical level after entering (nesting count)
1280  *         Returns -RT_EINVAL if scheduler is unavailable
1281  *
1282  * @details This function:
1283  *   - Disables interrupts to ensure atomic operation
1284  *   - Gets current thread context
1285  *   - Increments critical section nesting counter
1286  *   - Returns the new nesting level
1287  *   - Handles both hardware-assisted and software thread context cases
1288  *
1289  * @note Must be paired with rt_exit_critical()
1290  *       Can be nested, each call must have matching exit call
1291  */
1292 rt_base_t rt_enter_critical(void)
1293 {
1294     rt_base_t critical_level;
1295     struct rt_thread *current_thread;
1296 
1297 #ifndef ARCH_USING_HW_THREAD_SELF
1298     rt_base_t level;
1299     struct rt_cpu *pcpu;
1300 
1301     /* disable interrupt */
1302     level = rt_hw_local_irq_disable();
1303 
1304     pcpu = rt_cpu_self();
1305     current_thread = pcpu->current_thread;
1306 
1307 #else /* !ARCH_USING_HW_THREAD_SELF */
1308     current_thread = rt_hw_thread_self();
1309 
1310 #endif /* ARCH_USING_HW_THREAD_SELF */
1311 
1312     if (!current_thread)
1313     {
1314         FREE_THREAD_SELF(level);
1315         /* scheduler unavailable */
1316         return -RT_EINVAL;
1317     }
1318 
1319     /* critical for local cpu */
1320     RT_SCHED_CTX(current_thread).critical_lock_nest++;
1321     critical_level = RT_SCHED_CTX(current_thread).critical_lock_nest;
1322 
1323     FREE_THREAD_SELF(level);
1324 
1325     return critical_level;
1326 }
1327 RTM_EXPORT(rt_enter_critical);
1328 
1329 /**
1330  * @brief Exit a critical section and unlock the scheduler
1331  *
1332  * @details This function performs the following operations:
1333  *   - Gets current thread context (using hardware-assisted or software method)
1334  *   - Decrements critical section nesting counter
1335  *   - If nesting level reaches 0:
1336  *     * Checks if rescheduling is needed
1337  *     * Clears critical switch flag
1338  *     * Performs rescheduling if needed
1339  *   - Verifies critical section nesting is valid (debug assertion)
1340  *   - Restores interrupt state
1341  *
1342  * @note Must be called in pair with rt_enter_critical()
1343  *       Handles both hardware-assisted and software thread context cases
1344  *       May trigger rescheduling if conditions met
1345  */
1346 void rt_exit_critical(void)
1347 {
1348     struct rt_thread *current_thread;
1349     rt_bool_t need_resched;
1350 
1351 #ifndef ARCH_USING_HW_THREAD_SELF
1352     rt_base_t level;
1353     struct rt_cpu *pcpu;
1354 
1355     /* disable interrupt */
1356     level = rt_hw_local_irq_disable();
1357 
1358     pcpu = rt_cpu_self();
1359     current_thread = pcpu->current_thread;
1360 
1361 #else /* !ARCH_USING_HW_THREAD_SELF */
1362     current_thread = rt_hw_thread_self();
1363 
1364 #endif /* ARCH_USING_HW_THREAD_SELF */
1365 
1366     if (!current_thread)
1367     {
1368         FREE_THREAD_SELF(level);
1369         return;
1370     }
1371 
1372     /* the necessary memory barrier is done on irq_(dis|en)able */
1373     RT_SCHED_CTX(current_thread).critical_lock_nest--;
1374 
1375     /* may need a rescheduling */
1376     if (RT_SCHED_CTX(current_thread).critical_lock_nest == 0)
1377     {
1378         /* is there any scheduling request unfinished? */
1379         need_resched = IS_CRITICAL_SWITCH_PEND(pcpu, current_thread);
1380         CLR_CRITICAL_SWITCH_FLAG(pcpu, current_thread);
1381 
1382         FREE_THREAD_SELF(level);
1383 
1384         if (need_resched)
1385             rt_schedule();
1386     }
1387     else
1388     {
1389         /* each exit_critical is strictly corresponding to an enter_critical */
1390         RT_ASSERT(RT_SCHED_CTX(current_thread).critical_lock_nest > 0);
1391 
1392         FREE_THREAD_SELF(level);
1393     }
1394 }
1395 RTM_EXPORT(rt_exit_critical);
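
/*
 * Example (illustrative sketch): a local critical section that disables
 * preemption on this core while leaving interrupts enabled; the level returned
 * by rt_enter_critical() can be handed to rt_exit_critical_safe() to catch
 * unbalanced nesting in debug builds:
 *
 *     rt_base_t critical_level;
 *
 *     critical_level = rt_enter_critical();
 *     // touch data shared with other threads on this core
 *     rt_exit_critical_safe(critical_level);
 */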
1396 
1397 /**
1398  * @brief Get the scheduler lock level.
1399  *
1400  * @return the level of the scheduler lock. 0 means unlocked.
1401  */
1402 rt_uint16_t rt_critical_level(void)
1403 {
1404     rt_base_t level;
1405     rt_uint16_t critical_lvl;
1406     struct rt_thread *current_thread;
1407 
1408     level = rt_hw_local_irq_disable();
1409 
1410     current_thread = rt_cpu_self()->current_thread;
1411 
1412     if (current_thread)
1413     {
1414         /* the necessary memory barrier is done on irq_(dis|en)able */
1415         critical_lvl = RT_SCHED_CTX(current_thread).critical_lock_nest;
1416     }
1417     else
1418     {
1419         critical_lvl = 0;
1420     }
1421 
1422     rt_hw_local_irq_enable(level);
1423     return critical_lvl;
1424 }
1425 RTM_EXPORT(rt_critical_level);
1426 
1427 /**
1428  * @brief Bind a thread to a specific CPU core
1429  *
1430  * @param thread The thread to be bound
1431  * @param cpu The target CPU core index (RT_CPUS_NR for no binding)
1432  *
1433  * @return rt_err_t
1434  *   - RT_EOK: Operation successful
1435  *
1436  * @details This function handles thread-CPU binding with the following logic:
1437  *   - If thread is READY:
1438  *     * Removes from current ready queue
1439  *     * Updates bind CPU information
1440  *     * Inserts to new ready queue
1441  *     * Triggers rescheduling if needed
1442  *   - If thread is RUNNING:
1443  *     * Updates bind CPU information
1444  *     * Sends IPI to target CPU if binding changed
1445  *     * Triggers rescheduling if needed
1446  *   - For other states, just updates bind CPU info
1447  *
1448  * @note Caller must ensure scheduler is not locked before calling
1449  *       This function will acquire scheduler lock internally
1450  */
1451 rt_err_t rt_sched_thread_bind_cpu(struct rt_thread *thread, int cpu)
1452 {
1453     rt_sched_lock_level_t slvl;
1454     rt_uint8_t thread_stat;
1455 
1456     RT_SCHED_DEBUG_IS_UNLOCKED;
1457 
1458     if (cpu >= RT_CPUS_NR)
1459     {
1460         cpu = RT_CPUS_NR;
1461     }
1462 
1463     rt_sched_lock(&slvl);
1464 
1465     thread_stat = rt_sched_thread_get_stat(thread);
1466 
1467     if (thread_stat == RT_THREAD_READY)
1468     {
1469         /* unbind */
1470         /* remove from old ready queue */
1471         rt_sched_remove_thread(thread);
1472         /* change thread bind cpu */
1473         RT_SCHED_CTX(thread).bind_cpu = cpu;
1474         /* add to new ready queue */
1475         rt_sched_insert_thread(thread);
1476 
1477         if (rt_thread_self() != RT_NULL)
1478         {
1479             rt_sched_unlock_n_resched(slvl);
1480         }
1481         else
1482         {
1483             rt_sched_unlock(slvl);
1484         }
1485     }
1486     else
1487     {
1488         RT_SCHED_CTX(thread).bind_cpu = cpu;
1489         if (thread_stat == RT_THREAD_RUNNING)
1490         {
1491             /* thread is running on a cpu */
1492             int current_cpu = rt_hw_cpu_id();
1493 
1494             if (cpu != RT_CPUS_NR)
1495             {
1496                 if (RT_SCHED_CTX(thread).oncpu == current_cpu)
1497                 {
1498                     /* current thread on current cpu */
1499                     if (cpu != current_cpu)
1500                     {
1501                         /* bind to other cpu */
1502                         rt_hw_ipi_send(RT_SCHEDULE_IPI, 1U << cpu);
1503                         /* self cpu need reschedule */
1504                         rt_sched_unlock_n_resched(slvl);
1505                     }
1506                     else
1507                     {
1508                         /* else do nothing */
1509                         rt_sched_unlock(slvl);
1510                     }
1511                 }
1512                 else
1513                 {
1514                     /* not running on this cpu, but the destination cpu can be itself */
1515                     rt_hw_ipi_send(RT_SCHEDULE_IPI, 1U << RT_SCHED_CTX(thread).oncpu);
1516                     rt_sched_unlock(slvl);
1517                 }
1518             }
1519             else
1520             {
1521                 /* else do nothing */
1522                 rt_sched_unlock(slvl);
1523             }
1524         }
1525         else
1526         {
1527             rt_sched_unlock(slvl);
1528         }
1529     }
1530 
1531     return RT_EOK;
1532 }
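
/*
 * Example (illustrative sketch): application code normally reaches this
 * function through the thread control interface rather than calling it
 * directly ("tid" below is a placeholder thread handle):
 *
 *     // pin the thread to core 1 (assuming RT_CPUS_NR > 1)
 *     rt_thread_control(tid, RT_THREAD_CTRL_BIND_CPU, (void *)1);
 */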
1533 
1534 /**@}*/
1535 /**@endcond*/